mindstudio-probe 1.0.3__py3-none-any.whl → 1.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (262)
  1. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.0.4.dist-info}/LICENSE +201 -201
  2. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.0.4.dist-info}/METADATA +36 -34
  3. mindstudio_probe-1.0.4.dist-info/RECORD +276 -0
  4. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.0.4.dist-info}/WHEEL +1 -1
  5. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.0.4.dist-info}/entry_points.txt +1 -0
  6. msprobe/README.md +101 -237
  7. msprobe/{config/config.json → config.json} +49 -49
  8. msprobe/core/advisor/advisor.py +124 -124
  9. msprobe/core/advisor/advisor_const.py +59 -59
  10. msprobe/core/advisor/advisor_result.py +58 -58
  11. msprobe/core/common/const.py +341 -318
  12. msprobe/core/common/exceptions.py +99 -99
  13. msprobe/core/common/{file_check.py → file_utils.py} +478 -283
  14. msprobe/core/common/log.py +76 -69
  15. msprobe/core/common/utils.py +385 -616
  16. msprobe/core/common_config.py +85 -71
  17. msprobe/core/compare/acc_compare.py +299 -298
  18. msprobe/core/compare/check.py +95 -95
  19. msprobe/core/compare/compare_cli.py +49 -49
  20. msprobe/core/compare/highlight.py +223 -222
  21. msprobe/core/compare/multiprocessing_compute.py +149 -149
  22. msprobe/core/compare/npy_compare.py +295 -295
  23. msprobe/core/compare/utils.py +430 -429
  24. msprobe/core/data_dump/data_collector.py +154 -144
  25. msprobe/core/data_dump/data_processor/base.py +314 -293
  26. msprobe/core/data_dump/data_processor/factory.py +59 -59
  27. msprobe/core/data_dump/data_processor/mindspore_processor.py +186 -198
  28. msprobe/core/data_dump/data_processor/pytorch_processor.py +366 -389
  29. msprobe/core/data_dump/json_writer.py +96 -116
  30. msprobe/core/data_dump/scope.py +178 -178
  31. msprobe/core/grad_probe/constant.py +70 -70
  32. msprobe/core/grad_probe/grad_compare.py +171 -175
  33. msprobe/core/grad_probe/utils.py +64 -52
  34. msprobe/docs/01.installation.md +89 -0
  35. msprobe/docs/02.config_introduction.md +165 -0
  36. msprobe/docs/03.config_examples.md +247 -0
  37. msprobe/docs/04.acl_config_examples.md +76 -0
  38. msprobe/docs/05.data_dump_PyTorch.md +198 -0
  39. msprobe/docs/06.data_dump_MindSpore.md +243 -0
  40. msprobe/docs/07.accuracy_checker_PyTorch.md +274 -0
  41. msprobe/docs/08.accuracy_checker_online_PyTorch.md +198 -0
  42. msprobe/docs/09.accuracy_checker_MindSpore.md +68 -0
  43. msprobe/docs/10.accuracy_compare_PyTorch.md +245 -0
  44. msprobe/docs/11.accuracy_compare_MindSpore.md +202 -0
  45. msprobe/docs/12.overflow_check_PyTorch.md +79 -0
  46. msprobe/docs/13.overflow_check_MindSpore.md +31 -0
  47. msprobe/{pytorch/doc/parse_tool.md → docs/14.data_parse_PyTorch.md} +283 -286
  48. msprobe/docs/15.free_benchmarking_PyTorch.md +164 -0
  49. msprobe/{doc/grad_probe/grad_probe.md → docs/17.grad_probe.md} +207 -207
  50. msprobe/docs/FAQ_PyTorch.md +177 -0
  51. msprobe/docs/S02.report_free_benchmarking_validation_performance_baseline.md +146 -0
  52. msprobe/docs/img/free_benchmark_framework.png +0 -0
  53. msprobe/mindspore/__init__.py +1 -1
  54. msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +254 -245
  55. msprobe/mindspore/api_accuracy_checker/api_info.py +69 -69
  56. msprobe/mindspore/api_accuracy_checker/api_runner.py +155 -151
  57. msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +196 -196
  58. msprobe/mindspore/api_accuracy_checker/cmd_parser.py +6 -0
  59. msprobe/mindspore/api_accuracy_checker/compute_element.py +238 -223
  60. msprobe/mindspore/api_accuracy_checker/main.py +8 -15
  61. msprobe/mindspore/api_accuracy_checker/type_mapping.py +113 -113
  62. msprobe/mindspore/api_accuracy_checker/utils.py +79 -62
  63. msprobe/mindspore/cell_processor.py +34 -34
  64. msprobe/mindspore/common/const.py +106 -87
  65. msprobe/mindspore/common/log.py +37 -37
  66. msprobe/mindspore/common/utils.py +81 -57
  67. msprobe/mindspore/compare/distributed_compare.py +75 -75
  68. msprobe/mindspore/compare/ms_compare.py +219 -117
  69. msprobe/mindspore/compare/ms_graph_compare.py +348 -317
  70. msprobe/mindspore/compare/ms_to_pt_api.yaml +399 -399
  71. msprobe/mindspore/debugger/debugger_config.py +66 -74
  72. msprobe/mindspore/debugger/precision_debugger.py +126 -107
  73. msprobe/mindspore/dump/dump_tool_factory.py +35 -35
  74. msprobe/mindspore/dump/hook_cell/api_registry.py +118 -104
  75. msprobe/mindspore/dump/hook_cell/hook_cell.py +55 -53
  76. msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +922 -925
  77. msprobe/mindspore/dump/hook_cell/wrap_api.py +113 -0
  78. msprobe/mindspore/dump/jit_dump.py +72 -56
  79. msprobe/mindspore/dump/kernel_graph_dump.py +59 -60
  80. msprobe/mindspore/dump/kernel_kbyk_dump.py +64 -65
  81. msprobe/mindspore/free_benchmark/api_pynative_self_check.py +116 -116
  82. msprobe/mindspore/free_benchmark/common/config.py +12 -12
  83. msprobe/mindspore/free_benchmark/common/handler_params.py +17 -17
  84. msprobe/mindspore/free_benchmark/common/utils.py +71 -71
  85. msprobe/mindspore/free_benchmark/data/support_wrap_ops.yaml +842 -842
  86. msprobe/mindspore/free_benchmark/decorator/dec_forward.py +43 -42
  87. msprobe/mindspore/free_benchmark/decorator/decorator_factory.py +107 -107
  88. msprobe/mindspore/free_benchmark/handler/base_handler.py +90 -90
  89. msprobe/mindspore/free_benchmark/handler/check_handler.py +41 -41
  90. msprobe/mindspore/free_benchmark/handler/fix_handler.py +36 -36
  91. msprobe/mindspore/free_benchmark/handler/handler_factory.py +21 -21
  92. msprobe/mindspore/free_benchmark/perturbation/add_noise.py +67 -67
  93. msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py +21 -21
  94. msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +63 -63
  95. msprobe/mindspore/free_benchmark/perturbation/exchange_value.py +51 -0
  96. msprobe/mindspore/free_benchmark/perturbation/improve_precision.py +35 -34
  97. msprobe/mindspore/free_benchmark/perturbation/no_change.py +12 -12
  98. msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +29 -27
  99. msprobe/mindspore/free_benchmark/self_check_tool_factory.py +33 -33
  100. msprobe/mindspore/grad_probe/global_context.py +90 -91
  101. msprobe/mindspore/grad_probe/grad_analyzer.py +231 -231
  102. msprobe/mindspore/grad_probe/grad_monitor.py +27 -27
  103. msprobe/mindspore/grad_probe/grad_stat_csv.py +131 -131
  104. msprobe/mindspore/grad_probe/hook.py +94 -92
  105. msprobe/mindspore/grad_probe/utils.py +29 -28
  106. msprobe/mindspore/ms_config.py +128 -126
  107. msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +44 -45
  108. msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +34 -34
  109. msprobe/mindspore/runtime.py +4 -4
  110. msprobe/mindspore/service.py +378 -354
  111. msprobe/mindspore/task_handler_factory.py +24 -24
  112. msprobe/msprobe.py +105 -107
  113. msprobe/pytorch/__init__.py +3 -3
  114. msprobe/pytorch/api_accuracy_checker/common/config.py +53 -55
  115. msprobe/pytorch/api_accuracy_checker/common/utils.py +214 -165
  116. msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +213 -213
  117. msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +606 -581
  118. msprobe/pytorch/api_accuracy_checker/compare/api_precision_standard.yaml +132 -132
  119. msprobe/pytorch/api_accuracy_checker/compare/api_precision_threshold.yaml +390 -390
  120. msprobe/pytorch/api_accuracy_checker/compare/compare.py +386 -381
  121. msprobe/pytorch/api_accuracy_checker/compare/compare_column.py +73 -73
  122. msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +245 -244
  123. msprobe/pytorch/api_accuracy_checker/config.yaml +10 -10
  124. msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +335 -332
  125. msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +200 -199
  126. msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +133 -134
  127. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +592 -581
  128. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +70 -74
  129. msprobe/pytorch/api_accuracy_checker/run_ut/torch_ut_setting.json +7 -4
  130. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +197 -202
  131. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/client.py +325 -324
  132. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/device_dispatch.py +204 -204
  133. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/server.py +219 -218
  134. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/ssl_config.py +10 -10
  135. msprobe/pytorch/bench_functions/__init__.py +15 -15
  136. msprobe/pytorch/bench_functions/apply_adam_w.py +28 -28
  137. msprobe/pytorch/bench_functions/confusion_transpose.py +19 -19
  138. msprobe/pytorch/bench_functions/fast_gelu.py +55 -55
  139. msprobe/pytorch/bench_functions/layer_norm_eval.py +6 -6
  140. msprobe/pytorch/bench_functions/linear.py +12 -12
  141. msprobe/pytorch/bench_functions/matmul_backward.py +48 -48
  142. msprobe/pytorch/bench_functions/npu_fusion_attention.py +509 -421
  143. msprobe/pytorch/bench_functions/rms_norm.py +15 -15
  144. msprobe/pytorch/bench_functions/rotary_mul.py +52 -52
  145. msprobe/pytorch/bench_functions/scaled_mask_softmax.py +26 -26
  146. msprobe/pytorch/bench_functions/swiglu.py +55 -55
  147. msprobe/pytorch/common/__init__.py +2 -2
  148. msprobe/pytorch/common/compare_script.template +14 -14
  149. msprobe/pytorch/common/log.py +20 -31
  150. msprobe/pytorch/common/parse_json.py +39 -39
  151. msprobe/pytorch/common/utils.py +305 -300
  152. msprobe/pytorch/compare/distributed_compare.py +66 -66
  153. msprobe/pytorch/compare/mapping.yaml +607 -607
  154. msprobe/pytorch/compare/match.py +34 -33
  155. msprobe/pytorch/compare/pt_compare.py +50 -40
  156. msprobe/pytorch/debugger/debugger_config.py +95 -95
  157. msprobe/pytorch/debugger/precision_debugger.py +125 -125
  158. msprobe/pytorch/free_benchmark/__init__.py +8 -8
  159. msprobe/pytorch/free_benchmark/common/constant.py +70 -70
  160. msprobe/pytorch/free_benchmark/common/counter.py +71 -71
  161. msprobe/pytorch/free_benchmark/common/enums.py +37 -37
  162. msprobe/pytorch/free_benchmark/common/params.py +129 -129
  163. msprobe/pytorch/free_benchmark/common/utils.py +102 -102
  164. msprobe/pytorch/free_benchmark/compare/grad_saver.py +179 -179
  165. msprobe/pytorch/free_benchmark/compare/single_benchmark.py +104 -104
  166. msprobe/pytorch/free_benchmark/main.py +105 -105
  167. msprobe/pytorch/free_benchmark/perturbed_layers/base_layer.py +13 -13
  168. msprobe/pytorch/free_benchmark/perturbed_layers/layer_factory.py +41 -41
  169. msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +90 -90
  170. msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +104 -104
  171. msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +63 -63
  172. msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +68 -68
  173. msprobe/pytorch/free_benchmark/perturbed_layers/npu/no_change.py +28 -28
  174. msprobe/pytorch/free_benchmark/perturbed_layers/npu/npu_base_layser.py +45 -45
  175. msprobe/pytorch/free_benchmark/perturbed_layers/run_cpu.py +19 -19
  176. msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +217 -217
  177. msprobe/pytorch/free_benchmark/result_handlers/check_handler.py +39 -39
  178. msprobe/pytorch/free_benchmark/result_handlers/fix_handler.py +23 -23
  179. msprobe/pytorch/free_benchmark/result_handlers/handler_factory.py +30 -30
  180. msprobe/pytorch/free_benchmark/result_handlers/preheat_handler.py +170 -170
  181. msprobe/pytorch/function_factory.py +76 -75
  182. msprobe/pytorch/functional/dump_module.py +39 -39
  183. msprobe/pytorch/grad_probe/grad_monitor.py +91 -90
  184. msprobe/pytorch/grad_probe/grad_stat_csv.py +128 -128
  185. msprobe/pytorch/hook_module/api_registry.py +161 -161
  186. msprobe/pytorch/hook_module/hook_module.py +120 -120
  187. msprobe/pytorch/hook_module/support_wrap_ops.yaml +1879 -1877
  188. msprobe/pytorch/hook_module/utils.py +30 -29
  189. msprobe/pytorch/hook_module/wrap_aten.py +110 -110
  190. msprobe/pytorch/hook_module/wrap_distributed.py +78 -78
  191. msprobe/pytorch/hook_module/wrap_functional.py +105 -105
  192. msprobe/pytorch/hook_module/wrap_npu_custom.py +93 -84
  193. msprobe/pytorch/hook_module/wrap_tensor.py +71 -71
  194. msprobe/pytorch/hook_module/wrap_torch.py +86 -86
  195. msprobe/pytorch/hook_module/wrap_vf.py +62 -62
  196. msprobe/pytorch/module_processer.py +138 -138
  197. msprobe/pytorch/online_dispatch/__init__.py +20 -20
  198. msprobe/pytorch/online_dispatch/compare.py +236 -236
  199. msprobe/pytorch/online_dispatch/dispatch.py +271 -271
  200. msprobe/pytorch/online_dispatch/dump_compare.py +155 -156
  201. msprobe/pytorch/online_dispatch/single_compare.py +391 -391
  202. msprobe/pytorch/online_dispatch/torch_ops_config.yaml +49 -49
  203. msprobe/pytorch/online_dispatch/utils.py +130 -146
  204. msprobe/pytorch/parse.py +4 -4
  205. msprobe/pytorch/parse_tool/cli.py +32 -32
  206. msprobe/pytorch/parse_tool/lib/compare.py +260 -271
  207. msprobe/pytorch/parse_tool/lib/config.py +52 -52
  208. msprobe/pytorch/parse_tool/lib/file_desc.py +31 -31
  209. msprobe/pytorch/parse_tool/lib/interactive_cli.py +102 -102
  210. msprobe/pytorch/parse_tool/lib/parse_exception.py +54 -54
  211. msprobe/pytorch/parse_tool/lib/parse_tool.py +158 -158
  212. msprobe/pytorch/parse_tool/lib/utils.py +316 -321
  213. msprobe/pytorch/parse_tool/lib/visualization.py +85 -91
  214. msprobe/pytorch/pt_config.py +188 -187
  215. msprobe/pytorch/service.py +246 -252
  216. mindstudio_probe-1.0.3.dist-info/RECORD +0 -272
  217. msprobe/config/README.md +0 -539
  218. msprobe/mindspore/doc/compare.md +0 -58
  219. msprobe/mindspore/doc/dump.md +0 -217
  220. msprobe/mindspore/dump/hook_cell/wrap_functional.py +0 -91
  221. msprobe/mindspore/dump/hook_cell/wrap_tensor.py +0 -63
  222. msprobe/pytorch/doc/FAQ.md +0 -193
  223. msprobe/pytorch/doc/api_accuracy_checker.md +0 -313
  224. msprobe/pytorch/doc/api_accuracy_checker_online.md +0 -187
  225. msprobe/pytorch/doc/dump.md +0 -260
  226. msprobe/pytorch/doc/msprobe/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/206/320/245/342/226/221/321/206/320/235/320/276dump/321/206/320/260/320/227/321/205/320/227/320/226/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -182
  227. msprobe/pytorch/doc/ptdbg_ascend_compare.md +0 -240
  228. msprobe/pytorch/doc/ptdbg_ascend_overview.md +0 -68
  229. msprobe/pytorch/doc/ptdbg_ascend_quickstart.md +0 -381
  230. msprobe/pytorch/doc/run_overflow_check.md +0 -25
  231. msprobe/pytorch/doc//321/205/320/254/320/270/321/207/342/225/221/342/224/220/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/206/320/277/320/244/321/205/320/277/342/225/243.md +0 -90
  232. msprobe/pytorch/doc//321/206/320/247/320/260/321/206/320/260/320/227/321/206/320/255/320/226/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/205/320/254/342/225/221/321/206/320/251/320/277/321/211/320/272/320/234/321/210/320/277/320/221/321/205/320/242/320/234/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -151
  233. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.0.4.dist-info}/top_level.txt +0 -0
  234. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_1.png +0 -0
  235. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_2.png +0 -0
  236. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_3.png +0 -0
  237. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_4.png +0 -0
  238. /msprobe/{pytorch/doc → docs}/img/GPT-3_1.png +0 -0
  239. /msprobe/{pytorch/doc → docs}/img/GPT-3_2.png +0 -0
  240. /msprobe/{pytorch/doc → docs}/img/GPT-3_3.png +0 -0
  241. /msprobe/{pytorch/doc → docs}/img/GPT-3_4.png +0 -0
  242. /msprobe/{pytorch/doc → docs}/img/GPT-3_5.png +0 -0
  243. /msprobe/{pytorch/doc → docs}/img/GPT-3_6.png +0 -0
  244. /msprobe/{pytorch/doc → docs}/img/GPT-3_7.png +0 -0
  245. /msprobe/{pytorch/doc → docs}/img/GPT-3_8.png +0 -0
  246. /msprobe/{pytorch/doc → docs}/img/YOLOV5S_1.png +0 -0
  247. /msprobe/{pytorch/doc → docs}/img/YOLOV5S_2.png +0 -0
  248. /msprobe/{pytorch/doc → docs}/img/accuracy_checking_details.png +0 -0
  249. /msprobe/{pytorch/doc → docs}/img/accuracy_checking_result.png +0 -0
  250. /msprobe/{pytorch/doc → docs}/img/api_precision_compare_details.png +0 -0
  251. /msprobe/{pytorch/doc → docs}/img/api_precision_compare_result.png +0 -0
  252. /msprobe/{pytorch/doc → docs}/img/auto_analyze_log.png +0 -0
  253. /msprobe/{pytorch/doc → docs}/img/compare_result_pkl.png +0 -0
  254. /msprobe/{pytorch/doc → docs}/img/compare_result_pkl_md5.png.png +0 -0
  255. /msprobe/{pytorch/doc → docs}/img/cpu_info.png +0 -0
  256. /msprobe/{config → docs}/img/free_benchmark.png +0 -0
  257. /msprobe/{doc/grad_probe/img/image-1.png → docs/img/grad_probe_image-1.png} +0 -0
  258. /msprobe/{doc/grad_probe/img/image-2.png → docs/img/grad_probe_image-2.png} +0 -0
  259. /msprobe/{doc/grad_probe/img/image-3.png → docs/img/grad_probe_image-3.png} +0 -0
  260. /msprobe/{doc/grad_probe/img/image-4.png → docs/img/grad_probe_image-4.png} +0 -0
  261. /msprobe/{doc/grad_probe/img/image.png → docs/img/grad_probe_image.png} +0 -0
  262. /msprobe/{pytorch/doc → docs}/img/module_compare.png +0 -0
@@ -1,272 +1,272 @@
1
- import os
2
- import time
3
- import json
4
- from pathlib import Path
5
- from multiprocessing import Manager, Pool
6
-
7
- import torch
8
-
9
- from torch.utils._python_dispatch import TorchDispatchMode
10
-
11
- try:
12
- import torch_npu
13
- except ImportError:
14
- is_npu = False
15
- else:
16
- is_npu = True
17
-
18
- from msprobe.core.common.utils import check_file_or_directory_path, check_path_before_create, load_yaml
19
- from msprobe.core.common.const import Const, CompareConst
20
- from msprobe.pytorch.common.log import logger
21
- from .dump_compare import dispatch_workflow, dispatch_multiprocess, error_call, TimeStatistics, \
22
- DispatchRunParam, DisPatchDataInfo
23
- from .utils import get_callstack, data_to_cpu, get_sys_info, DispatchException, COMPARE_LOGO
24
- from .compare import Comparator
25
-
26
-
27
- current_time = time.strftime("%Y%m%d%H%M%S")
28
- RESULT_FILE_NAME = "accuracy_checking_result_" + current_time + ".csv"
29
- DETAILS_FILE_NAME = "accuracy_checking_details_" + current_time + ".csv"
30
-
31
-
32
- class PtdbgDispatch(TorchDispatchMode):
33
- def __init__(self, dump_mode=Const.OFF, api_list=None, debug=False, dump_path=None, tag=None, process_num=0):
34
- super(PtdbgDispatch, self).__init__()
35
- logger.info(COMPARE_LOGO)
36
- if not is_npu:
37
- logger.error("Please confirm you run environment installed torch_npu!")
38
- return
39
- if dump_path is None:
40
- logger.error("Please set dump_path when dump_mode is config!")
41
- check_file_or_directory_path(dump_path, True)
42
-
43
- self.device_id = torch_npu._C._npu_getDevice()
44
- self.dump_mode = dump_mode
45
- self.dump_api_list = api_list
46
- self.debug_flag = debug
47
- self.api_index = 0
48
- self.single_api_index_dict = {}
49
- self.device_dump_path_cpu = None
50
- self.device_dump_path_npu = None
51
- self.all_summary = []
52
- self.call_stack_list = []
53
- self.process_num = process_num
54
- self.filter_dump_api()
55
- self.check_param()
56
- dir_name = self.get_dir_name(tag)
57
- self.root_path = os.path.join(os.path.realpath(dump_path), dir_name)
58
- self.root_cpu_path = os.path.join(self.root_path, f'cpu')
59
- self.root_npu_path = os.path.join(self.root_path, f'npu')
60
- check_path_before_create(self.root_cpu_path)
61
- check_path_before_create(self.root_npu_path)
62
- Path(self.root_cpu_path).mkdir(mode=0o750, parents=True, exist_ok=True)
63
- Path(self.root_npu_path).mkdir(mode=0o750, parents=True, exist_ok=True)
64
-
65
- self.result_csv_path = os.path.join(self.root_path, RESULT_FILE_NAME)
66
- self.detail_csv_path = os.path.join(self.root_path, DETAILS_FILE_NAME)
67
- self.comparator = Comparator(self.result_csv_path, self.detail_csv_path, False)
68
-
69
- self.aten_ops_blacklist = []
70
- self.npu_adjust_autogard = []
71
- yaml_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "torch_ops_config.yaml")
72
- self.get_ops(yaml_path)
73
-
74
- self.lock = None
75
- if process_num > 0:
76
- self.pool = Pool(process_num)
77
- if debug:
78
- logger.info(f'Main pid:{os.getpid()} device:{self.device_id} dump_list:{self.dump_api_list} '
79
- f'dump_mode:{self.dump_mode} cpu_path[{self.root_cpu_path}], npu_path[{self.root_npu_path}], '
80
- f'process[{process_num}]')
81
-
82
- def __exit__(self, exc_type, exc_val, exc_tb):
83
- super().__exit__(exc_type, exc_val, exc_tb)
84
-
85
- if not is_npu:
86
- return
87
- logger.info(f'start write compare csv: Rank[{self.device_id}], Pid[{os.getpid()}')
88
-
89
- if self.process_num > 0:
90
- self.pool.close()
91
- self.pool.join()
92
- summary_path = os.path.join(self.root_cpu_path, f'summary.json')
93
- if not os.path.exists(summary_path):
94
- logger.error("Please check train log, An exception may have occurred!")
95
- return
96
- check_file_or_directory_path(summary_path, False)
97
- fp_handle = open(summary_path, "r")
98
- while True:
99
- json_line_data = fp_handle.readline()
100
- if json_line_data == '\n':
101
- continue
102
- if len(json_line_data) == 0:
103
- break
104
- msg = json.loads(json_line_data)
105
- self.all_summary[msg[0]] = msg[1]
106
- fp_handle.close()
107
-
108
- if self.debug_flag:
109
- input_num = 0
110
- output_num = 0
111
- total_num = 0
112
-
113
- for list_data in self.all_summary:
114
- for data in list_data:
115
- logger.info(f'summary: Device[{self.device_id}], Pid[{os.getpid()}], Data[{data}]')
116
- if "_input" in data[CompareConst.NPU_NAME]:
117
- input_num = input_num + 1
118
- if "_output" in data[CompareConst.NPU_NAME]:
119
- output_num = output_num + 1
120
- total_num = total_num + 1
121
- logger.info(f'Dispatch exit: Device[{self.device_id}], Pid[{os.getpid()} Input[{input_num}] '
122
- f'Output[{output_num}] Total[{total_num}] API_Total[{self.api_index}]]')
123
-
124
- def __torch_dispatch__(self, func, types, args=(), kwargs=None):
125
- if not is_npu:
126
- logger.error("Please confirm you run environment installed torch_npu!")
127
- return func(*args, **kwargs)
128
-
129
- func_name_split_list = func.__name__.split(".")
130
- aten_api = func_name_split_list[0]
131
- try:
132
- aten_api_overload_name = func_name_split_list[1]
133
- except IndexError:
134
- logger.error(f"Please check the func name {func.__name__}!")
135
- return func(*args, **kwargs)
136
-
137
- self.enable_autogard(aten_api)
138
- if aten_api in self.aten_ops_blacklist:
139
- npu_out = func(*args, **kwargs)
140
- return npu_out
141
-
142
- call_stack = get_callstack()
143
- self.call_stack_list.append(call_stack)
144
- self.api_index += 1
145
- if aten_api not in self.single_api_index_dict:
146
- self.single_api_index_dict[aten_api] = 1
147
- else:
148
- self.single_api_index_dict[aten_api] += 1
149
-
150
- run_param = self.get_run_param(aten_api, func.__name__, aten_api_overload_name)
151
-
152
- if self.debug_flag:
153
- logger.info(f'Dispatch Info: Rank[{self.device_id}], Pid[{os.getpid()}], Func[{func.__name__}], '
154
- f'Name[{run_param.aten_api}_{run_param.single_api_index}], '
155
- f'Count[{self.api_index}], Sys[{get_sys_info()}]')
156
-
157
- cpu_args = []
158
- cpu_kwargs = []
159
- data_to_cpu(args, 0, cpu_args)
160
- data_to_cpu(kwargs, 0, cpu_kwargs)
161
- cpu_args = cpu_args[0]
162
- cpu_kwargs = cpu_kwargs[0]
163
-
164
- with TimeStatistics("NPU RUN", run_param):
165
- npu_out = func(*args, **kwargs)
166
- npu_out_cpu = []
167
- data_to_cpu(npu_out, 0, npu_out_cpu)
168
- npu_out_cpu = npu_out_cpu[0]
169
-
170
- with TimeStatistics("CPU RUN", run_param):
171
- cpu_out = func(*cpu_args, **cpu_kwargs)
172
-
173
- if isinstance(cpu_out, torch.Tensor) and cpu_out.dtype in [torch.bfloat16, torch.float16, torch.half]:
174
- cpu_out = cpu_out.float()
175
-
176
- if self.process_num == 0:
177
- self.all_summary.append([])
178
- data_info = DisPatchDataInfo(cpu_args, cpu_kwargs, self.all_summary, func, npu_out_cpu, cpu_out, self.lock)
179
- dispatch_workflow(run_param, data_info)
180
- else:
181
- self.lock.acquire()
182
- self.all_summary.append([])
183
- self.lock.release()
184
- run_param.process_flag = True
185
- if self.check_fun(func, run_param):
186
- data_info = DisPatchDataInfo(cpu_args, cpu_kwargs, self.all_summary, None, npu_out_cpu, cpu_out,
187
- self.lock)
188
- self.pool.apply_async(func=dispatch_multiprocess, args=(run_param, data_info),
189
- error_callback=error_call)
190
- else:
191
- logger.error("can not get correct function please set process_num=0")
192
- return npu_out
193
-
194
- @staticmethod
195
- def check_fun(func, run_param):
196
- if hasattr(torch.ops.aten, run_param.aten_api):
197
- aten_func = getattr(torch.ops.aten, run_param.aten_api)
198
- if hasattr(aten_func, run_param.aten_api_overload_name):
199
- aten_overload_func = getattr(aten_func, run_param.aten_api_overload_name)
200
- if id(aten_overload_func) == id(func):
201
- run_param.func_namespace = "aten"
202
- return True
203
- return False
204
-
205
- def get_dir_name(self, tag):
206
- # guarantee file uniqueness
207
- time.sleep(1)
208
- time_now = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
209
- if tag is None or not isinstance(tag, str):
210
- logger.warning('There is not tag or the type of tag is not string.')
211
- dir_name = f'msprobe_rank{self.device_id}_{time_now}'
212
- else:
213
- dir_name = f'msprobe_{tag}_rank{self.device_id}_{time_now}'
214
- return dir_name
215
-
216
- def get_ops(self, file_path):
217
- yaml_file = load_yaml(file_path)
218
- self.aten_ops_blacklist = yaml_file.get('aten_ops_blacklist')
219
- self.npu_adjust_autogard = yaml_file.get('npu_adjust_autogard')
220
-
221
- def filter_dump_api(self):
222
- if self.dump_mode != Const.LIST or not self.dump_api_list:
223
- self.dump_api_list = []
224
- return
225
- aten_api_list = dir(torch.ops.aten)
226
- dump_api_list = []
227
- for aten_api in self.dump_api_list:
228
- if aten_api in aten_api_list:
229
- dump_api_list.append(aten_api)
230
- else:
231
- logger.warning(f'{aten_api} is not aten api will not dump, please refer to torch.ops.aten')
232
- self.dump_api_list = dump_api_list
233
-
234
- def get_run_param(self, aten_api, func_name, aten_api_overload_name):
235
- run_param = DispatchRunParam(self.debug_flag, self.device_id, self.root_npu_path, self.root_cpu_path,
236
- self.process_num, self.comparator)
237
- run_param.dump_flag, run_param.auto_dump_flag = self.get_dump_flag(aten_api)
238
- run_param.func_name = func_name
239
- run_param.aten_api = aten_api
240
- run_param.aten_api_overload_name = aten_api_overload_name
241
- run_param.single_api_index = self.single_api_index_dict[aten_api]
242
- run_param.api_index = self.api_index
243
- return run_param
244
-
245
- def get_dump_flag(self, aten_api):
246
- dump_flag = False
247
- auto_dump_flag = False
248
- if self.dump_mode == Const.ALL:
249
- dump_flag = True
250
- if self.dump_mode == Const.LIST and aten_api in self.dump_api_list:
251
- dump_flag = True
252
- if self.dump_mode == Const.AUTO:
253
- auto_dump_flag = True
254
- return dump_flag, auto_dump_flag
255
-
256
- def check_param(self):
257
- if self.dump_mode not in Const.ONLINE_DUMP_MODE:
258
- logger.error('The parameter "dump mode" can only be one of {}.'.format(Const.ONLINE_DUMP_MODE))
259
- raise DispatchException(DispatchException.INVALID_PARAMETER)
260
- if not isinstance(self.dump_api_list, list):
261
- logger.error('The type of parameter "api_list" can only be list.')
262
- raise DispatchException(DispatchException.INVALID_PARAMETER)
263
- if not isinstance(self.debug_flag, bool):
264
- logger.error('The type of parameter "debug" can only be bool.')
265
- raise DispatchException(DispatchException.INVALID_PARAMETER)
266
- if not isinstance(self.process_num, int) or self.process_num < 0:
267
- logger.error('The type of parameter "process_num" can only be int and it should not be less than 0.')
268
- raise DispatchException(DispatchException.INVALID_PARAMETER)
269
-
270
- def enable_autogard(self, aten_api):
271
- if aten_api in self.npu_adjust_autogard:
1
+ import os
2
+ import time
3
+ import json
4
+ from multiprocessing import Pool
5
+
6
+ import torch
7
+
8
+ from torch.utils._python_dispatch import TorchDispatchMode
9
+
10
+ try:
11
+ import torch_npu
12
+ except ImportError:
13
+ is_npu = False
14
+ else:
15
+ is_npu = True
16
+
17
+ from msprobe.core.common.file_utils import check_path_before_create, check_file_or_directory_path, load_yaml
18
+ from msprobe.core.common.const import Const, CompareConst
19
+ from msprobe.pytorch.common.log import logger
20
+ from msprobe.pytorch.online_dispatch.dump_compare import dispatch_workflow, dispatch_multiprocess, error_call, TimeStatistics, \
21
+ DispatchRunParam, DisPatchDataInfo
22
+ from msprobe.pytorch.online_dispatch.utils import get_callstack, data_to_cpu, get_sys_info, DispatchException, COMPARE_LOGO
23
+ from msprobe.pytorch.online_dispatch.compare import Comparator
24
+ from msprobe.core.common.file_utils import FileOpen, create_directory
25
+
26
+
27
+ current_time = time.strftime("%Y%m%d%H%M%S")
28
+ RESULT_FILE_NAME = "accuracy_checking_result_" + current_time + ".csv"
29
+ DETAILS_FILE_NAME = "accuracy_checking_details_" + current_time + ".csv"
30
+
31
+
32
class PtdbgDispatch(TorchDispatchMode):
    """Dispatch mode that replays every intercepted aten call on CPU and feeds
    the NPU/CPU result pair to the accuracy comparator.

    Output is written under ``dump_path`` in a per-run directory holding a
    ``cpu``/``npu`` dump tree plus a result and a details csv. With
    ``process_num > 0`` the comparison work is handed to a process pool.
    """

    def __init__(self, dump_mode=Const.OFF, api_list=None, debug=False, dump_path=None, tag=None, process_num=0):
        """Set up dump directories, the comparator and the op filter lists.

        Args:
            dump_mode: one of ``Const.ONLINE_DUMP_MODE`` (validated in check_param).
            api_list: aten api names to dump when dump_mode is ``Const.LIST``.
            debug: emit verbose per-dispatch logging when True.
            dump_path: existing root directory for all dump output; required.
            tag: optional string embedded in the dump directory name.
            process_num: size of the comparison process pool; 0 runs inline.

        Raises:
            DispatchException: when dump_path is missing or another parameter
                fails validation.
        """
        super().__init__()
        logger.info(COMPARE_LOGO)
        if not is_npu:
            logger.error("Please confirm you run environment installed torch_npu!")
            return
        if dump_path is None:
            logger.error("Please set dump_path when dump_mode is config!")
            # Fix: previously execution fell through with dump_path=None and
            # crashed inside check_file_or_directory_path with an unrelated
            # error; fail fast instead, consistent with check_param().
            raise DispatchException(DispatchException.INVALID_PARAMETER)
        check_file_or_directory_path(dump_path, True)

        self.device_id = torch_npu._C._npu_getDevice()
        self.dump_mode = dump_mode
        self.dump_api_list = api_list
        self.debug_flag = debug
        # Running counter over all dispatched apis and a per-api counter.
        self.api_index = 0
        self.single_api_index_dict = {}
        self.device_dump_path_cpu = None
        self.device_dump_path_npu = None
        # One summary entry per dispatched api; filled by the workflow helpers.
        self.all_summary = []
        self.call_stack_list = []
        self.process_num = process_num
        self.filter_dump_api()
        self.check_param()
        dir_name = self.get_dir_name(tag)
        self.root_path = os.path.join(os.path.realpath(dump_path), dir_name)
        self.root_cpu_path = os.path.join(self.root_path, 'cpu')
        self.root_npu_path = os.path.join(self.root_path, 'npu')
        check_path_before_create(self.root_cpu_path)
        check_path_before_create(self.root_npu_path)
        create_directory(self.root_cpu_path)
        create_directory(self.root_npu_path)

        self.result_csv_path = os.path.join(self.root_path, RESULT_FILE_NAME)
        self.detail_csv_path = os.path.join(self.root_path, DETAILS_FILE_NAME)
        self.comparator = Comparator(self.result_csv_path, self.detail_csv_path, False)

        # Populated from torch_ops_config.yaml next to this module.
        self.aten_ops_blacklist = []
        self.npu_adjust_autogard = []
        yaml_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "torch_ops_config.yaml")
        self.get_ops(yaml_path)

        self.lock = None
        if process_num > 0:
            self.pool = Pool(process_num)
        if debug:
            logger.info(f'Main pid:{os.getpid()} device:{self.device_id} dump_list:{self.dump_api_list} '
                        f'dump_mode:{self.dump_mode} cpu_path[{self.root_cpu_path}], npu_path[{self.root_npu_path}], '
                        f'process[{process_num}]')
81
+
82
+ def __exit__(self, exc_type, exc_val, exc_tb):
83
+ super().__exit__(exc_type, exc_val, exc_tb)
84
+
85
+ if not is_npu:
86
+ return
87
+ logger.info(f'start write compare csv: Rank[{self.device_id}], Pid[{os.getpid()}')
88
+
89
+ if self.process_num > 0:
90
+ self.pool.close()
91
+ self.pool.join()
92
+ summary_path = os.path.join(self.root_cpu_path, f'summary.json')
93
+ if not os.path.exists(summary_path):
94
+ logger.error("Please check train log, An exception may have occurred!")
95
+ return
96
+ check_file_or_directory_path(summary_path, False)
97
+ fp_handle = FileOpen(summary_path, "r")
98
+ while True:
99
+ json_line_data = fp_handle.readline()
100
+ if json_line_data == '\n':
101
+ continue
102
+ if len(json_line_data) == 0:
103
+ break
104
+ msg = json.loads(json_line_data)
105
+ self.all_summary[msg[0]] = msg[1]
106
+ fp_handle.close()
107
+
108
+ if self.debug_flag:
109
+ input_num = 0
110
+ output_num = 0
111
+ total_num = 0
112
+
113
+ for list_data in self.all_summary:
114
+ for data in list_data:
115
+ logger.info(f'summary: Device[{self.device_id}], Pid[{os.getpid()}], Data[{data}]')
116
+ if "_input" in data[CompareConst.NPU_NAME]:
117
+ input_num = input_num + 1
118
+ if "_output" in data[CompareConst.NPU_NAME]:
119
+ output_num = output_num + 1
120
+ total_num = total_num + 1
121
+ logger.info(f'Dispatch exit: Device[{self.device_id}], Pid[{os.getpid()} Input[{input_num}] '
122
+ f'Output[{output_num}] Total[{total_num}] API_Total[{self.api_index}]]')
123
+
124
    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        """Intercept one aten call: run it on NPU, replay it on CPU, and hand
        both results to the comparison workflow (inline or via the pool).

        Returns the NPU result so the intercepted program is unaffected.
        """
        if not is_npu:
            logger.error("Please confirm you run environment installed torch_npu!")
            return func(*args, **kwargs)

        # func.__name__ looks like "<api>.<overload>", e.g. "add.Tensor".
        func_name_split_list = func.__name__.split(".")
        aten_api = func_name_split_list[0]
        try:
            aten_api_overload_name = func_name_split_list[1]
        except IndexError:
            logger.error(f"Please check the func name {func.__name__}!")
            return func(*args, **kwargs)

        self.enable_autogard(aten_api)
        # Blacklisted ops are executed untouched — no dump, no comparison.
        if aten_api in self.aten_ops_blacklist:
            npu_out = func(*args, **kwargs)
            return npu_out

        call_stack = get_callstack()
        self.call_stack_list.append(call_stack)
        # Global counter plus a per-api occurrence counter used for naming.
        self.api_index += 1
        if aten_api not in self.single_api_index_dict:
            self.single_api_index_dict[aten_api] = 1
        else:
            self.single_api_index_dict[aten_api] += 1

        run_param = self.get_run_param(aten_api, func.__name__, aten_api_overload_name)

        if self.debug_flag:
            logger.info(f'Dispatch Info: Rank[{self.device_id}], Pid[{os.getpid()}], Func[{func.__name__}], '
                        f'Name[{run_param.aten_api}_{run_param.single_api_index}], '
                        f'Count[{self.api_index}], Sys[{get_sys_info()}]')

        # data_to_cpu appears to append a CPU copy of its first argument into
        # the given list (helper defined elsewhere) — hence the [0] unwrap.
        cpu_args = []
        cpu_kwargs = []
        data_to_cpu(args, 0, cpu_args)
        data_to_cpu(kwargs, 0, cpu_kwargs)
        cpu_args = cpu_args[0]
        cpu_kwargs = cpu_kwargs[0]

        with TimeStatistics("NPU RUN", run_param):
            npu_out = func(*args, **kwargs)
        npu_out_cpu = []
        data_to_cpu(npu_out, 0, npu_out_cpu)
        npu_out_cpu = npu_out_cpu[0]

        with TimeStatistics("CPU RUN", run_param):
            cpu_out = func(*cpu_args, **cpu_kwargs)

        # Upcast half-precision CPU reference to float32 before comparison.
        if isinstance(cpu_out, torch.Tensor) and cpu_out.dtype in [torch.bfloat16, torch.float16, torch.half]:
            cpu_out = cpu_out.float()

        if self.process_num == 0:
            # Inline path: reserve a summary slot and compare synchronously.
            self.all_summary.append([])
            data_info = DisPatchDataInfo(cpu_args, cpu_kwargs, self.all_summary, func, npu_out_cpu, cpu_out, self.lock)
            dispatch_workflow(run_param, data_info)
        else:
            # Pool path: the summary slot is reserved under the lock; the slot
            # index is implied by the current list length, so the append must
            # not race with workers.
            self.lock.acquire()
            self.all_summary.append([])
            self.lock.release()
            run_param.process_flag = True
            # func itself is not picklable; the worker re-resolves it from
            # torch.ops.aten, which check_fun verifies is possible.
            if self.check_fun(func, run_param):
                data_info = DisPatchDataInfo(cpu_args, cpu_kwargs, self.all_summary, None, npu_out_cpu, cpu_out,
                                             self.lock)
                self.pool.apply_async(func=dispatch_multiprocess, args=(run_param, data_info),
                                      error_callback=error_call)
            else:
                logger.error("can not get correct function please set process_num=0")
        return npu_out
193
+
194
+ @staticmethod
195
+ def check_fun(func, run_param):
196
+ if hasattr(torch.ops.aten, run_param.aten_api):
197
+ aten_func = getattr(torch.ops.aten, run_param.aten_api)
198
+ if hasattr(aten_func, run_param.aten_api_overload_name):
199
+ aten_overload_func = getattr(aten_func, run_param.aten_api_overload_name)
200
+ if id(aten_overload_func) == id(func):
201
+ run_param.func_namespace = "aten"
202
+ return True
203
+ return False
204
+
205
+ def get_dir_name(self, tag):
206
+ # guarantee file uniqueness
207
+ time.sleep(1)
208
+ time_now = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
209
+ if tag is None or not isinstance(tag, str):
210
+ logger.warning('There is not tag or the type of tag is not string.')
211
+ dir_name = f'msprobe_rank{self.device_id}_{time_now}'
212
+ else:
213
+ dir_name = f'msprobe_{tag}_rank{self.device_id}_{time_now}'
214
+ return dir_name
215
+
216
+ def get_ops(self, file_path):
217
+ yaml_file = load_yaml(file_path)
218
+ self.aten_ops_blacklist = yaml_file.get('aten_ops_blacklist')
219
+ self.npu_adjust_autogard = yaml_file.get('npu_adjust_autogard')
220
+
221
+ def filter_dump_api(self):
222
+ if self.dump_mode != Const.LIST or not self.dump_api_list:
223
+ self.dump_api_list = []
224
+ return
225
+ aten_api_list = dir(torch.ops.aten)
226
+ dump_api_list = []
227
+ for aten_api in self.dump_api_list:
228
+ if aten_api in aten_api_list:
229
+ dump_api_list.append(aten_api)
230
+ else:
231
+ logger.warning(f'{aten_api} is not aten api will not dump, please refer to torch.ops.aten')
232
+ self.dump_api_list = dump_api_list
233
+
234
+ def get_run_param(self, aten_api, func_name, aten_api_overload_name):
235
+ run_param = DispatchRunParam(self.debug_flag, self.device_id, self.root_npu_path, self.root_cpu_path,
236
+ self.process_num, self.comparator)
237
+ run_param.dump_flag, run_param.auto_dump_flag = self.get_dump_flag(aten_api)
238
+ run_param.func_name = func_name
239
+ run_param.aten_api = aten_api
240
+ run_param.aten_api_overload_name = aten_api_overload_name
241
+ run_param.single_api_index = self.single_api_index_dict[aten_api]
242
+ run_param.api_index = self.api_index
243
+ return run_param
244
+
245
+ def get_dump_flag(self, aten_api):
246
+ dump_flag = False
247
+ auto_dump_flag = False
248
+ if self.dump_mode == Const.ALL:
249
+ dump_flag = True
250
+ if self.dump_mode == Const.LIST and aten_api in self.dump_api_list:
251
+ dump_flag = True
252
+ if self.dump_mode == Const.AUTO:
253
+ auto_dump_flag = True
254
+ return dump_flag, auto_dump_flag
255
+
256
+ def check_param(self):
257
+ if self.dump_mode not in Const.ONLINE_DUMP_MODE:
258
+ logger.error('The parameter "dump mode" can only be one of {}.'.format(Const.ONLINE_DUMP_MODE))
259
+ raise DispatchException(DispatchException.INVALID_PARAMETER)
260
+ if not isinstance(self.dump_api_list, list):
261
+ logger.error('The type of parameter "api_list" can only be list.')
262
+ raise DispatchException(DispatchException.INVALID_PARAMETER)
263
+ if not isinstance(self.debug_flag, bool):
264
+ logger.error('The type of parameter "debug" can only be bool.')
265
+ raise DispatchException(DispatchException.INVALID_PARAMETER)
266
+ if not isinstance(self.process_num, int) or self.process_num < 0:
267
+ logger.error('The type of parameter "process_num" can only be int and it should not be less than 0.')
268
+ raise DispatchException(DispatchException.INVALID_PARAMETER)
269
+
270
+ def enable_autogard(self, aten_api):
271
+ if aten_api in self.npu_adjust_autogard:
272
272
  torch._C._dispatch_tls_set_dispatch_key_excluded(torch._C.DispatchKey.AutogradFunctionality, False)