mindstudio-probe 1.0.3__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (278)
  1. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/LICENSE +201 -201
  2. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/METADATA +36 -34
  3. mindstudio_probe-1.1.0.dist-info/RECORD +287 -0
  4. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/WHEEL +1 -1
  5. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/entry_points.txt +1 -0
  6. msprobe/README.md +131 -237
  7. msprobe/__init__.py +16 -1
  8. msprobe/{config/config.json → config.json} +47 -49
  9. msprobe/core/advisor/advisor.py +124 -124
  10. msprobe/core/advisor/advisor_const.py +58 -59
  11. msprobe/core/advisor/advisor_result.py +58 -58
  12. msprobe/core/common/const.py +402 -318
  13. msprobe/core/common/exceptions.py +99 -99
  14. msprobe/core/common/{file_check.py → file_utils.py} +523 -283
  15. msprobe/core/common/inplace_op_checker.py +38 -0
  16. msprobe/core/common/inplace_ops.yaml +251 -0
  17. msprobe/core/common/log.py +86 -69
  18. msprobe/core/common/utils.py +371 -616
  19. msprobe/core/common_config.py +78 -71
  20. msprobe/core/compare/acc_compare.py +472 -298
  21. msprobe/core/compare/check.py +180 -95
  22. msprobe/core/compare/compare_cli.py +69 -49
  23. msprobe/core/compare/highlight.py +259 -222
  24. msprobe/core/compare/multiprocessing_compute.py +174 -149
  25. msprobe/core/compare/npy_compare.py +310 -295
  26. msprobe/core/compare/utils.py +464 -429
  27. msprobe/core/data_dump/data_collector.py +153 -144
  28. msprobe/core/data_dump/data_processor/base.py +337 -293
  29. msprobe/core/data_dump/data_processor/factory.py +76 -59
  30. msprobe/core/data_dump/data_processor/mindspore_processor.py +192 -198
  31. msprobe/core/data_dump/data_processor/pytorch_processor.py +383 -389
  32. msprobe/core/data_dump/json_writer.py +117 -116
  33. msprobe/core/data_dump/scope.py +194 -178
  34. msprobe/core/grad_probe/constant.py +74 -70
  35. msprobe/core/grad_probe/grad_compare.py +170 -175
  36. msprobe/core/grad_probe/utils.py +77 -52
  37. msprobe/docs/01.installation.md +99 -0
  38. msprobe/docs/02.config_introduction.md +137 -0
  39. msprobe/docs/03.config_examples.md +237 -0
  40. msprobe/docs/04.acl_config_examples.md +78 -0
  41. msprobe/docs/05.data_dump_PyTorch.md +326 -0
  42. msprobe/docs/06.data_dump_MindSpore.md +285 -0
  43. msprobe/docs/07.accuracy_checker_PyTorch.md +297 -0
  44. msprobe/docs/08.accuracy_checker_online_PyTorch.md +238 -0
  45. msprobe/docs/09.accuracy_checker_MindSpore.md +68 -0
  46. msprobe/docs/10.accuracy_compare_PyTorch.md +327 -0
  47. msprobe/docs/11.accuracy_compare_MindSpore.md +333 -0
  48. msprobe/docs/12.overflow_check_PyTorch.md +79 -0
  49. msprobe/docs/13.overflow_check_MindSpore.md +31 -0
  50. msprobe/{pytorch/doc/parse_tool.md → docs/14.data_parse_PyTorch.md} +283 -286
  51. msprobe/docs/15.free_benchmarking_PyTorch.md +170 -0
  52. msprobe/docs/16.free_benchmarking_MindSpore.md +140 -0
  53. msprobe/{doc/grad_probe/grad_probe.md → docs/17.grad_probe.md} +205 -207
  54. msprobe/{pytorch/doc//321/205/320/254/320/270/321/207/342/225/221/342/224/220/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/206/320/277/320/244/321/205/320/277/342/225/243.md → docs/18.online_dispatch.md} +89 -90
  55. msprobe/docs/FAQ.md +189 -0
  56. msprobe/docs/S02.report_free_benchmarking_validation_performance_baseline.md +146 -0
  57. msprobe/docs/img/free_benchmark_framework.png +0 -0
  58. msprobe/docs/img/ms_dump.png +0 -0
  59. msprobe/docs/img/ms_layer.png +0 -0
  60. msprobe/docs/img/pt_dump.png +0 -0
  61. msprobe/mindspore/__init__.py +2 -1
  62. msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +278 -245
  63. msprobe/mindspore/api_accuracy_checker/api_info.py +76 -69
  64. msprobe/mindspore/api_accuracy_checker/api_runner.py +155 -151
  65. msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +196 -196
  66. msprobe/mindspore/api_accuracy_checker/cmd_parser.py +6 -0
  67. msprobe/mindspore/api_accuracy_checker/compute_element.py +238 -223
  68. msprobe/mindspore/api_accuracy_checker/main.py +8 -15
  69. msprobe/mindspore/api_accuracy_checker/type_mapping.py +113 -113
  70. msprobe/mindspore/api_accuracy_checker/utils.py +79 -62
  71. msprobe/mindspore/cell_processor.py +58 -34
  72. msprobe/mindspore/common/const.py +108 -87
  73. msprobe/mindspore/common/log.py +37 -37
  74. msprobe/mindspore/common/utils.py +97 -57
  75. msprobe/mindspore/compare/distributed_compare.py +62 -75
  76. msprobe/mindspore/compare/layer_mapping.py +146 -0
  77. msprobe/mindspore/compare/modify_mapping.py +107 -0
  78. msprobe/mindspore/compare/ms_compare.py +357 -117
  79. msprobe/mindspore/compare/ms_graph_compare.py +364 -317
  80. msprobe/mindspore/compare/ms_to_pt_api.yaml +399 -399
  81. msprobe/mindspore/debugger/debugger_config.py +69 -74
  82. msprobe/mindspore/debugger/precision_debugger.py +150 -107
  83. msprobe/mindspore/dump/dump_tool_factory.py +50 -35
  84. msprobe/mindspore/dump/hook_cell/api_registry.py +128 -104
  85. msprobe/mindspore/dump/hook_cell/hook_cell.py +55 -53
  86. msprobe/mindspore/dump/hook_cell/primitive_hooks.py +206 -0
  87. msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +994 -925
  88. msprobe/mindspore/dump/hook_cell/wrap_api.py +121 -0
  89. msprobe/mindspore/dump/jit_dump.py +96 -56
  90. msprobe/mindspore/dump/kernel_graph_dump.py +75 -60
  91. msprobe/mindspore/dump/kernel_kbyk_dump.py +79 -65
  92. msprobe/mindspore/free_benchmark/api_pynative_self_check.py +131 -116
  93. msprobe/mindspore/free_benchmark/common/config.py +27 -12
  94. msprobe/mindspore/free_benchmark/common/handler_params.py +32 -17
  95. msprobe/mindspore/free_benchmark/common/utils.py +85 -71
  96. msprobe/mindspore/free_benchmark/data/support_wrap_ops.yaml +842 -842
  97. msprobe/mindspore/free_benchmark/decorator/dec_forward.py +57 -42
  98. msprobe/mindspore/free_benchmark/decorator/decorator_factory.py +122 -107
  99. msprobe/mindspore/free_benchmark/handler/base_handler.py +105 -90
  100. msprobe/mindspore/free_benchmark/handler/check_handler.py +56 -41
  101. msprobe/mindspore/free_benchmark/handler/fix_handler.py +51 -36
  102. msprobe/mindspore/free_benchmark/handler/handler_factory.py +36 -21
  103. msprobe/mindspore/free_benchmark/perturbation/add_noise.py +82 -67
  104. msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py +36 -21
  105. msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +78 -63
  106. msprobe/mindspore/free_benchmark/perturbation/exchange_value.py +77 -0
  107. msprobe/mindspore/free_benchmark/perturbation/improve_precision.py +49 -34
  108. msprobe/mindspore/free_benchmark/perturbation/no_change.py +27 -12
  109. msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +44 -27
  110. msprobe/mindspore/free_benchmark/self_check_tool_factory.py +48 -33
  111. msprobe/mindspore/grad_probe/global_context.py +100 -91
  112. msprobe/mindspore/grad_probe/grad_analyzer.py +231 -231
  113. msprobe/mindspore/grad_probe/grad_monitor.py +27 -27
  114. msprobe/mindspore/grad_probe/grad_stat_csv.py +131 -131
  115. msprobe/mindspore/grad_probe/hook.py +94 -92
  116. msprobe/mindspore/grad_probe/utils.py +29 -28
  117. msprobe/mindspore/ms_config.py +128 -126
  118. msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +60 -45
  119. msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +49 -34
  120. msprobe/mindspore/runtime.py +4 -4
  121. msprobe/mindspore/service.py +297 -354
  122. msprobe/mindspore/task_handler_factory.py +24 -24
  123. msprobe/msprobe.py +105 -107
  124. msprobe/pytorch/__init__.py +23 -4
  125. msprobe/pytorch/api_accuracy_checker/common/config.py +70 -55
  126. msprobe/pytorch/api_accuracy_checker/common/utils.py +246 -165
  127. msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +230 -213
  128. msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +632 -581
  129. msprobe/pytorch/api_accuracy_checker/compare/api_precision_standard.yaml +132 -132
  130. msprobe/pytorch/api_accuracy_checker/compare/api_precision_threshold.yaml +390 -390
  131. msprobe/pytorch/api_accuracy_checker/compare/compare.py +416 -381
  132. msprobe/pytorch/api_accuracy_checker/compare/compare_column.py +90 -73
  133. msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +265 -244
  134. msprobe/pytorch/api_accuracy_checker/config.yaml +10 -10
  135. msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +370 -332
  136. msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +221 -199
  137. msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +150 -134
  138. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +518 -581
  139. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +213 -74
  140. msprobe/pytorch/api_accuracy_checker/run_ut/torch_ut_setting.json +7 -4
  141. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +218 -202
  142. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/client.py +370 -324
  143. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/device_dispatch.py +227 -204
  144. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/dump_dispatch.py +110 -0
  145. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/server.py +244 -218
  146. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/torch_ops_config.yaml +63 -0
  147. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/utils.py +44 -0
  148. msprobe/pytorch/bench_functions/__init__.py +30 -15
  149. msprobe/pytorch/bench_functions/apply_adam_w.py +43 -28
  150. msprobe/pytorch/bench_functions/confusion_transpose.py +34 -19
  151. msprobe/pytorch/bench_functions/fast_gelu.py +70 -55
  152. msprobe/pytorch/bench_functions/layer_norm_eval.py +21 -6
  153. msprobe/pytorch/bench_functions/linear.py +27 -12
  154. msprobe/pytorch/bench_functions/matmul_backward.py +63 -48
  155. msprobe/pytorch/bench_functions/npu_fusion_attention.py +538 -421
  156. msprobe/pytorch/bench_functions/rms_norm.py +30 -15
  157. msprobe/pytorch/bench_functions/rotary_mul.py +71 -52
  158. msprobe/pytorch/bench_functions/scaled_mask_softmax.py +41 -26
  159. msprobe/pytorch/bench_functions/swiglu.py +70 -55
  160. msprobe/pytorch/common/__init__.py +17 -2
  161. msprobe/pytorch/common/compare_script.template +14 -14
  162. msprobe/pytorch/common/log.py +33 -32
  163. msprobe/pytorch/common/parse_json.py +54 -39
  164. msprobe/pytorch/common/utils.py +310 -300
  165. msprobe/pytorch/compare/distributed_compare.py +66 -66
  166. msprobe/pytorch/compare/mapping.yaml +607 -607
  167. msprobe/pytorch/compare/match.py +49 -33
  168. msprobe/pytorch/compare/pt_compare.py +82 -40
  169. msprobe/pytorch/debugger/debugger_config.py +108 -95
  170. msprobe/pytorch/debugger/precision_debugger.py +173 -125
  171. msprobe/pytorch/free_benchmark/__init__.py +23 -8
  172. msprobe/pytorch/free_benchmark/common/constant.py +70 -70
  173. msprobe/pytorch/free_benchmark/common/counter.py +71 -71
  174. msprobe/pytorch/free_benchmark/common/enums.py +65 -37
  175. msprobe/pytorch/free_benchmark/common/params.py +144 -129
  176. msprobe/pytorch/free_benchmark/common/utils.py +118 -102
  177. msprobe/pytorch/free_benchmark/compare/grad_saver.py +200 -179
  178. msprobe/pytorch/free_benchmark/compare/single_benchmark.py +119 -104
  179. msprobe/pytorch/free_benchmark/main.py +120 -105
  180. msprobe/pytorch/free_benchmark/perturbed_layers/base_layer.py +28 -13
  181. msprobe/pytorch/free_benchmark/perturbed_layers/layer_factory.py +56 -41
  182. msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +105 -90
  183. msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +119 -104
  184. msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +87 -63
  185. msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +83 -68
  186. msprobe/pytorch/free_benchmark/perturbed_layers/npu/no_change.py +43 -28
  187. msprobe/pytorch/free_benchmark/perturbed_layers/npu/npu_base_layser.py +60 -45
  188. msprobe/pytorch/free_benchmark/perturbed_layers/run_cpu.py +34 -19
  189. msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +256 -217
  190. msprobe/pytorch/free_benchmark/result_handlers/check_handler.py +54 -39
  191. msprobe/pytorch/free_benchmark/result_handlers/fix_handler.py +38 -23
  192. msprobe/pytorch/free_benchmark/result_handlers/handler_factory.py +45 -30
  193. msprobe/pytorch/free_benchmark/result_handlers/preheat_handler.py +185 -170
  194. msprobe/pytorch/function_factory.py +91 -75
  195. msprobe/pytorch/functional/module_dump.py +84 -0
  196. msprobe/pytorch/grad_probe/grad_monitor.py +91 -90
  197. msprobe/pytorch/grad_probe/grad_stat_csv.py +128 -128
  198. msprobe/pytorch/hook_module/__init__.py +16 -1
  199. msprobe/pytorch/hook_module/api_registry.py +166 -161
  200. msprobe/pytorch/hook_module/hook_module.py +118 -120
  201. msprobe/pytorch/hook_module/support_wrap_ops.yaml +1879 -1877
  202. msprobe/pytorch/hook_module/utils.py +28 -29
  203. msprobe/pytorch/hook_module/wrap_aten.py +111 -110
  204. msprobe/pytorch/hook_module/wrap_distributed.py +77 -78
  205. msprobe/pytorch/hook_module/wrap_functional.py +104 -105
  206. msprobe/pytorch/hook_module/wrap_npu_custom.py +85 -84
  207. msprobe/pytorch/hook_module/wrap_tensor.py +69 -71
  208. msprobe/pytorch/hook_module/wrap_torch.py +84 -86
  209. msprobe/pytorch/hook_module/wrap_vf.py +60 -62
  210. msprobe/pytorch/module_processer.py +153 -138
  211. msprobe/pytorch/online_dispatch/__init__.py +20 -20
  212. msprobe/pytorch/online_dispatch/compare.py +235 -236
  213. msprobe/pytorch/online_dispatch/dispatch.py +271 -271
  214. msprobe/pytorch/online_dispatch/dump_compare.py +155 -156
  215. msprobe/pytorch/online_dispatch/single_compare.py +391 -391
  216. msprobe/pytorch/online_dispatch/torch_ops_config.yaml +57 -49
  217. msprobe/pytorch/online_dispatch/utils.py +127 -146
  218. msprobe/pytorch/parse.py +19 -4
  219. msprobe/pytorch/parse_tool/cli.py +31 -32
  220. msprobe/pytorch/parse_tool/lib/compare.py +259 -271
  221. msprobe/pytorch/parse_tool/lib/config.py +52 -52
  222. msprobe/pytorch/parse_tool/lib/file_desc.py +31 -31
  223. msprobe/pytorch/parse_tool/lib/interactive_cli.py +102 -102
  224. msprobe/pytorch/parse_tool/lib/parse_exception.py +54 -54
  225. msprobe/pytorch/parse_tool/lib/parse_tool.py +161 -158
  226. msprobe/pytorch/parse_tool/lib/utils.py +320 -321
  227. msprobe/pytorch/parse_tool/lib/visualization.py +85 -91
  228. msprobe/pytorch/pt_config.py +317 -187
  229. msprobe/pytorch/service.py +311 -252
  230. mindstudio_probe-1.0.3.dist-info/RECORD +0 -272
  231. msprobe/config/README.md +0 -539
  232. msprobe/mindspore/doc/compare.md +0 -58
  233. msprobe/mindspore/doc/dump.md +0 -217
  234. msprobe/mindspore/dump/hook_cell/wrap_functional.py +0 -91
  235. msprobe/mindspore/dump/hook_cell/wrap_tensor.py +0 -63
  236. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/ssl_config.py +0 -10
  237. msprobe/pytorch/doc/FAQ.md +0 -193
  238. msprobe/pytorch/doc/api_accuracy_checker.md +0 -313
  239. msprobe/pytorch/doc/api_accuracy_checker_online.md +0 -187
  240. msprobe/pytorch/doc/dump.md +0 -260
  241. msprobe/pytorch/doc/msprobe/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/206/320/245/342/226/221/321/206/320/235/320/276dump/321/206/320/260/320/227/321/205/320/227/320/226/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -182
  242. msprobe/pytorch/doc/ptdbg_ascend_compare.md +0 -240
  243. msprobe/pytorch/doc/ptdbg_ascend_overview.md +0 -68
  244. msprobe/pytorch/doc/ptdbg_ascend_quickstart.md +0 -381
  245. msprobe/pytorch/doc/run_overflow_check.md +0 -25
  246. msprobe/pytorch/doc//321/206/320/247/320/260/321/206/320/260/320/227/321/206/320/255/320/226/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/205/320/254/342/225/221/321/206/320/251/320/277/321/211/320/272/320/234/321/210/320/277/320/221/321/205/320/242/320/234/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -151
  247. msprobe/pytorch/functional/data_processor.py +0 -0
  248. msprobe/pytorch/functional/dump_module.py +0 -39
  249. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/top_level.txt +0 -0
  250. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_1.png +0 -0
  251. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_2.png +0 -0
  252. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_3.png +0 -0
  253. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_4.png +0 -0
  254. /msprobe/{pytorch/doc → docs}/img/GPT-3_1.png +0 -0
  255. /msprobe/{pytorch/doc → docs}/img/GPT-3_2.png +0 -0
  256. /msprobe/{pytorch/doc → docs}/img/GPT-3_3.png +0 -0
  257. /msprobe/{pytorch/doc → docs}/img/GPT-3_4.png +0 -0
  258. /msprobe/{pytorch/doc → docs}/img/GPT-3_5.png +0 -0
  259. /msprobe/{pytorch/doc → docs}/img/GPT-3_6.png +0 -0
  260. /msprobe/{pytorch/doc → docs}/img/GPT-3_7.png +0 -0
  261. /msprobe/{pytorch/doc → docs}/img/GPT-3_8.png +0 -0
  262. /msprobe/{pytorch/doc → docs}/img/YOLOV5S_1.png +0 -0
  263. /msprobe/{pytorch/doc → docs}/img/YOLOV5S_2.png +0 -0
  264. /msprobe/{pytorch/doc → docs}/img/accuracy_checking_details.png +0 -0
  265. /msprobe/{pytorch/doc → docs}/img/accuracy_checking_result.png +0 -0
  266. /msprobe/{pytorch/doc → docs}/img/api_precision_compare_details.png +0 -0
  267. /msprobe/{pytorch/doc → docs}/img/api_precision_compare_result.png +0 -0
  268. /msprobe/{pytorch/doc → docs}/img/auto_analyze_log.png +0 -0
  269. /msprobe/{pytorch/doc → docs}/img/compare_result_pkl.png +0 -0
  270. /msprobe/{pytorch/doc → docs}/img/compare_result_pkl_md5.png.png +0 -0
  271. /msprobe/{pytorch/doc → docs}/img/cpu_info.png +0 -0
  272. /msprobe/{config → docs}/img/free_benchmark.png +0 -0
  273. /msprobe/{doc/grad_probe/img/image-1.png → docs/img/grad_probe_image-1.png} +0 -0
  274. /msprobe/{doc/grad_probe/img/image-2.png → docs/img/grad_probe_image-2.png} +0 -0
  275. /msprobe/{doc/grad_probe/img/image-3.png → docs/img/grad_probe_image-3.png} +0 -0
  276. /msprobe/{doc/grad_probe/img/image-4.png → docs/img/grad_probe_image-4.png} +0 -0
  277. /msprobe/{doc/grad_probe/img/image.png → docs/img/grad_probe_image.png} +0 -0
  278. /msprobe/{pytorch/doc → docs}/img/module_compare.png +0 -0
@@ -1,129 +1,129 @@
- from abc import ABC, abstractmethod
- from collections import namedtuple
- import hashlib
- import torch
- from msprobe.core.grad_probe.constant import GradConst
-
- CSV_header_input = namedtuple("CSV_header_input", ["bounds"])
- CSV_content_input = namedtuple("CSV_content_input", ["grad", "bounds"])
-
-
- class GradStatCsv:
-     csv = {}
-
-     @staticmethod
-     def generate_csv_header(level, bounds):
-         header = ["param_name"]
-         for key in level["header"]:
-             csv_header_input = CSV_header_input(bounds=bounds)
-             header.extend(GradStatCsv.csv[key].generate_csv_header(csv_header_input))
-         return header
-
-     @staticmethod
-     def generate_csv_line(param_name, level, grad, bounds):
-         line = [param_name]
-         for key in level["header"]:
-             csv_content_input = CSV_content_input(grad=grad, bounds=bounds)
-             line.extend(GradStatCsv.csv[key].generate_csv_content(csv_content_input))
-         return line
-
-
- def register_csv_item(key, cls=None):
-     if cls is None:
-         # when cls is not given, return a decorator function
-         return lambda cls: register_csv_item(key, cls)
-     GradStatCsv.csv[key] = cls
-     return cls
-
-
- class CsvItem(ABC):
-     @abstractmethod
-     def generate_csv_header(csv_header_input):
-         pass
-
-     @abstractmethod
-     def generate_csv_content(csv_content_input):
-         pass
-
-
- @register_csv_item(GradConst.MD5)
- class CSV_md5(CsvItem):
-     def generate_csv_header(csv_header_input):
-         return ["MD5"]
-
-     def generate_csv_content(csv_content_input):
-         grad = csv_content_input.grad
-         tensor_bytes = grad.cpu().detach().float().numpy().tobytes()
-         md5_hash = hashlib.md5(tensor_bytes)
-         return [md5_hash.hexdigest()]
-
-
- @register_csv_item(GradConst.DISTRIBUTION)
- class CSV_distribution(CsvItem):
-     def generate_csv_header(csv_header_input):
-         bounds = csv_header_input.bounds
-         intervals = []
-         if bounds:
-             intervals.append(f"(-inf, {bounds[0]}]")
-             for i in range(1, len(bounds)):
-                 intervals.append(f"({bounds[i-1]}, {bounds[i]}]")
-         if intervals:
-             intervals.append(f"({bounds[-1]}, inf)")
-         intervals.append("=0")
-
-         return intervals
-
-     def generate_csv_content(csv_content_input):
-         grad = csv_content_input.grad
-         bounds = csv_content_input.bounds
-         grad = grad.cpu().detach()
-         if grad.dtype == torch.bfloat16:
-             grad = grad.to(torch.float32)
-         element_num = grad.numel()
-         grad_equal_0_num = (grad == 0).sum().item()
-         bound = torch.Tensor(bounds)
-         bucketsize_result = torch.bucketize(grad, bound)
-         interval_nums = [(bucketsize_result == i).sum().item() for i in range(len(bound) + 1)]
-         interval_nums.append(grad_equal_0_num)
-         return_list = [x / element_num if element_num != 0 else 0 for x in interval_nums]
-         return return_list
-
-
- @register_csv_item(GradConst.MAX)
- class CSV_max(CsvItem):
-     def generate_csv_header(csv_header_input):
-         return ["max"]
-
-     def generate_csv_content(csv_content_input):
-         grad = csv_content_input.grad
-         return [torch.max(grad).cpu().detach().float().numpy().tolist()]
-
-
- @register_csv_item(GradConst.MIN)
- class CSV_max(CsvItem):
-     def generate_csv_header(csv_header_input):
-         return ["min"]
-
-     def generate_csv_content(csv_content_input):
-         grad = csv_content_input.grad
-         return [torch.min(grad).cpu().detach().float().numpy().tolist()]
-
-
- @register_csv_item(GradConst.NORM)
- class CSV_max(CsvItem):
-     def generate_csv_header(csv_header_input):
-         return ["norm"]
-
-     def generate_csv_content(csv_content_input):
-         grad = csv_content_input.grad
-         return [torch.norm(grad).cpu().detach().float().numpy().tolist()]
-
-
- @register_csv_item(GradConst.SHAPE)
- class CSV_shape(CsvItem):
-     def generate_csv_header(csv_header_input):
-         return ["shape"]
-
-     def generate_csv_content(csv_content_input):
-         grad = csv_content_input.grad
+ from abc import ABC, abstractmethod
+ from collections import namedtuple
+ import hashlib
+ import torch
+ from msprobe.core.grad_probe.constant import GradConst
+
+ CSV_header_input = namedtuple("CSV_header_input", ["bounds"])
+ CSV_content_input = namedtuple("CSV_content_input", ["grad", "bounds"])
+
+
+ class GradStatCsv:
+     csv = {}
+
+     @staticmethod
+     def generate_csv_header(level, bounds):
+         header = ["param_name"]
+         for key in level["header"]:
+             csv_header_input = CSV_header_input(bounds=bounds)
+             header.extend(GradStatCsv.csv[key].generate_csv_header(csv_header_input))
+         return header
+
+     @staticmethod
+     def generate_csv_line(param_name, level, grad, bounds):
+         line = [param_name]
+         for key in level["header"]:
+             csv_content_input = CSV_content_input(grad=grad, bounds=bounds)
+             line.extend(GradStatCsv.csv[key].generate_csv_content(csv_content_input))
+         return line
+
+
+ def register_csv_item(key, cls=None):
+     if cls is None:
+         # when cls is not given, return a decorator function
+         return lambda cls: register_csv_item(key, cls)
+     GradStatCsv.csv[key] = cls
+     return cls
+
+
+ class CsvItem(ABC):
+     @abstractmethod
+     def generate_csv_header(csv_header_input):
+         pass
+
+     @abstractmethod
+     def generate_csv_content(csv_content_input):
+         pass
+
+
+ @register_csv_item(GradConst.MD5)
+ class CSV_md5(CsvItem):
+     def generate_csv_header(csv_header_input):
+         return ["MD5"]
+
+     def generate_csv_content(csv_content_input):
+         grad = csv_content_input.grad
+         tensor_bytes = grad.cpu().detach().float().numpy().tobytes()
+         md5_hash = hashlib.md5(tensor_bytes)
+         return [md5_hash.hexdigest()]
+
+
+ @register_csv_item(GradConst.DISTRIBUTION)
+ class CSV_distribution(CsvItem):
+     def generate_csv_header(csv_header_input):
+         bounds = csv_header_input.bounds
+         intervals = []
+         if bounds:
+             intervals.append(f"(-inf, {bounds[0]}]")
+             for i in range(1, len(bounds)):
+                 intervals.append(f"({bounds[i-1]}, {bounds[i]}]")
+         if intervals:
+             intervals.append(f"({bounds[-1]}, inf)")
+         intervals.append("=0")
+
+         return intervals
+
+     def generate_csv_content(csv_content_input):
+         grad = csv_content_input.grad
+         bounds = csv_content_input.bounds
+         grad = grad.cpu().detach()
+         if grad.dtype == torch.bfloat16:
+             grad = grad.to(torch.float32)
+         element_num = grad.numel()
+         grad_equal_0_num = (grad == 0).sum().item()
+         bound = torch.Tensor(bounds)
+         bucketsize_result = torch.bucketize(grad, bound)
+         interval_nums = [(bucketsize_result == i).sum().item() for i in range(len(bound) + 1)]
+         interval_nums.append(grad_equal_0_num)
+         return_list = [x / element_num if element_num != 0 else 0 for x in interval_nums]
+         return return_list
+
+
+ @register_csv_item(GradConst.MAX)
+ class CSV_max(CsvItem):
+     def generate_csv_header(csv_header_input):
+         return ["max"]
+
+     def generate_csv_content(csv_content_input):
+         grad = csv_content_input.grad
+         return [torch.max(grad).cpu().detach().float().numpy().tolist()]
+
+
+ @register_csv_item(GradConst.MIN)
+ class CSV_min(CsvItem):
+     def generate_csv_header(csv_header_input):
+         return ["min"]
+
+     def generate_csv_content(csv_content_input):
+         grad = csv_content_input.grad
+         return [torch.min(grad).cpu().detach().float().numpy().tolist()]
+
+
+ @register_csv_item(GradConst.NORM)
+ class CSV_norm(CsvItem):
+     def generate_csv_header(csv_header_input):
+         return ["norm"]
+
+     def generate_csv_content(csv_content_input):
+         grad = csv_content_input.grad
+         return [torch.norm(grad).cpu().detach().float().numpy().tolist()]
+
+
+ @register_csv_item(GradConst.SHAPE)
+ class CSV_shape(CsvItem):
+     def generate_csv_header(csv_header_input):
+         return ["shape"]
+
+     def generate_csv_content(csv_content_input):
+         grad = csv_content_input.grad
          return [list(grad.shape)]
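
Judging by its imports and the 128-line change count, the hunk above is the rewrite of msprobe/pytorch/grad_probe/grad_stat_csv.py from the file list (entry 197); the 1.1.0 side renames the duplicated CSV_max classes registered for GradConst.MIN and GradConst.NORM to CSV_min and CSV_norm. A minimal usage sketch of the registry shown in the hunk, with hypothetical level and bounds values (not taken from the package):

```python
# Minimal sketch of the GradStatCsv registry in the hunk above.
# Module path assumed from the file list; level/bounds are hypothetical examples.
import torch

from msprobe.core.grad_probe.constant import GradConst
from msprobe.pytorch.grad_probe.grad_stat_csv import GradStatCsv

level = {"header": [GradConst.MAX, GradConst.MIN, GradConst.NORM, GradConst.SHAPE]}
bounds = [-1.0, 0.0, 1.0]
grad = torch.randn(4, 8)

# Expected header: ["param_name", "max", "min", "norm", "shape"]
header = GradStatCsv.generate_csv_header(level, bounds)
# One CSV row of statistics for a named parameter's gradient
line = GradStatCsv.generate_csv_line("layer1.weight", level, grad, bounds)
```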
@@ -1 +1,16 @@
- from .wrap_functional import remove_dropout
+ # Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
+ # All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from .wrap_functional import remove_dropout
@@ -1,161 +1,166 @@
- #!/usr/bin/env python3
- # -*- coding: utf-8 -*-
- """
- # Copyright (C) 2022-2023. Huawei Technologies Co., Ltd. All rights reserved.
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """
-
- import torch
- import torch.distributed as dist
-
- from msprobe.pytorch.hook_module import wrap_torch, wrap_functional, wrap_tensor, wrap_vf, wrap_distributed, wrap_aten
- from msprobe.pytorch.hook_module.wrap_aten import get_aten_ops
- from msprobe.pytorch.hook_module.wrap_distributed import get_distributed_ops
- from msprobe.pytorch.hook_module.wrap_functional import get_functional_ops
- from msprobe.pytorch.hook_module.wrap_tensor import get_tensor_ops
- from msprobe.pytorch.hook_module.wrap_torch import get_torch_ops
- from msprobe.pytorch.hook_module.wrap_vf import get_vf_ops
- from msprobe.pytorch.common.utils import torch_without_guard_version, npu_distributed_api, is_gpu
- from msprobe.core.common.const import Const
-
- torch_version_above_2 = torch.__version__.split('+')[0] > '2.0'
-
- if not is_gpu:
-     import torch_npu
-     from . import wrap_npu_custom
-     from .wrap_npu_custom import get_npu_ops
-
-
- class ApiRegistry:
-     def __init__(self):
-         self.tensor_ori_attr = {}
-         self.torch_ori_attr = {}
-         self.functional_ori_attr = {}
-         self.distributed_ori_attr = {}
-         self.npu_distributed_ori_attr = {}
-         self.vf_ori_attr = {}
-         self.aten_ori_attr = {}
-         self.torch_npu_ori_attr = {}
-
-         self.tensor_hook_attr = {}
-         self.torch_hook_attr = {}
-         self.functional_hook_attr = {}
-         self.distributed_hook_attr = {}
-         self.npu_distributed_hook_attr = {}
-         self.vf_hook_attr = {}
-         self.aten_hook_attr = {}
-         self.torch_npu_hook_attr = {}
-
-     @staticmethod
-     def store_ori_attr(ori_api_group, api_list, api_ori_attr):
-         for api in api_list:
-             if '.' in api:
-                 sub_module_name, sub_op = api.rsplit('.', 1)
-                 sub_module = getattr(ori_api_group, sub_module_name)
-                 api_ori_attr[api] = getattr(sub_module, sub_op)
-             else:
-                 api_ori_attr[api] = getattr(ori_api_group, api)
-
-     @staticmethod
-     def set_api_attr(api_group, attr_dict):
-         for api, api_attr in attr_dict.items():
-             if '.' in api:
-                 sub_module_name, sub_op = api.rsplit('.', 1)
-                 sub_module = getattr(api_group, sub_module_name, None)
-                 if sub_module is not None:
-                     setattr(sub_module, sub_op, api_attr)
-             else:
-                 setattr(api_group, api, api_attr)
-
-     def api_modularity(self):
-         self.set_api_attr(torch.Tensor, self.tensor_hook_attr)
-         self.set_api_attr(torch, self.torch_hook_attr)
-         self.set_api_attr(torch.nn.functional, self.functional_hook_attr)
-         self.set_api_attr(dist, self.distributed_hook_attr)
-         self.set_api_attr(dist.distributed_c10d, self.distributed_hook_attr)
-         if not is_gpu and not torch_without_guard_version:
-             self.set_api_attr(torch_npu.distributed, self.npu_distributed_hook_attr)
-             self.set_api_attr(torch_npu.distributed.distributed_c10d, self.npu_distributed_hook_attr)
-         if torch_version_above_2:
-             self.set_api_attr(torch.ops.aten, self.aten_hook_attr)
-         self.set_api_attr(torch._VF, self.vf_hook_attr)
-         if not is_gpu:
-             self.set_api_attr(torch_npu, self.torch_npu_hook_attr)
-
-     def api_originality(self):
-         self.set_api_attr(torch.Tensor, self.tensor_ori_attr)
-         self.set_api_attr(torch, self.torch_ori_attr)
-         self.set_api_attr(torch.nn.functional, self.functional_ori_attr)
-         self.set_api_attr(dist, self.distributed_ori_attr)
-         self.set_api_attr(dist.distributed_c10d, self.distributed_ori_attr)
-         if not is_gpu and not torch_without_guard_version:
-             self.set_api_attr(torch_npu.distributed, self.npu_distributed_ori_attr)
-             self.set_api_attr(torch_npu.distributed.distributed_c10d, self.npu_distributed_ori_attr)
-         if torch_version_above_2:
-             self.set_api_attr(torch.ops.aten, self.aten_ori_attr)
-         self.set_api_attr(torch._VF, self.vf_ori_attr)
-         if not is_gpu:
-             self.set_api_attr(torch_npu, self.torch_npu_ori_attr)
-
-     def initialize_hook(self, hook):
-         self.store_ori_attr(torch.Tensor, get_tensor_ops(), self.tensor_ori_attr)
-         wrap_tensor.wrap_tensor_ops_and_bind(hook)
-         for attr_name in dir(wrap_tensor.HOOKTensor):
-             if attr_name.startswith(Const.ATTR_NAME_PREFIX):
-                 self.tensor_hook_attr[attr_name[5:]] = getattr(wrap_tensor.HOOKTensor, attr_name)
-
-         self.store_ori_attr(torch, get_torch_ops(), self.torch_ori_attr)
-         wrap_torch.wrap_torch_ops_and_bind(hook)
-         for attr_name in dir(wrap_torch.HOOKTorchOP):
-             if attr_name.startswith(Const.ATTR_NAME_PREFIX):
-                 self.torch_hook_attr[attr_name[5:]] = getattr(wrap_torch.HOOKTorchOP, attr_name)
-
-         self.store_ori_attr(torch.nn.functional, get_functional_ops(), self.functional_ori_attr)
-         wrap_functional.wrap_functional_ops_and_bind(hook)
-         for attr_name in dir(wrap_functional.HOOKFunctionalOP):
-             if attr_name.startswith(Const.ATTR_NAME_PREFIX):
-                 self.functional_hook_attr[attr_name[5:]] = getattr(wrap_functional.HOOKFunctionalOP, attr_name)
-
-         self.store_ori_attr(dist, get_distributed_ops(), self.distributed_ori_attr)
-         wrap_distributed.wrap_distributed_ops_and_bind(hook)
-         if not is_gpu and not torch_without_guard_version:
-             self.store_ori_attr(torch_npu.distributed, npu_distributed_api, self.npu_distributed_ori_attr)
-         for attr_name in dir(wrap_distributed.HOOKDistributedOP):
-             if attr_name.startswith(Const.ATTR_NAME_PREFIX):
-                 self.distributed_hook_attr[attr_name[5:]] = getattr(wrap_distributed.HOOKDistributedOP, attr_name)
-                 if not is_gpu and not torch_without_guard_version and attr_name[5:] in npu_distributed_api:
-                     self.npu_distributed_hook_attr[attr_name[5:]] = getattr(wrap_distributed.HOOKDistributedOP,
-                                                                             attr_name)
-
-         if torch_version_above_2:
-             self.store_ori_attr(torch.ops.aten, get_aten_ops(), self.aten_ori_attr)
-             wrap_aten.wrap_aten_ops_and_bind(hook)
-             for attr_name in dir(wrap_aten.HOOKAtenOP):
-                 if attr_name.startswith(Const.ATTR_NAME_PREFIX):
-                     self.aten_hook_attr[attr_name[5:]] = getattr(wrap_aten.HOOKAtenOP, attr_name)
-
-         self.store_ori_attr(torch._VF, get_vf_ops(), self.vf_ori_attr)
-         wrap_vf.wrap_vf_ops_and_bind(hook)
-         for attr_name in dir(wrap_vf.HOOKVfOP):
-             if attr_name.startswith(Const.ATTR_NAME_PREFIX):
-                 self.vf_hook_attr[attr_name[5:]] = getattr(wrap_vf.HOOKVfOP, attr_name)
-
-         if not is_gpu:
-             self.store_ori_attr(torch_npu, get_npu_ops(), self.torch_npu_ori_attr)
-             wrap_npu_custom.wrap_npu_ops_and_bind(hook)
-             for attr_name in dir(wrap_npu_custom.HOOKNpuOP):
-                 if attr_name.startswith(Const.ATTR_NAME_PREFIX):
-                     self.torch_npu_hook_attr[attr_name[5:]] = getattr(wrap_npu_custom.HOOKNpuOP, attr_name)
-
-
- api_register = ApiRegistry()
+ # Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
+ # All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import torch
+ import torch.distributed as dist
+
+ from msprobe.pytorch.hook_module import wrap_torch, wrap_functional, wrap_tensor, wrap_vf, wrap_distributed, wrap_aten
+ from msprobe.pytorch.hook_module.wrap_aten import get_aten_ops
+ from msprobe.pytorch.hook_module.wrap_distributed import get_distributed_ops
+ from msprobe.pytorch.hook_module.wrap_functional import get_functional_ops
+ from msprobe.pytorch.hook_module.wrap_tensor import get_tensor_ops
+ from msprobe.pytorch.hook_module.wrap_torch import get_torch_ops
+ from msprobe.pytorch.hook_module.wrap_vf import get_vf_ops
+ from msprobe.pytorch.common.utils import torch_without_guard_version, npu_distributed_api, is_gpu
+ from msprobe.core.common.const import Const
+
+ torch_version_above_2 = torch.__version__.split('+')[0] > '2.0'
+
+ if not is_gpu:
+     import torch_npu
+     from . import wrap_npu_custom
+     from .wrap_npu_custom import get_npu_ops
+
+
+ class ApiRegistry:
+     def __init__(self):
+         self.tensor_ori_attr = {}
+         self.torch_ori_attr = {}
+         self.functional_ori_attr = {}
+         self.distributed_ori_attr = {}
+         self.npu_distributed_ori_attr = {}
+         self.vf_ori_attr = {}
+         self.aten_ori_attr = {}
+         self.torch_npu_ori_attr = {}
+
+         self.tensor_hook_attr = {}
+         self.torch_hook_attr = {}
+         self.functional_hook_attr = {}
+         self.distributed_hook_attr = {}
+         self.npu_distributed_hook_attr = {}
+         self.vf_hook_attr = {}
+         self.aten_hook_attr = {}
+         self.torch_npu_hook_attr = {}
+
+     @staticmethod
+     def store_ori_attr(ori_api_group, api_list, api_ori_attr):
+         for api in api_list:
+             if '.' in api:
+                 sub_module_name, sub_op = api.rsplit('.', 1)
+                 sub_module = getattr(ori_api_group, sub_module_name)
+                 api_ori_attr[api] = getattr(sub_module, sub_op)
+             else:
+                 api_ori_attr[api] = getattr(ori_api_group, api)
+
+     @staticmethod
+     def set_api_attr(api_group, attr_dict):
+         for api, api_attr in attr_dict.items():
+             if '.' in api:
+                 sub_module_name, sub_op = api.rsplit('.', 1)
+                 sub_module = getattr(api_group, sub_module_name, None)
+                 if sub_module is not None:
+                     setattr(sub_module, sub_op, api_attr)
+             else:
+                 setattr(api_group, api, api_attr)
+
+     def api_modularity(self):
+         self.set_api_attr(torch.Tensor, self.tensor_hook_attr)
+         self.set_api_attr(torch, self.torch_hook_attr)
+         self.set_api_attr(torch.nn.functional, self.functional_hook_attr)
+         self.set_api_attr(dist, self.distributed_hook_attr)
+         self.set_api_attr(dist.distributed_c10d, self.distributed_hook_attr)
+         if not is_gpu and not torch_without_guard_version:
+             self.set_api_attr(torch_npu.distributed, self.npu_distributed_hook_attr)
+             self.set_api_attr(torch_npu.distributed.distributed_c10d, self.npu_distributed_hook_attr)
+         if torch_version_above_2:
+             self.set_api_attr(torch.ops.aten, self.aten_hook_attr)
+         self.set_api_attr(torch._VF, self.vf_hook_attr)
+         if not is_gpu:
+             self.set_api_attr(torch_npu, self.torch_npu_hook_attr)
+
+     def api_originality(self):
+         self.set_api_attr(torch.Tensor, self.tensor_ori_attr)
+         self.set_api_attr(torch, self.torch_ori_attr)
+         self.set_api_attr(torch.nn.functional, self.functional_ori_attr)
+         self.set_api_attr(dist, self.distributed_ori_attr)
+         self.set_api_attr(dist.distributed_c10d, self.distributed_ori_attr)
+         if not is_gpu and not torch_without_guard_version:
+             self.set_api_attr(torch_npu.distributed, self.npu_distributed_ori_attr)
+             self.set_api_attr(torch_npu.distributed.distributed_c10d, self.npu_distributed_ori_attr)
+         if torch_version_above_2:
+             self.set_api_attr(torch.ops.aten, self.aten_ori_attr)
+         self.set_api_attr(torch._VF, self.vf_ori_attr)
+         if not is_gpu:
+             self.set_api_attr(torch_npu, self.torch_npu_ori_attr)
+
+     def initialize_hook(self, hook, online_run_ut=False):
+         """
+         initialize_hook
+         Args:
+             hook (_type_): initialize_hook
+             online_run_ut (bool): default False, whether online run_ut or not.
+                 If online_run_ut is True, the hook will not wrap the aten ops.
+         """
+         self.store_ori_attr(torch.Tensor, get_tensor_ops(), self.tensor_ori_attr)
+         wrap_tensor.wrap_tensor_ops_and_bind(hook)
+         for attr_name in dir(wrap_tensor.HOOKTensor):
+             if attr_name.startswith(Const.ATTR_NAME_PREFIX):
+                 self.tensor_hook_attr[attr_name[5:]] = getattr(wrap_tensor.HOOKTensor, attr_name)
+
+         self.store_ori_attr(torch, get_torch_ops(), self.torch_ori_attr)
+         wrap_torch.wrap_torch_ops_and_bind(hook)
+         for attr_name in dir(wrap_torch.HOOKTorchOP):
+             if attr_name.startswith(Const.ATTR_NAME_PREFIX):
+                 self.torch_hook_attr[attr_name[5:]] = getattr(wrap_torch.HOOKTorchOP, attr_name)
+
+         self.store_ori_attr(torch.nn.functional, get_functional_ops(), self.functional_ori_attr)
+         wrap_functional.wrap_functional_ops_and_bind(hook)
+         for attr_name in dir(wrap_functional.HOOKFunctionalOP):
+             if attr_name.startswith(Const.ATTR_NAME_PREFIX):
+                 self.functional_hook_attr[attr_name[5:]] = getattr(wrap_functional.HOOKFunctionalOP, attr_name)
+
+         self.store_ori_attr(dist, get_distributed_ops(), self.distributed_ori_attr)
+         wrap_distributed.wrap_distributed_ops_and_bind(hook)
+         if not is_gpu and not torch_without_guard_version:
+             self.store_ori_attr(torch_npu.distributed, npu_distributed_api, self.npu_distributed_ori_attr)
+         for attr_name in dir(wrap_distributed.HOOKDistributedOP):
+             if attr_name.startswith(Const.ATTR_NAME_PREFIX):
+                 self.distributed_hook_attr[attr_name[5:]] = getattr(wrap_distributed.HOOKDistributedOP, attr_name)
+                 if not is_gpu and not torch_without_guard_version and attr_name[5:] in npu_distributed_api:
+                     self.npu_distributed_hook_attr[attr_name[5:]] = getattr(wrap_distributed.HOOKDistributedOP,
+                                                                             attr_name)
+
+         if torch_version_above_2 and not online_run_ut:
+             self.store_ori_attr(torch.ops.aten, get_aten_ops(), self.aten_ori_attr)
+             wrap_aten.wrap_aten_ops_and_bind(hook)
+             for attr_name in dir(wrap_aten.HOOKAtenOP):
+                 if attr_name.startswith(Const.ATTR_NAME_PREFIX):
+                     self.aten_hook_attr[attr_name[5:]] = getattr(wrap_aten.HOOKAtenOP, attr_name)
+
+         self.store_ori_attr(torch._VF, get_vf_ops(), self.vf_ori_attr)
+         wrap_vf.wrap_vf_ops_and_bind(hook)
+         for attr_name in dir(wrap_vf.HOOKVfOP):
+             if attr_name.startswith(Const.ATTR_NAME_PREFIX):
+                 self.vf_hook_attr[attr_name[5:]] = getattr(wrap_vf.HOOKVfOP, attr_name)
+
+         if not is_gpu:
+             self.store_ori_attr(torch_npu, get_npu_ops(), self.torch_npu_ori_attr)
+             wrap_npu_custom.wrap_npu_ops_and_bind(hook)
+             for attr_name in dir(wrap_npu_custom.HOOKNpuOP):
+                 if attr_name.startswith(Const.ATTR_NAME_PREFIX):
+                     self.torch_npu_hook_attr[attr_name[5:]] = getattr(wrap_npu_custom.HOOKNpuOP, attr_name)
+
+
+ api_register = ApiRegistry()
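
This hunk corresponds to msprobe/pytorch/hook_module/api_registry.py in the file list (+166 -161). Apart from the new license header, the only behavioral change is the online_run_ut flag, which, per the new docstring, skips wrapping torch.ops.aten (on torch above 2.0) when online run_ut is in use. A rough usage sketch under that reading; build_hook below is a placeholder, not the package's real hook factory:

```python
# Hedged sketch of the registry flow shown above; `build_hook` stands in for the
# package's actual hook factory, which is defined outside this hunk.
from msprobe.pytorch.hook_module.api_registry import api_register

def build_hook(api_name):
    # placeholder hook factory (hypothetical signature)
    raise NotImplementedError

api_register.initialize_hook(build_hook, online_run_ut=True)  # aten ops left unwrapped
api_register.api_modularity()    # swap the hooked attributes into torch / dist / torch_npu
# ... run the instrumented workload ...
api_register.api_originality()   # restore the original attributes
```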