mindstudio-probe 1.0.3__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (278) hide show
  1. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/LICENSE +201 -201
  2. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/METADATA +36 -34
  3. mindstudio_probe-1.1.0.dist-info/RECORD +287 -0
  4. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/WHEEL +1 -1
  5. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/entry_points.txt +1 -0
  6. msprobe/README.md +131 -237
  7. msprobe/__init__.py +16 -1
  8. msprobe/{config/config.json → config.json} +47 -49
  9. msprobe/core/advisor/advisor.py +124 -124
  10. msprobe/core/advisor/advisor_const.py +58 -59
  11. msprobe/core/advisor/advisor_result.py +58 -58
  12. msprobe/core/common/const.py +402 -318
  13. msprobe/core/common/exceptions.py +99 -99
  14. msprobe/core/common/{file_check.py → file_utils.py} +523 -283
  15. msprobe/core/common/inplace_op_checker.py +38 -0
  16. msprobe/core/common/inplace_ops.yaml +251 -0
  17. msprobe/core/common/log.py +86 -69
  18. msprobe/core/common/utils.py +371 -616
  19. msprobe/core/common_config.py +78 -71
  20. msprobe/core/compare/acc_compare.py +472 -298
  21. msprobe/core/compare/check.py +180 -95
  22. msprobe/core/compare/compare_cli.py +69 -49
  23. msprobe/core/compare/highlight.py +259 -222
  24. msprobe/core/compare/multiprocessing_compute.py +174 -149
  25. msprobe/core/compare/npy_compare.py +310 -295
  26. msprobe/core/compare/utils.py +464 -429
  27. msprobe/core/data_dump/data_collector.py +153 -144
  28. msprobe/core/data_dump/data_processor/base.py +337 -293
  29. msprobe/core/data_dump/data_processor/factory.py +76 -59
  30. msprobe/core/data_dump/data_processor/mindspore_processor.py +192 -198
  31. msprobe/core/data_dump/data_processor/pytorch_processor.py +383 -389
  32. msprobe/core/data_dump/json_writer.py +117 -116
  33. msprobe/core/data_dump/scope.py +194 -178
  34. msprobe/core/grad_probe/constant.py +74 -70
  35. msprobe/core/grad_probe/grad_compare.py +170 -175
  36. msprobe/core/grad_probe/utils.py +77 -52
  37. msprobe/docs/01.installation.md +99 -0
  38. msprobe/docs/02.config_introduction.md +137 -0
  39. msprobe/docs/03.config_examples.md +237 -0
  40. msprobe/docs/04.acl_config_examples.md +78 -0
  41. msprobe/docs/05.data_dump_PyTorch.md +326 -0
  42. msprobe/docs/06.data_dump_MindSpore.md +285 -0
  43. msprobe/docs/07.accuracy_checker_PyTorch.md +297 -0
  44. msprobe/docs/08.accuracy_checker_online_PyTorch.md +238 -0
  45. msprobe/docs/09.accuracy_checker_MindSpore.md +68 -0
  46. msprobe/docs/10.accuracy_compare_PyTorch.md +327 -0
  47. msprobe/docs/11.accuracy_compare_MindSpore.md +333 -0
  48. msprobe/docs/12.overflow_check_PyTorch.md +79 -0
  49. msprobe/docs/13.overflow_check_MindSpore.md +31 -0
  50. msprobe/{pytorch/doc/parse_tool.md → docs/14.data_parse_PyTorch.md} +283 -286
  51. msprobe/docs/15.free_benchmarking_PyTorch.md +170 -0
  52. msprobe/docs/16.free_benchmarking_MindSpore.md +140 -0
  53. msprobe/{doc/grad_probe/grad_probe.md → docs/17.grad_probe.md} +205 -207
  54. msprobe/{pytorch/doc//321/205/320/254/320/270/321/207/342/225/221/342/224/220/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/206/320/277/320/244/321/205/320/277/342/225/243.md → docs/18.online_dispatch.md} +89 -90
  55. msprobe/docs/FAQ.md +189 -0
  56. msprobe/docs/S02.report_free_benchmarking_validation_performance_baseline.md +146 -0
  57. msprobe/docs/img/free_benchmark_framework.png +0 -0
  58. msprobe/docs/img/ms_dump.png +0 -0
  59. msprobe/docs/img/ms_layer.png +0 -0
  60. msprobe/docs/img/pt_dump.png +0 -0
  61. msprobe/mindspore/__init__.py +2 -1
  62. msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +278 -245
  63. msprobe/mindspore/api_accuracy_checker/api_info.py +76 -69
  64. msprobe/mindspore/api_accuracy_checker/api_runner.py +155 -151
  65. msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +196 -196
  66. msprobe/mindspore/api_accuracy_checker/cmd_parser.py +6 -0
  67. msprobe/mindspore/api_accuracy_checker/compute_element.py +238 -223
  68. msprobe/mindspore/api_accuracy_checker/main.py +8 -15
  69. msprobe/mindspore/api_accuracy_checker/type_mapping.py +113 -113
  70. msprobe/mindspore/api_accuracy_checker/utils.py +79 -62
  71. msprobe/mindspore/cell_processor.py +58 -34
  72. msprobe/mindspore/common/const.py +108 -87
  73. msprobe/mindspore/common/log.py +37 -37
  74. msprobe/mindspore/common/utils.py +97 -57
  75. msprobe/mindspore/compare/distributed_compare.py +62 -75
  76. msprobe/mindspore/compare/layer_mapping.py +146 -0
  77. msprobe/mindspore/compare/modify_mapping.py +107 -0
  78. msprobe/mindspore/compare/ms_compare.py +357 -117
  79. msprobe/mindspore/compare/ms_graph_compare.py +364 -317
  80. msprobe/mindspore/compare/ms_to_pt_api.yaml +399 -399
  81. msprobe/mindspore/debugger/debugger_config.py +69 -74
  82. msprobe/mindspore/debugger/precision_debugger.py +150 -107
  83. msprobe/mindspore/dump/dump_tool_factory.py +50 -35
  84. msprobe/mindspore/dump/hook_cell/api_registry.py +128 -104
  85. msprobe/mindspore/dump/hook_cell/hook_cell.py +55 -53
  86. msprobe/mindspore/dump/hook_cell/primitive_hooks.py +206 -0
  87. msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +994 -925
  88. msprobe/mindspore/dump/hook_cell/wrap_api.py +121 -0
  89. msprobe/mindspore/dump/jit_dump.py +96 -56
  90. msprobe/mindspore/dump/kernel_graph_dump.py +75 -60
  91. msprobe/mindspore/dump/kernel_kbyk_dump.py +79 -65
  92. msprobe/mindspore/free_benchmark/api_pynative_self_check.py +131 -116
  93. msprobe/mindspore/free_benchmark/common/config.py +27 -12
  94. msprobe/mindspore/free_benchmark/common/handler_params.py +32 -17
  95. msprobe/mindspore/free_benchmark/common/utils.py +85 -71
  96. msprobe/mindspore/free_benchmark/data/support_wrap_ops.yaml +842 -842
  97. msprobe/mindspore/free_benchmark/decorator/dec_forward.py +57 -42
  98. msprobe/mindspore/free_benchmark/decorator/decorator_factory.py +122 -107
  99. msprobe/mindspore/free_benchmark/handler/base_handler.py +105 -90
  100. msprobe/mindspore/free_benchmark/handler/check_handler.py +56 -41
  101. msprobe/mindspore/free_benchmark/handler/fix_handler.py +51 -36
  102. msprobe/mindspore/free_benchmark/handler/handler_factory.py +36 -21
  103. msprobe/mindspore/free_benchmark/perturbation/add_noise.py +82 -67
  104. msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py +36 -21
  105. msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +78 -63
  106. msprobe/mindspore/free_benchmark/perturbation/exchange_value.py +77 -0
  107. msprobe/mindspore/free_benchmark/perturbation/improve_precision.py +49 -34
  108. msprobe/mindspore/free_benchmark/perturbation/no_change.py +27 -12
  109. msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +44 -27
  110. msprobe/mindspore/free_benchmark/self_check_tool_factory.py +48 -33
  111. msprobe/mindspore/grad_probe/global_context.py +100 -91
  112. msprobe/mindspore/grad_probe/grad_analyzer.py +231 -231
  113. msprobe/mindspore/grad_probe/grad_monitor.py +27 -27
  114. msprobe/mindspore/grad_probe/grad_stat_csv.py +131 -131
  115. msprobe/mindspore/grad_probe/hook.py +94 -92
  116. msprobe/mindspore/grad_probe/utils.py +29 -28
  117. msprobe/mindspore/ms_config.py +128 -126
  118. msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +60 -45
  119. msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +49 -34
  120. msprobe/mindspore/runtime.py +4 -4
  121. msprobe/mindspore/service.py +297 -354
  122. msprobe/mindspore/task_handler_factory.py +24 -24
  123. msprobe/msprobe.py +105 -107
  124. msprobe/pytorch/__init__.py +23 -4
  125. msprobe/pytorch/api_accuracy_checker/common/config.py +70 -55
  126. msprobe/pytorch/api_accuracy_checker/common/utils.py +246 -165
  127. msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +230 -213
  128. msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +632 -581
  129. msprobe/pytorch/api_accuracy_checker/compare/api_precision_standard.yaml +132 -132
  130. msprobe/pytorch/api_accuracy_checker/compare/api_precision_threshold.yaml +390 -390
  131. msprobe/pytorch/api_accuracy_checker/compare/compare.py +416 -381
  132. msprobe/pytorch/api_accuracy_checker/compare/compare_column.py +90 -73
  133. msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +265 -244
  134. msprobe/pytorch/api_accuracy_checker/config.yaml +10 -10
  135. msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +370 -332
  136. msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +221 -199
  137. msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +150 -134
  138. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +518 -581
  139. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +213 -74
  140. msprobe/pytorch/api_accuracy_checker/run_ut/torch_ut_setting.json +7 -4
  141. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +218 -202
  142. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/client.py +370 -324
  143. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/device_dispatch.py +227 -204
  144. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/dump_dispatch.py +110 -0
  145. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/server.py +244 -218
  146. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/torch_ops_config.yaml +63 -0
  147. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/utils.py +44 -0
  148. msprobe/pytorch/bench_functions/__init__.py +30 -15
  149. msprobe/pytorch/bench_functions/apply_adam_w.py +43 -28
  150. msprobe/pytorch/bench_functions/confusion_transpose.py +34 -19
  151. msprobe/pytorch/bench_functions/fast_gelu.py +70 -55
  152. msprobe/pytorch/bench_functions/layer_norm_eval.py +21 -6
  153. msprobe/pytorch/bench_functions/linear.py +27 -12
  154. msprobe/pytorch/bench_functions/matmul_backward.py +63 -48
  155. msprobe/pytorch/bench_functions/npu_fusion_attention.py +538 -421
  156. msprobe/pytorch/bench_functions/rms_norm.py +30 -15
  157. msprobe/pytorch/bench_functions/rotary_mul.py +71 -52
  158. msprobe/pytorch/bench_functions/scaled_mask_softmax.py +41 -26
  159. msprobe/pytorch/bench_functions/swiglu.py +70 -55
  160. msprobe/pytorch/common/__init__.py +17 -2
  161. msprobe/pytorch/common/compare_script.template +14 -14
  162. msprobe/pytorch/common/log.py +33 -32
  163. msprobe/pytorch/common/parse_json.py +54 -39
  164. msprobe/pytorch/common/utils.py +310 -300
  165. msprobe/pytorch/compare/distributed_compare.py +66 -66
  166. msprobe/pytorch/compare/mapping.yaml +607 -607
  167. msprobe/pytorch/compare/match.py +49 -33
  168. msprobe/pytorch/compare/pt_compare.py +82 -40
  169. msprobe/pytorch/debugger/debugger_config.py +108 -95
  170. msprobe/pytorch/debugger/precision_debugger.py +173 -125
  171. msprobe/pytorch/free_benchmark/__init__.py +23 -8
  172. msprobe/pytorch/free_benchmark/common/constant.py +70 -70
  173. msprobe/pytorch/free_benchmark/common/counter.py +71 -71
  174. msprobe/pytorch/free_benchmark/common/enums.py +65 -37
  175. msprobe/pytorch/free_benchmark/common/params.py +144 -129
  176. msprobe/pytorch/free_benchmark/common/utils.py +118 -102
  177. msprobe/pytorch/free_benchmark/compare/grad_saver.py +200 -179
  178. msprobe/pytorch/free_benchmark/compare/single_benchmark.py +119 -104
  179. msprobe/pytorch/free_benchmark/main.py +120 -105
  180. msprobe/pytorch/free_benchmark/perturbed_layers/base_layer.py +28 -13
  181. msprobe/pytorch/free_benchmark/perturbed_layers/layer_factory.py +56 -41
  182. msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +105 -90
  183. msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +119 -104
  184. msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +87 -63
  185. msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +83 -68
  186. msprobe/pytorch/free_benchmark/perturbed_layers/npu/no_change.py +43 -28
  187. msprobe/pytorch/free_benchmark/perturbed_layers/npu/npu_base_layser.py +60 -45
  188. msprobe/pytorch/free_benchmark/perturbed_layers/run_cpu.py +34 -19
  189. msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +256 -217
  190. msprobe/pytorch/free_benchmark/result_handlers/check_handler.py +54 -39
  191. msprobe/pytorch/free_benchmark/result_handlers/fix_handler.py +38 -23
  192. msprobe/pytorch/free_benchmark/result_handlers/handler_factory.py +45 -30
  193. msprobe/pytorch/free_benchmark/result_handlers/preheat_handler.py +185 -170
  194. msprobe/pytorch/function_factory.py +91 -75
  195. msprobe/pytorch/functional/module_dump.py +84 -0
  196. msprobe/pytorch/grad_probe/grad_monitor.py +91 -90
  197. msprobe/pytorch/grad_probe/grad_stat_csv.py +128 -128
  198. msprobe/pytorch/hook_module/__init__.py +16 -1
  199. msprobe/pytorch/hook_module/api_registry.py +166 -161
  200. msprobe/pytorch/hook_module/hook_module.py +118 -120
  201. msprobe/pytorch/hook_module/support_wrap_ops.yaml +1879 -1877
  202. msprobe/pytorch/hook_module/utils.py +28 -29
  203. msprobe/pytorch/hook_module/wrap_aten.py +111 -110
  204. msprobe/pytorch/hook_module/wrap_distributed.py +77 -78
  205. msprobe/pytorch/hook_module/wrap_functional.py +104 -105
  206. msprobe/pytorch/hook_module/wrap_npu_custom.py +85 -84
  207. msprobe/pytorch/hook_module/wrap_tensor.py +69 -71
  208. msprobe/pytorch/hook_module/wrap_torch.py +84 -86
  209. msprobe/pytorch/hook_module/wrap_vf.py +60 -62
  210. msprobe/pytorch/module_processer.py +153 -138
  211. msprobe/pytorch/online_dispatch/__init__.py +20 -20
  212. msprobe/pytorch/online_dispatch/compare.py +235 -236
  213. msprobe/pytorch/online_dispatch/dispatch.py +271 -271
  214. msprobe/pytorch/online_dispatch/dump_compare.py +155 -156
  215. msprobe/pytorch/online_dispatch/single_compare.py +391 -391
  216. msprobe/pytorch/online_dispatch/torch_ops_config.yaml +57 -49
  217. msprobe/pytorch/online_dispatch/utils.py +127 -146
  218. msprobe/pytorch/parse.py +19 -4
  219. msprobe/pytorch/parse_tool/cli.py +31 -32
  220. msprobe/pytorch/parse_tool/lib/compare.py +259 -271
  221. msprobe/pytorch/parse_tool/lib/config.py +52 -52
  222. msprobe/pytorch/parse_tool/lib/file_desc.py +31 -31
  223. msprobe/pytorch/parse_tool/lib/interactive_cli.py +102 -102
  224. msprobe/pytorch/parse_tool/lib/parse_exception.py +54 -54
  225. msprobe/pytorch/parse_tool/lib/parse_tool.py +161 -158
  226. msprobe/pytorch/parse_tool/lib/utils.py +320 -321
  227. msprobe/pytorch/parse_tool/lib/visualization.py +85 -91
  228. msprobe/pytorch/pt_config.py +317 -187
  229. msprobe/pytorch/service.py +311 -252
  230. mindstudio_probe-1.0.3.dist-info/RECORD +0 -272
  231. msprobe/config/README.md +0 -539
  232. msprobe/mindspore/doc/compare.md +0 -58
  233. msprobe/mindspore/doc/dump.md +0 -217
  234. msprobe/mindspore/dump/hook_cell/wrap_functional.py +0 -91
  235. msprobe/mindspore/dump/hook_cell/wrap_tensor.py +0 -63
  236. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/ssl_config.py +0 -10
  237. msprobe/pytorch/doc/FAQ.md +0 -193
  238. msprobe/pytorch/doc/api_accuracy_checker.md +0 -313
  239. msprobe/pytorch/doc/api_accuracy_checker_online.md +0 -187
  240. msprobe/pytorch/doc/dump.md +0 -260
  241. msprobe/pytorch/doc/msprobe/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/206/320/245/342/226/221/321/206/320/235/320/276dump/321/206/320/260/320/227/321/205/320/227/320/226/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -182
  242. msprobe/pytorch/doc/ptdbg_ascend_compare.md +0 -240
  243. msprobe/pytorch/doc/ptdbg_ascend_overview.md +0 -68
  244. msprobe/pytorch/doc/ptdbg_ascend_quickstart.md +0 -381
  245. msprobe/pytorch/doc/run_overflow_check.md +0 -25
  246. msprobe/pytorch/doc//321/206/320/247/320/260/321/206/320/260/320/227/321/206/320/255/320/226/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/205/320/254/342/225/221/321/206/320/251/320/277/321/211/320/272/320/234/321/210/320/277/320/221/321/205/320/242/320/234/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -151
  247. msprobe/pytorch/functional/data_processor.py +0 -0
  248. msprobe/pytorch/functional/dump_module.py +0 -39
  249. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/top_level.txt +0 -0
  250. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_1.png +0 -0
  251. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_2.png +0 -0
  252. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_3.png +0 -0
  253. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_4.png +0 -0
  254. /msprobe/{pytorch/doc → docs}/img/GPT-3_1.png +0 -0
  255. /msprobe/{pytorch/doc → docs}/img/GPT-3_2.png +0 -0
  256. /msprobe/{pytorch/doc → docs}/img/GPT-3_3.png +0 -0
  257. /msprobe/{pytorch/doc → docs}/img/GPT-3_4.png +0 -0
  258. /msprobe/{pytorch/doc → docs}/img/GPT-3_5.png +0 -0
  259. /msprobe/{pytorch/doc → docs}/img/GPT-3_6.png +0 -0
  260. /msprobe/{pytorch/doc → docs}/img/GPT-3_7.png +0 -0
  261. /msprobe/{pytorch/doc → docs}/img/GPT-3_8.png +0 -0
  262. /msprobe/{pytorch/doc → docs}/img/YOLOV5S_1.png +0 -0
  263. /msprobe/{pytorch/doc → docs}/img/YOLOV5S_2.png +0 -0
  264. /msprobe/{pytorch/doc → docs}/img/accuracy_checking_details.png +0 -0
  265. /msprobe/{pytorch/doc → docs}/img/accuracy_checking_result.png +0 -0
  266. /msprobe/{pytorch/doc → docs}/img/api_precision_compare_details.png +0 -0
  267. /msprobe/{pytorch/doc → docs}/img/api_precision_compare_result.png +0 -0
  268. /msprobe/{pytorch/doc → docs}/img/auto_analyze_log.png +0 -0
  269. /msprobe/{pytorch/doc → docs}/img/compare_result_pkl.png +0 -0
  270. /msprobe/{pytorch/doc → docs}/img/compare_result_pkl_md5.png.png +0 -0
  271. /msprobe/{pytorch/doc → docs}/img/cpu_info.png +0 -0
  272. /msprobe/{config → docs}/img/free_benchmark.png +0 -0
  273. /msprobe/{doc/grad_probe/img/image-1.png → docs/img/grad_probe_image-1.png} +0 -0
  274. /msprobe/{doc/grad_probe/img/image-2.png → docs/img/grad_probe_image-2.png} +0 -0
  275. /msprobe/{doc/grad_probe/img/image-3.png → docs/img/grad_probe_image-3.png} +0 -0
  276. /msprobe/{doc/grad_probe/img/image-4.png → docs/img/grad_probe_image-4.png} +0 -0
  277. /msprobe/{doc/grad_probe/img/image.png → docs/img/grad_probe_image.png} +0 -0
  278. /msprobe/{pytorch/doc → docs}/img/module_compare.png +0 -0
@@ -1,15 +1,30 @@
1
- import torch
2
-
3
-
4
- def npu_rms_norm(x, gamma, epsilon=1e-5):
5
- rstd = torch.rsqrt(torch.mean(torch.pow(x, 2), axis=-1, keepdim=True) + epsilon)
6
- res = x * rstd * gamma
7
- return res.cpu(), rstd.float().cpu()
8
-
9
-
10
- def npu_rms_norm_backward(grad, x, gamma, rstd):
11
- mean_gy = (grad * x * gamma * rstd).mean(dim=-1, keepdim=True)
12
- grad_x = (grad * gamma - x * rstd * mean_gy) * rstd
13
- grad_gamma = x * grad * rstd
14
- return grad_x.cpu(), grad_gamma.cpu()
15
-
1
+ # Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import torch
17
+
18
+
19
+ def npu_rms_norm(x, gamma, epsilon=1e-5):
20
+ rstd = torch.rsqrt(torch.mean(torch.pow(x, 2), axis=-1, keepdim=True) + epsilon)
21
+ res = x * rstd * gamma
22
+ return res, rstd.float()
23
+
24
+
25
+ def npu_rms_norm_backward(grad, x, gamma, rstd):
26
+ mean_gy = (grad * x * gamma * rstd).mean(dim=-1, keepdim=True)
27
+ grad_x = (grad * gamma - x * rstd * mean_gy) * rstd
28
+ grad_gamma = x * grad * rstd
29
+ return grad_x.cpu(), grad_gamma.cpu()
30
+
@@ -1,52 +1,71 @@
1
- import torch
2
-
3
-
4
- def npu_rotary_mul(x, r1, r2):
5
- x1, x2 = torch.chunk(x, 2, -1)
6
- x_new = torch.cat((-x2, x1), dim=-1)
7
- output = r1 * x + r2 * x_new
8
- return output.cpu()
9
-
10
-
11
- def npu_rotary_mul_backward(dy_tensor, x, r1, r2):
12
- x.requires_grad = True
13
- r1.requires_grad = True
14
- r2.requires_grad = True
15
- # golden
16
- x1, x2 = torch.chunk(x, 2, -1)
17
- x_new = torch.cat((-x2, x1), dim=-1)
18
- golden_tensor = r1 * x + r2 * x_new
19
- golden_tensor.backward(dy_tensor)
20
- r1_shape = r1.shape
21
- r1_grad = torch.zeros(r1_shape).type(torch.float32)
22
- r2_grad = torch.zeros(r1_shape).type(torch.float32)
23
- x1, x2 = torch.chunk(x.float(), 2, -1)
24
- x_new2 = torch.cat((-x2, x1), dim=-1)
25
- x_shape = x.shape
26
- h = x.float()
27
- grad = dy_tensor.float()
28
- condition_1 = (((r1_shape[0] == 1 and x_shape[0] != 1) or (r1_shape[0] == 1 and x_shape[0] == 1)) and
29
- ((r1_shape[2] == 1 and x_shape[2] != 1) or (r1_shape[2] == 1 and x_shape[2] == 1)) and
30
- (r1_shape[1] == x_shape[1]) and (r1_shape[3] == x_shape[3]))
31
- condition_2 = (((r1_shape[0] == 1 and x_shape[0] != 1) or (r1_shape[0] == 1 and x_shape[0] == 1)) and
32
- ((r1_shape[1] == 1 and x_shape[1] != 1) or (r1_shape[1] == 1 and x_shape[1] == 1)) and
33
- (r1_shape[2] == x_shape[2]) and (r1_shape[3] == x_shape[3]))
34
- condition_3 = (((r1_shape[2] == 1 and x_shape[2] != 1) or (r1_shape[2] == 1 and x_shape[2] == 1)) and
35
- ((r1_shape[1] == 1 and x_shape[1] != 1) or (r1_shape[1] == 1 and x_shape[1] == 1)) and
36
- (r1_shape[0] == x_shape[0]) and (r1_shape[3] == x_shape[3]))
37
- if condition_1:
38
- for i in range(x_shape[0]):
39
- for j in range(x_shape[2]):
40
- r2_grad[0, :, 0, :] += (x_new2[i, :, j, :] * grad[i, :, j, :])
41
- r1_grad[0, :, 0, :] += (h[i, :, j, :] * grad[i, :, j, :])
42
- elif condition_2:
43
- for i in range(x_shape[0]):
44
- for j in range(x_shape[1]):
45
- r2_grad[0, 0, :, :] += (x_new2[i, j, :, :] * grad[i, j, :, :])
46
- r1_grad[0, 0, :, :] += (h[i, j, :, :] * grad[i, j, :, :])
47
- elif condition_3:
48
- for i in range(x_shape[1]):
49
- for j in range(x_shape[2]):
50
- r2_grad[:, 0, 0, :] += (x_new2[:, i, j, :] * grad[:, i, j, :])
51
- r1_grad[:, 0, 0, :] += (h[:, i, j, :] * grad[:, i, j, :])
52
- return x.grad.cpu(), r1_grad.cpu(), r2_grad.cpu()
1
+ # Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import torch
17
+
18
+
19
+ def npu_rotary_mul(x, r1, r2):
20
+ x1, x2 = torch.chunk(x, 2, -1)
21
+ x_new = torch.cat((-x2, x1), dim=-1)
22
+ output = r1 * x + r2 * x_new
23
+ return output
24
+
25
+
26
+ def npu_rotary_mul_backward(dy_tensor, x, r1, r2):
27
+ x.requires_grad = True
28
+ r1.requires_grad = True
29
+ r2.requires_grad = True
30
+ # golden
31
+ x1, x2 = torch.chunk(x, 2, -1)
32
+ x_new = torch.cat((-x2, x1), dim=-1)
33
+ golden_tensor = r1 * x + r2 * x_new
34
+ golden_tensor.backward(dy_tensor)
35
+ r1_shape = r1.shape
36
+ r1_grad = torch.zeros(r1_shape).type(torch.float32)
37
+ r2_grad = torch.zeros(r1_shape).type(torch.float32)
38
+ x1, x2 = torch.chunk(x.float(), 2, -1)
39
+ x_new2 = torch.cat((-x2, x1), dim=-1)
40
+ x_shape = x.shape
41
+ h = x.float()
42
+ grad = dy_tensor.float()
43
+ condition_1 = (r1_shape[0] == 1
44
+ and r1_shape[1] == x_shape[1]
45
+ and r1_shape[2] == 1
46
+ and r1_shape[3] == x_shape[3])
47
+ condition_2 = (r1_shape[0] == 1
48
+ and r1_shape[1] == 1
49
+ and r1_shape[2] == x_shape[2]
50
+ and r1_shape[3] == x_shape[3])
51
+ condition_3 = (r1_shape[0] == x_shape[0]
52
+ and r1_shape[1] == 1
53
+ and r1_shape[2] == 1
54
+ and r1_shape[3] == x_shape[3])
55
+
56
+ if condition_1:
57
+ for i in range(x_shape[0]):
58
+ for j in range(x_shape[2]):
59
+ r2_grad[0, :, 0, :] += (x_new2[i, :, j, :] * grad[i, :, j, :])
60
+ r1_grad[0, :, 0, :] += (h[i, :, j, :] * grad[i, :, j, :])
61
+ elif condition_2:
62
+ for i in range(x_shape[0]):
63
+ for j in range(x_shape[1]):
64
+ r2_grad[0, 0, :, :] += (x_new2[i, j, :, :] * grad[i, j, :, :])
65
+ r1_grad[0, 0, :, :] += (h[i, j, :, :] * grad[i, j, :, :])
66
+ elif condition_3:
67
+ for i in range(x_shape[1]):
68
+ for j in range(x_shape[2]):
69
+ r2_grad[:, 0, 0, :] += (x_new2[:, i, j, :] * grad[:, i, j, :])
70
+ r1_grad[:, 0, 0, :] += (h[:, i, j, :] * grad[:, i, j, :])
71
+ return x.grad.cpu(), r1_grad.cpu(), r2_grad.cpu()
@@ -1,26 +1,41 @@
1
- import torch
2
-
3
-
4
- def npu_scaled_masked_softmax(x, mask, scale, fixed_triu_mask):
5
- if fixed_triu_mask:
6
- mask = (torch.triu(torch.ones(mask.shape), k=1)).bool().to(mask.device)
7
- dtype = x.dtype
8
- x = (x * scale).masked_fill(mask, value=-10000)
9
- x = x - torch.max(x, dim=-1, keepdims=True)[0]
10
- x = torch.exp(x.float())
11
- y = torch.div(x, torch.sum(x, dim=-1, keepdims=True))
12
- return y.to(dtype).cpu()
13
-
14
-
15
- def npu_scaled_masked_softmax_backward(y_grad, y, mask, scale, fixed_triu_mask):
16
- if fixed_triu_mask:
17
- mask = (torch.triu(torch.ones(mask.shape), k=1)).bool().to(mask.device)
18
- dtype = y_grad.dtype
19
- y_grad = y_grad.float()
20
- y = y.float()
21
- x_grad = y_grad * y
22
- x_grad = y_grad - torch.sum(x_grad, dim=-1, keepdims=True)
23
- x_grad = x_grad * y
24
- x_grad = x_grad * scale
25
- x_grad = x_grad.masked_fill(mask, value=0)
26
- return x_grad.to(dtype).cpu()
1
+ # Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import torch
17
+
18
+
19
+ def npu_scaled_masked_softmax(x, mask, scale, fixed_triu_mask):
20
+ if fixed_triu_mask:
21
+ mask = (torch.triu(torch.ones(mask.shape), k=1)).bool().to(mask.device)
22
+ dtype = x.dtype
23
+ x = (x * scale).masked_fill(mask, value=-10000)
24
+ x = x - torch.max(x, dim=-1, keepdims=True)[0]
25
+ x = torch.exp(x.float())
26
+ y = torch.div(x, torch.sum(x, dim=-1, keepdims=True))
27
+ return y.to(dtype)
28
+
29
+
30
+ def npu_scaled_masked_softmax_backward(y_grad, y, mask, scale, fixed_triu_mask):
31
+ if fixed_triu_mask:
32
+ mask = (torch.triu(torch.ones(mask.shape), k=1)).bool().to(mask.device)
33
+ dtype = y_grad.dtype
34
+ y_grad = y_grad.float()
35
+ y = y.float()
36
+ x_grad = y_grad * y
37
+ x_grad = y_grad - torch.sum(x_grad, dim=-1, keepdims=True)
38
+ x_grad = x_grad * y
39
+ x_grad = x_grad * scale
40
+ x_grad = x_grad.masked_fill(mask, value=0)
41
+ return x_grad.to(dtype).cpu()
@@ -1,55 +1,70 @@
1
- import torch
2
-
3
-
4
- def npu_swiglu(x, dim=-1):
5
- tensor_dtype = x.dtype
6
-
7
- inTensors = torch.chunk(x, 2, dim=dim)
8
- if tensor_dtype == torch.float32:
9
- tensor_scalar = torch.sigmoid(torch.mul(inTensors[0], 1.0))
10
- output_data = torch.mul(torch.mul(tensor_scalar, inTensors[0]), inTensors[1])
11
- else:
12
- tensor_self_float = inTensors[0].type(torch.float)
13
- tensor_other_float = inTensors[1].type(torch.float)
14
- tensor_out_float = torch.nn.functional.silu(tensor_self_float).type(tensor_dtype).type(
15
- torch.float32) * tensor_other_float
16
- output_data = tensor_out_float.type(tensor_dtype)
17
- return output_data.cpu()
18
-
19
-
20
- def npu_swiglu_backward(grad, x, dim=-1):
21
- tensor_dtype = grad.dtype
22
- in_tensors = torch.chunk(x, 2, dim=dim)
23
- tensor_grad_out = grad
24
-
25
- if tensor_dtype == torch.float16:
26
- tensor_out1 = torch.mul(
27
- torch.mul(in_tensors[1].type(torch.float32), swish_grad(1, in_tensors[0].type(torch.float32))),
28
- tensor_grad_out.type(torch.float32)).type(torch.float16)
29
- tensor_out2 = torch.mul(tensor_grad_out.type(torch.float32),
30
- swish(1, in_tensors[0].type(torch.float32))).type(torch.float16)
31
- output = torch.cat((tensor_out1, tensor_out2), dim)
32
- elif tensor_dtype == torch.bfloat16:
33
- tensor_self_float = in_tensors[0].type(torch.float)
34
- tensor_other_float = in_tensors[1].type(torch.float)
35
- tensor_gradout_float = tensor_grad_out.type(torch.float)
36
-
37
- tensor_out1 = torch.mul(tensor_gradout_float, swish_grad(1.0, tensor_self_float)).type(torch.bfloat16).type(
38
- torch.float32) * tensor_other_float
39
- tensor_out2 = swish(1.0, tensor_self_float).type(torch.bfloat16).type(torch.float32) * tensor_gradout_float
40
- tensor_out_float = torch.cat((tensor_out1, tensor_out2), dim=dim)
41
- output = tensor_out_float.type(torch.bfloat16)
42
- else:
43
- tensor_out1 = torch.mul(torch.mul(in_tensors[1], swish_grad(1.0, in_tensors[0])), tensor_grad_out)
44
- tensor_out2 = torch.mul(tensor_grad_out, swish(1.0, in_tensors[0]))
45
- output = torch.cat((tensor_out1, tensor_out2), dim)
46
- return output.cpu()
47
-
48
-
49
- def swish_grad(beta, x):
50
- return torch.sigmoid(beta * x) + x * (1 - torch.sigmoid(beta * x)) * torch.sigmoid(beta * x) * beta
51
-
52
-
53
- def swish(beta, x):
54
- return x * torch.sigmoid(beta * x)
55
-
1
+ # Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import torch
17
+
18
+
19
+ def npu_swiglu(x, dim=-1):
20
+ tensor_dtype = x.dtype
21
+
22
+ in_tensors = torch.chunk(x, 2, dim=dim)
23
+ if tensor_dtype == torch.float32:
24
+ tensor_scalar = torch.sigmoid(torch.mul(in_tensors[0], 1.0))
25
+ output_data = torch.mul(torch.mul(tensor_scalar, in_tensors[0]), in_tensors[1])
26
+ else:
27
+ tensor_self_float = in_tensors[0].type(torch.float)
28
+ tensor_other_float = in_tensors[1].type(torch.float)
29
+ tensor_out_float = torch.nn.functional.silu(tensor_self_float).type(tensor_dtype).type(
30
+ torch.float32) * tensor_other_float
31
+ output_data = tensor_out_float.type(tensor_dtype)
32
+ return output_data
33
+
34
+
35
+ def npu_swiglu_backward(grad, x, dim=-1):
36
+ tensor_dtype = grad.dtype
37
+ in_tensors = torch.chunk(x, 2, dim=dim)
38
+ tensor_grad_out = grad
39
+
40
+ if tensor_dtype == torch.float16:
41
+ tensor_out1 = torch.mul(
42
+ torch.mul(in_tensors[1].type(torch.float32), swish_grad(1, in_tensors[0].type(torch.float32))),
43
+ tensor_grad_out.type(torch.float32)).type(torch.float16)
44
+ tensor_out2 = torch.mul(tensor_grad_out.type(torch.float32),
45
+ swish(1, in_tensors[0].type(torch.float32))).type(torch.float16)
46
+ output = torch.cat((tensor_out1, tensor_out2), dim)
47
+ elif tensor_dtype == torch.bfloat16:
48
+ tensor_self_float = in_tensors[0].type(torch.float)
49
+ tensor_other_float = in_tensors[1].type(torch.float)
50
+ tensor_gradout_float = tensor_grad_out.type(torch.float)
51
+
52
+ tensor_out1 = torch.mul(tensor_gradout_float, swish_grad(1.0, tensor_self_float)).type(torch.bfloat16).type(
53
+ torch.float32) * tensor_other_float
54
+ tensor_out2 = swish(1.0, tensor_self_float).type(torch.bfloat16).type(torch.float32) * tensor_gradout_float
55
+ tensor_out_float = torch.cat((tensor_out1, tensor_out2), dim=dim)
56
+ output = tensor_out_float.type(torch.bfloat16)
57
+ else:
58
+ tensor_out1 = torch.mul(torch.mul(in_tensors[1], swish_grad(1.0, in_tensors[0])), tensor_grad_out)
59
+ tensor_out2 = torch.mul(tensor_grad_out, swish(1.0, in_tensors[0]))
60
+ output = torch.cat((tensor_out1, tensor_out2), dim)
61
+ return output.cpu()
62
+
63
+
64
+ def swish_grad(beta, x):
65
+ return torch.sigmoid(beta * x) + x * (1 - torch.sigmoid(beta * x)) * torch.sigmoid(beta * x) * beta
66
+
67
+
68
+ def swish(beta, x):
69
+ return x * torch.sigmoid(beta * x)
70
+
@@ -1,2 +1,17 @@
1
- from .parse_json import parse_json_info_forward_backward
2
- from .utils import seed_all
1
+ # Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from .parse_json import parse_json_info_forward_backward
17
+ from .utils import seed_all
@@ -1,14 +1,14 @@
1
- from ptdbg_ascend import compare
2
-
3
- pkl_path = "%s"
4
- dump_data_dir = "%s"
5
-
6
- dump_path_param = {
7
- "npu_pkl_path": ,
8
- "bench_pkl_path": ,
9
- "npu_dump_data_dir": ,
10
- "bench_dump_data_dir": ,
11
- "is_print_compare_log": True
12
- }
13
-
14
- compare(dump_path_param, output_path="", stack_mode=%s)
1
+ from ptdbg_ascend import compare
2
+
3
+ pkl_path = "%s"
4
+ dump_data_dir = "%s"
5
+
6
+ dump_path_param = {
7
+ "npu_pkl_path": ,
8
+ "bench_pkl_path": ,
9
+ "npu_dump_data_dir": ,
10
+ "bench_dump_data_dir": ,
11
+ "is_print_compare_log": True
12
+ }
13
+
14
+ compare(dump_path_param, output_path="", stack_mode=%s)
@@ -1,32 +1,33 @@
1
- import os
2
- import time
3
- import sys
4
- from msprobe.pytorch.common.utils import get_rank_if_initialized
5
- from msprobe.core.common.log import BaseLogger
6
- from msprobe.core.common.exceptions import DistributedNotInitializedError
7
-
8
-
9
- class PyTorchLogger(BaseLogger):
10
- def __init__(self):
11
- super().__init__()
12
-
13
- def get_rank(self):
14
- try:
15
- current_rank = get_rank_if_initialized()
16
- except DistributedNotInitializedError:
17
- current_rank = None
18
- return current_rank
19
-
20
- def _print_log(self, level, msg, end='\n'):
21
- current_rank = self.get_rank()
22
- current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
23
- pid = os.getpid()
24
- if current_rank is not None:
25
- full_msg = f"{current_time} ({pid}) [rank {current_rank}] [{level}] {msg}"
26
- else:
27
- full_msg = f"{current_time} ({pid}) [{level}] {msg}"
28
- print(full_msg, end=end)
29
- sys.stdout.flush()
30
-
31
-
32
- logger = PyTorchLogger()
1
+ # Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from msprobe.core.common.exceptions import DistributedNotInitializedError
17
+ from msprobe.core.common.log import BaseLogger
18
+ from msprobe.pytorch.common.utils import get_rank_if_initialized
19
+
20
+
21
class PyTorchLogger(BaseLogger):
    """Logger for the PyTorch backend that annotates messages with the
    distributed rank when torch.distributed has been initialized."""

    def __init__(self):
        super().__init__()

    def get_rank(self):
        """Return the current distributed rank, or ``None`` when the
        distributed environment is not initialized."""
        try:
            return get_rank_if_initialized()
        except DistributedNotInitializedError:
            return None


# Module-level singleton used throughout the PyTorch backend.
logger = PyTorchLogger()
@@ -1,39 +1,54 @@
1
- import json
2
-
3
- from msprobe.core.common.exceptions import ParseJsonException
4
- from msprobe.core.common.file_check import FileOpen
5
-
6
-
7
- def parse_json_info_forward_backward(json_path):
8
- def parse_data_name_with_pattern(data_name, pattern):
9
- name_struct = data_name.split('.')
10
- if not name_struct[-1] == pattern:
11
- raise ParseJsonException(ParseJsonException.UnexpectedNameStruct,
12
- f"{data_name} in file {json_path}")
13
- api_name = '.'.join(name_struct[:-1])
14
- return api_name
15
-
16
- with FileOpen(json_path, 'r') as f:
17
- dump_json = json.load(f)
18
-
19
- real_data_path = dump_json.get("dump_data_dir")
20
- dump_data = dump_json.get("data")
21
- if not dump_data:
22
- raise ParseJsonException(ParseJsonException.InvalidDumpJson, "dump数据中没有data字段")
23
-
24
- forward_data = {}
25
- backward_data = {}
26
- for data_name, data_item in dump_data.items():
27
- if "Module" in data_name:
28
- continue
29
- if "forward" in data_name:
30
- api_name = parse_data_name_with_pattern(data_name, "forward")
31
- forward_data.update({api_name: data_item})
32
- elif "backward" in data_name:
33
- api_name = parse_data_name_with_pattern(data_name, "backward")
34
- backward_data.update({api_name: data_item})
35
- else:
36
- raise ParseJsonException(ParseJsonException.UnexpectedNameStruct,
37
- f"{data_name} in file {json_path}.")
38
-
39
- return forward_data, backward_data, real_data_path
1
+ # Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+
18
+ from msprobe.core.common.exceptions import ParseJsonException
19
+ from msprobe.core.common.file_utils import FileOpen
20
+
21
+
22
def parse_json_info_forward_backward(json_path):
    """Load a dump JSON file and split its API records into forward/backward maps.

    Args:
        json_path: path to the dump JSON file.

    Returns:
        Tuple ``(forward_data, backward_data, real_data_path)`` where the two
        dicts map API names (the record name minus its trailing
        ``forward``/``backward`` segment) to their dumped items, and
        ``real_data_path`` is the ``dump_data_dir`` field of the file.

    Raises:
        ParseJsonException: when the ``data`` field is missing/empty or a
            record name matches neither forward nor backward.
    """
    with FileOpen(json_path, 'r') as f:
        dump_json = json.load(f)

    real_data_path = dump_json.get("dump_data_dir")
    dump_data = dump_json.get("data")
    if not dump_data:
        raise ParseJsonException(ParseJsonException.InvalidDumpJson, "dump数据中没有data字段")

    forward_data, backward_data = {}, {}
    for entry_name, entry in dump_data.items():
        # Module-level records are not API calls; skip them.
        if "Module" in entry_name:
            continue
        if "forward" in entry_name:
            forward_data[parse_data_name_with_pattern(entry_name, "forward", json_path)] = entry
        elif "backward" in entry_name:
            backward_data[parse_data_name_with_pattern(entry_name, "backward", json_path)] = entry
        else:
            raise ParseJsonException(ParseJsonException.UnexpectedNameStruct,
                                     f"{entry_name} in file {json_path}.")

    return forward_data, backward_data, real_data_path
47
+
48
+
49
def parse_data_name_with_pattern(data_name, pattern, json_path):
    """Strip the trailing ``pattern`` segment from a dot-separated record name.

    Raises ParseJsonException when the last segment is not ``pattern``;
    ``json_path`` is only used to build the error message.
    """
    *prefix_parts, last_segment = data_name.split('.')
    if last_segment != pattern:
        raise ParseJsonException(ParseJsonException.UnexpectedNameStruct, f"{data_name} in file {json_path}")
    return '.'.join(prefix_parts)