mindstudio-probe 1.0.3__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (278)
  1. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/LICENSE +201 -201
  2. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/METADATA +36 -34
  3. mindstudio_probe-1.1.0.dist-info/RECORD +287 -0
  4. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/WHEEL +1 -1
  5. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/entry_points.txt +1 -0
  6. msprobe/README.md +131 -237
  7. msprobe/__init__.py +16 -1
  8. msprobe/{config/config.json → config.json} +47 -49
  9. msprobe/core/advisor/advisor.py +124 -124
  10. msprobe/core/advisor/advisor_const.py +58 -59
  11. msprobe/core/advisor/advisor_result.py +58 -58
  12. msprobe/core/common/const.py +402 -318
  13. msprobe/core/common/exceptions.py +99 -99
  14. msprobe/core/common/{file_check.py → file_utils.py} +523 -283
  15. msprobe/core/common/inplace_op_checker.py +38 -0
  16. msprobe/core/common/inplace_ops.yaml +251 -0
  17. msprobe/core/common/log.py +86 -69
  18. msprobe/core/common/utils.py +371 -616
  19. msprobe/core/common_config.py +78 -71
  20. msprobe/core/compare/acc_compare.py +472 -298
  21. msprobe/core/compare/check.py +180 -95
  22. msprobe/core/compare/compare_cli.py +69 -49
  23. msprobe/core/compare/highlight.py +259 -222
  24. msprobe/core/compare/multiprocessing_compute.py +174 -149
  25. msprobe/core/compare/npy_compare.py +310 -295
  26. msprobe/core/compare/utils.py +464 -429
  27. msprobe/core/data_dump/data_collector.py +153 -144
  28. msprobe/core/data_dump/data_processor/base.py +337 -293
  29. msprobe/core/data_dump/data_processor/factory.py +76 -59
  30. msprobe/core/data_dump/data_processor/mindspore_processor.py +192 -198
  31. msprobe/core/data_dump/data_processor/pytorch_processor.py +383 -389
  32. msprobe/core/data_dump/json_writer.py +117 -116
  33. msprobe/core/data_dump/scope.py +194 -178
  34. msprobe/core/grad_probe/constant.py +74 -70
  35. msprobe/core/grad_probe/grad_compare.py +170 -175
  36. msprobe/core/grad_probe/utils.py +77 -52
  37. msprobe/docs/01.installation.md +99 -0
  38. msprobe/docs/02.config_introduction.md +137 -0
  39. msprobe/docs/03.config_examples.md +237 -0
  40. msprobe/docs/04.acl_config_examples.md +78 -0
  41. msprobe/docs/05.data_dump_PyTorch.md +326 -0
  42. msprobe/docs/06.data_dump_MindSpore.md +285 -0
  43. msprobe/docs/07.accuracy_checker_PyTorch.md +297 -0
  44. msprobe/docs/08.accuracy_checker_online_PyTorch.md +238 -0
  45. msprobe/docs/09.accuracy_checker_MindSpore.md +68 -0
  46. msprobe/docs/10.accuracy_compare_PyTorch.md +327 -0
  47. msprobe/docs/11.accuracy_compare_MindSpore.md +333 -0
  48. msprobe/docs/12.overflow_check_PyTorch.md +79 -0
  49. msprobe/docs/13.overflow_check_MindSpore.md +31 -0
  50. msprobe/{pytorch/doc/parse_tool.md → docs/14.data_parse_PyTorch.md} +283 -286
  51. msprobe/docs/15.free_benchmarking_PyTorch.md +170 -0
  52. msprobe/docs/16.free_benchmarking_MindSpore.md +140 -0
  53. msprobe/{doc/grad_probe/grad_probe.md → docs/17.grad_probe.md} +205 -207
  54. msprobe/{pytorch/doc//321/205/320/254/320/270/321/207/342/225/221/342/224/220/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/206/320/277/320/244/321/205/320/277/342/225/243.md → docs/18.online_dispatch.md} +89 -90
  55. msprobe/docs/FAQ.md +189 -0
  56. msprobe/docs/S02.report_free_benchmarking_validation_performance_baseline.md +146 -0
  57. msprobe/docs/img/free_benchmark_framework.png +0 -0
  58. msprobe/docs/img/ms_dump.png +0 -0
  59. msprobe/docs/img/ms_layer.png +0 -0
  60. msprobe/docs/img/pt_dump.png +0 -0
  61. msprobe/mindspore/__init__.py +2 -1
  62. msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +278 -245
  63. msprobe/mindspore/api_accuracy_checker/api_info.py +76 -69
  64. msprobe/mindspore/api_accuracy_checker/api_runner.py +155 -151
  65. msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +196 -196
  66. msprobe/mindspore/api_accuracy_checker/cmd_parser.py +6 -0
  67. msprobe/mindspore/api_accuracy_checker/compute_element.py +238 -223
  68. msprobe/mindspore/api_accuracy_checker/main.py +8 -15
  69. msprobe/mindspore/api_accuracy_checker/type_mapping.py +113 -113
  70. msprobe/mindspore/api_accuracy_checker/utils.py +79 -62
  71. msprobe/mindspore/cell_processor.py +58 -34
  72. msprobe/mindspore/common/const.py +108 -87
  73. msprobe/mindspore/common/log.py +37 -37
  74. msprobe/mindspore/common/utils.py +97 -57
  75. msprobe/mindspore/compare/distributed_compare.py +62 -75
  76. msprobe/mindspore/compare/layer_mapping.py +146 -0
  77. msprobe/mindspore/compare/modify_mapping.py +107 -0
  78. msprobe/mindspore/compare/ms_compare.py +357 -117
  79. msprobe/mindspore/compare/ms_graph_compare.py +364 -317
  80. msprobe/mindspore/compare/ms_to_pt_api.yaml +399 -399
  81. msprobe/mindspore/debugger/debugger_config.py +69 -74
  82. msprobe/mindspore/debugger/precision_debugger.py +150 -107
  83. msprobe/mindspore/dump/dump_tool_factory.py +50 -35
  84. msprobe/mindspore/dump/hook_cell/api_registry.py +128 -104
  85. msprobe/mindspore/dump/hook_cell/hook_cell.py +55 -53
  86. msprobe/mindspore/dump/hook_cell/primitive_hooks.py +206 -0
  87. msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +994 -925
  88. msprobe/mindspore/dump/hook_cell/wrap_api.py +121 -0
  89. msprobe/mindspore/dump/jit_dump.py +96 -56
  90. msprobe/mindspore/dump/kernel_graph_dump.py +75 -60
  91. msprobe/mindspore/dump/kernel_kbyk_dump.py +79 -65
  92. msprobe/mindspore/free_benchmark/api_pynative_self_check.py +131 -116
  93. msprobe/mindspore/free_benchmark/common/config.py +27 -12
  94. msprobe/mindspore/free_benchmark/common/handler_params.py +32 -17
  95. msprobe/mindspore/free_benchmark/common/utils.py +85 -71
  96. msprobe/mindspore/free_benchmark/data/support_wrap_ops.yaml +842 -842
  97. msprobe/mindspore/free_benchmark/decorator/dec_forward.py +57 -42
  98. msprobe/mindspore/free_benchmark/decorator/decorator_factory.py +122 -107
  99. msprobe/mindspore/free_benchmark/handler/base_handler.py +105 -90
  100. msprobe/mindspore/free_benchmark/handler/check_handler.py +56 -41
  101. msprobe/mindspore/free_benchmark/handler/fix_handler.py +51 -36
  102. msprobe/mindspore/free_benchmark/handler/handler_factory.py +36 -21
  103. msprobe/mindspore/free_benchmark/perturbation/add_noise.py +82 -67
  104. msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py +36 -21
  105. msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +78 -63
  106. msprobe/mindspore/free_benchmark/perturbation/exchange_value.py +77 -0
  107. msprobe/mindspore/free_benchmark/perturbation/improve_precision.py +49 -34
  108. msprobe/mindspore/free_benchmark/perturbation/no_change.py +27 -12
  109. msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +44 -27
  110. msprobe/mindspore/free_benchmark/self_check_tool_factory.py +48 -33
  111. msprobe/mindspore/grad_probe/global_context.py +100 -91
  112. msprobe/mindspore/grad_probe/grad_analyzer.py +231 -231
  113. msprobe/mindspore/grad_probe/grad_monitor.py +27 -27
  114. msprobe/mindspore/grad_probe/grad_stat_csv.py +131 -131
  115. msprobe/mindspore/grad_probe/hook.py +94 -92
  116. msprobe/mindspore/grad_probe/utils.py +29 -28
  117. msprobe/mindspore/ms_config.py +128 -126
  118. msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +60 -45
  119. msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +49 -34
  120. msprobe/mindspore/runtime.py +4 -4
  121. msprobe/mindspore/service.py +297 -354
  122. msprobe/mindspore/task_handler_factory.py +24 -24
  123. msprobe/msprobe.py +105 -107
  124. msprobe/pytorch/__init__.py +23 -4
  125. msprobe/pytorch/api_accuracy_checker/common/config.py +70 -55
  126. msprobe/pytorch/api_accuracy_checker/common/utils.py +246 -165
  127. msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +230 -213
  128. msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +632 -581
  129. msprobe/pytorch/api_accuracy_checker/compare/api_precision_standard.yaml +132 -132
  130. msprobe/pytorch/api_accuracy_checker/compare/api_precision_threshold.yaml +390 -390
  131. msprobe/pytorch/api_accuracy_checker/compare/compare.py +416 -381
  132. msprobe/pytorch/api_accuracy_checker/compare/compare_column.py +90 -73
  133. msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +265 -244
  134. msprobe/pytorch/api_accuracy_checker/config.yaml +10 -10
  135. msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +370 -332
  136. msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +221 -199
  137. msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +150 -134
  138. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +518 -581
  139. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +213 -74
  140. msprobe/pytorch/api_accuracy_checker/run_ut/torch_ut_setting.json +7 -4
  141. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +218 -202
  142. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/client.py +370 -324
  143. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/device_dispatch.py +227 -204
  144. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/dump_dispatch.py +110 -0
  145. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/server.py +244 -218
  146. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/torch_ops_config.yaml +63 -0
  147. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/utils.py +44 -0
  148. msprobe/pytorch/bench_functions/__init__.py +30 -15
  149. msprobe/pytorch/bench_functions/apply_adam_w.py +43 -28
  150. msprobe/pytorch/bench_functions/confusion_transpose.py +34 -19
  151. msprobe/pytorch/bench_functions/fast_gelu.py +70 -55
  152. msprobe/pytorch/bench_functions/layer_norm_eval.py +21 -6
  153. msprobe/pytorch/bench_functions/linear.py +27 -12
  154. msprobe/pytorch/bench_functions/matmul_backward.py +63 -48
  155. msprobe/pytorch/bench_functions/npu_fusion_attention.py +538 -421
  156. msprobe/pytorch/bench_functions/rms_norm.py +30 -15
  157. msprobe/pytorch/bench_functions/rotary_mul.py +71 -52
  158. msprobe/pytorch/bench_functions/scaled_mask_softmax.py +41 -26
  159. msprobe/pytorch/bench_functions/swiglu.py +70 -55
  160. msprobe/pytorch/common/__init__.py +17 -2
  161. msprobe/pytorch/common/compare_script.template +14 -14
  162. msprobe/pytorch/common/log.py +33 -32
  163. msprobe/pytorch/common/parse_json.py +54 -39
  164. msprobe/pytorch/common/utils.py +310 -300
  165. msprobe/pytorch/compare/distributed_compare.py +66 -66
  166. msprobe/pytorch/compare/mapping.yaml +607 -607
  167. msprobe/pytorch/compare/match.py +49 -33
  168. msprobe/pytorch/compare/pt_compare.py +82 -40
  169. msprobe/pytorch/debugger/debugger_config.py +108 -95
  170. msprobe/pytorch/debugger/precision_debugger.py +173 -125
  171. msprobe/pytorch/free_benchmark/__init__.py +23 -8
  172. msprobe/pytorch/free_benchmark/common/constant.py +70 -70
  173. msprobe/pytorch/free_benchmark/common/counter.py +71 -71
  174. msprobe/pytorch/free_benchmark/common/enums.py +65 -37
  175. msprobe/pytorch/free_benchmark/common/params.py +144 -129
  176. msprobe/pytorch/free_benchmark/common/utils.py +118 -102
  177. msprobe/pytorch/free_benchmark/compare/grad_saver.py +200 -179
  178. msprobe/pytorch/free_benchmark/compare/single_benchmark.py +119 -104
  179. msprobe/pytorch/free_benchmark/main.py +120 -105
  180. msprobe/pytorch/free_benchmark/perturbed_layers/base_layer.py +28 -13
  181. msprobe/pytorch/free_benchmark/perturbed_layers/layer_factory.py +56 -41
  182. msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +105 -90
  183. msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +119 -104
  184. msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +87 -63
  185. msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +83 -68
  186. msprobe/pytorch/free_benchmark/perturbed_layers/npu/no_change.py +43 -28
  187. msprobe/pytorch/free_benchmark/perturbed_layers/npu/npu_base_layser.py +60 -45
  188. msprobe/pytorch/free_benchmark/perturbed_layers/run_cpu.py +34 -19
  189. msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +256 -217
  190. msprobe/pytorch/free_benchmark/result_handlers/check_handler.py +54 -39
  191. msprobe/pytorch/free_benchmark/result_handlers/fix_handler.py +38 -23
  192. msprobe/pytorch/free_benchmark/result_handlers/handler_factory.py +45 -30
  193. msprobe/pytorch/free_benchmark/result_handlers/preheat_handler.py +185 -170
  194. msprobe/pytorch/function_factory.py +91 -75
  195. msprobe/pytorch/functional/module_dump.py +84 -0
  196. msprobe/pytorch/grad_probe/grad_monitor.py +91 -90
  197. msprobe/pytorch/grad_probe/grad_stat_csv.py +128 -128
  198. msprobe/pytorch/hook_module/__init__.py +16 -1
  199. msprobe/pytorch/hook_module/api_registry.py +166 -161
  200. msprobe/pytorch/hook_module/hook_module.py +118 -120
  201. msprobe/pytorch/hook_module/support_wrap_ops.yaml +1879 -1877
  202. msprobe/pytorch/hook_module/utils.py +28 -29
  203. msprobe/pytorch/hook_module/wrap_aten.py +111 -110
  204. msprobe/pytorch/hook_module/wrap_distributed.py +77 -78
  205. msprobe/pytorch/hook_module/wrap_functional.py +104 -105
  206. msprobe/pytorch/hook_module/wrap_npu_custom.py +85 -84
  207. msprobe/pytorch/hook_module/wrap_tensor.py +69 -71
  208. msprobe/pytorch/hook_module/wrap_torch.py +84 -86
  209. msprobe/pytorch/hook_module/wrap_vf.py +60 -62
  210. msprobe/pytorch/module_processer.py +153 -138
  211. msprobe/pytorch/online_dispatch/__init__.py +20 -20
  212. msprobe/pytorch/online_dispatch/compare.py +235 -236
  213. msprobe/pytorch/online_dispatch/dispatch.py +271 -271
  214. msprobe/pytorch/online_dispatch/dump_compare.py +155 -156
  215. msprobe/pytorch/online_dispatch/single_compare.py +391 -391
  216. msprobe/pytorch/online_dispatch/torch_ops_config.yaml +57 -49
  217. msprobe/pytorch/online_dispatch/utils.py +127 -146
  218. msprobe/pytorch/parse.py +19 -4
  219. msprobe/pytorch/parse_tool/cli.py +31 -32
  220. msprobe/pytorch/parse_tool/lib/compare.py +259 -271
  221. msprobe/pytorch/parse_tool/lib/config.py +52 -52
  222. msprobe/pytorch/parse_tool/lib/file_desc.py +31 -31
  223. msprobe/pytorch/parse_tool/lib/interactive_cli.py +102 -102
  224. msprobe/pytorch/parse_tool/lib/parse_exception.py +54 -54
  225. msprobe/pytorch/parse_tool/lib/parse_tool.py +161 -158
  226. msprobe/pytorch/parse_tool/lib/utils.py +320 -321
  227. msprobe/pytorch/parse_tool/lib/visualization.py +85 -91
  228. msprobe/pytorch/pt_config.py +317 -187
  229. msprobe/pytorch/service.py +311 -252
  230. mindstudio_probe-1.0.3.dist-info/RECORD +0 -272
  231. msprobe/config/README.md +0 -539
  232. msprobe/mindspore/doc/compare.md +0 -58
  233. msprobe/mindspore/doc/dump.md +0 -217
  234. msprobe/mindspore/dump/hook_cell/wrap_functional.py +0 -91
  235. msprobe/mindspore/dump/hook_cell/wrap_tensor.py +0 -63
  236. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/ssl_config.py +0 -10
  237. msprobe/pytorch/doc/FAQ.md +0 -193
  238. msprobe/pytorch/doc/api_accuracy_checker.md +0 -313
  239. msprobe/pytorch/doc/api_accuracy_checker_online.md +0 -187
  240. msprobe/pytorch/doc/dump.md +0 -260
  241. msprobe/pytorch/doc/msprobe/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/206/320/245/342/226/221/321/206/320/235/320/276dump/321/206/320/260/320/227/321/205/320/227/320/226/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -182
  242. msprobe/pytorch/doc/ptdbg_ascend_compare.md +0 -240
  243. msprobe/pytorch/doc/ptdbg_ascend_overview.md +0 -68
  244. msprobe/pytorch/doc/ptdbg_ascend_quickstart.md +0 -381
  245. msprobe/pytorch/doc/run_overflow_check.md +0 -25
  246. msprobe/pytorch/doc//321/206/320/247/320/260/321/206/320/260/320/227/321/206/320/255/320/226/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/205/320/254/342/225/221/321/206/320/251/320/277/321/211/320/272/320/234/321/210/320/277/320/221/321/205/320/242/320/234/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -151
  247. msprobe/pytorch/functional/data_processor.py +0 -0
  248. msprobe/pytorch/functional/dump_module.py +0 -39
  249. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/top_level.txt +0 -0
  250. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_1.png +0 -0
  251. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_2.png +0 -0
  252. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_3.png +0 -0
  253. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_4.png +0 -0
  254. /msprobe/{pytorch/doc → docs}/img/GPT-3_1.png +0 -0
  255. /msprobe/{pytorch/doc → docs}/img/GPT-3_2.png +0 -0
  256. /msprobe/{pytorch/doc → docs}/img/GPT-3_3.png +0 -0
  257. /msprobe/{pytorch/doc → docs}/img/GPT-3_4.png +0 -0
  258. /msprobe/{pytorch/doc → docs}/img/GPT-3_5.png +0 -0
  259. /msprobe/{pytorch/doc → docs}/img/GPT-3_6.png +0 -0
  260. /msprobe/{pytorch/doc → docs}/img/GPT-3_7.png +0 -0
  261. /msprobe/{pytorch/doc → docs}/img/GPT-3_8.png +0 -0
  262. /msprobe/{pytorch/doc → docs}/img/YOLOV5S_1.png +0 -0
  263. /msprobe/{pytorch/doc → docs}/img/YOLOV5S_2.png +0 -0
  264. /msprobe/{pytorch/doc → docs}/img/accuracy_checking_details.png +0 -0
  265. /msprobe/{pytorch/doc → docs}/img/accuracy_checking_result.png +0 -0
  266. /msprobe/{pytorch/doc → docs}/img/api_precision_compare_details.png +0 -0
  267. /msprobe/{pytorch/doc → docs}/img/api_precision_compare_result.png +0 -0
  268. /msprobe/{pytorch/doc → docs}/img/auto_analyze_log.png +0 -0
  269. /msprobe/{pytorch/doc → docs}/img/compare_result_pkl.png +0 -0
  270. /msprobe/{pytorch/doc → docs}/img/compare_result_pkl_md5.png.png +0 -0
  271. /msprobe/{pytorch/doc → docs}/img/cpu_info.png +0 -0
  272. /msprobe/{config → docs}/img/free_benchmark.png +0 -0
  273. /msprobe/{doc/grad_probe/img/image-1.png → docs/img/grad_probe_image-1.png} +0 -0
  274. /msprobe/{doc/grad_probe/img/image-2.png → docs/img/grad_probe_image-2.png} +0 -0
  275. /msprobe/{doc/grad_probe/img/image-3.png → docs/img/grad_probe_image-3.png} +0 -0
  276. /msprobe/{doc/grad_probe/img/image-4.png → docs/img/grad_probe_image-4.png} +0 -0
  277. /msprobe/{doc/grad_probe/img/image.png → docs/img/grad_probe_image.png} +0 -0
  278. /msprobe/{pytorch/doc → docs}/img/module_compare.png +0 -0
msprobe/pytorch/bench_functions/apply_adam_w.py
@@ -1,28 +1,43 @@
-import torch
-
-
-def npu_apply_adam_w(beta1_power, beta2_power, lr, weight_decay,
-                     beta1, beta2, eps, grad, max_grad_norm, amsgrad, maximize, out):
-    var, m, v = out
-    if amsgrad:
-        max_grad_norm = (torch.rand(var.shape) * 10.0 - 5.0).to(var.dtype)
-    beta1_power_out = beta1_power * beta1
-    beta2_power_out = beta2_power * beta2
-    var_t = var * (1 + (-lr * weight_decay))
-    gt = -grad if maximize else grad
-    m_out = m * beta1 - (beta1 + (-1)) * gt
-    v_out = v * beta2 - (beta2 + (-1)) * gt * gt
-
-    if amsgrad:
-        max_grad_norm_out = torch.max(max_grad_norm, v_out)
-        if (1 - beta2_power_out) == 0:
-            beta2_power_out -= eps
-        denom = torch.sqrt(torch.div(max_grad_norm_out, (1 - beta2_power_out))) + eps
-    else:
-        vraintain = torch.div(v_out, (1 - beta2_power_out))
-        denom = torch.sqrt(vraintain) + eps
-
-    if (1 - beta1_power_out) == 0:
-        beta1_power_out -= eps
-    var_out = var_t + torch.div(-lr * m_out, (1 - beta1_power_out)).div(denom)
-    return var_out.cpu(), m_out.cpu(), v_out.cpu()
+# Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+
+
+def npu_apply_adam_w(beta1_power, beta2_power, lr, weight_decay,
+                     beta1, beta2, eps, grad, max_grad_norm, amsgrad, maximize, out):
+    var, m, v = out
+    if amsgrad:
+        max_grad_norm = (torch.rand(var.shape) * 10.0 - 5.0).to(var.dtype)
+    beta1_power_out = beta1_power * beta1
+    beta2_power_out = beta2_power * beta2
+    var_t = var * (1 + (-lr * weight_decay))
+    gt = -grad if maximize else grad
+    m_out = m * beta1 - (beta1 + (-1)) * gt
+    v_out = v * beta2 - (beta2 + (-1)) * gt * gt
+
+    if amsgrad:
+        max_grad_norm_out = torch.max(max_grad_norm, v_out)
+        if (1 - beta2_power_out) == 0:
+            beta2_power_out -= eps
+        denom = torch.sqrt(torch.div(max_grad_norm_out, (1 - beta2_power_out))) + eps
+    else:
+        vraintain = torch.div(v_out, (1 - beta2_power_out))
+        denom = torch.sqrt(vraintain) + eps
+
+    if (1 - beta1_power_out) == 0:
+        beta1_power_out -= eps
+    var_out = var_t + torch.div(-lr * m_out, (1 - beta1_power_out)).div(denom)
+    return var_out, m_out, v_out
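In 1.1.0 the bench function gains the Apache 2.0 header and returns the result tensors directly instead of moving them to CPU first (the trailing .cpu() calls are gone). Below is a minimal, hypothetical call of the reference above, just to illustrate the expected argument layout; the tensor shapes and hyperparameter values are made up and are not taken from the msprobe test suite. It assumes the npu_apply_adam_w definition from the diff is in scope.

    import torch

    # Illustrative only: exercise the CPU reference npu_apply_adam_w from above.
    var = torch.randn(4, 8)
    m = torch.zeros_like(var)
    v = torch.zeros_like(var)
    grad = torch.randn_like(var)

    var_out, m_out, v_out = npu_apply_adam_w(
        beta1_power=0.9, beta2_power=0.999, lr=1e-3, weight_decay=1e-2,
        beta1=0.9, beta2=0.999, eps=1e-8, grad=grad,
        max_grad_norm=None, amsgrad=False, maximize=False,
        out=(var, m, v),
    )
    print(var_out.shape, m_out.shape, v_out.shape)  # all torch.Size([4, 8])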
msprobe/pytorch/bench_functions/confusion_transpose.py
@@ -1,19 +1,34 @@
-def npu_confusion_transpose(data, perm, shape, transpose_first):
-    if transpose_first:
-        output = data.permute(*perm).contiguous().view(shape)
-    else:
-        output = data.view(shape).permute(*perm)
-    return output.cpu()
-
-
-def npu_confusion_transpose_backward(grad, perm, shape, transpose_first):
-    shape_cal = shape if transpose_first else [shape[perm_dim] for perm_dim in perm]
-    perm_cal = [0] * len(perm)
-    for i, perm_dim in enumerate(perm):
-        perm_cal[perm_dim] = i
-
-    if transpose_first:
-        result = grad.permute(*perm_cal).reshape(shape_cal)
-    else:
-        result = grad.reshape(shape_cal).permute(*perm_cal)
-    return result.cpu()
+# Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+def npu_confusion_transpose(data, perm, shape, transpose_first):
+    if transpose_first:
+        output = data.permute(*perm).contiguous().view(shape)
+    else:
+        output = data.view(shape).permute(*perm)
+    return output
+
+
+def npu_confusion_transpose_backward(grad, perm, shape, transpose_first):
+    shape_cal = shape if transpose_first else [shape[perm_dim] for perm_dim in perm]
+    perm_cal = [0] * len(perm)
+    for i, perm_dim in enumerate(perm):
+        perm_cal[perm_dim] = i
+
+    if transpose_first:
+        result = grad.permute(*perm_cal).reshape(shape_cal)
+    else:
+        result = grad.reshape(shape_cal).permute(*perm_cal)
+    return result.cpu()
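As a quick illustration of the calling convention (the tensor sizes and permutation below are arbitrary, not taken from the package), with the two functions above in scope:

    import torch

    # transpose_first=False: view the data to `shape` first, then permute it.
    x = torch.randn(2, 12)
    y = npu_confusion_transpose(x, perm=(1, 0, 2), shape=(2, 3, 4), transpose_first=False)
    g = npu_confusion_transpose_backward(torch.ones_like(y), perm=(1, 0, 2),
                                         shape=(2, 3, 4), transpose_first=False)
    print(y.shape, g.shape)  # torch.Size([3, 2, 4]) torch.Size([2, 3, 4])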
msprobe/pytorch/bench_functions/fast_gelu.py
@@ -1,55 +1,70 @@
-import torch
-
-
-def fast_gelu(input0):
-    attr = 1.702
-    const_0 = 0 - attr
-    const_1 = 1
-    const_2 = attr / 2
-
-    abs_x = torch.abs(input0)
-    mul_abs_x = abs_x * const_0
-    exp_abs_x = torch.exp(mul_abs_x)
-    div_down = exp_abs_x + const_1
-
-    pn_x = input0 - abs_x
-    mul_pn_x = pn_x * const_2
-    exp_pn_x = torch.exp(mul_pn_x)
-    div_up = input0 * exp_pn_x
-    div_down_rec = torch.reciprocal(div_down)
-    result = div_up * div_down_rec
-
-    return result.cpu()
-
-
-def npu_fast_gelu_backward(grad, input_x):
-    const_2 = 1.702
-    const_3 = 1.0
-    const_1 = 0.0 - const_2
-
-    # e^(-1.702x)
-    abs_x = torch.abs(input_x)
-    mul_abs_x = abs_x * const_1
-    exp_x = torch.exp(mul_abs_x)
-
-    # 1.702xe^(-1.702x)
-    add_2 = input_x * exp_x
-    add_2 = add_2 * const_2
-
-    # e^(1.702(x-|x|))
-    pn_x = input_x - abs_x
-    mul_pn_x = pn_x * const_2
-    exp_pn_x = torch.exp(mul_pn_x)
-
-    # e^(-1.702x) + 1.702xe^(-1.702x) + e^(1.702(x-|x|))
-    div_up = exp_x + add_2
-    div_up = div_up + exp_pn_x
-
-    # (e^(-1.702x)+1)^2
-    div_down_i = exp_x + const_3
-    div_down = div_down_i * div_down_i
-    div_down_rec = torch.reciprocal(div_down)
-    result_temp = div_up * div_down_rec
-    result = grad * result_temp
-
-    return result.cpu()
+# Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+
+
+def npu_fast_gelu(input0):
+    attr = 1.702
+    const_0 = 0 - attr
+    const_1 = 1
+    const_2 = attr / 2
+
+    abs_x = torch.abs(input0)
+    mul_abs_x = abs_x * const_0
+    exp_abs_x = torch.exp(mul_abs_x)
+    div_down = exp_abs_x + const_1
+
+    pn_x = input0 - abs_x
+    mul_pn_x = pn_x * const_2
+    exp_pn_x = torch.exp(mul_pn_x)
+    div_up = input0 * exp_pn_x
+    div_down_rec = torch.reciprocal(div_down)
+    result = div_up * div_down_rec
+
+    return result
+
+
+def npu_fast_gelu_backward(grad, input_x):
+    const_2 = 1.702
+    const_3 = 1.0
+    const_1 = 0.0 - const_2
+
+    # e^(-1.702x)
+    abs_x = torch.abs(input_x)
+    mul_abs_x = abs_x * const_1
+    exp_x = torch.exp(mul_abs_x)
+
+    # 1.702xe^(-1.702x)
+    add_2 = input_x * exp_x
+    add_2 = add_2 * const_2
+
+    # e^(1.702(x-|x|))
+    pn_x = input_x - abs_x
+    mul_pn_x = pn_x * const_2
+    exp_pn_x = torch.exp(mul_pn_x)
+
+    # e^(-1.702x) + 1.702xe^(-1.702x) + e^(1.702(x-|x|))
+    div_up = exp_x + add_2
+    div_up = div_up + exp_pn_x
+
+    # (e^(-1.702x)+1)^2
+    div_down_i = exp_x + const_3
+    div_down = div_down_i * div_down_i
+    div_down_rec = torch.reciprocal(div_down)
+    result_temp = div_up * div_down_rec
+    result = grad * result_temp
+
+    return result.cpu()
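The forward reference (renamed from fast_gelu to npu_fast_gelu in 1.1.0) is algebraically the same as the usual sigmoid form of fast GELU, x * sigmoid(1.702 * x); splitting the expression into e^(-1.702|x|) and e^(1.702(x - |x|)) just avoids exponentials of large positive arguments. A small numerical sanity check (illustrative, not part of the package), with npu_fast_gelu from the diff above in scope:

    import torch

    # Numerical check: the reference above matches x * sigmoid(1.702 * x).
    x = torch.randn(1000, dtype=torch.float64)
    ref = x * torch.sigmoid(1.702 * x)
    print(torch.allclose(npu_fast_gelu(x), ref))  # expected: True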
msprobe/pytorch/bench_functions/layer_norm_eval.py
@@ -1,6 +1,21 @@
-import torch
-
-
-def npu_layer_norm_eval(data, normalized_shape):
-    result = torch.nn.functional.layer_norm(data, normalized_shape)
-    return result.cpu()
+# Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+
+
+def npu_layer_norm_eval(data, normalized_shape, weight=None, bias=None, eps=1e-5):
+    result = torch.nn.functional.layer_norm(data, normalized_shape)
+    return result
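The 1.1.0 signature gains weight, bias, and eps parameters, although in this version they are accepted without being forwarded to torch.nn.functional.layer_norm. A hypothetical call (shapes are arbitrary), with the definition above in scope:

    import torch

    data = torch.randn(8, 16)
    out = npu_layer_norm_eval(data, normalized_shape=(16,))
    print(out.shape)  # torch.Size([8, 16])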
msprobe/pytorch/bench_functions/linear.py
@@ -1,12 +1,27 @@
-import torch
-
-
-def npu_linear(x, weight, bias):
-    output = torch.nn.functional.linear(x, weight, bias)
-    return output.cpu()
-
-
-def npu_linear_backward(grad, input_data, weight):
-    input_grad = torch.matmul(grad, weight)
-    weight_grad = torch.matmul(grad.t(), input_data)
-    return input_grad.cpu(), weight_grad.cpu()
+# Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+
+
+def npu_linear(x, weight, bias):
+    output = torch.nn.functional.linear(x, weight, bias)
+    return output
+
+
+def npu_linear_backward(grad, input_data, weight):
+    input_grad = torch.matmul(grad, weight)
+    weight_grad = torch.matmul(grad.t(), input_data)
+    return input_grad.cpu(), weight_grad.cpu()
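A shape sketch for the two references above (the sizes are illustrative, not from the package), with the definitions in scope:

    import torch

    x = torch.randn(5, 3)        # (batch, in_features)
    weight = torch.randn(4, 3)   # (out_features, in_features)
    bias = torch.randn(4)

    out = npu_linear(x, weight, bias)                                # (5, 4)
    in_grad, w_grad = npu_linear_backward(torch.ones_like(out), x, weight)
    print(out.shape, in_grad.shape, w_grad.shape)                    # (5, 4), (5, 3), (4, 3)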
msprobe/pytorch/bench_functions/matmul_backward.py
@@ -1,48 +1,63 @@
-import torch
-
-
-def matmul_backward(grad, self, other, mask):
-    grad_self, grad_other = None, None
-    dim_self = self.dim()
-    dim_other = other.dim()
-
-    size_grad = list(grad.size())
-    size_self = list(self.size())
-    size_other = list(other.size())
-    if dim_self == 1 and dim_other == 1:
-        grad_self = other.mul(grad) if mask[0] else grad_self
-        grad_other = self.mul(grad) if mask[1] else grad_other
-    elif dim_self == 2 and dim_other == 1:
-        grad_self = grad.unsqueeze(1).mm(other.unsqueeze(0)) if mask[0] else grad_self
-        grad_other = self.transpose(-1, -2).mm(grad.unsqueeze(1)).squeeze_(1) if mask[1] else grad_other
-    elif dim_self == 1 and dim_other == 2:
-        grad_self = grad.unsqueeze(0).mm(other.transpose(-1, -2)).squeeze_(0) if mask[0] else grad_self
-        grad_other = self.unsqueeze(1).mm(grad.unsqueeze(0)) if mask[1] else grad_other
-    elif dim_self >= 3 and (dim_other == 1 or dim_other == 2):
-        view_size = 1 if dim_other == 1 else size_grad[-1]
-        unfolded_grad = (grad.unsqueeze(-1) if dim_other == 1 else grad).contiguous().view(-1, view_size)
-        if mask[0]:
-            grad_self = unfolded_grad.mm(other.unsqueeze(0) if dim_other == 1 else other.transpose(-1, -2)) \
-                .view(size_self)
-        if mask[1]:
-            unfolded_self = self.contiguous().view([-1, size_self[-1]])
-            grad_other = unfolded_self.transpose(-1, -2).mm(unfolded_grad).view(size_other)
-    elif (dim_self == 1 or dim_self == 2) and dim_other >= 3:
-        view_size = 1 if dim_self == 1 else size_grad[-2]
-        unfolded_grad_T = grad.view([-1, view_size]) \
-            if dim_self == 1 else grad.transpose(-1, -2).contiguous().view([-1, view_size])
-        if mask[0]:
-            # create a 2D-matrix from other
-            unfolded_other_T = \
-                other.transpose(-1, -2).contiguous().view([-1, size_other[-2]]).transpose(-1, -2)
-            grad_self = unfolded_other_T.mm(unfolded_grad_T).transpose(-1, -2).view(size_self)
-        if mask[1]:
-            size_other_T = size_other[:-2]
-            size_other_T.extend(size_other[::-1][:2])
-            grad_other = \
-                unfolded_grad_T.mm(self.unsqueeze(0) if dim_self == 1 else self).view(size_other_T).transpose(-1, -2)
-    else:
-        grad_self = torch.matmul(grad, other.transpose(-1, -2)) if mask[0] else grad_self
-        grad_other = torch.matmul(self.transpose(-1, -2), grad) if mask[1] else grad_other
-
-    return grad_self.cpu(), grad_other.cpu()
+# Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+
+
+def matmul_backward(grad, self, other, mask):
+    grad_self, grad_other = None, None
+    dim_self = self.dim()
+    dim_other = other.dim()
+
+    size_grad = list(grad.size())
+    size_self = list(self.size())
+    size_other = list(other.size())
+    if dim_self == 1 and dim_other == 1:
+        grad_self = other.mul(grad) if mask[0] else grad_self
+        grad_other = self.mul(grad) if mask[1] else grad_other
+    elif dim_self == 2 and dim_other == 1:
+        grad_self = grad.unsqueeze(1).mm(other.unsqueeze(0)) if mask[0] else grad_self
+        grad_other = self.transpose(-1, -2).mm(grad.unsqueeze(1)).squeeze_(1) if mask[1] else grad_other
+    elif dim_self == 1 and dim_other == 2:
+        grad_self = grad.unsqueeze(0).mm(other.transpose(-1, -2)).squeeze_(0) if mask[0] else grad_self
+        grad_other = self.unsqueeze(1).mm(grad.unsqueeze(0)) if mask[1] else grad_other
+    elif dim_self >= 3 and (dim_other == 1 or dim_other == 2):
+        view_size = 1 if dim_other == 1 else size_grad[-1]
+        unfolded_grad = (grad.unsqueeze(-1) if dim_other == 1 else grad).contiguous().view(-1, view_size)
+        if mask[0]:
+            grad_self = unfolded_grad.mm(other.unsqueeze(0) if dim_other == 1 else other.transpose(-1, -2)) \
+                .view(size_self)
+        if mask[1]:
+            unfolded_self = self.contiguous().view([-1, size_self[-1]])
+            grad_other = unfolded_self.transpose(-1, -2).mm(unfolded_grad).view(size_other)
+    elif (dim_self == 1 or dim_self == 2) and dim_other >= 3:
+        view_size = 1 if dim_self == 1 else size_grad[-2]
+        unfolded_grad_t = grad.view([-1, view_size]) \
+            if dim_self == 1 else grad.transpose(-1, -2).contiguous().view([-1, view_size])
+        if mask[0]:
+            # create a 2D-matrix from other
+            unfolded_other_t = \
+                other.transpose(-1, -2).contiguous().view([-1, size_other[-2]]).transpose(-1, -2)
+            grad_self = unfolded_other_t.mm(unfolded_grad_t).transpose(-1, -2).view(size_self)
+        if mask[1]:
+            size_other_t = size_other[:-2]
+            size_other_t.extend(size_other[::-1][:2])
+            grad_other = \
+                unfolded_grad_t.mm(self.unsqueeze(0) if dim_self == 1 else self).view(size_other_t).transpose(-1, -2)
+    else:
+        grad_self = torch.matmul(grad, other.transpose(-1, -2)) if mask[0] else grad_self
+        grad_other = torch.matmul(self.transpose(-1, -2), grad) if mask[1] else grad_other
+
+    return grad_self.cpu(), grad_other.cpu()
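matmul_backward dispatches on the dimensionalities of the two operands; mask selects which of the two gradients to compute, and since the final line calls .cpu() on both results unconditionally, the sketch below computes both. A hypothetical call exercising the batched (else) branch, with shapes chosen for illustration only and the definition above in scope:

    import torch

    a = torch.randn(2, 3, 4)
    b = torch.randn(2, 4, 5)
    grad = torch.ones(2, 3, 5)                    # gradient w.r.t. a @ b
    ga, gb = matmul_backward(grad, a, b, mask=(True, True))
    print(ga.shape, gb.shape)                     # (2, 3, 4), (2, 4, 5)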