mindstudio-probe 1.0.3-py3-none-any.whl → 1.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (278)
  1. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/LICENSE +201 -201
  2. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/METADATA +36 -34
  3. mindstudio_probe-1.1.0.dist-info/RECORD +287 -0
  4. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/WHEEL +1 -1
  5. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/entry_points.txt +1 -0
  6. msprobe/README.md +131 -237
  7. msprobe/__init__.py +16 -1
  8. msprobe/{config/config.json → config.json} +47 -49
  9. msprobe/core/advisor/advisor.py +124 -124
  10. msprobe/core/advisor/advisor_const.py +58 -59
  11. msprobe/core/advisor/advisor_result.py +58 -58
  12. msprobe/core/common/const.py +402 -318
  13. msprobe/core/common/exceptions.py +99 -99
  14. msprobe/core/common/{file_check.py → file_utils.py} +523 -283
  15. msprobe/core/common/inplace_op_checker.py +38 -0
  16. msprobe/core/common/inplace_ops.yaml +251 -0
  17. msprobe/core/common/log.py +86 -69
  18. msprobe/core/common/utils.py +371 -616
  19. msprobe/core/common_config.py +78 -71
  20. msprobe/core/compare/acc_compare.py +472 -298
  21. msprobe/core/compare/check.py +180 -95
  22. msprobe/core/compare/compare_cli.py +69 -49
  23. msprobe/core/compare/highlight.py +259 -222
  24. msprobe/core/compare/multiprocessing_compute.py +174 -149
  25. msprobe/core/compare/npy_compare.py +310 -295
  26. msprobe/core/compare/utils.py +464 -429
  27. msprobe/core/data_dump/data_collector.py +153 -144
  28. msprobe/core/data_dump/data_processor/base.py +337 -293
  29. msprobe/core/data_dump/data_processor/factory.py +76 -59
  30. msprobe/core/data_dump/data_processor/mindspore_processor.py +192 -198
  31. msprobe/core/data_dump/data_processor/pytorch_processor.py +383 -389
  32. msprobe/core/data_dump/json_writer.py +117 -116
  33. msprobe/core/data_dump/scope.py +194 -178
  34. msprobe/core/grad_probe/constant.py +74 -70
  35. msprobe/core/grad_probe/grad_compare.py +170 -175
  36. msprobe/core/grad_probe/utils.py +77 -52
  37. msprobe/docs/01.installation.md +99 -0
  38. msprobe/docs/02.config_introduction.md +137 -0
  39. msprobe/docs/03.config_examples.md +237 -0
  40. msprobe/docs/04.acl_config_examples.md +78 -0
  41. msprobe/docs/05.data_dump_PyTorch.md +326 -0
  42. msprobe/docs/06.data_dump_MindSpore.md +285 -0
  43. msprobe/docs/07.accuracy_checker_PyTorch.md +297 -0
  44. msprobe/docs/08.accuracy_checker_online_PyTorch.md +238 -0
  45. msprobe/docs/09.accuracy_checker_MindSpore.md +68 -0
  46. msprobe/docs/10.accuracy_compare_PyTorch.md +327 -0
  47. msprobe/docs/11.accuracy_compare_MindSpore.md +333 -0
  48. msprobe/docs/12.overflow_check_PyTorch.md +79 -0
  49. msprobe/docs/13.overflow_check_MindSpore.md +31 -0
  50. msprobe/{pytorch/doc/parse_tool.md → docs/14.data_parse_PyTorch.md} +283 -286
  51. msprobe/docs/15.free_benchmarking_PyTorch.md +170 -0
  52. msprobe/docs/16.free_benchmarking_MindSpore.md +140 -0
  53. msprobe/{doc/grad_probe/grad_probe.md → docs/17.grad_probe.md} +205 -207
  54. msprobe/{pytorch/doc/在线精度比对.md → docs/18.online_dispatch.md} +89 -90
  55. msprobe/docs/FAQ.md +189 -0
  56. msprobe/docs/S02.report_free_benchmarking_validation_performance_baseline.md +146 -0
  57. msprobe/docs/img/free_benchmark_framework.png +0 -0
  58. msprobe/docs/img/ms_dump.png +0 -0
  59. msprobe/docs/img/ms_layer.png +0 -0
  60. msprobe/docs/img/pt_dump.png +0 -0
  61. msprobe/mindspore/__init__.py +2 -1
  62. msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +278 -245
  63. msprobe/mindspore/api_accuracy_checker/api_info.py +76 -69
  64. msprobe/mindspore/api_accuracy_checker/api_runner.py +155 -151
  65. msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +196 -196
  66. msprobe/mindspore/api_accuracy_checker/cmd_parser.py +6 -0
  67. msprobe/mindspore/api_accuracy_checker/compute_element.py +238 -223
  68. msprobe/mindspore/api_accuracy_checker/main.py +8 -15
  69. msprobe/mindspore/api_accuracy_checker/type_mapping.py +113 -113
  70. msprobe/mindspore/api_accuracy_checker/utils.py +79 -62
  71. msprobe/mindspore/cell_processor.py +58 -34
  72. msprobe/mindspore/common/const.py +108 -87
  73. msprobe/mindspore/common/log.py +37 -37
  74. msprobe/mindspore/common/utils.py +97 -57
  75. msprobe/mindspore/compare/distributed_compare.py +62 -75
  76. msprobe/mindspore/compare/layer_mapping.py +146 -0
  77. msprobe/mindspore/compare/modify_mapping.py +107 -0
  78. msprobe/mindspore/compare/ms_compare.py +357 -117
  79. msprobe/mindspore/compare/ms_graph_compare.py +364 -317
  80. msprobe/mindspore/compare/ms_to_pt_api.yaml +399 -399
  81. msprobe/mindspore/debugger/debugger_config.py +69 -74
  82. msprobe/mindspore/debugger/precision_debugger.py +150 -107
  83. msprobe/mindspore/dump/dump_tool_factory.py +50 -35
  84. msprobe/mindspore/dump/hook_cell/api_registry.py +128 -104
  85. msprobe/mindspore/dump/hook_cell/hook_cell.py +55 -53
  86. msprobe/mindspore/dump/hook_cell/primitive_hooks.py +206 -0
  87. msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +994 -925
  88. msprobe/mindspore/dump/hook_cell/wrap_api.py +121 -0
  89. msprobe/mindspore/dump/jit_dump.py +96 -56
  90. msprobe/mindspore/dump/kernel_graph_dump.py +75 -60
  91. msprobe/mindspore/dump/kernel_kbyk_dump.py +79 -65
  92. msprobe/mindspore/free_benchmark/api_pynative_self_check.py +131 -116
  93. msprobe/mindspore/free_benchmark/common/config.py +27 -12
  94. msprobe/mindspore/free_benchmark/common/handler_params.py +32 -17
  95. msprobe/mindspore/free_benchmark/common/utils.py +85 -71
  96. msprobe/mindspore/free_benchmark/data/support_wrap_ops.yaml +842 -842
  97. msprobe/mindspore/free_benchmark/decorator/dec_forward.py +57 -42
  98. msprobe/mindspore/free_benchmark/decorator/decorator_factory.py +122 -107
  99. msprobe/mindspore/free_benchmark/handler/base_handler.py +105 -90
  100. msprobe/mindspore/free_benchmark/handler/check_handler.py +56 -41
  101. msprobe/mindspore/free_benchmark/handler/fix_handler.py +51 -36
  102. msprobe/mindspore/free_benchmark/handler/handler_factory.py +36 -21
  103. msprobe/mindspore/free_benchmark/perturbation/add_noise.py +82 -67
  104. msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py +36 -21
  105. msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +78 -63
  106. msprobe/mindspore/free_benchmark/perturbation/exchange_value.py +77 -0
  107. msprobe/mindspore/free_benchmark/perturbation/improve_precision.py +49 -34
  108. msprobe/mindspore/free_benchmark/perturbation/no_change.py +27 -12
  109. msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +44 -27
  110. msprobe/mindspore/free_benchmark/self_check_tool_factory.py +48 -33
  111. msprobe/mindspore/grad_probe/global_context.py +100 -91
  112. msprobe/mindspore/grad_probe/grad_analyzer.py +231 -231
  113. msprobe/mindspore/grad_probe/grad_monitor.py +27 -27
  114. msprobe/mindspore/grad_probe/grad_stat_csv.py +131 -131
  115. msprobe/mindspore/grad_probe/hook.py +94 -92
  116. msprobe/mindspore/grad_probe/utils.py +29 -28
  117. msprobe/mindspore/ms_config.py +128 -126
  118. msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +60 -45
  119. msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +49 -34
  120. msprobe/mindspore/runtime.py +4 -4
  121. msprobe/mindspore/service.py +297 -354
  122. msprobe/mindspore/task_handler_factory.py +24 -24
  123. msprobe/msprobe.py +105 -107
  124. msprobe/pytorch/__init__.py +23 -4
  125. msprobe/pytorch/api_accuracy_checker/common/config.py +70 -55
  126. msprobe/pytorch/api_accuracy_checker/common/utils.py +246 -165
  127. msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +230 -213
  128. msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +632 -581
  129. msprobe/pytorch/api_accuracy_checker/compare/api_precision_standard.yaml +132 -132
  130. msprobe/pytorch/api_accuracy_checker/compare/api_precision_threshold.yaml +390 -390
  131. msprobe/pytorch/api_accuracy_checker/compare/compare.py +416 -381
  132. msprobe/pytorch/api_accuracy_checker/compare/compare_column.py +90 -73
  133. msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +265 -244
  134. msprobe/pytorch/api_accuracy_checker/config.yaml +10 -10
  135. msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +370 -332
  136. msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +221 -199
  137. msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +150 -134
  138. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +518 -581
  139. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +213 -74
  140. msprobe/pytorch/api_accuracy_checker/run_ut/torch_ut_setting.json +7 -4
  141. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +218 -202
  142. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/client.py +370 -324
  143. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/device_dispatch.py +227 -204
  144. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/dump_dispatch.py +110 -0
  145. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/server.py +244 -218
  146. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/torch_ops_config.yaml +63 -0
  147. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/utils.py +44 -0
  148. msprobe/pytorch/bench_functions/__init__.py +30 -15
  149. msprobe/pytorch/bench_functions/apply_adam_w.py +43 -28
  150. msprobe/pytorch/bench_functions/confusion_transpose.py +34 -19
  151. msprobe/pytorch/bench_functions/fast_gelu.py +70 -55
  152. msprobe/pytorch/bench_functions/layer_norm_eval.py +21 -6
  153. msprobe/pytorch/bench_functions/linear.py +27 -12
  154. msprobe/pytorch/bench_functions/matmul_backward.py +63 -48
  155. msprobe/pytorch/bench_functions/npu_fusion_attention.py +538 -421
  156. msprobe/pytorch/bench_functions/rms_norm.py +30 -15
  157. msprobe/pytorch/bench_functions/rotary_mul.py +71 -52
  158. msprobe/pytorch/bench_functions/scaled_mask_softmax.py +41 -26
  159. msprobe/pytorch/bench_functions/swiglu.py +70 -55
  160. msprobe/pytorch/common/__init__.py +17 -2
  161. msprobe/pytorch/common/compare_script.template +14 -14
  162. msprobe/pytorch/common/log.py +33 -32
  163. msprobe/pytorch/common/parse_json.py +54 -39
  164. msprobe/pytorch/common/utils.py +310 -300
  165. msprobe/pytorch/compare/distributed_compare.py +66 -66
  166. msprobe/pytorch/compare/mapping.yaml +607 -607
  167. msprobe/pytorch/compare/match.py +49 -33
  168. msprobe/pytorch/compare/pt_compare.py +82 -40
  169. msprobe/pytorch/debugger/debugger_config.py +108 -95
  170. msprobe/pytorch/debugger/precision_debugger.py +173 -125
  171. msprobe/pytorch/free_benchmark/__init__.py +23 -8
  172. msprobe/pytorch/free_benchmark/common/constant.py +70 -70
  173. msprobe/pytorch/free_benchmark/common/counter.py +71 -71
  174. msprobe/pytorch/free_benchmark/common/enums.py +65 -37
  175. msprobe/pytorch/free_benchmark/common/params.py +144 -129
  176. msprobe/pytorch/free_benchmark/common/utils.py +118 -102
  177. msprobe/pytorch/free_benchmark/compare/grad_saver.py +200 -179
  178. msprobe/pytorch/free_benchmark/compare/single_benchmark.py +119 -104
  179. msprobe/pytorch/free_benchmark/main.py +120 -105
  180. msprobe/pytorch/free_benchmark/perturbed_layers/base_layer.py +28 -13
  181. msprobe/pytorch/free_benchmark/perturbed_layers/layer_factory.py +56 -41
  182. msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +105 -90
  183. msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +119 -104
  184. msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +87 -63
  185. msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +83 -68
  186. msprobe/pytorch/free_benchmark/perturbed_layers/npu/no_change.py +43 -28
  187. msprobe/pytorch/free_benchmark/perturbed_layers/npu/npu_base_layser.py +60 -45
  188. msprobe/pytorch/free_benchmark/perturbed_layers/run_cpu.py +34 -19
  189. msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +256 -217
  190. msprobe/pytorch/free_benchmark/result_handlers/check_handler.py +54 -39
  191. msprobe/pytorch/free_benchmark/result_handlers/fix_handler.py +38 -23
  192. msprobe/pytorch/free_benchmark/result_handlers/handler_factory.py +45 -30
  193. msprobe/pytorch/free_benchmark/result_handlers/preheat_handler.py +185 -170
  194. msprobe/pytorch/function_factory.py +91 -75
  195. msprobe/pytorch/functional/module_dump.py +84 -0
  196. msprobe/pytorch/grad_probe/grad_monitor.py +91 -90
  197. msprobe/pytorch/grad_probe/grad_stat_csv.py +128 -128
  198. msprobe/pytorch/hook_module/__init__.py +16 -1
  199. msprobe/pytorch/hook_module/api_registry.py +166 -161
  200. msprobe/pytorch/hook_module/hook_module.py +118 -120
  201. msprobe/pytorch/hook_module/support_wrap_ops.yaml +1879 -1877
  202. msprobe/pytorch/hook_module/utils.py +28 -29
  203. msprobe/pytorch/hook_module/wrap_aten.py +111 -110
  204. msprobe/pytorch/hook_module/wrap_distributed.py +77 -78
  205. msprobe/pytorch/hook_module/wrap_functional.py +104 -105
  206. msprobe/pytorch/hook_module/wrap_npu_custom.py +85 -84
  207. msprobe/pytorch/hook_module/wrap_tensor.py +69 -71
  208. msprobe/pytorch/hook_module/wrap_torch.py +84 -86
  209. msprobe/pytorch/hook_module/wrap_vf.py +60 -62
  210. msprobe/pytorch/module_processer.py +153 -138
  211. msprobe/pytorch/online_dispatch/__init__.py +20 -20
  212. msprobe/pytorch/online_dispatch/compare.py +235 -236
  213. msprobe/pytorch/online_dispatch/dispatch.py +271 -271
  214. msprobe/pytorch/online_dispatch/dump_compare.py +155 -156
  215. msprobe/pytorch/online_dispatch/single_compare.py +391 -391
  216. msprobe/pytorch/online_dispatch/torch_ops_config.yaml +57 -49
  217. msprobe/pytorch/online_dispatch/utils.py +127 -146
  218. msprobe/pytorch/parse.py +19 -4
  219. msprobe/pytorch/parse_tool/cli.py +31 -32
  220. msprobe/pytorch/parse_tool/lib/compare.py +259 -271
  221. msprobe/pytorch/parse_tool/lib/config.py +52 -52
  222. msprobe/pytorch/parse_tool/lib/file_desc.py +31 -31
  223. msprobe/pytorch/parse_tool/lib/interactive_cli.py +102 -102
  224. msprobe/pytorch/parse_tool/lib/parse_exception.py +54 -54
  225. msprobe/pytorch/parse_tool/lib/parse_tool.py +161 -158
  226. msprobe/pytorch/parse_tool/lib/utils.py +320 -321
  227. msprobe/pytorch/parse_tool/lib/visualization.py +85 -91
  228. msprobe/pytorch/pt_config.py +317 -187
  229. msprobe/pytorch/service.py +311 -252
  230. mindstudio_probe-1.0.3.dist-info/RECORD +0 -272
  231. msprobe/config/README.md +0 -539
  232. msprobe/mindspore/doc/compare.md +0 -58
  233. msprobe/mindspore/doc/dump.md +0 -217
  234. msprobe/mindspore/dump/hook_cell/wrap_functional.py +0 -91
  235. msprobe/mindspore/dump/hook_cell/wrap_tensor.py +0 -63
  236. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/ssl_config.py +0 -10
  237. msprobe/pytorch/doc/FAQ.md +0 -193
  238. msprobe/pytorch/doc/api_accuracy_checker.md +0 -313
  239. msprobe/pytorch/doc/api_accuracy_checker_online.md +0 -187
  240. msprobe/pytorch/doc/dump.md +0 -260
  241. msprobe/pytorch/doc/msprobe精度工具数据dump标准性能基线报告.md +0 -182
  242. msprobe/pytorch/doc/ptdbg_ascend_compare.md +0 -240
  243. msprobe/pytorch/doc/ptdbg_ascend_overview.md +0 -68
  244. msprobe/pytorch/doc/ptdbg_ascend_quickstart.md +0 -381
  245. msprobe/pytorch/doc/run_overflow_check.md +0 -25
  246. msprobe/pytorch/doc/无标杆工具场景验证和性能基线报告.md +0 -151
  247. msprobe/pytorch/functional/data_processor.py +0 -0
  248. msprobe/pytorch/functional/dump_module.py +0 -39
  249. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/top_level.txt +0 -0
  250. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_1.png +0 -0
  251. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_2.png +0 -0
  252. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_3.png +0 -0
  253. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_4.png +0 -0
  254. /msprobe/{pytorch/doc → docs}/img/GPT-3_1.png +0 -0
  255. /msprobe/{pytorch/doc → docs}/img/GPT-3_2.png +0 -0
  256. /msprobe/{pytorch/doc → docs}/img/GPT-3_3.png +0 -0
  257. /msprobe/{pytorch/doc → docs}/img/GPT-3_4.png +0 -0
  258. /msprobe/{pytorch/doc → docs}/img/GPT-3_5.png +0 -0
  259. /msprobe/{pytorch/doc → docs}/img/GPT-3_6.png +0 -0
  260. /msprobe/{pytorch/doc → docs}/img/GPT-3_7.png +0 -0
  261. /msprobe/{pytorch/doc → docs}/img/GPT-3_8.png +0 -0
  262. /msprobe/{pytorch/doc → docs}/img/YOLOV5S_1.png +0 -0
  263. /msprobe/{pytorch/doc → docs}/img/YOLOV5S_2.png +0 -0
  264. /msprobe/{pytorch/doc → docs}/img/accuracy_checking_details.png +0 -0
  265. /msprobe/{pytorch/doc → docs}/img/accuracy_checking_result.png +0 -0
  266. /msprobe/{pytorch/doc → docs}/img/api_precision_compare_details.png +0 -0
  267. /msprobe/{pytorch/doc → docs}/img/api_precision_compare_result.png +0 -0
  268. /msprobe/{pytorch/doc → docs}/img/auto_analyze_log.png +0 -0
  269. /msprobe/{pytorch/doc → docs}/img/compare_result_pkl.png +0 -0
  270. /msprobe/{pytorch/doc → docs}/img/compare_result_pkl_md5.png.png +0 -0
  271. /msprobe/{pytorch/doc → docs}/img/cpu_info.png +0 -0
  272. /msprobe/{config → docs}/img/free_benchmark.png +0 -0
  273. /msprobe/{doc/grad_probe/img/image-1.png → docs/img/grad_probe_image-1.png} +0 -0
  274. /msprobe/{doc/grad_probe/img/image-2.png → docs/img/grad_probe_image-2.png} +0 -0
  275. /msprobe/{doc/grad_probe/img/image-3.png → docs/img/grad_probe_image-3.png} +0 -0
  276. /msprobe/{doc/grad_probe/img/image-4.png → docs/img/grad_probe_image-4.png} +0 -0
  277. /msprobe/{doc/grad_probe/img/image.png → docs/img/grad_probe_image.png} +0 -0
  278. /msprobe/{pytorch/doc → docs}/img/module_compare.png +0 -0
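
Most of the churn above is a documentation consolidation: the per-framework doc trees (msprobe/pytorch/doc, msprobe/mindspore/doc, msprobe/doc/grad_probe) are replaced by a single msprobe/docs directory, and msprobe/core/common/file_check.py is renamed to file_utils.py (item 14). Callers that imported the file helpers from the old module path have to switch to the new one. A minimal compatibility sketch, not taken from the package, assuming only the module path changed for these symbols (the hunk below shows FileCheckConst and change_mode coming from file_utils in 1.1.0):

    # Hypothetical shim: prefer the 1.1.0 module path, fall back to the 1.0.3 one.
    try:
        from msprobe.core.common.file_utils import FileCheckConst, change_mode
    except ImportError:  # mindstudio-probe <= 1.0.3
        from msprobe.core.common.file_check import FileCheckConst, change_mode

The one per-file diff reproduced below is msprobe/pytorch/common/utils.py (item 164; its +310/-300 counts match the hunk header).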
@@ -1,300 +1,310 @@
- #!/usr/bin/env python3
- # -*- coding: utf-8 -*-
- """
- # Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved.
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """
- import logging
- import os
- import random
- import stat
- import csv
- import json
- import torch
- import torch.distributed as dist
- import numpy as np
- from functools import wraps
- from msprobe.core.common.exceptions import DistributedNotInitializedError
- from msprobe.core.common.log import logger as common_logger
- from msprobe.core.common.utils import check_file_or_directory_path, check_path_before_create, CompareException
- from msprobe.core.common.file_check import FileCheckConst, change_mode, FileOpen
-
-
- try:
-     import torch_npu
- except ImportError:
-     is_gpu = True
- else:
-     is_gpu = False
-
-
- torch_without_guard_version = torch.__version__ >= '2.1'
-
-
- if not is_gpu and not torch_without_guard_version:
-     from torch_npu.utils.device_guard import torch_device_guard as torch_npu_device_guard
-
- npu_distributed_api = ['isend', 'irecv']
-
-
- def parameter_adapter(func):
-
-     def handle_masked_select(input_tensor, indices):
-         masked_select_func = getattr(torch._C._VariableFunctionsClass, "masked_select")
-         if input_tensor.dtype == torch.bfloat16:
-             # masked_select在NPU上输入数据dtype类型为bfloat16会报错,提示不支持此类型
-             return masked_select_func(input_tensor.to(torch.float32), indices).to(torch.bfloat16)
-         else:
-             return masked_select_func(input_tensor, indices)
-
-     @wraps(func)
-     def inner(self, *args, **kwargs):
-         if self.op_name_ == "__getitem__" and len(args) > 1 and isinstance(args[1], torch.Tensor):
-             input_tensor = args[0]
-             indices = args[1]
-             if indices.dtype == torch.uint8:
-                 indices = indices.bool()
-             if indices.dtype == torch.bool:
-                 if indices.shape == input_tensor.shape:
-                     return handle_masked_select(input_tensor, indices)
-                 else:
-                     indices = getattr(torch._C._VariableFunctionsClass, "nonzero")(indices, as_tuple=True)
-                     return getattr(torch._C._TensorBase, "__getitem__")(input_tensor, indices)
-             elif indices.dtype != torch.bool:
-                 if not indices.shape or len(indices.shape) == 1:
-                     return func(self, input_tensor, indices.tolist())
-                 elif len(indices.shape) == 2:
-                     result = [func(self, input_tensor, index) for index in indices.tolist()]
-                     return getattr(torch._C._VariableFunctionsClass, "stack")(result, 0)
-                 else:
-                     res = [input_tensor[tensor_index] for tensor_index in indices]
-                     return getattr(torch._C._VariableFunctionsClass, "stack")(res, 0)
-         if self.op_name_ == "__eq__" and args[1] is None:
-             return False
-         return func(self, *args, **kwargs)
-     return inner
-
-
- def torch_device_guard(func):
-     if is_gpu or torch_without_guard_version:
-         return func
-     # Parse args/kwargs matched torch.device objects
-
-     @torch_npu_device_guard
-     def wrapper(*args, **kwargs):
-         return func(*args, **kwargs)
-     return wrapper
-
-
- def get_rank_if_initialized():
-     """
-     return rank id if it is initialized or raise Exception: DistributedNotInitializedError
-     """
-     if torch.distributed.is_initialized():
-         return torch.distributed.get_rank()
-     else:
-         raise DistributedNotInitializedError("torch distributed environment is not initialized")
-
-
- def seed_all(seed=1234, mode=False):
-     random.seed(seed)
-     os.environ['PYTHONHASHSEED'] = str(seed)
-     np.random.seed(seed)
-     torch.manual_seed(seed)
-     torch.use_deterministic_algorithms(mode)
-     if is_gpu:
-         torch.cuda.manual_seed_all(seed)
-         torch.cuda.manual_seed(seed)
-         torch.backends.cudnn.deterministic = True
-         torch.backends.cudnn.enable = False
-         torch.backends.cudnn.benchmark = False
-     else:
-         torch_npu.npu.manual_seed_all(seed)
-         torch_npu.npu.manual_seed(seed)
-
-
- class Const:
-     """
-     Class for const
-     """
-     SEP = "."
-     MODEL_TYPE = ['.onnx', '.pb', '.om']
-     DIM_PATTERN = r"^(-?[0-9]+)(,-?[0-9]+)*"
-     SEMICOLON = ";"
-     COLON = ":"
-     EQUAL = "="
-     COMMA = ","
-     DOT = "."
-     DUMP_RATIO_MAX = 100
-     SUMMERY_DATA_NUMS = 256
-     FLOAT_EPSILON = np.finfo(float).eps
-     SUPPORT_DUMP_MODE = ['api', 'acl']
-     ON = 'ON'
-     OFF = 'OFF'
-     KWARGS = 'kwargs'
-     INPUT = 'input'
-     OUTPUT = 'output'
-     BACKWARD = 'backward'
-     FORWARD = 'forward'
-     PRE_FORWARD = "pre_forward"
-     INPUT_ARGS = 'input_args'
-     INPUT_KWARGS = 'input_kwargs'
-     GRAD_INPUT = 'grad_input'
-     GRAD_OUTPUT = 'grad_output'
-     START = "start"
-     STOP = "stop"
-     MAX = 'Max'
-     MIN = 'Min'
-
-     # dump mode
-     ALL = "all"
-     LIST = "list"
-     RANGE = "range"
-     STACK = "stack"
-     ACL = "acl"
-     API_LIST = "api_list"
-     API_STACK = "api_stack"
-     DUMP_MODE = [ALL, LIST, RANGE, STACK, ACL, API_LIST, API_STACK]
-     AUTO = "auto"
-     ONLINE_DUMP_MODE = [ALL, LIST, AUTO, OFF]
-     SUMMARY = "summary"
-     MD5 = "md5"
-     SUMMARY_MODE = [ALL, SUMMARY, MD5]
-
-     WRITE_FLAGS = os.O_WRONLY | os.O_CREAT
-     OVERWRITE_FLAGS = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
-     WRITE_MODES = stat.S_IWUSR | stat.S_IRUSR
-
-     PKL_SUFFIX = ".pkl"
-     NUMPY_SUFFIX = ".npy"
-     ONE_GB = 1 * 1024 * 1024 * 1024
-     TEN_GB = 10 * 1024 * 1024 * 1024
-     FILE_PATTERN = r'^[a-zA-Z0-9_./-]+$'
-     FILE_NAME_LENGTH = 255
-     DIRECTORY_LENGTH = 4096
-     DISTRIBUTED_PREFIX_LENGTH = 60
-     SUMMARY_COLUMN_NUM = 6
-     STACK_COLUMN_NUM = 2
-     # env dump path
-     ASCEND_WORK_PATH = "ASCEND_WORK_PATH"
-     DUMP_DIR = "dump_data"
-     DATA = "data"
-
-     ENV_ENABLE = "1"
-     ENV_DISABLE = "0"
-
-     MAX_SEED_VALUE = 2**32 - 1
-
-     INPLACE_LIST = ["broadcast", "all_reduce", "reduce", "all_gather", "gather", "scatter", "reduce_scatter",
-                     "_reduce_scatter_base", "_all_gather_base", "all_to_all_single"]
-
-     TASK_LIST = ["tensor", "statistics", "overflow_check", "free_benchmark"]
-     LEVEL_LIST = ["L0", "L1", "L2", "mix"]
-     STATISTICS = "statistics"
-     TENSOR = "tensor"
-     OVERFLOW_CHECK = "overflow_check"
-     FREE_BENCHMARK = "free_benchmark"
-
-     ATTR_NAME_PREFIX = "wrap_"
-
-     FLOAT_TYPE = [np.half, np.single, float, np.double, np.float64, np.longdouble, np.float32, np.float16]
-     BOOL_TYPE = [bool, np.uint8]
-     INT_TYPE = [np.int32, np.int64]
-     NPU = 'NPU'
-     DISTRIBUTED = 'Distributed'
-
-     RAISE_PRECISION = {
-         torch.float16: torch.float32,
-         torch.bfloat16: torch.float32,
-         torch.float32: torch.float64
-     }
-     CONVERT = {
-         "int32_to_int64": ["torch.int32", "torch.int64"],
-     }
-
-     CONVERT_API = {
-         "int32_to_int64": ["cross_entropy"]
-     }
-
-
- def get_tensor_rank(in_feat, out_feat):
-     if dist.is_initialized():
-         return dist.get_rank()
-
-     def get_tensor_rank_single(x):
-         if isinstance(x, (list, tuple)):
-             if len(x) > 0:
-                 return get_tensor_rank_single(x[0])
-         elif isinstance(x, torch.Tensor):
-             device = x.device
-             if device.type != 'cpu':
-                 return device.index
-         return None
-
-     in_rank = get_tensor_rank_single(in_feat)
-     out_rank = get_tensor_rank_single(out_feat)
-     tensor_rank = in_rank if in_rank else out_rank
-     return tensor_rank
-
-
- def get_rank_id():
-     if torch.distributed.is_initialized():
-         return torch.distributed.get_rank()
-     return 0
-
-
- def print_rank_0(message):
-     if dist.is_initialized():
-         if dist.get_rank() == 0:
-             logger.info(message)
-     else:
-         logger.info(message)
-
-
- def load_pt(pt_path, to_cpu=False):
-     pt_path = os.path.realpath(pt_path)
-     check_file_or_directory_path(pt_path)
-     try:
-         if to_cpu:
-             pt = torch.load(pt_path, map_location=torch.device("cpu"))
-         else:
-             pt = torch.load(pt_path)
-     except Exception as e:
-         raise RuntimeError(f"load pt file {pt_path} failed") from e
-     return pt
-
-
- def save_pt(tensor, filepath):
-     filepath = os.path.realpath(filepath)
-     check_path_before_create(filepath)
-     try:
-         torch.save(tensor, filepath)
-     except Exception as e:
-         common_logger.error("Save pt file failed, please check according possible error causes: "
-                             "1. out of disk space or disk error, "
-                             "2. no permission to write files, etc.")
-         raise RuntimeError(f"save pt file {filepath} failed") from e
-     change_mode(filepath, FileCheckConst.DATA_FILE_AUTHORITY)
-
-
- def _create_logger(level=logging.INFO):
-     logger_ = logging.getLogger()
-     logger_.setLevel(level)
-     ch = logging.StreamHandler()
-     ch.setLevel(level)
-     logger_.addHandler(ch)
-     return logger_
-
-
- log_level = logging.DEBUG if os.environ.get("API_ACCURACY_CHECK_LOG_LEVEL") == "1" else logging.INFO
- logger = _create_logger(log_level)
+ # Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
+ # All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import io
+ import os
+ import random
+ import stat
+ from functools import wraps
+
+ import numpy as np
+ import torch
+ import torch.distributed as dist
+ from msprobe.core.common.exceptions import DistributedNotInitializedError
+ from msprobe.core.common.file_utils import (FileCheckConst, change_mode,
+                                             check_file_or_directory_path, check_path_before_create)
+ from msprobe.core.common.log import logger
+ from msprobe.core.common.utils import check_seed_all
+ from packaging import version
+
+ try:
+     import torch_npu
+ except ImportError:
+     is_gpu = True
+ else:
+     is_gpu = False
+
+ torch_without_guard_version = torch.__version__ >= '2.1'
+
+ if not is_gpu and not torch_without_guard_version:
+     from torch_npu.utils.device_guard import torch_device_guard as torch_npu_device_guard
+
+ npu_distributed_api = ['isend', 'irecv']
+
+
+ def parameter_adapter(func):
+     def handle_masked_select(input_tensor, indices):
+         masked_select_func = getattr(torch._C._VariableFunctionsClass, "masked_select")
+         if input_tensor.dtype == torch.bfloat16:
+             # masked_select在NPU上输入数据dtype类型为bfloat16会报错,提示不支持此类型
+             return masked_select_func(input_tensor.to(torch.float32), indices).to(torch.bfloat16)
+         else:
+             return masked_select_func(input_tensor, indices)
+
+     @wraps(func)
+     def inner(self, *args, **kwargs):
+         if self.op_name_ == "__getitem__" and len(args) > 1 and isinstance(args[1], torch.Tensor):
+             input_tensor = args[0]
+             indices = args[1]
+             if indices.dtype == torch.uint8:
+                 indices = indices.bool()
+             if indices.dtype == torch.bool:
+                 if indices.shape == input_tensor.shape:
+                     return handle_masked_select(input_tensor, indices)
+                 else:
+                     indices = getattr(torch._C._VariableFunctionsClass, "nonzero")(indices, as_tuple=True)
+                     return getattr(torch._C._TensorBase, "__getitem__")(input_tensor, indices)
+             elif indices.dtype != torch.bool:
+                 if not indices.shape or len(indices.shape) == 1:
+                     return func(self, input_tensor, indices.tolist())
+                 elif len(indices.shape) == 2:
+                     result = [func(self, input_tensor, index) for index in indices.tolist()]
+                     return getattr(torch._C._VariableFunctionsClass, "stack")(result, 0)
+                 else:
+                     res = [input_tensor[tensor_index] for tensor_index in indices]
+                     return getattr(torch._C._VariableFunctionsClass, "stack")(res, 0)
+         if self.op_name_ == "__eq__" and args[1] is None:
+             return False
+         return func(self, *args, **kwargs)
+
+     return inner
+
+
+ def torch_device_guard(func):
+     if is_gpu or torch_without_guard_version:
+         return func
+
+     # Parse args/kwargs matched torch.device objects
+     @torch_npu_device_guard
+     def wrapper(*args, **kwargs):
+         return func(*args, **kwargs)
+
+     return wrapper
+
+
+ def get_rank_if_initialized():
+     """
+     return rank id if it is initialized or raise Exception: DistributedNotInitializedError
+     """
+     if torch.distributed.is_initialized():
+         return torch.distributed.get_rank()
+     else:
+         raise DistributedNotInitializedError("torch distributed environment is not initialized")
+
+
+ def seed_all(seed=1234, mode=False):
+     check_seed_all(seed, mode)
+     try:
+         random.seed(seed)
+         os.environ['PYTHONHASHSEED'] = str(seed)
+         np.random.seed(seed)
+         torch.manual_seed(seed)
+         cuda_version = torch.version.cuda
+         if cuda_version is not None and version.parse(cuda_version) >= version.parse("10.2"):
+             os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
+         os.environ['HCCL_DETERMINISTIC'] = str(mode)
+         torch.use_deterministic_algorithms(mode)
+         if is_gpu:
+             torch.cuda.manual_seed_all(seed)
+             torch.cuda.manual_seed(seed)
+             torch.backends.cudnn.deterministic = True
+             torch.backends.cudnn.enable = False
+             torch.backends.cudnn.benchmark = False
+         else:
+             torch_npu.npu.manual_seed_all(seed)
+             torch_npu.npu.manual_seed(seed)
+     except Exception as e:
+         logger.error(f"There is an unexpected error while determinating randomness. {e}")
+
+
+ class Const:
+     """
+     Class for const
+     """
+     SEP = "."
+     MODEL_TYPE = ['.onnx', '.pb', '.om']
+     DIM_PATTERN = r"^(-?[0-9]+)(,-?[0-9]+)*"
+     SEMICOLON = ";"
+     COLON = ":"
+     EQUAL = "="
+     COMMA = ","
+     DOT = "."
+     DUMP_RATIO_MAX = 100
+     SUMMERY_DATA_NUMS = 256
+     FLOAT_EPSILON = np.finfo(float).eps
+     SUPPORT_DUMP_MODE = ['api', 'acl']
+     ON = 'ON'
+     OFF = 'OFF'
+     KWARGS = 'kwargs'
+     INPUT = 'input'
+     OUTPUT = 'output'
+     BACKWARD = 'backward'
+     FORWARD = 'forward'
+     PRE_FORWARD = "pre_forward"
+     INPUT_ARGS = 'input_args'
+     INPUT_KWARGS = 'input_kwargs'
+     GRAD_INPUT = 'grad_input'
+     GRAD_OUTPUT = 'grad_output'
+     START = "start"
+     STOP = "stop"
+     MAX = 'Max'
+     MIN = 'Min'
+
+     # dump mode
+     ALL = "all"
+     LIST = "list"
+     RANGE = "range"
+     STACK = "stack"
+     ACL = "acl"
+     API_LIST = "api_list"
+     API_STACK = "api_stack"
+     DUMP_MODE = [ALL, LIST, RANGE, STACK, ACL, API_LIST, API_STACK]
+     AUTO = "auto"
+     ONLINE_DUMP_MODE = [ALL, LIST, AUTO, OFF]
+     SUMMARY = "summary"
+     MD5 = "md5"
+     SUMMARY_MODE = [ALL, SUMMARY, MD5]
+
+     WRITE_FLAGS = os.O_WRONLY | os.O_CREAT
+     OVERWRITE_FLAGS = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
+     WRITE_MODES = stat.S_IWUSR | stat.S_IRUSR
+
+     PKL_SUFFIX = ".pkl"
+     NUMPY_SUFFIX = ".npy"
+     ONE_GB = 1 * 1024 * 1024 * 1024
+     TEN_GB = 10 * 1024 * 1024 * 1024
+     FILE_PATTERN = r'^[a-zA-Z0-9_./-]+$'
+     FILE_NAME_LENGTH = 255
+     DIRECTORY_LENGTH = 4096
+     DISTRIBUTED_PREFIX_LENGTH = 60
+     SUMMARY_COLUMN_NUM = 6
+     STACK_COLUMN_NUM = 2
+     # env dump path
+     ASCEND_WORK_PATH = "ASCEND_WORK_PATH"
+     DUMP_DIR = "dump_data"
+     DATA = "data"
+
+     ENV_ENABLE = "1"
+     ENV_DISABLE = "0"
+
+     MAX_SEED_VALUE = 2 ** 32 - 1
+
+     TASK_LIST = ["tensor", "statistics", "overflow_check", "free_benchmark"]
+     LEVEL_LIST = ["L0", "L1", "L2", "mix"]
+     STATISTICS = "statistics"
+     TENSOR = "tensor"
+     OVERFLOW_CHECK = "overflow_check"
+     FREE_BENCHMARK = "free_benchmark"
+
+     ATTR_NAME_PREFIX = "wrap_"
+
+     FLOAT_TYPE = [np.half, np.single, float, np.double, np.float64, np.longdouble, np.float32, np.float16]
+     BOOL_TYPE = [bool, np.uint8]
+     INT_TYPE = [np.int32, np.int64]
+     NPU = 'NPU'
+     DISTRIBUTED = 'Distributed'
+
+     RAISE_PRECISION = {
+         torch.float16: torch.float32,
+         torch.bfloat16: torch.float32,
+         torch.float32: torch.float64
+     }
+     CONVERT = {
+         "int32_to_int64": ["torch.int32", "torch.int64"],
+     }
+
+     CONVERT_API = {
+         "int32_to_int64": ["cross_entropy"]
+     }
+
+
+ def get_tensor_rank(in_feat, out_feat):
+     if dist.is_initialized():
+         return dist.get_rank()
+
+     def get_tensor_rank_single(x):
+         if isinstance(x, (list, tuple)):
+             if len(x) > 0:
+                 return get_tensor_rank_single(x[0])
+         elif isinstance(x, torch.Tensor):
+             device = x.device
+             if device.type != 'cpu':
+                 return device.index
+         return None
+
+     in_rank = get_tensor_rank_single(in_feat)
+     out_rank = get_tensor_rank_single(out_feat)
+     tensor_rank = in_rank if in_rank else out_rank
+     return tensor_rank
+
+
+ def get_rank_id():
+     if torch.distributed.is_initialized():
+         return torch.distributed.get_rank()
+     return 0
+
+
+ def print_rank_0(message):
+     if dist.is_initialized():
+         if dist.get_rank() == 0:
+             logger.info(message)
+     else:
+         logger.info(message)
+
+
+ def load_pt(pt_path, to_cpu=False):
+     pt_path = os.path.realpath(pt_path)
+     check_file_or_directory_path(pt_path)
+     try:
+         if to_cpu:
+             pt = torch.load(pt_path, map_location=torch.device("cpu"))
+         else:
+             pt = torch.load(pt_path)
+     except Exception as e:
+         raise RuntimeError(f"load pt file {pt_path} failed") from e
+     return pt
+
+
+ def save_pt(tensor, filepath):
+     filepath = os.path.realpath(filepath)
+     check_path_before_create(filepath)
+     try:
+         torch.save(tensor, filepath)
+     except Exception as e:
+         logger.error("Save pt file failed, please check according possible error causes: "
+                      "1. out of disk space or disk error, "
+                      "2. no permission to write files, etc.")
+         raise RuntimeError(f"save pt file {filepath} failed") from e
+     change_mode(filepath, FileCheckConst.DATA_FILE_AUTHORITY)
+
+
+ def save_api_data(api_data):
+     """Save data to io stream"""
+     try:
+         io_buff = io.BytesIO()
+         torch.save(api_data, io_buff)
+     except Exception as e:
+         raise RuntimeError(f"save api_data to io_buff failed") from e
+     return io_buff
+
+
+ def load_api_data(api_data_bytes):
+     """Load data from bytes stream"""
+     try:
+         buffer = io.BytesIO(api_data_bytes)
+         buffer = torch.load(buffer, map_location="cpu")
+     except Exception as e:
+         raise RuntimeError(f"load api_data from bytes failed") from e
+     return buffer
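
Two helpers are new at the end of this module: save_api_data packs an arbitrary torch object into an in-memory io.BytesIO buffer via torch.save, and load_api_data restores it from raw bytes, always mapping tensors back to CPU. A short usage sketch, not taken from the package, with illustrative variable names and assuming the module path identified above (msprobe.pytorch.common.utils):

    import torch

    from msprobe.pytorch.common.utils import load_api_data, save_api_data

    # Pack a payload of tensors into an in-memory buffer (torch.save under the hood) ...
    payload = {"input": torch.randn(2, 3), "grad": torch.ones(2, 3)}
    buff = save_api_data(payload)

    # ... transport buff.getvalue() as raw bytes, then rebuild the payload on the other side.
    restored = load_api_data(buff.getvalue())  # tensors are mapped back to CPU on load
    assert torch.equal(restored["input"], payload["input"])

The reworked seed_all in this hunk also sets CUBLAS_WORKSPACE_CONFIG (on CUDA 10.2 and newer) and HCCL_DETERMINISTIC alongside torch.use_deterministic_algorithms, so a single seed_all(mode=True) call now covers cuBLAS and HCCL determinism as well.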