mindstudio-probe 1.1.1__py3-none-any.whl → 1.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (226)
  1. {mindstudio_probe-1.1.1.dist-info → mindstudio_probe-1.2.2.dist-info}/METADATA +3 -2
  2. mindstudio_probe-1.2.2.dist-info/RECORD +415 -0
  3. msprobe/CMakeLists.txt +5 -0
  4. msprobe/README.md +16 -21
  5. msprobe/config.json +1 -0
  6. msprobe/core/common/const.py +185 -11
  7. msprobe/core/common/exceptions.py +3 -1
  8. msprobe/core/common/file_utils.py +33 -7
  9. msprobe/core/common/inplace_ops.yaml +4 -0
  10. msprobe/core/common/utils.py +42 -14
  11. msprobe/core/common_config.py +6 -0
  12. msprobe/core/compare/acc_compare.py +139 -128
  13. msprobe/core/compare/check.py +31 -29
  14. msprobe/core/compare/compare_cli.py +17 -16
  15. msprobe/core/compare/highlight.py +186 -99
  16. msprobe/core/compare/layer_mapping/data_scope_parser.py +19 -8
  17. msprobe/core/compare/layer_mapping/layer_mapping.py +21 -14
  18. msprobe/core/compare/layer_mapping/postprocess_pass.py +4 -3
  19. msprobe/core/compare/merge_result/merge_result.py +381 -0
  20. msprobe/core/compare/merge_result/merge_result_cli.py +31 -0
  21. msprobe/core/compare/merge_result/utils.py +81 -0
  22. msprobe/core/compare/multiprocessing_compute.py +2 -2
  23. msprobe/core/compare/npy_compare.py +109 -147
  24. msprobe/core/compare/utils.py +199 -69
  25. msprobe/core/data_dump/data_collector.py +100 -25
  26. msprobe/core/data_dump/data_processor/base.py +130 -28
  27. msprobe/core/data_dump/data_processor/factory.py +8 -3
  28. msprobe/core/data_dump/data_processor/mindspore_processor.py +170 -23
  29. msprobe/core/data_dump/data_processor/pytorch_processor.py +175 -64
  30. msprobe/core/data_dump/json_writer.py +54 -8
  31. msprobe/core/data_dump/scope.py +19 -18
  32. msprobe/core/overflow_check/abnormal_scene.py +9 -5
  33. msprobe/core/overflow_check/checker.py +1 -1
  34. msprobe/core/overflow_check/utils.py +1 -1
  35. msprobe/docs/01.installation.md +121 -17
  36. msprobe/docs/02.config_introduction.md +18 -16
  37. msprobe/docs/03.config_examples.md +24 -0
  38. msprobe/docs/05.data_dump_PyTorch.md +107 -58
  39. msprobe/docs/06.data_dump_MindSpore.md +95 -34
  40. msprobe/docs/07.accuracy_checker_PyTorch.md +18 -18
  41. msprobe/docs/09.accuracy_checker_MindSpore.md +8 -6
  42. msprobe/docs/10.accuracy_compare_PyTorch.md +99 -41
  43. msprobe/docs/11.accuracy_compare_MindSpore.md +249 -48
  44. msprobe/docs/12.overflow_check_PyTorch.md +1 -1
  45. msprobe/docs/19.monitor.md +310 -220
  46. msprobe/docs/21.visualization_PyTorch.md +125 -35
  47. msprobe/docs/22.visualization_MindSpore.md +149 -41
  48. msprobe/docs/23.generate_operator_PyTorch.md +107 -0
  49. msprobe/docs/24.code_mapping_Mindspore.md +28 -0
  50. msprobe/docs/{23.tool_function_introduction.md → 25.tool_function_introduction.md} +1 -0
  51. msprobe/docs/26.data_dump_PyTorch_baseline.md +37 -0
  52. msprobe/docs/27.dump_json_instruction.md +525 -0
  53. msprobe/docs/28.debugger_save_instruction.md +94 -0
  54. msprobe/docs/28.kernel_dump_MindSpore.md +69 -0
  55. msprobe/docs/FAQ.md +26 -2
  56. msprobe/docs/accuracy_checker_MindSpore/accuracy_checker_MindSpore_baseline.md +14 -0
  57. msprobe/docs/data_dump_MindSpore/data_dump_MindSpore_baseline.md +22 -0
  58. msprobe/docs/img/merge_result.png +0 -0
  59. msprobe/docs/img/monitor/step_count_per_record.png +0 -0
  60. msprobe/docs/img/visualization/fuzzy_match_ms.png +0 -0
  61. msprobe/docs/img/visualization/fuzzy_match_pt.png +0 -0
  62. msprobe/docs/img/visualization/tensorboard_1.png +0 -0
  63. msprobe/docs/img/visualization/tensorboard_2.png +0 -0
  64. msprobe/docs/img/visualization/vis_browser_1.png +0 -0
  65. msprobe/docs/img/visualization/vis_browser_2.png +0 -0
  66. msprobe/docs/img/visualization/vis_precision_info.png +0 -0
  67. msprobe/docs/img/visualization/vis_search_info.png +0 -0
  68. msprobe/docs/img/visualization/vis_show_info.png +0 -0
  69. msprobe/docs/img/visualization/vis_showcase.png +0 -0
  70. msprobe/docs/img/visualization/vis_unmatch_info.png +0 -0
  71. msprobe/docs/visualization/GPTModel.png +0 -0
  72. msprobe/docs/visualization/ParallelMLP.png +0 -0
  73. msprobe/docs/visualization/layer_mapping_example.md +132 -0
  74. msprobe/docs/visualization/mapping.png +0 -0
  75. msprobe/docs/visualization/mapping1.png +0 -0
  76. msprobe/docs/visualization/module_name.png +0 -0
  77. msprobe/docs/visualization/module_name1.png +0 -0
  78. msprobe/docs/visualization/no_mapping.png +0 -0
  79. msprobe/docs/visualization/no_mapping1.png +0 -0
  80. msprobe/docs/visualization/no_mapping_analyze.png +0 -0
  81. msprobe/docs/visualization/top_layer.png +0 -0
  82. msprobe/mindspore/__init__.py +11 -0
  83. msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +80 -28
  84. msprobe/mindspore/api_accuracy_checker/api_runner.py +54 -16
  85. msprobe/mindspore/api_accuracy_checker/cmd_parser.py +2 -1
  86. msprobe/mindspore/api_accuracy_checker/compute_element.py +52 -8
  87. msprobe/mindspore/api_accuracy_checker/data_manager.py +37 -0
  88. msprobe/mindspore/api_accuracy_checker/main.py +1 -0
  89. msprobe/mindspore/api_accuracy_checker/multi_api_accuracy_checker.py +12 -6
  90. msprobe/mindspore/api_accuracy_checker/multi_data_manager.py +3 -1
  91. msprobe/mindspore/api_accuracy_checker/torch_mindtorch_importer.py +129 -0
  92. msprobe/mindspore/api_accuracy_checker/type_mapping.py +24 -1
  93. msprobe/mindspore/api_accuracy_checker/utils.py +6 -1
  94. msprobe/mindspore/code_mapping/bind.py +264 -0
  95. msprobe/mindspore/code_mapping/cmd_parser.py +40 -0
  96. msprobe/mindspore/code_mapping/graph.py +49 -0
  97. msprobe/mindspore/code_mapping/graph_parser.py +226 -0
  98. msprobe/mindspore/code_mapping/main.py +24 -0
  99. msprobe/mindspore/code_mapping/processor.py +34 -0
  100. msprobe/mindspore/common/const.py +3 -1
  101. msprobe/mindspore/common/utils.py +68 -5
  102. msprobe/mindspore/compare/distributed_compare.py +0 -2
  103. msprobe/mindspore/compare/ms_compare.py +105 -63
  104. msprobe/mindspore/compare/ms_graph_compare.py +14 -5
  105. msprobe/mindspore/debugger/debugger_config.py +28 -2
  106. msprobe/mindspore/debugger/precision_debugger.py +100 -12
  107. msprobe/mindspore/dump/hook_cell/api_registry.py +85 -16
  108. msprobe/mindspore/dump/hook_cell/hook_cell.py +60 -38
  109. msprobe/mindspore/dump/hook_cell/primitive_hooks.py +33 -15
  110. msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +11 -1
  111. msprobe/mindspore/dump/hook_cell/wrap_api.py +92 -1
  112. msprobe/mindspore/dump/jit_dump.py +7 -6
  113. msprobe/mindspore/dump/kernel_dump/kernel_config.py +33 -0
  114. msprobe/mindspore/dump/kernel_graph_dump.py +7 -0
  115. msprobe/mindspore/free_benchmark/api_pynative_self_check.py +13 -4
  116. msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +2 -2
  117. msprobe/mindspore/grad_probe/grad_analyzer.py +24 -12
  118. msprobe/mindspore/grad_probe/hook.py +13 -4
  119. msprobe/mindspore/mindtorch/__init__.py +18 -0
  120. msprobe/mindspore/mindtorch/mindtorch_adaptor.py +255 -0
  121. msprobe/mindspore/monitor/anomaly_detect.py +404 -0
  122. msprobe/mindspore/monitor/distributed/__init__.py +0 -0
  123. msprobe/mindspore/monitor/distributed/distributed_ops.yaml +15 -0
  124. msprobe/mindspore/monitor/distributed/stack_blacklist.yaml +5 -0
  125. msprobe/mindspore/monitor/distributed/wrap_distributed.py +300 -0
  126. msprobe/mindspore/monitor/features.py +63 -0
  127. msprobe/mindspore/monitor/module_hook.py +821 -0
  128. msprobe/mindspore/monitor/module_spec_verifier.py +94 -0
  129. msprobe/mindspore/monitor/utils.py +267 -0
  130. msprobe/mindspore/ms_config.py +13 -3
  131. msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +7 -0
  132. msprobe/mindspore/service.py +347 -107
  133. msprobe/msprobe.py +24 -3
  134. msprobe/pytorch/__init__.py +7 -7
  135. msprobe/pytorch/api_accuracy_checker/common/utils.py +31 -16
  136. msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +41 -8
  137. msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +100 -267
  138. msprobe/pytorch/api_accuracy_checker/compare/api_precision_standard.yaml +4 -1
  139. msprobe/pytorch/api_accuracy_checker/compare/compare.py +69 -68
  140. msprobe/pytorch/api_accuracy_checker/compare/compare_column.py +54 -0
  141. msprobe/pytorch/api_accuracy_checker/compare/compare_input.py +51 -0
  142. msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +2 -4
  143. msprobe/pytorch/api_accuracy_checker/generate_op_script/op_generator.py +55 -31
  144. msprobe/pytorch/api_accuracy_checker/precision_standard/absolute_threshold.py +106 -0
  145. msprobe/pytorch/api_accuracy_checker/precision_standard/accumulative_error_compare.py +107 -0
  146. msprobe/pytorch/api_accuracy_checker/precision_standard/base_standard.py +151 -0
  147. msprobe/pytorch/api_accuracy_checker/precision_standard/benchmark_compare.py +226 -0
  148. msprobe/pytorch/api_accuracy_checker/precision_standard/binary_consistency.py +68 -0
  149. msprobe/pytorch/api_accuracy_checker/precision_standard/standard_config.py +218 -0
  150. msprobe/pytorch/api_accuracy_checker/precision_standard/standard_register.py +104 -0
  151. msprobe/pytorch/api_accuracy_checker/precision_standard/thousandth_standard.py +63 -0
  152. msprobe/pytorch/api_accuracy_checker/precision_standard/ulp_compare.py +200 -0
  153. msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +57 -1
  154. msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +2 -1
  155. msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +42 -14
  156. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +64 -19
  157. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +34 -4
  158. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/device_dispatch.py +5 -3
  159. msprobe/pytorch/bench_functions/apply_adam.py +215 -0
  160. msprobe/pytorch/bench_functions/group_norm_silu.py +27 -0
  161. msprobe/pytorch/bench_functions/mish.py +21 -0
  162. msprobe/pytorch/bench_functions/moe_gating_top_k_softmax.py +44 -0
  163. msprobe/pytorch/bench_functions/npu_fusion_attention.py +42 -10
  164. msprobe/pytorch/bench_functions/sort_v2.py +21 -0
  165. msprobe/pytorch/common/parse_json.py +2 -1
  166. msprobe/pytorch/common/utils.py +116 -2
  167. msprobe/pytorch/compare/distributed_compare.py +17 -29
  168. msprobe/pytorch/compare/pt_compare.py +40 -20
  169. msprobe/pytorch/debugger/debugger_config.py +42 -17
  170. msprobe/pytorch/debugger/precision_debugger.py +56 -12
  171. msprobe/pytorch/dump/module_dump/__init__.py +0 -0
  172. msprobe/pytorch/dump/module_dump/module_dump.py +86 -0
  173. msprobe/pytorch/dump/module_dump/module_processer.py +204 -0
  174. msprobe/pytorch/free_benchmark/common/params.py +2 -1
  175. msprobe/pytorch/free_benchmark/common/utils.py +3 -0
  176. msprobe/pytorch/free_benchmark/compare/grad_saver.py +0 -2
  177. msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +31 -47
  178. msprobe/pytorch/free_benchmark/result_handlers/preheat_handler.py +0 -4
  179. msprobe/pytorch/function_factory.py +7 -1
  180. msprobe/pytorch/hook_module/__init__.py +1 -1
  181. msprobe/pytorch/hook_module/hook_module.py +14 -11
  182. msprobe/pytorch/hook_module/register_optimizer_hook.py +59 -0
  183. msprobe/pytorch/hook_module/support_wrap_ops.yaml +36 -1
  184. msprobe/pytorch/hook_module/wrap_distributed.py +10 -8
  185. msprobe/pytorch/hook_module/wrap_functional.py +0 -40
  186. msprobe/pytorch/monitor/anomaly_analyse.py +1 -1
  187. msprobe/pytorch/monitor/anomaly_detect.py +98 -28
  188. msprobe/pytorch/monitor/csv2tb.py +164 -0
  189. msprobe/pytorch/monitor/distributed/wrap_distributed.py +25 -14
  190. msprobe/pytorch/monitor/features.py +3 -3
  191. msprobe/pytorch/monitor/module_hook.py +543 -318
  192. msprobe/pytorch/monitor/module_metric.py +27 -48
  193. msprobe/pytorch/monitor/module_spec_verifier.py +3 -1
  194. msprobe/pytorch/monitor/optimizer_collect.py +76 -56
  195. msprobe/pytorch/monitor/unittest/test_monitor.py +24 -9
  196. msprobe/pytorch/monitor/utils.py +84 -48
  197. msprobe/pytorch/online_dispatch/dispatch.py +8 -2
  198. msprobe/pytorch/parse_tool/lib/compare.py +10 -10
  199. msprobe/pytorch/parse_tool/lib/config.py +5 -7
  200. msprobe/pytorch/parse_tool/lib/file_desc.py +15 -1
  201. msprobe/pytorch/parse_tool/lib/interactive_cli.py +10 -10
  202. msprobe/pytorch/parse_tool/lib/parse_exception.py +7 -7
  203. msprobe/pytorch/parse_tool/lib/parse_tool.py +11 -10
  204. msprobe/pytorch/parse_tool/lib/utils.py +18 -19
  205. msprobe/pytorch/parse_tool/lib/visualization.py +9 -10
  206. msprobe/pytorch/pt_config.py +19 -22
  207. msprobe/pytorch/service.py +264 -115
  208. msprobe/visualization/builder/graph_builder.py +93 -10
  209. msprobe/visualization/builder/msprobe_adapter.py +30 -6
  210. msprobe/visualization/compare/graph_comparator.py +64 -14
  211. msprobe/visualization/compare/mode_adapter.py +1 -15
  212. msprobe/visualization/graph/base_node.py +15 -19
  213. msprobe/visualization/graph/distributed_analyzer.py +395 -0
  214. msprobe/visualization/graph/graph.py +9 -0
  215. msprobe/visualization/graph/node_op.py +4 -2
  216. msprobe/visualization/graph_service.py +100 -27
  217. msprobe/visualization/utils.py +24 -31
  218. mindstudio_probe-1.1.1.dist-info/RECORD +0 -341
  219. msprobe/pytorch/functional/module_dump.py +0 -84
  220. msprobe/pytorch/module_processer.py +0 -150
  221. {mindstudio_probe-1.1.1.dist-info → mindstudio_probe-1.2.2.dist-info}/LICENSE +0 -0
  222. {mindstudio_probe-1.1.1.dist-info → mindstudio_probe-1.2.2.dist-info}/WHEEL +0 -0
  223. {mindstudio_probe-1.1.1.dist-info → mindstudio_probe-1.2.2.dist-info}/entry_points.txt +0 -0
  224. {mindstudio_probe-1.1.1.dist-info → mindstudio_probe-1.2.2.dist-info}/top_level.txt +0 -0
  225. /msprobe/docs/{data_dump_Mindspore → data_dump_MindSpore}/dynamic_graph_quick_start_example.md +0 -0
  226. /msprobe/{pytorch/functional → mindspore/code_mapping}/__init__.py +0 -0
@@ -0,0 +1,37 @@
+ # Accuracy Data Collection Baselines for the PyTorch Scenario
+
+ ## Reference baseline for data volume collected in "tensor" mode
+
+ This baseline describes the volume of data collected with the "tensor" mode under the PyTorch framework. Two models were measured, LLAMA2-7B and LLAMA2-13B, covering how the collected data volume changes across collection modes, global_batch_size values, and single-card versus 8-card setups.
+
+ ### LLAMA2-7B
+
+ <table>
+ <tr><th>Collection mode</th><th>global_batch_size</th><th>Single card</th><th>8 cards</th></tr>
+ <tr><td rowspan="3">L0</td><td>1</td><td>7.8GB</td><td>63GB</td></tr>
+ <tr><td>2</td><td>16GB</td><td>125GB</td></tr>
+ <tr><td>3</td><td>24GB</td><td>187GB</td></tr>
+ <tr><td rowspan="3">L1</td><td>1</td><td>300.8GB</td><td>2.3TB</td></tr>
+ <tr><td>2</td><td>480GB</td><td>3.6TB</td></tr>
+ <tr><td>3</td><td>640GB</td><td>4.9TB</td></tr>
+ <tr><td rowspan="3">mix</td><td>1</td><td>313.6GB</td><td>2.4TB</td></tr>
+ <tr><td>2</td><td>512GB</td><td>3.8TB</td></tr>
+ <tr><td>3</td><td>672GB</td><td>5.1TB</td></tr>
+
+ </table>
+
+ ### LLAMA2-13B
+
+ <table>
+ <tr><th>Collection mode</th><th>global_batch_size</th><th>Single card</th><th>8 cards</th></tr>
+ <tr><td rowspan="3">L0</td><td>1</td><td>13GB</td><td>97GB</td></tr>
+ <tr><td>2</td><td>25GB</td><td>194GB</td></tr>
+ <tr><td>3</td><td>37GB</td><td>291GB</td></tr>
+ <tr><td rowspan="3">L1</td><td>1</td><td>440GB</td><td>3.4TB</td></tr>
+ <tr><td>2</td><td>720GB</td><td>5.4TB</td></tr>
+ <tr><td>3</td><td>960GB</td><td>7.3TB</td></tr>
+ <tr><td rowspan="3">mix</td><td>1</td><td>480GB</td><td>3.6TB</td></tr>
+ <tr><td>2</td><td>720GB</td><td>5.6TB</td></tr>
+ <tr><td>3</td><td>1000GB</td><td>7.7TB</td></tr>
+
+ </table>
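Reading the tables, the collected volume scales roughly linearly with both global_batch_size and the number of cards; for example, the 8-card L0 figure for LLAMA2-7B at global_batch_size 1 (63GB) is about eight times the single-card figure (7.8GB), which can serve as a rough rule for disk-space planning.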
@@ -0,0 +1,525 @@
+ # dump.json File Description and Examples
+
+ ## 1. dump.json examples (PyTorch)
+
+ ### 1.1 L0 level
+ An L0-level dump.json file contains each module's forward and backward inputs and outputs, together with the module's parameters and parameter gradients. Taking PyTorch's Conv2d module as an example, the module is invoked in the network as:
+ `output = self.conv2(input) # self.conv2 = torch.nn.Conv2d(64, 128, 5, padding=2, bias=True)`
+
+ The dump.json file contains the following data names:
+
+ - `Module.conv2.Conv2d.forward.0`: the module's forward data, where input_args holds the positional input arguments, input_kwargs the keyword input arguments, output the module's outputs, and parameters the module's parameters, including the weight and bias.
+ - `Module.conv2.Conv2d.parameters_grad`: the module's parameter gradients, i.e. the gradients of the weight and bias.
+ - `Module.conv2.Conv2d.backward.0`: the module's backward data, where input holds the incoming backward gradients (the gradients of the forward outputs) and output holds the outgoing backward gradients (the gradients of the forward inputs).
+
+ **Note**: when the model argument passed at dump time is a List[torch.nn.Module] or Tuple[torch.nn.Module], module-level data names include the module's index in the list, using the format `{Module}.{index}.*`, where * stands for any of the three module-level name formats above, e.g. `Module.0.conv1.Conv2d.forward.0`.
+
+ ```json
+ {
+   "task": "tensor",
+   "level": "L0",
+   "framework": "pytorch",
+   "dump_data_dir": "/dump/path",
+   "data": {
+     "Module.conv2.Conv2d.forward.0": {
+       "input_args": [
+         {
+           "type": "torch.Tensor",
+           "dtype": "torch.float32",
+           "shape": [8, 16, 14, 14],
+           "Max": 1.638758659362793,
+           "Min": 0.0,
+           "Mean": 0.2544615864753723,
+           "Norm": 70.50277709960938,
+           "requires_grad": true,
+           "data_name": "Module.conv2.Conv2d.forward.0.input.0.pt"
+         }
+       ],
+       "input_kwargs": {},
+       "output": [
+         {
+           "type": "torch.Tensor",
+           "dtype": "torch.float32",
+           "shape": [8, 32, 10, 10],
+           "Max": 1.6815717220306396,
+           "Min": -1.5120246410369873,
+           "Mean": -0.025344856083393097,
+           "Norm": 149.65576171875,
+           "requires_grad": true,
+           "data_name": "Module.conv2.Conv2d.forward.0.output.0.pt"
+         }
+       ],
+       "parameters": {
+         "weight": {
+           "type": "torch.Tensor",
+           "dtype": "torch.float32",
+           "shape": [32, 16, 5, 5],
+           "Max": 0.05992485210299492,
+           "Min": -0.05999220535159111,
+           "Mean": -0.0006165213999338448,
+           "Norm": 3.421217441558838,
+           "requires_grad": true,
+           "data_name": "Module.conv2.Conv2d.forward.0.parameters.weight.pt"
+         },
+         "bias": {
+           "type": "torch.Tensor",
+           "dtype": "torch.float32",
+           "shape": [32],
+           "Max": 0.05744686722755432,
+           "Min": -0.04894155263900757,
+           "Mean": 0.006410328671336174,
+           "Norm": 0.17263513803482056,
+           "requires_grad": true,
+           "data_name": "Module.conv2.Conv2d.forward.0.parameters.bias.pt"
+         }
+       }
+     },
+     "Module.conv2.Conv2d.parameters_grad": {
+       "weight": [
+         {
+           "type": "torch.Tensor",
+           "dtype": "torch.float32",
+           "shape": [32, 16, 5, 5],
+           "Max": 0.018550323322415352,
+           "Min": -0.008627401664853096,
+           "Mean": 0.0006675920449197292,
+           "Norm": 0.26084786653518677,
+           "requires_grad": false,
+           "data_name": "Module.conv2.Conv2d.parameters_grad.weight.pt"
+         }
+       ],
+       "bias": [
+         {
+           "type": "torch.Tensor",
+           "dtype": "torch.float32",
+           "shape": [32],
+           "Max": 0.014914230443537235,
+           "Min": -0.006656786892563105,
+           "Mean": 0.002657240955159068,
+           "Norm": 0.029451673850417137,
+           "requires_grad": false,
+           "data_name": "Module.conv2.Conv2d.parameters_grad.bias.pt"
+         }
+       ]
+     },
+     "Module.conv2.Conv2d.backward.0": {
+       "input": [
+         {
+           "type": "torch.Tensor",
+           "dtype": "torch.float32",
+           "shape": [8, 32, 10, 10],
+           "Max": 0.0015069986693561077,
+           "Min": -0.001139344065450132,
+           "Mean": 3.3215508210560074e-06,
+           "Norm": 0.020567523315548897,
+           "requires_grad": false,
+           "data_name": "Module.conv2.Conv2d.backward.0.input.0.pt"
+         }
+       ],
+       "output": [
+         {
+           "type": "torch.Tensor",
+           "dtype": "torch.float32",
+           "shape": [8, 16, 14, 14],
+           "Max": 0.0007466732058674097,
+           "Min": -0.00044813455315306783,
+           "Mean": 6.814070275140693e-06,
+           "Norm": 0.01474067009985447,
+           "requires_grad": false,
+           "data_name": "Module.conv2.Conv2d.backward.0.output.0.pt"
+         }
+       ]
+     }
+   }
+ }
+ ```
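As a minimal illustration (not part of the original document), the sketch below loads a dump.json file with the layout shown above and prints the statistics recorded for the Conv2d module's forward call; the file path is a placeholder.

```python
import json

# Placeholder path: point this at a dump.json produced with level "L0".
with open("./dump/path/dump.json", "r") as f:
    dump = json.load(f)

entry = dump["data"]["Module.conv2.Conv2d.forward.0"]
for tensor_info in entry["input_args"] + entry["output"]:
    # Every recorded tensor carries its shape plus Max/Min/Mean/Norm statistics.
    print(tensor_info["data_name"], tensor_info["shape"],
          tensor_info["Max"], tensor_info["Min"], tensor_info["Norm"])
```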
+
+ ### 1.2 L1 level
+ An L1-level dump.json file contains each API's forward and backward inputs and outputs. Taking PyTorch's relu function as an example, the API is invoked in the network as:
+ `output = torch.nn.functional.relu(input)`
+
+ The dump.json file contains the following data names:
+ - `Functional.relu.0.forward`: the API's forward data, where input_args holds the positional input arguments, input_kwargs the keyword input arguments, and output the API's outputs.
+ - `Functional.relu.0.backward`: the API's backward data, where input holds the incoming backward gradients (the gradients of the forward outputs) and output holds the outgoing backward gradients (the gradients of the forward inputs).
+
+ ```json
+ {
+   "task": "tensor",
+   "level": "L1",
+   "framework": "pytorch",
+   "dump_data_dir": "/dump/path",
+   "data": {
+     "Functional.relu.0.forward": {
+       "input_args": [
+         {
+           "type": "torch.Tensor",
+           "dtype": "torch.float32",
+           "shape": [32, 16, 28, 28],
+           "Max": 1.3864083290100098,
+           "Min": -1.3364859819412231,
+           "Mean": 0.03711778670549393,
+           "Norm": 236.20692443847656,
+           "requires_grad": true,
+           "data_name": "Functional.relu.0.forward.input.0.pt"
+         }
+       ],
+       "input_kwargs": {},
+       "output": [
+         {
+           "type": "torch.Tensor",
+           "dtype": "torch.float32",
+           "shape": [32, 16, 28, 28],
+           "Max": 1.3864083290100098,
+           "Min": 0.0,
+           "Mean": 0.16849493980407715,
+           "Norm": 175.23345947265625,
+           "requires_grad": true,
+           "data_name": "Functional.relu.0.forward.output.0.pt"
+         }
+       ]
+     },
+     "Functional.relu.0.backward": {
+       "input": [
+         {
+           "type": "torch.Tensor",
+           "dtype": "torch.float32",
+           "shape": [32, 16, 28, 28],
+           "Max": 0.0001815402356442064,
+           "Min": -0.00013352684618439525,
+           "Mean": 0.00011915402356442064,
+           "Norm": 0.007598237134516239,
+           "requires_grad": false,
+           "data_name": "Functional.relu.0.backward.input.0.pt"
+         }
+       ],
+       "output": [
+         {
+           "type": "torch.Tensor",
+           "dtype": "torch.float32",
+           "shape": [32, 16, 28, 28],
+           "Max": 0.0001815402356442064,
+           "Min": -0.00012117840378778055,
+           "Mean": 2.0098118724831693e-08,
+           "Norm": 0.006532244384288788,
+           "requires_grad": false,
+           "data_name": "Functional.relu.0.backward.output.0.pt"
+         }
+       ]
+     }
+   }
+ }
+ ```
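For a quick manual check, a sketch along the following lines can diff the statistics of two dump.json files that share the same data names (the paths are placeholders; the msprobe compare tools documented elsewhere do this systematically):

```python
import json

def load_stats(path):
    # Map "<data name>.<field>.<index>" to the recorded Max/Min/Mean/Norm tuple.
    with open(path, "r") as f:
        data = json.load(f)["data"]
    stats = {}
    for name, entry in data.items():
        for field in ("input_args", "input", "output"):
            for idx, info in enumerate(entry.get(field, [])):
                stats[f"{name}.{field}.{idx}"] = (info["Max"], info["Min"], info["Mean"], info["Norm"])
    return stats

bench = load_stats("./bench_dump/dump.json")      # placeholder paths
compare = load_stats("./compare_dump/dump.json")
for key in sorted(bench.keys() & compare.keys()):
    if bench[key] != compare[key]:
        print("statistics differ:", key, bench[key], compare[key])
```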
+
+ ### 1.3 mix level
+
+ A mix-level dump.json file contains both the L0-level and the L1-level dump data; the file format is the same as in the examples above.
+
+ ## 2. dump.json examples (MindSpore)
+
+ ### 2.1 L0 level
+
+ An L0-level dump.json file contains each module's forward and backward inputs and outputs, together with the module's parameters and parameter gradients.
+ Taking MindSpore's Conv2d module as an example, the module invocation recorded in dump.json is:
+ `output = self.conv2(input) # self.conv2 = mindspore.nn.Conv2d(64, 128, 5, pad_mode='same', has_bias=True)`
+
+ The dump.json file contains the following data names:
+ - `Cell.conv2.Conv2d.forward.0`: the module's forward data, where input_args holds the positional input arguments, input_kwargs the keyword input arguments, output the module's outputs, and parameters the module's parameters, including the weight and bias.
+ - `Cell.conv2.Conv2d.parameters_grad`: the module's parameter gradients, i.e. the gradients of the weight and bias.
+ - `Cell.conv2.Conv2d.backward.0`: the module's backward data, where input holds the incoming backward gradients (the gradients of the forward outputs) and output holds the outgoing backward gradients (the gradients of the forward inputs).
+
+ **Note**: when the model argument passed at dump time is a List[mindspore.nn.Cell] or Tuple[mindspore.nn.Cell], module-level data names include the module's index in the list, using the format `{Cell}.{index}.*`, where * stands for any of the three module-level name formats above, e.g. `Cell.0.conv2.Conv2d.forward.0`.
+
+ ```json
+ {
+   "task": "tensor",
+   "level": "L0",
+   "framework": "mindspore",
+   "dump_data_dir": "/dump/path",
+   "data": {
+     "Cell.conv2.Conv2d.forward.0": {
+       "input_args": [
+         {
+           "type": "mindspore.Tensor",
+           "dtype": "Float32",
+           "shape": [8, 16, 14, 14],
+           "Max": 1.638758659362793,
+           "Min": 0.0,
+           "Mean": 0.2544615864753723,
+           "Norm": 70.50277709960938,
+           "data_name": "Cell.conv2.Conv2d.forward.0.input.0.npy"
+         }
+       ],
+       "input_kwargs": {},
+       "output": [
+         {
+           "type": "mindspore.Tensor",
+           "dtype": "Float32",
+           "shape": [8, 32, 10, 10],
+           "Max": 1.6815717220306396,
+           "Min": -1.5120246410369873,
+           "Mean": -0.025344856083393097,
+           "Norm": 149.65576171875,
+           "data_name": "Cell.conv2.Conv2d.forward.0.output.0.npy"
+         }
+       ],
+       "parameters": {
+         "weight": {
+           "type": "mindspore.Tensor",
+           "dtype": "Float32",
+           "shape": [32, 16, 5, 5],
+           "Max": 0.05992485210299492,
+           "Min": -0.05999220535159111,
+           "Mean": -0.0006165213999338448,
+           "Norm": 3.421217441558838,
+           "data_name": "Cell.conv2.Conv2d.forward.0.parameters.weight.npy"
+         },
+         "bias": {
+           "type": "mindspore.Tensor",
+           "dtype": "Float32",
+           "shape": [32],
+           "Max": 0.05744686722755432,
+           "Min": -0.04894155263900757,
+           "Mean": 0.006410328671336174,
+           "Norm": 0.17263513803482056,
+           "data_name": "Cell.conv2.Conv2d.forward.0.parameters.bias.npy"
+         }
+       }
+     },
+     "Cell.conv2.Conv2d.parameters_grad": {
+       "weight": [
+         {
+           "type": "mindspore.Tensor",
+           "dtype": "Float32",
+           "shape": [32, 16, 5, 5],
+           "Max": 0.018550323322415352,
+           "Min": -0.008627401664853096,
+           "Mean": 0.0006675920449197292,
+           "Norm": 0.26084786653518677,
+           "data_name": "Cell.conv2.Conv2d.parameters_grad.weight.npy"
+         }
+       ],
+       "bias": [
+         {
+           "type": "mindspore.Tensor",
+           "dtype": "Float32",
+           "shape": [32],
+           "Max": 0.014914230443537235,
+           "Min": -0.006656786892563105,
+           "Mean": 0.002657240955159068,
+           "Norm": 0.029451673850417137,
+           "data_name": "Cell.conv2.Conv2d.parameters_grad.bias.npy"
+         }
+       ]
+     },
+     "Cell.conv2.Conv2d.backward.0": {
+       "input": [
+         {
+           "type": "mindspore.Tensor",
+           "dtype": "Float32",
+           "shape": [8, 32, 10, 10],
+           "Max": 0.0015069986693561077,
+           "Min": -0.001139344065450132,
+           "Mean": 3.3215508210560074e-06,
+           "Norm": 0.020567523315548897,
+           "data_name": "Cell.conv2.Conv2d.backward.0.input.0.npy"
+         }
+       ],
+       "output": [
+         {
+           "type": "mindspore.Tensor",
+           "dtype": "Float32",
+           "shape": [8, 16, 14, 14],
+           "Max": 0.0007466732058674097,
+           "Min": -0.00044813455315306783,
+           "Mean": 6.814070275140693e-06,
+           "Norm": 0.01474067009985447,
+           "data_name": "Cell.conv2.Conv2d.backward.0.output.0.npy"
+         }
+       ]
+     }
+   }
+ }
+ ```
+
+ ### 2.2 L1 level
+ An L1-level dump.json file contains each API's forward and backward inputs and outputs. Taking MindSpore's relu function as an example, the API is invoked in the network as:
+ `output = mindspore.ops.relu(input)`
+
+ The dump.json file contains the following data names:
+ - `Functional.relu.0.forward`: the API's forward data, where input_args holds the positional input arguments, input_kwargs the keyword input arguments, and output the API's outputs.
+ - `Functional.relu.0.backward`: the API's backward data, where input holds the incoming backward gradients (the gradients of the forward outputs) and output holds the outgoing backward gradients (the gradients of the forward inputs).
+
+ ```json
+ {
+   "task": "tensor",
+   "level": "L1",
+   "framework": "mindspore",
+   "dump_data_dir": "/dump/path",
+   "data": {
+     "Functional.relu.0.forward": {
+       "input_args": [
+         {
+           "type": "mindspore.Tensor",
+           "dtype": "Float32",
+           "shape": [32, 16, 28, 28],
+           "Max": 1.3864083290100098,
+           "Min": -1.3364859819412231,
+           "Mean": 0.03711778670549393,
+           "Norm": 236.20692443847656,
+           "data_name": "Functional.relu.0.forward.input.0.npy"
+         }
+       ],
+       "input_kwargs": {},
+       "output": [
+         {
+           "type": "mindspore.Tensor",
+           "dtype": "Float32",
+           "shape": [32, 16, 28, 28],
+           "Max": 1.3864083290100098,
+           "Min": 0.0,
+           "Mean": 0.16849493980407715,
+           "Norm": 175.23345947265625,
+           "data_name": "Functional.relu.0.forward.output.0.npy"
+         }
+       ]
+     },
+     "Functional.relu.0.backward": {
+       "input": [
+         {
+           "type": "mindspore.Tensor",
+           "dtype": "Float32",
+           "shape": [32, 16, 28, 28],
+           "Max": 0.0001815402356442064,
+           "Min": -0.00013352684618439525,
+           "Mean": 0.00011915402356442064,
+           "Norm": 0.007598237134516239,
+           "data_name": "Functional.relu.0.backward.input.0.npy"
+         }
+       ],
+       "output": [
+         {
+           "type": "mindspore.Tensor",
+           "dtype": "Float32",
+           "shape": [32, 16, 28, 28],
+           "Max": 0.0001815402356442064,
+           "Min": -0.00012117840378778055,
+           "Mean": 2.0098118724831693e-08,
+           "Norm": 0.006532244384288788,
+           "data_name": "Functional.relu.0.backward.output.0.npy"
+         }
+       ]
+     }
+   }
+ }
+ ```
+
+ ### 2.3 mix level
+ A mix-level dump.json file contains both the L0-level and the L1-level dump data; the file format is the same as in the examples above.
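At mix level, module-level and API-level entries appear side by side in the same file. The following sketch (illustrative only, using the name prefixes from the examples above and a placeholder path) separates the two kinds of entries:

```python
import json

with open("./dump/path/dump.json", "r") as f:  # placeholder path
    names = list(json.load(f)["data"].keys())

# Module-level entries start with "Module." (PyTorch) or "Cell." (MindSpore);
# the remaining entries (e.g. "Functional.*") are API-level data.
module_entries = [n for n in names if n.startswith(("Module.", "Cell."))]
api_entries = [n for n in names if not n.startswith(("Module.", "Cell."))]
print(f"{len(module_entries)} module-level entries, {len(api_entries)} API-level entries")
```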
@@ -0,0 +1,94 @@
+ # Single-Point Save Tool README
+
+ ## Introduction
+ L0, L1 and mix dumps have blind spots: inputs and outputs in the network that do not belong to an API or module are not captured by the batch dump. Single-point save offers functionality and a user experience similar to np.save and print, letting you save specified variables. It has also been enhanced for large-model scenarios and provides the following features:
+ - Can save a variable's backward gradient result.
+ - Can directly save nested data structures (such as list and dict) without manual traversal.
+ - Automatically saves data per rank.
+ - Automatically counts repeated calls.
+ - Can be configured to save either statistics or full tensors.
+
+ ## Supported scenarios
+ Only the PyTorch and MindSpore dynamic graph scenarios are supported.
+
+ ## How to enable
+
+ ### Configuration file
+
+ Common configuration:
+
+ | Parameter | Description | Required |
+ | -------- | -------- | -------- |
+ | task | Type of dump task, str. For single-point save only "statistics" and "tensor" are supported. | Yes |
+ | level | Dump level, str; different levels collect different data. For single-point save pass "debug". | Yes |
+ | dump_path | Directory path for the dump data, str. See the [common configuration description](./02.config_introduction.md#11-通用配置) for details. | Yes |
+ | rank | Restricts collection to the specified cards, list[Union[int, str]]. See the [common configuration description](./02.config_introduction.md#11-通用配置) for details. | No |
+
+ Sub-options of the "statistics" task:
+
+ | Parameter | Description | Required |
+ | -------- | -------- | -------- |
+ | summary_mode | Controls the output mode of the dump file, str. Supported values are "statistics" and "md5". See the [statistics task sub-options](./02.config_introduction.md#12-task-配置为-statistics) for details. | No |
+
+ The "tensor" task has no sub-options.
+
+ ### API usage
+
+ Call PrecisionDebugger.save, passing the variable to be saved, the variable name, and whether backward data should also be saved. See the [PyTorch single-point save API](./05.data_dump_PyTorch.md#19-save) and the [MindSpore single-point save API](./06.data_dump_MindSpore.md#615-save) for the parameter details.
+
+ ### Example (PyTorch scenario)
+
+ Configuration file
+ ```json
+ {
+   "task": "statistics",
+   "dump_path": "./dump_path",
+   "rank": [],
+   "level": "debug",
+   "statistics": {
+     "summary_mode": "statistics"
+   }
+ }
+ ```
+
+ Initialization
+ ```python
+ # training launch script
+ from msprobe.pytorch import PrecisionDebugger
+ debugger = PrecisionDebugger("./config.json")
+ for data, label in data_loader:
+     # run model training
+     train(data, label)
+
+ ```
+
+ Initialization (without a configuration file)
+ ```python
+ # training launch script
+ from msprobe.pytorch import PrecisionDebugger
+ debugger = PrecisionDebugger(dump_path="dump_path", level="debug")
+ for data, label in data_loader:
+     # run model training
+     train(data, label)
+
+ ```
+
+ Calling the save interface
+ ```python
+ # py file invoked during training
+ from msprobe.pytorch import PrecisionDebugger
+ dict_variable = {"key1": "value1", "key2": [1, 2]}
+ PrecisionDebugger.save(dict_variable, "dict_variable", save_backward=False)
+
+ ```
+
+ ## Output
+ * **When "task" is set to "statistics"**: a `debug.json` file containing the variables' statistics is generated in the dump directory.
+ * **When "task" is set to "tensor"**: in addition to the `debug.json` file with the variables' statistics in the dump directory, tensor binary files are saved in the `dump_tensor_data` subdirectory, named in the format `{variable_name}{grad_flag}.{count}.tensor.{indexes}.{file_suffix}`.
+
+ - variable_name: the name of the variable passed to the save interface.
+ - grad_flag: backward-data flag; "_grad" for backward data, empty for forward data.
+ - count: call counter, used when the same variable name is saved multiple times.
+ - indexes: index used when saving nested data. For example, for the nested structure `{"key1": "value1", "key2": ["value2", "value3"]}`, the index of "value2" is "key2.0".
+ - file_suffix: file suffix, "pt" for the PyTorch scenario and "npy" for the MindSpore scenario.
+
+
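As a hypothetical illustration of the file name format above (the variable, the calls, and the resulting names below are assumptions for illustration, not taken from the package documentation): with "task" set to "tensor" in the PyTorch scenario, saving a nested dict of tensors twice under the same name could produce files like those listed in the comments.

```python
import torch
from msprobe.pytorch import PrecisionDebugger

hidden = {"layer1": torch.randn(2, 3, requires_grad=True)}
PrecisionDebugger.save(hidden, "hidden", save_backward=True)
PrecisionDebugger.save(hidden, "hidden", save_backward=True)

# Expected files under dump_tensor_data (hypothetical names derived from the format above):
#   hidden.0.tensor.layer1.pt        first call, forward data, index "layer1"
#   hidden.1.tensor.layer1.pt        second call, count incremented
#   hidden_grad.0.tensor.layer1.pt   backward data of the first call (save_backward=True)
#   hidden_grad.1.tensor.layer1.pt   backward data of the second call
```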