mindstudio-probe 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (249)
  1. {mindstudio_probe-1.0.1.dist-info → mindstudio_probe-1.0.3.dist-info}/METADATA +5 -1
  2. mindstudio_probe-1.0.3.dist-info/RECORD +272 -0
  3. msprobe/README.md +78 -23
  4. msprobe/__init__.py +1 -0
  5. msprobe/config/README.md +182 -40
  6. msprobe/config/config.json +22 -0
  7. msprobe/core/__init__.py +0 -0
  8. msprobe/{pytorch → core}/advisor/advisor.py +3 -3
  9. msprobe/{pytorch → core}/advisor/advisor_result.py +2 -2
  10. msprobe/core/common/const.py +82 -5
  11. msprobe/core/common/exceptions.py +30 -18
  12. msprobe/core/common/file_check.py +19 -1
  13. msprobe/core/common/log.py +15 -1
  14. msprobe/core/common/utils.py +130 -30
  15. msprobe/core/common_config.py +32 -19
  16. msprobe/core/compare/acc_compare.py +299 -0
  17. msprobe/core/compare/check.py +95 -0
  18. msprobe/core/compare/compare_cli.py +49 -0
  19. msprobe/core/compare/highlight.py +222 -0
  20. msprobe/core/compare/multiprocessing_compute.py +149 -0
  21. msprobe/{pytorch → core}/compare/npy_compare.py +55 -4
  22. msprobe/core/compare/utils.py +429 -0
  23. msprobe/core/data_dump/data_collector.py +39 -35
  24. msprobe/core/data_dump/data_processor/base.py +85 -37
  25. msprobe/core/data_dump/data_processor/factory.py +5 -7
  26. msprobe/core/data_dump/data_processor/mindspore_processor.py +198 -0
  27. msprobe/core/data_dump/data_processor/pytorch_processor.py +94 -51
  28. msprobe/core/data_dump/json_writer.py +11 -11
  29. msprobe/core/grad_probe/__init__.py +0 -0
  30. msprobe/core/grad_probe/constant.py +71 -0
  31. msprobe/core/grad_probe/grad_compare.py +175 -0
  32. msprobe/core/grad_probe/utils.py +52 -0
  33. msprobe/doc/grad_probe/grad_probe.md +207 -0
  34. msprobe/doc/grad_probe/img/image-1.png +0 -0
  35. msprobe/doc/grad_probe/img/image-2.png +0 -0
  36. msprobe/doc/grad_probe/img/image-3.png +0 -0
  37. msprobe/doc/grad_probe/img/image-4.png +0 -0
  38. msprobe/doc/grad_probe/img/image.png +0 -0
  39. msprobe/mindspore/api_accuracy_checker/__init__.py +0 -0
  40. msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +246 -0
  41. msprobe/mindspore/api_accuracy_checker/api_info.py +69 -0
  42. msprobe/mindspore/api_accuracy_checker/api_runner.py +152 -0
  43. msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +197 -0
  44. msprobe/mindspore/api_accuracy_checker/compute_element.py +224 -0
  45. msprobe/mindspore/api_accuracy_checker/main.py +16 -0
  46. msprobe/mindspore/api_accuracy_checker/type_mapping.py +114 -0
  47. msprobe/mindspore/api_accuracy_checker/utils.py +63 -0
  48. msprobe/mindspore/cell_processor.py +34 -0
  49. msprobe/mindspore/common/const.py +87 -0
  50. msprobe/mindspore/common/log.py +38 -0
  51. msprobe/mindspore/common/utils.py +57 -0
  52. msprobe/mindspore/compare/distributed_compare.py +75 -0
  53. msprobe/mindspore/compare/ms_compare.py +117 -0
  54. msprobe/mindspore/compare/ms_graph_compare.py +317 -0
  55. msprobe/mindspore/compare/ms_to_pt_api.yaml +399 -0
  56. msprobe/mindspore/debugger/debugger_config.py +38 -15
  57. msprobe/mindspore/debugger/precision_debugger.py +79 -4
  58. msprobe/mindspore/doc/compare.md +58 -0
  59. msprobe/mindspore/doc/dump.md +158 -6
  60. msprobe/mindspore/dump/dump_tool_factory.py +19 -22
  61. msprobe/mindspore/dump/hook_cell/api_registry.py +104 -0
  62. msprobe/mindspore/dump/hook_cell/hook_cell.py +53 -0
  63. msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +925 -0
  64. msprobe/mindspore/dump/hook_cell/wrap_functional.py +91 -0
  65. msprobe/mindspore/dump/hook_cell/wrap_tensor.py +63 -0
  66. msprobe/mindspore/dump/jit_dump.py +56 -0
  67. msprobe/mindspore/dump/kernel_kbyk_dump.py +65 -0
  68. msprobe/mindspore/free_benchmark/__init__.py +0 -0
  69. msprobe/mindspore/free_benchmark/api_pynative_self_check.py +116 -0
  70. msprobe/mindspore/free_benchmark/common/__init__.py +0 -0
  71. msprobe/mindspore/free_benchmark/common/config.py +12 -0
  72. msprobe/mindspore/free_benchmark/common/handler_params.py +17 -0
  73. msprobe/mindspore/free_benchmark/common/utils.py +71 -0
  74. msprobe/mindspore/free_benchmark/data/support_wrap_ops.yaml +842 -0
  75. msprobe/mindspore/free_benchmark/decorator/__init__.py +0 -0
  76. msprobe/mindspore/free_benchmark/decorator/dec_forward.py +42 -0
  77. msprobe/mindspore/free_benchmark/decorator/decorator_factory.py +107 -0
  78. msprobe/mindspore/free_benchmark/handler/__init__.py +0 -0
  79. msprobe/mindspore/free_benchmark/handler/base_handler.py +90 -0
  80. msprobe/mindspore/free_benchmark/handler/check_handler.py +41 -0
  81. msprobe/mindspore/free_benchmark/handler/fix_handler.py +36 -0
  82. msprobe/mindspore/free_benchmark/handler/handler_factory.py +21 -0
  83. msprobe/mindspore/free_benchmark/perturbation/add_noise.py +67 -0
  84. msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py +21 -0
  85. msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +63 -0
  86. msprobe/mindspore/free_benchmark/perturbation/improve_precision.py +34 -0
  87. msprobe/mindspore/free_benchmark/perturbation/no_change.py +12 -0
  88. msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +27 -0
  89. msprobe/mindspore/free_benchmark/self_check_tool_factory.py +33 -0
  90. msprobe/mindspore/grad_probe/__init__.py +0 -0
  91. msprobe/mindspore/grad_probe/global_context.py +91 -0
  92. msprobe/mindspore/grad_probe/grad_analyzer.py +231 -0
  93. msprobe/mindspore/grad_probe/grad_monitor.py +27 -0
  94. msprobe/mindspore/grad_probe/grad_stat_csv.py +132 -0
  95. msprobe/mindspore/grad_probe/hook.py +92 -0
  96. msprobe/mindspore/grad_probe/utils.py +29 -0
  97. msprobe/mindspore/ms_config.py +63 -15
  98. msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +17 -15
  99. msprobe/mindspore/runtime.py +4 -0
  100. msprobe/mindspore/service.py +354 -0
  101. msprobe/mindspore/task_handler_factory.py +7 -4
  102. msprobe/msprobe.py +66 -26
  103. msprobe/pytorch/__init__.py +1 -1
  104. msprobe/pytorch/api_accuracy_checker/common/config.py +21 -16
  105. msprobe/pytorch/api_accuracy_checker/common/utils.py +1 -60
  106. msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +2 -5
  107. msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +46 -10
  108. msprobe/pytorch/api_accuracy_checker/compare/compare.py +84 -48
  109. msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +8 -12
  110. msprobe/pytorch/api_accuracy_checker/config.yaml +7 -1
  111. msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +15 -11
  112. msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +11 -15
  113. msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +16 -9
  114. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +193 -105
  115. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +68 -1
  116. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/__init__.py +0 -0
  117. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +202 -0
  118. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/client.py +324 -0
  119. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/device_dispatch.py +204 -0
  120. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/server.py +218 -0
  121. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/ssl_config.py +10 -0
  122. msprobe/pytorch/bench_functions/__init__.py +15 -0
  123. msprobe/pytorch/bench_functions/apply_adam_w.py +28 -0
  124. msprobe/pytorch/bench_functions/confusion_transpose.py +19 -0
  125. msprobe/pytorch/bench_functions/fast_gelu.py +55 -0
  126. msprobe/pytorch/bench_functions/layer_norm_eval.py +6 -0
  127. msprobe/pytorch/bench_functions/linear.py +12 -0
  128. msprobe/pytorch/bench_functions/matmul_backward.py +48 -0
  129. msprobe/pytorch/bench_functions/npu_fusion_attention.py +421 -0
  130. msprobe/pytorch/bench_functions/rms_norm.py +15 -0
  131. msprobe/pytorch/bench_functions/rotary_mul.py +52 -0
  132. msprobe/pytorch/bench_functions/scaled_mask_softmax.py +26 -0
  133. msprobe/pytorch/bench_functions/swiglu.py +55 -0
  134. msprobe/pytorch/common/parse_json.py +3 -1
  135. msprobe/pytorch/common/utils.py +83 -7
  136. msprobe/pytorch/compare/distributed_compare.py +19 -64
  137. msprobe/pytorch/compare/match.py +3 -6
  138. msprobe/pytorch/compare/pt_compare.py +40 -0
  139. msprobe/pytorch/debugger/debugger_config.py +11 -2
  140. msprobe/pytorch/debugger/precision_debugger.py +34 -4
  141. msprobe/pytorch/doc/api_accuracy_checker.md +57 -13
  142. msprobe/pytorch/doc/api_accuracy_checker_online.md +187 -0
  143. msprobe/pytorch/doc/dump.md +73 -20
  144. msprobe/pytorch/doc/ptdbg_ascend_compare.md +75 -11
  145. msprobe/pytorch/doc/ptdbg_ascend_quickstart.md +3 -3
  146. msprobe/pytorch/doc/run_overflow_check.md +1 -1
  147. msprobe/pytorch/doc//321/206/320/247/320/260/321/206/320/260/320/227/321/206/320/255/320/226/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/205/320/254/342/225/221/321/206/320/251/320/277/321/211/320/272/320/234/321/210/320/277/320/221/321/205/320/242/320/234/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +151 -0
  148. msprobe/pytorch/free_benchmark/common/constant.py +3 -0
  149. msprobe/pytorch/free_benchmark/common/utils.py +4 -0
  150. msprobe/pytorch/free_benchmark/compare/grad_saver.py +22 -26
  151. msprobe/pytorch/free_benchmark/main.py +7 -4
  152. msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +1 -1
  153. msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +1 -1
  154. msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +1 -1
  155. msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +3 -3
  156. msprobe/pytorch/free_benchmark/perturbed_layers/npu/no_change.py +1 -1
  157. msprobe/pytorch/free_benchmark/perturbed_layers/run_cpu.py +1 -1
  158. msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +43 -29
  159. msprobe/pytorch/free_benchmark/result_handlers/handler_factory.py +0 -1
  160. msprobe/pytorch/function_factory.py +75 -0
  161. msprobe/pytorch/functional/dump_module.py +4 -4
  162. msprobe/pytorch/grad_probe/__init__.py +0 -0
  163. msprobe/pytorch/grad_probe/grad_monitor.py +90 -0
  164. msprobe/pytorch/grad_probe/grad_stat_csv.py +129 -0
  165. msprobe/pytorch/hook_module/hook_module.py +14 -3
  166. msprobe/pytorch/hook_module/support_wrap_ops.yaml +2 -1
  167. msprobe/pytorch/hook_module/utils.py +9 -9
  168. msprobe/pytorch/hook_module/wrap_aten.py +20 -10
  169. msprobe/pytorch/hook_module/wrap_distributed.py +10 -7
  170. msprobe/pytorch/hook_module/wrap_functional.py +4 -7
  171. msprobe/pytorch/hook_module/wrap_npu_custom.py +21 -10
  172. msprobe/pytorch/hook_module/wrap_tensor.py +5 -6
  173. msprobe/pytorch/hook_module/wrap_torch.py +5 -7
  174. msprobe/pytorch/hook_module/wrap_vf.py +6 -8
  175. msprobe/pytorch/module_processer.py +53 -13
  176. msprobe/pytorch/online_dispatch/compare.py +4 -4
  177. msprobe/pytorch/online_dispatch/dispatch.py +39 -41
  178. msprobe/pytorch/online_dispatch/dump_compare.py +17 -47
  179. msprobe/pytorch/online_dispatch/single_compare.py +5 -5
  180. msprobe/pytorch/online_dispatch/utils.py +2 -43
  181. msprobe/pytorch/parse_tool/lib/compare.py +31 -19
  182. msprobe/pytorch/parse_tool/lib/config.py +2 -1
  183. msprobe/pytorch/parse_tool/lib/parse_tool.py +4 -4
  184. msprobe/pytorch/parse_tool/lib/utils.py +34 -80
  185. msprobe/pytorch/parse_tool/lib/visualization.py +4 -3
  186. msprobe/pytorch/pt_config.py +100 -6
  187. msprobe/pytorch/service.py +104 -19
  188. mindstudio_probe-1.0.1.dist-info/RECORD +0 -228
  189. msprobe/mindspore/dump/api_kbk_dump.py +0 -55
  190. msprobe/pytorch/compare/acc_compare.py +0 -1024
  191. msprobe/pytorch/compare/highlight.py +0 -100
  192. msprobe/test/core_ut/common/test_utils.py +0 -345
  193. msprobe/test/core_ut/data_dump/test_data_collector.py +0 -47
  194. msprobe/test/core_ut/data_dump/test_json_writer.py +0 -183
  195. msprobe/test/core_ut/data_dump/test_scope.py +0 -151
  196. msprobe/test/core_ut/test_common_config.py +0 -152
  197. msprobe/test/core_ut/test_file_check.py +0 -218
  198. msprobe/test/core_ut/test_log.py +0 -109
  199. msprobe/test/mindspore_ut/test_api_kbk_dump.py +0 -51
  200. msprobe/test/mindspore_ut/test_debugger_config.py +0 -42
  201. msprobe/test/mindspore_ut/test_dump_tool_factory.py +0 -51
  202. msprobe/test/mindspore_ut/test_kernel_graph_dump.py +0 -66
  203. msprobe/test/mindspore_ut/test_kernel_graph_overflow_check.py +0 -63
  204. msprobe/test/mindspore_ut/test_ms_config.py +0 -69
  205. msprobe/test/mindspore_ut/test_overflow_check_tool_factory.py +0 -51
  206. msprobe/test/mindspore_ut/test_precision_debugger.py +0 -56
  207. msprobe/test/mindspore_ut/test_task_handler_factory.py +0 -58
  208. msprobe/test/pytorch_ut/advisor/test_advisor.py +0 -83
  209. msprobe/test/pytorch_ut/api_accuracy_checker/common/test_common_utils.py +0 -108
  210. msprobe/test/pytorch_ut/api_accuracy_checker/common/test_config.py +0 -39
  211. msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_algorithm.py +0 -112
  212. msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_api_precision_compare.py +0 -77
  213. msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_compare.py +0 -125
  214. msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_compare_column.py +0 -10
  215. msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_compare_utils.py +0 -43
  216. msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/dump.json +0 -179
  217. msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/forward.json +0 -63
  218. msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/test_data_generate.py +0 -99
  219. msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/test_multi_run_ut.py +0 -115
  220. msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/test_run_ut.py +0 -72
  221. msprobe/test/pytorch_ut/compare/test_acc_compare.py +0 -17
  222. msprobe/test/pytorch_ut/free_benchmark/perturbed_layers/test_perturbed_layser.py +0 -105
  223. msprobe/test/pytorch_ut/free_benchmark/result_handlers/test_result_handler.py +0 -121
  224. msprobe/test/pytorch_ut/free_benchmark/test_main.py +0 -101
  225. msprobe/test/pytorch_ut/functional/test_dump_module.py +0 -15
  226. msprobe/test/pytorch_ut/hook_module/test_api_registry.py +0 -130
  227. msprobe/test/pytorch_ut/hook_module/test_hook_module.py +0 -42
  228. msprobe/test/pytorch_ut/hook_module/test_wrap_aten.py +0 -65
  229. msprobe/test/pytorch_ut/hook_module/test_wrap_distributed.py +0 -35
  230. msprobe/test/pytorch_ut/hook_module/test_wrap_functional.py +0 -20
  231. msprobe/test/pytorch_ut/hook_module/test_wrap_tensor.py +0 -35
  232. msprobe/test/pytorch_ut/hook_module/test_wrap_torch.py +0 -43
  233. msprobe/test/pytorch_ut/hook_module/test_wrap_vf.py +0 -11
  234. msprobe/test/pytorch_ut/test_pt_config.py +0 -69
  235. msprobe/test/pytorch_ut/test_service.py +0 -59
  236. msprobe/test/resources/advisor.txt +0 -3
  237. msprobe/test/resources/compare_result_20230703104808.csv +0 -9
  238. msprobe/test/resources/compare_result_without_accuracy.csv +0 -9
  239. msprobe/test/resources/config.yaml +0 -3
  240. msprobe/test/resources/npu_test.pkl +0 -8
  241. msprobe/test/run_test.sh +0 -30
  242. msprobe/test/run_ut.py +0 -58
  243. msprobe/test/test_module_processer.py +0 -64
  244. {mindstudio_probe-1.0.1.dist-info → mindstudio_probe-1.0.3.dist-info}/LICENSE +0 -0
  245. {mindstudio_probe-1.0.1.dist-info → mindstudio_probe-1.0.3.dist-info}/WHEEL +0 -0
  246. {mindstudio_probe-1.0.1.dist-info → mindstudio_probe-1.0.3.dist-info}/entry_points.txt +0 -0
  247. {mindstudio_probe-1.0.1.dist-info → mindstudio_probe-1.0.3.dist-info}/top_level.txt +0 -0
  248. /msprobe/{pytorch → core}/advisor/advisor_const.py +0 -0
  249. /msprobe/pytorch/doc/{atat → msprobe}/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/206/320/245/342/226/221/321/206/320/235/320/276dump/321/206/320/260/320/227/321/205/320/227/320/226/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md" +0 -0
@@ -0,0 +1,204 @@
+ import time
+ from collections import namedtuple
+
+ import pandas as pd
+ import torch
+ import torch.multiprocessing as mp
+
+ from msprobe.core.common.const import Const
+ from msprobe.pytorch.api_accuracy_checker.compare.api_precision_compare import online_api_precision_compare
+ from msprobe.pytorch.api_accuracy_checker.compare.compare_utils import DETAIL_TEST_ROWS, thousandth_standard_api, \
+     binary_standard_api, absolute_standard_api
+ from msprobe.pytorch.api_accuracy_checker.run_ut.run_ut_utils import UtDataInfo, exec_api
+ from msprobe.pytorch.common.log import logger
+ from msprobe.pytorch.api_accuracy_checker.tensor_transport_layer.attl import move2target_device
+
+ # NPU vs GPU api list
+ CompareApi = set(absolute_standard_api) | set(binary_standard_api) | set(thousandth_standard_api)
+
+ current_time = time.strftime("%Y%m%d%H%M%S")
+ ONLINE_API_PRECISION_COMPARE_RESULT_FILE_NAME = "api_precision_compare_result_" + current_time + "_rank*.csv"
+ ONLINE_API_PRECISION_COMPARE_DETAILS_FILE_NAME = "api_precision_compare_details_" + current_time + "_rank*.csv"
+
+ OnlineApiPrecisionCompareConfig = namedtuple('OnlineApiPrecisionCompareConfig',
+                                              ['npu_data', 'gpu_data', 'rank', 'result_csv_path', 'details_csv_path'])
+ # namedtuple of [instance of Comparator, func of run_touch_api_online, config of run_ut_config]
+ CommonCompareConfig = namedtuple('CommonCompareConfig', ['compare', 'handle_func', 'config'])
+
+
+ def run_ut_process(xpu_id, consumer_queue, common_config, api_precision_csv_file):
+     """When consumer_queue (shared with ConsumerDispatcher) is not empty, consume api data from consumer_queue.
+     :param xpu_id: int
+     :param consumer_queue: shared queue of ConsumerDispatcher
+     :param common_config: namedtuple of CommonCompareConfig
+     :param api_precision_csv_file: list of length 2: result file name and details file name
+     :return:
+     """
+     gpu_device = torch.device(f'cuda:{xpu_id}')
+
+     while True:
+         if consumer_queue.empty():
+             time.sleep(0.1)
+             continue
+
+         api_data = consumer_queue.get()
+         if api_data == "KILL_":
+             # current consumer is finished
+             return
+
+         _, api_name, _ = api_data.name.split(Const.SEP)
+         if api_name in CompareApi:
+             # NPU vs GPU
+             online_compare(api_data, gpu_device, common_config)
+         else:
+             # NPUvsCPU vs GPUvsCPU
+             online_precision_compare(api_data, gpu_device, common_config, api_precision_csv_file)
+
+
+ def online_precision_compare(api_data, device, common_config, api_precision_csv_file):
+     """Online run_ut for precision compare: NPUvsCPU vs GPUvsCPU.
+     1. get the NPUvsCPU compare result
+     2. get the GPUvsCPU compare result
+     3. call online_api_precision_compare
+     :param api_data
+     :param device
+     :param common_config: namedtuple of CommonCompareConfig
+     :param api_precision_csv_file: [result_file_name, details_file_name]
+     """
+     compare, func, config = common_config.compare, common_config.handle_func, common_config.config
+     api_full_name = api_data.name
+     [api_type, api_name, _] = api_full_name.split(Const.SEP)
+     npu_args, npu_kwargs, npu_out = api_data.args, api_data.kwargs, api_data.result
+
+     if npu_kwargs.get("device"):
+         del npu_kwargs["device"]
+
+     try:
+         # NPU vs CPU
+         cpu_out = exec_api(api_type, api_name, npu_args, npu_kwargs)
+         npu_data_info = UtDataInfo(None, None, npu_out, cpu_out, None, [], None, rank=api_data.rank)
+         npu_detail = compare.compare_output(api_full_name, npu_data_info, True)
+         npu_data = pd.DataFrame(npu_detail, columns=DETAIL_TEST_ROWS[-1])
+
+         # GPU vs CPU
+         api_data_gpu = move2target_device(api_data, device)  # args, kwargs -> gpu, result -> npu
+         data_info = func(api_full_name, api_data_gpu, config.backward_content)
+         gpu_out = data_info.bench_output
+         gpu_data_info = UtDataInfo(None, None, gpu_out, cpu_out, None, [], None, rank=api_data.rank)
+         gpu_detail = compare.compare_output(api_full_name, gpu_data_info, True)
+         gpu_data = pd.DataFrame(gpu_detail, columns=DETAIL_TEST_ROWS[-1])
+
+         # NPUvsCPU vs GPUvsCPU
+         result_file_name, details_file_name = api_precision_csv_file
+         precision_compare_config = OnlineApiPrecisionCompareConfig(npu_data, gpu_data, api_data.rank,
+                                                                    result_file_name, details_file_name)
+         online_api_precision_compare(precision_compare_config)
+
+     except Exception as err:
+         if "expected scalar type Long" in str(err):
+             logger.warning(
+                 f"API {api_name} not support int32 tensor in CPU, please add {api_name} to CONVERT_API "
+                 f"'int32_to_int64' list in accuracy_tools/msprobe/core/common/const.py file.")
+         elif api_type in [Const.DISTRIBUTED]:
+             logger.info(f"{api_full_name} is not supported for run ut. SKIP.")
+         else:
+             logger.error(f"Run {api_full_name} UT Error: {str(err)}")
+
+         compare.write_summary_csv((api_full_name, "SKIP", "SKIP", [[str(err)]], api_data.rank))
+
+     finally:
+         torch.cuda.empty_cache()
+
+
+ def online_compare(api_data, device, common_config):
+     """Online run_ut for compare: NPU vs GPU.
+     """
+     compare, func, config = common_config.compare, common_config.handle_func, common_config.config
+     api_full_name = api_data.name
+     api_data = move2target_device(api_data, device)
+     try:
+         data_info = func(api_full_name, api_data, config.backward_content)
+         is_fwd_success, is_bwd_success = compare.compare_output(api_full_name, data_info)
+         logger.info(f"running api_full_name {api_full_name} ut, "
+                     f"is_fwd_success: {is_fwd_success}, "
+                     f"is_bwd_success: {is_bwd_success}")
+     except Exception as err:
+         [api_type, api_name, _] = api_full_name.split(Const.SEP)
+         if "expected scalar type Long" in str(err):
+             logger.warning(
+                 f"API {api_name} not support int32 tensor in CPU, please add {api_name} to CONVERT_API "
+                 f"'int32_to_int64' list in accuracy_tools/msprobe/core/common/const.py file.")
+         elif api_type in [Const.DISTRIBUTED]:
+             logger.info(f"{api_full_name} is not supported for run ut. SKIP.")
+         else:
+             logger.error(f"Run {api_full_name} UT Error: {str(err)}")
+
+         compare.write_summary_csv((api_full_name, "SKIP", "SKIP", [[str(err)]], api_data.rank))
+
+     finally:
+         torch.cuda.empty_cache()
+
+
+ class ConsumerDispatcher:
+     def __init__(self, compare, capacity=10, num_workers=8, device: str = "gpu") -> None:
+         self.num_workers = num_workers
+         self.capacity = capacity
+         self.compare = compare
+         self.queues = []
+         self.processes = []
+         self.reverse_sort = False
+         self.pool = None
+         self.device = device
+         self.data_id = 0
+         self.lock = mp.Lock()
+         self.result_queue = mp.Queue()
+         mp.set_start_method("spawn", force=True)
+
+     def start(self, handle_func, config):
+         self.queues = [mp.Queue(maxsize=self.capacity) for _ in range(self.num_workers)]
+         api_precision_csv_file = [ONLINE_API_PRECISION_COMPARE_RESULT_FILE_NAME, ONLINE_API_PRECISION_COMPARE_DETAILS_FILE_NAME]
+         common_config = CommonCompareConfig(self.compare, handle_func, config)
+         for xpu_id, q in enumerate(self.queues):
+             p = mp.Process(name="run_ut_process", target=run_ut_process,
+                            args=(xpu_id, q, common_config, api_precision_csv_file))
+
+             p.start()
+             self.processes.append(p)
+         logger.info(f"Api_precision_compare task result will be saved in {ONLINE_API_PRECISION_COMPARE_RESULT_FILE_NAME}")
+         logger.info(f"Api_precision_compare task details will be saved in {ONLINE_API_PRECISION_COMPARE_DETAILS_FILE_NAME}")
+         logger.info("Successfully start unittest process.")
+
+     def stop(self):
+         for q in self.queues:
+             while q.full():
+                 time.sleep(0.1)
+             q.put("KILL_")
+
+         for p in self.processes:
+             p.join()
+         logger.info("Successfully stop unittest process.")
+         logger.info(f"Api_precision_compare task result is saved in {ONLINE_API_PRECISION_COMPARE_RESULT_FILE_NAME}")
+         logger.info(f"Api_precision_compare task details is saved in {ONLINE_API_PRECISION_COMPARE_DETAILS_FILE_NAME}")
+
+     def update_consume_queue(self, api_data):
+         while True:
+             index = self._choose_max_empty_site_strategy()
+             if index != -1:
+                 q = self.queues[index]
+                 q.put(api_data)
+                 break
+             time.sleep(0.1)
+
+     def _choose_max_empty_site_strategy(self):
+         maximum = 0
+         index = -1
+         # Make full use of multi-device resources; avoid assigning too many tasks to the first cards
+         _reverse = 1 if not self.reverse_sort else -1
+         for i, q in enumerate(self.queues[::_reverse]):
+             empty_site = self.capacity - q.qsize()
+             if empty_site > maximum:
+                 maximum = empty_site
+                 index = i
+         index = len(self.queues) - index - 1 if index != -1 and self.reverse_sort else index
+         self.reverse_sort = not self.reverse_sort
+         return index
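A minimal driving sketch of the dispatcher added above; comparator, run_touch_api_online, run_ut_config and received_api_data are placeholders for the objects that run_ut.py normally supplies (see the CommonCompareConfig comment in the hunk):

# Usage sketch with placeholder objects; not a verbatim excerpt from run_ut.py.
dispatcher = ConsumerDispatcher(compare=comparator, capacity=10, num_workers=8)
dispatcher.start(handle_func=run_touch_api_online, config=run_ut_config)

for api_data in received_api_data:              # e.g. items taken off the TCP server's shared queue
    dispatcher.update_consume_queue(api_data)   # routed to the worker queue with the most free slots

dispatcher.stop()                               # pushes "KILL_" to every worker queue and joins the processes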
@@ -0,0 +1,218 @@
+ import os.path
+ import struct
+ import hashlib
+ import time
+ import io
+ from threading import Thread
+
+ from OpenSSL import SSL
+ from twisted.internet import ssl, reactor, protocol, endpoints
+
+ from msprobe.pytorch.common.utils import logger
+ from msprobe.pytorch.api_accuracy_checker.tensor_transport_layer.ssl_config import cipher_list
+
+
+ class TCPServer:
+     def __init__(self, port, shared_queue, check_sum=False, tls_path=None) -> None:
+         self.port = port
+         self.shared_queue = shared_queue
+         self.check_sum = check_sum
+         self.tls_path = tls_path
+         self.factory = MessageServerFactory()
+         self.reactor_thread = None
+
+     @staticmethod
+     def run_reactor():
+         reactor.run(installSignalHandlers=False)
+
+     def start(self):
+         self.factory.protocol = self.build_protocol
+
+         if self.tls_path:
+             server_key = os.path.join(self.tls_path, "server.key")
+             server_crt = os.path.join(self.tls_path, "server.crt")
+             server_context_factory = ssl.DefaultOpenSSLContextFactory(server_key, server_crt, SSL.TLSv1_2_METHOD)
+             server_context_ = server_context_factory.getContext()
+             server_context_.set_cipher_list(cipher_list)
+             server_context_.set_options(SSL.OP_NO_RENEGOTIATION)
+             endpoint = endpoints.SSL4ServerEndpoint(reactor, self.port, server_context_factory)
+         else:
+             endpoint = endpoints.TCP4ServerEndpoint(reactor, self.port)
+         endpoint.listen(self.factory)
+         self.reactor_thread = Thread(target=self.run_reactor, daemon=True)
+         self.reactor_thread.start()
+
+     def is_running(self):
+         return not self.factory.is_all_connection_closed()
+
+     def stop(self):
+         self.factory.doStop()
+         reactor.callFromThread(reactor.sigInt, 2)
+         self.reactor_thread.join()
+
+     def build_protocol(self):
+         return ServerProtocol(self.shared_queue, self.check_sum)
+
+
+ class ServerProtocol(protocol.Protocol):
+     ACK_SUCCESS = b"OK___"
+     ACK_ERROR = b"ERROR"
+     ACK_BUSY = b"BUSY_"
+     ACK_STOP = b"STOP_"
+     ACK_STOP_CONFIRM = b"OVER_"
+     ACK_KILL_PROCESS = b"KILL_"
+
+     def __init__(self, shared_queue, check_sum=False):
+         self.start_time = None
+         self.buffer = io.BytesIO()
+         self.consumer_queue = shared_queue
+         self.check_sum = check_sum
+         self.length_width = 8
+         self.md5_width = 32
+         self.obj_length = None
+         self.tell = 0
+         self.obj_md5 = None
+         self.obj_body = None
+         self.sequence_number = -1
+         self.rank = -1
+         self.step = -1
+         self.sequence_number_dict = dict()
+
+     def connectionMade(self):
+         self.buffer = io.BytesIO()
+         self.obj_length = None
+         self.tell = 0
+         self.obj_md5 = None
+         self.obj_body = None
+         self.factory.transport_dict[self.transport] = 1
+         self.factory.transport_list.append(self.transport)
+         logger.info(f"Connected to {self.transport.getPeer()} successfully.")
+
+     def connectionLost(self, reason):
+         self.factory.transport_dict.pop(self.transport, None)
+         if len(self.factory.transport_dict) == 0:
+             self.consumer_queue.put(self.ACK_KILL_PROCESS)
+
+         logger.info(f"Lost connection with {self.transport.getPeer()}. Reason is: {reason}; disconnected from the client, "
+                     f"current connection number is: {len(self.factory.transport_dict)}")
+
+     def send_ack(self, ack_info):
+         ack_message = b"".join([
+             ack_info,
+             self.sequence_number.to_bytes(8, byteorder='big'),
+             self.rank.to_bytes(8, byteorder='big'),
+             self.step.to_bytes(8, byteorder='big')
+         ])
+         self.transport.write(ack_message)
+
+     def post_process(self):
+         send_busy_ack = False
+         while self.consumer_queue.full():
+             if not send_busy_ack:
+                 self.send_ack(self.ACK_BUSY)
+                 logger.debug("sending BUSY ACK")
+             send_busy_ack = True
+             time.sleep(0.1)
+
+         obj_key = str(self.sequence_number) + "_" + str(self.rank) + "_" + str(self.step)
+
+         recv_md5 = hashlib.md5(self.obj_body).hexdigest()
+         if self.check_sum and recv_md5 != self.obj_md5:
+             # when md5 checking is enabled and the check fails, the received data is corrupted; send b"ERROR" to the client.
+             logger.debug(f"Error: received data is corrupted, sequence number {self.sequence_number}, expected {self.obj_md5}, but got {recv_md5}")
+             self.send_ack(self.ACK_ERROR)
+         else:
+             if self.obj_body == self.ACK_STOP:
+                 self.handle_with_stop()
+             else:
+                 self.send_ack(self.ACK_SUCCESS)
+                 if obj_key in self.sequence_number_dict:
+                     logger.debug(f"This is an abnormal retransmission and can be ignored. {obj_key}, {self.sequence_number_dict}")
+                 else:
+                     self.sequence_number_dict[obj_key] = self.obj_md5
+                     self.consumer_queue.put(self.obj_body, block=True)
+
+         self.reset_env()
+         finish_time = time.time()
+         logger.debug(f"finish_time: {finish_time - self.start_time}")
+
+     def handle_with_stop(self):
+         logger.debug(f"Received stop-transmission signal from TCP {self.transport.getPeer()}")
+         self.send_ack(self.ACK_STOP_CONFIRM)
+         if len(self.factory.transport_dict) == 0:
+             _rank, _step, _sequence_number = 0, 0, 100000000
+             ack_kill = self.ACK_KILL_PROCESS + \
+                        _sequence_number.to_bytes(8, byteorder='big') + \
+                        _rank.to_bytes(8, byteorder='big') + \
+                        _step.to_bytes(8, byteorder='big')
+             for trans in self.factory.transport_list:
+                 trans.write(ack_kill)
+             logger.debug(f"Sent KILL message to {self.transport.getPeer()}")
+             self.consumer_queue.put(self.ACK_KILL_PROCESS)
+             time.sleep(2)
+
+     def reset_env(self):
+         self.obj_length = None
+         self.sequence_number = -1
+         self.rank = -1
+         self.step = -1
+         self.obj_md5 = None
+         self.obj_body = None
+
+     def dataReceived(self, data):
+         self.buffer.seek(0, 2)
+         self.buffer.write(data)
+         self.buffer.seek(self.tell)
+
+         # The first data packet is the packet header; it contains obj_length, sequence_number, rank, step
+         if self.obj_length is None and len(self.buffer.getvalue()) >= self.length_width * 4:
+             self.start_time = time.time()
+             self.obj_length = struct.unpack('!Q', self.buffer.read(self.length_width))[0]
+             self.sequence_number = struct.unpack('!Q', self.buffer.read(self.length_width))[0]
+             self.rank = struct.unpack('!Q', self.buffer.read(self.length_width))[0]
+             self.step = struct.unpack('!Q', self.buffer.read(self.length_width))[0]
+             self.tell += self.length_width * 4
+             logger.debug(
+                 f"Sequence number: {self.sequence_number}; RANK: {self.rank}; STEP: {self.step}; Length: {self.obj_length}")
+
+         # If md5 checking is enabled but the md5 has not been parsed yet, read the 32-byte md5 value
+         check_sum_and_md5 = (self.check_sum
+                              and self.obj_length is not None
+                              and self.obj_md5 is None
+                              and len(self.buffer.getvalue()) - self.tell >= self.md5_width)
+         if check_sum_and_md5:
+             self.obj_md5 = self.buffer.read(self.md5_width).decode()
+             self.tell += self.md5_width
+             logger.debug(f"MD5: {self.obj_md5}")
+
+         current_length = len(self.buffer.getvalue()) - self.tell
+         if self.obj_length is not None and 0 < self.obj_length <= current_length:
+             # Current api data has been fully received
+             self.obj_body = self.buffer.read(self.obj_length)
+
+             self.tell += self.obj_length
+             self.buffer = io.BytesIO(self.buffer.getvalue()[self.tell:])
+             self.buffer.seek(0)
+             self.tell = 0
+             recv_data_time = time.time()
+             logger.debug(f"self.sequence_number {self.sequence_number} "
+                          f"recv_data_time {recv_data_time - self.start_time}")
+
+             if self.obj_body == self.ACK_STOP:
+                 # The current TCP link received a STOP signal; remove it from the transport_dict
+                 _transport = self.factory.transport_dict.pop(self.transport, None)
+                 logger.debug(f"Received b'STOP_' self.sequence_number {self.sequence_number} ")
+             self.post_process()
+
+
+ class MessageServerFactory(protocol.ServerFactory):
+     def __init__(self) -> None:
+         """
+         transport_dict: links that have not completed data transmission.
+         transport_list: records all TCP links; a new link is appended when it is established.
+         """
+         self.transport_dict = {}
+         self.transport_list = []
+
+     def is_all_connection_closed(self):
+         return len(self.transport_dict) == 0
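The dataReceived handler above implies a simple wire format: a 32-byte header of four big-endian unsigned 64-bit integers (body length, sequence number, rank, step), an optional 32-byte hex MD5 digest when check_sum is enabled, then the body itself. A minimal sketch of a conforming packet; payload is a placeholder for the serialized api data:

import hashlib
import struct

payload = b"..."                                  # placeholder for the pickled api data
sequence_number, rank, step = 1, 0, 0

header = struct.pack('!QQQQ', len(payload), sequence_number, rank, step)  # 4 x 8-byte big-endian fields
md5 = hashlib.md5(payload).hexdigest().encode()   # 32 ASCII bytes, only present when check_sum is enabled
packet = header + md5 + payload                   # server acknowledges with a 5-byte code plus three 8-byte ids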
@@ -0,0 +1,10 @@
+ cipher_list = ":".join([
+     'ECDHE-ECDSA-AES128-GCM-SHA256',
+     'ECDHE-RSA-AES128-GCM-SHA256',
+     'ECDHE-ECDSA-AES256-GCM-SHA384',
+     'ECDHE-RSA-AES256-GCM-SHA384',
+     'ECDHE-ECDSA-CHACHA20-POLY1305',
+     'ECDHE-RSA-CHACHA20-POLY1305',
+     'DHE-RSA-AES128-GCM-SHA256',
+     'DHE-RSA-AES256-GCM-SHA384'
+ ]).encode()
@@ -0,0 +1,15 @@
+ import os
+ from pkgutil import iter_modules
+ from importlib import import_module
+
+ """
+ GPU and CPU do not implement these benchmark functions; this package provides supplementary benchmark implementations.
+ """
+
+ package_path = os.path.dirname(os.path.realpath(__file__))
+ for _, module_name, _ in iter_modules([package_path]):
+     module = import_module(f"{__name__}.{module_name}")
+     for attr_name in dir(module):
+         attr = getattr(module, attr_name)
+         if callable(attr) and "npu_custom" not in attr_name:
+             globals()[attr_name] = attr
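The loop above hoists every callable (except the npu_custom wrappers) from the submodules into the package namespace, so the CPU reference implementations defined in the hunks below can be imported directly from the package; a short sketch:

# Expected effect of the re-export loop above.
from msprobe.pytorch.bench_functions import fast_gelu, npu_linear, matmul_backward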
@@ -0,0 +1,28 @@
+ import torch
+
+
+ def npu_apply_adam_w(beta1_power, beta2_power, lr, weight_decay,
+                      beta1, beta2, eps, grad, max_grad_norm, amsgrad, maximize, out):
+     var, m, v = out
+     if amsgrad:
+         max_grad_norm = (torch.rand(var.shape) * 10.0 - 5.0).to(var.dtype)
+     beta1_power_out = beta1_power * beta1
+     beta2_power_out = beta2_power * beta2
+     var_t = var * (1 + (-lr * weight_decay))
+     gt = -grad if maximize else grad
+     m_out = m * beta1 - (beta1 + (-1)) * gt
+     v_out = v * beta2 - (beta2 + (-1)) * gt * gt
+
+     if amsgrad:
+         max_grad_norm_out = torch.max(max_grad_norm, v_out)
+         if (1 - beta2_power_out) == 0:
+             beta2_power_out -= eps
+         denom = torch.sqrt(torch.div(max_grad_norm_out, (1 - beta2_power_out))) + eps
+     else:
+         vraintain = torch.div(v_out, (1 - beta2_power_out))
+         denom = torch.sqrt(vraintain) + eps
+
+     if (1 - beta1_power_out) == 0:
+         beta1_power_out -= eps
+     var_out = var_t + torch.div(-lr * m_out, (1 - beta1_power_out)).div(denom)
+     return var_out.cpu(), m_out.cpu(), v_out.cpu()
@@ -0,0 +1,19 @@
+ def npu_confusion_transpose(data, perm, shape, transpose_first):
+     if transpose_first:
+         output = data.permute(*perm).contiguous().view(shape)
+     else:
+         output = data.view(shape).permute(*perm)
+     return output.cpu()
+
+
+ def npu_confusion_transpose_backward(grad, perm, shape, transpose_first):
+     shape_cal = shape if transpose_first else [shape[perm_dim] for perm_dim in perm]
+     perm_cal = [0] * len(perm)
+     for i, perm_dim in enumerate(perm):
+         perm_cal[perm_dim] = i
+
+     if transpose_first:
+         result = grad.permute(*perm_cal).reshape(shape_cal)
+     else:
+         result = grad.reshape(shape_cal).permute(*perm_cal)
+     return result.cpu()
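A small worked example of the forward helper above, with illustrative shapes; it assumes the package-level re-export from the bench_functions __init__.py hunk:

import torch
from msprobe.pytorch.bench_functions import npu_confusion_transpose

x = torch.arange(24).reshape(2, 3, 4)
# transpose_first=True: permute to (2, 4, 3) first, then view as (2, 12)
out = npu_confusion_transpose(x, perm=(0, 2, 1), shape=(2, 12), transpose_first=True)
assert out.shape == (2, 12)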
@@ -0,0 +1,55 @@
+ import torch
+
+
+ def fast_gelu(input0):
+     attr = 1.702
+     const_0 = 0 - attr
+     const_1 = 1
+     const_2 = attr / 2
+
+     abs_x = torch.abs(input0)
+     mul_abs_x = abs_x * const_0
+     exp_abs_x = torch.exp(mul_abs_x)
+     div_down = exp_abs_x + const_1
+
+     pn_x = input0 - abs_x
+     mul_pn_x = pn_x * const_2
+     exp_pn_x = torch.exp(mul_pn_x)
+     div_up = input0 * exp_pn_x
+     div_down_rec = torch.reciprocal(div_down)
+     result = div_up * div_down_rec
+
+     return result.cpu()
+
+
+ def npu_fast_gelu_backward(grad, input_x):
+     const_2 = 1.702
+     const_3 = 1.0
+     const_1 = 0.0 - const_2
+
+     # e^(-1.702|x|)
+     abs_x = torch.abs(input_x)
+     mul_abs_x = abs_x * const_1
+     exp_x = torch.exp(mul_abs_x)
+
+     # 1.702 * x * e^(-1.702|x|)
+     add_2 = input_x * exp_x
+     add_2 = add_2 * const_2
+
+     # e^(1.702(x-|x|))
+     pn_x = input_x - abs_x
+     mul_pn_x = pn_x * const_2
+     exp_pn_x = torch.exp(mul_pn_x)
+
+     # e^(-1.702|x|) + 1.702 * x * e^(-1.702|x|) + e^(1.702(x-|x|))
+     div_up = exp_x + add_2
+     div_up = div_up + exp_pn_x
+
+     # (e^(-1.702|x|) + 1)^2
+     div_down_i = exp_x + const_3
+     div_down = div_down_i * div_down_i
+     div_down_rec = torch.reciprocal(div_down)
+     result_temp = div_up * div_down_rec
+     result = grad * result_temp
+
+     return result.cpu()
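The fast_gelu forward above is an overflow-safe rearrangement of x * sigmoid(1.702 * x): for both x >= 0 and x < 0 the |x| form reduces to x / (1 + e^(-1.702x)). A quick numerical check, assuming the package-level re-export:

import torch
from msprobe.pytorch.bench_functions import fast_gelu

x = torch.randn(1000)
reference = x * torch.sigmoid(1.702 * x)       # same quantity without the |x| rearrangement
assert torch.allclose(fast_gelu(x), reference, atol=1e-6)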
@@ -0,0 +1,6 @@
+ import torch
+
+
+ def npu_layer_norm_eval(data, normalized_shape):
+     result = torch.nn.functional.layer_norm(data, normalized_shape)
+     return result.cpu()
@@ -0,0 +1,12 @@
+ import torch
+
+
+ def npu_linear(x, weight, bias):
+     output = torch.nn.functional.linear(x, weight, bias)
+     return output.cpu()
+
+
+ def npu_linear_backward(grad, input_data, weight):
+     input_grad = torch.matmul(grad, weight)
+     weight_grad = torch.matmul(grad.t(), input_data)
+     return input_grad.cpu(), weight_grad.cpu()
@@ -0,0 +1,48 @@
+ import torch
+
+
+ def matmul_backward(grad, self, other, mask):
+     grad_self, grad_other = None, None
+     dim_self = self.dim()
+     dim_other = other.dim()
+
+     size_grad = list(grad.size())
+     size_self = list(self.size())
+     size_other = list(other.size())
+     if dim_self == 1 and dim_other == 1:
+         grad_self = other.mul(grad) if mask[0] else grad_self
+         grad_other = self.mul(grad) if mask[1] else grad_other
+     elif dim_self == 2 and dim_other == 1:
+         grad_self = grad.unsqueeze(1).mm(other.unsqueeze(0)) if mask[0] else grad_self
+         grad_other = self.transpose(-1, -2).mm(grad.unsqueeze(1)).squeeze_(1) if mask[1] else grad_other
+     elif dim_self == 1 and dim_other == 2:
+         grad_self = grad.unsqueeze(0).mm(other.transpose(-1, -2)).squeeze_(0) if mask[0] else grad_self
+         grad_other = self.unsqueeze(1).mm(grad.unsqueeze(0)) if mask[1] else grad_other
+     elif dim_self >= 3 and (dim_other == 1 or dim_other == 2):
+         view_size = 1 if dim_other == 1 else size_grad[-1]
+         unfolded_grad = (grad.unsqueeze(-1) if dim_other == 1 else grad).contiguous().view(-1, view_size)
+         if mask[0]:
+             grad_self = unfolded_grad.mm(other.unsqueeze(0) if dim_other == 1 else other.transpose(-1, -2)) \
+                 .view(size_self)
+         if mask[1]:
+             unfolded_self = self.contiguous().view([-1, size_self[-1]])
+             grad_other = unfolded_self.transpose(-1, -2).mm(unfolded_grad).view(size_other)
+     elif (dim_self == 1 or dim_self == 2) and dim_other >= 3:
+         view_size = 1 if dim_self == 1 else size_grad[-2]
+         unfolded_grad_T = grad.view([-1, view_size]) \
+             if dim_self == 1 else grad.transpose(-1, -2).contiguous().view([-1, view_size])
+         if mask[0]:
+             # create a 2D-matrix from other
+             unfolded_other_T = \
+                 other.transpose(-1, -2).contiguous().view([-1, size_other[-2]]).transpose(-1, -2)
+             grad_self = unfolded_other_T.mm(unfolded_grad_T).transpose(-1, -2).view(size_self)
+         if mask[1]:
+             size_other_T = size_other[:-2]
+             size_other_T.extend(size_other[::-1][:2])
+             grad_other = \
+                 unfolded_grad_T.mm(self.unsqueeze(0) if dim_self == 1 else self).view(size_other_T).transpose(-1, -2)
+     else:
+         grad_self = torch.matmul(grad, other.transpose(-1, -2)) if mask[0] else grad_self
+         grad_other = torch.matmul(self.transpose(-1, -2), grad) if mask[1] else grad_other
+
+     return grad_self.cpu(), grad_other.cpu()
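A consistency sketch for matmul_backward against autograd in the 3D x 2D case; mask=[True, True] requests gradients for both operands, and the import assumes the package-level re-export:

import torch
from msprobe.pytorch.bench_functions import matmul_backward

a = torch.randn(2, 3, 4, requires_grad=True)
b = torch.randn(4, 5, requires_grad=True)
out = torch.matmul(a, b)
grad_out = torch.randn_like(out)
out.backward(grad_out)

grad_a, grad_b = matmul_backward(grad_out, a.detach(), b.detach(), [True, True])
assert torch.allclose(grad_a, a.grad, atol=1e-5)
assert torch.allclose(grad_b, b.grad, atol=1e-5)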