mindspore 2.6.0rc1__cp311-cp311-win_amd64.whl → 2.7.0__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (458)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +2 -2
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +42 -11
  9. mindspore/_extends/builtin_operations.py +3 -3
  10. mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
  11. mindspore/_extends/optimize/cell_utils.py +96 -0
  12. mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
  13. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  14. mindspore/_extends/parse/__init__.py +3 -3
  15. mindspore/_extends/parse/compile_config.py +44 -22
  16. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -2
  17. mindspore/_extends/parse/parser.py +65 -84
  18. mindspore/_extends/parse/resources.py +39 -0
  19. mindspore/_extends/parse/standard_method.py +58 -14
  20. mindspore/_extends/parse/trope.py +8 -1
  21. mindspore/_extends/pijit/__init__.py +1 -2
  22. mindspore/_extends/pijit/pijit_func_white_list.py +2 -5
  23. mindspore/amp.py +4 -22
  24. mindspore/atlprov.dll +0 -0
  25. mindspore/avcodec-59.dll +0 -0
  26. mindspore/avdevice-59.dll +0 -0
  27. mindspore/avfilter-8.dll +0 -0
  28. mindspore/avformat-59.dll +0 -0
  29. mindspore/avutil-57.dll +0 -0
  30. mindspore/boost/adasum.py +1 -1
  31. mindspore/boost/boost_cell_wrapper.py +4 -4
  32. mindspore/c1.dll +0 -0
  33. mindspore/c1xx.dll +0 -0
  34. mindspore/c2.dll +0 -0
  35. mindspore/common/__init__.py +43 -12
  36. mindspore/common/_grad_function.py +2 -1
  37. mindspore/common/_pijit_context.py +28 -7
  38. mindspore/common/_stub_tensor.py +1 -209
  39. mindspore/common/_tensor_cpp_method.py +1 -1
  40. mindspore/common/_tensor_docs.py +178 -53
  41. mindspore/common/_utils.py +9 -1
  42. mindspore/common/api.py +377 -203
  43. mindspore/common/dtype.py +108 -57
  44. mindspore/common/dump.py +11 -16
  45. mindspore/common/dynamic_shape/__init__.py +0 -0
  46. mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +17 -23
  47. mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
  48. mindspore/common/file_system.py +59 -9
  49. mindspore/common/generator.py +5 -3
  50. mindspore/common/hook_handle.py +33 -5
  51. mindspore/common/jit_config.py +1 -1
  52. mindspore/common/jit_trace.py +84 -105
  53. mindspore/common/np_dtype.py +3 -3
  54. mindspore/common/parameter.py +27 -29
  55. mindspore/common/recompute.py +5 -7
  56. mindspore/common/sparse_tensor.py +0 -3
  57. mindspore/common/symbol.py +0 -1
  58. mindspore/common/tensor.py +117 -131
  59. mindspore/communication/_comm_helper.py +46 -4
  60. mindspore/communication/management.py +79 -7
  61. mindspore/context.py +67 -55
  62. mindspore/dataset/__init__.py +1 -1
  63. mindspore/dataset/audio/transforms.py +1 -1
  64. mindspore/dataset/core/config.py +38 -4
  65. mindspore/dataset/engine/datasets.py +350 -322
  66. mindspore/dataset/engine/datasets_user_defined.py +70 -24
  67. mindspore/dataset/engine/iterators.py +2 -2
  68. mindspore/dataset/engine/obs/config_loader.py +2 -2
  69. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
  70. mindspore/dataset/transforms/c_transforms.py +2 -2
  71. mindspore/dataset/transforms/py_transforms.py +7 -3
  72. mindspore/dataset/transforms/transforms.py +10 -6
  73. mindspore/dataset/vision/__init__.py +1 -1
  74. mindspore/dataset/vision/py_transforms.py +8 -8
  75. mindspore/dataset/vision/transforms.py +17 -5
  76. mindspore/dataset/vision/utils.py +632 -21
  77. mindspore/dataset/vision/validators.py +1 -0
  78. mindspore/device_context/ascend/device.py +1 -1
  79. mindspore/device_context/ascend/op_tuning.py +35 -1
  80. mindspore/device_context/gpu/__init__.py +2 -2
  81. mindspore/device_context/gpu/device.py +1 -1
  82. mindspore/device_context/gpu/op_precision.py +4 -2
  83. mindspore/device_context/gpu/op_tuning.py +6 -3
  84. mindspore/device_manager.py +16 -9
  85. mindspore/dnnl.dll +0 -0
  86. mindspore/dpcmi.dll +0 -0
  87. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -4
  88. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  89. mindspore/experimental/optim/adadelta.py +13 -20
  90. mindspore/experimental/optim/adagrad.py +15 -22
  91. mindspore/experimental/optim/adam.py +17 -24
  92. mindspore/experimental/optim/adamax.py +14 -22
  93. mindspore/experimental/optim/adamw.py +28 -34
  94. mindspore/experimental/optim/asgd.py +15 -25
  95. mindspore/experimental/optim/lr_scheduler.py +27 -45
  96. mindspore/experimental/optim/nadam.py +14 -24
  97. mindspore/experimental/optim/optimizer.py +13 -23
  98. mindspore/experimental/optim/radam.py +18 -24
  99. mindspore/experimental/optim/rmsprop.py +14 -25
  100. mindspore/experimental/optim/rprop.py +15 -26
  101. mindspore/experimental/optim/sgd.py +9 -19
  102. mindspore/hal/__init__.py +4 -4
  103. mindspore/hal/contiguous_tensors_handle.py +2 -2
  104. mindspore/hal/memory.py +27 -7
  105. mindspore/include/api/cell.h +65 -5
  106. mindspore/include/api/cfg.h +24 -7
  107. mindspore/include/api/context.h +1 -0
  108. mindspore/include/api/delegate.h +10 -2
  109. mindspore/include/api/dual_abi_helper.h +100 -19
  110. mindspore/include/api/graph.h +14 -1
  111. mindspore/include/api/kernel.h +16 -3
  112. mindspore/include/api/kernel_api.h +9 -1
  113. mindspore/include/api/metrics/accuracy.h +9 -0
  114. mindspore/include/api/model.h +8 -1
  115. mindspore/include/api/model_group.h +4 -0
  116. mindspore/include/api/model_parallel_runner.h +2 -0
  117. mindspore/include/api/status.h +48 -10
  118. mindspore/include/api/types.h +8 -3
  119. mindspore/include/c_api/model_c.h +0 -58
  120. mindspore/include/c_api/tensor_c.h +0 -26
  121. mindspore/include/dataset/constants.h +9 -0
  122. mindspore/include/dataset/vision_ascend.h +1 -1
  123. mindspore/jpeg62.dll +0 -0
  124. mindspore/mindrecord/tools/cifar10.py +61 -11
  125. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
  126. mindspore/mindspore_backend_common.dll +0 -0
  127. mindspore/mindspore_backend_manager.dll +0 -0
  128. mindspore/mindspore_common.dll +0 -0
  129. mindspore/mindspore_core.dll +0 -0
  130. mindspore/mindspore_cpu_res_manager.dll +0 -0
  131. mindspore/mindspore_dump.dll +0 -0
  132. mindspore/mindspore_frontend.dll +0 -0
  133. mindspore/mindspore_glog.dll +0 -0
  134. mindspore/mindspore_memory_pool.dll +0 -0
  135. mindspore/mindspore_ms_backend.dll +0 -0
  136. mindspore/mindspore_ops.dll +0 -0
  137. mindspore/mindspore_ops_host.dll +0 -0
  138. mindspore/mindspore_ops_kernel_common.dll +0 -0
  139. mindspore/mindspore_profiler.dll +0 -0
  140. mindspore/mindspore_pyboost.dll +0 -0
  141. mindspore/mindspore_pynative.dll +0 -0
  142. mindspore/mindspore_res_manager.dll +0 -0
  143. mindspore/mindspore_runtime_pipeline.dll +0 -0
  144. mindspore/mint/__init__.py +6 -46
  145. mindspore/mint/distributed/__init__.py +5 -0
  146. mindspore/mint/distributed/distributed.py +429 -23
  147. mindspore/mint/nn/__init__.py +1 -1
  148. mindspore/mint/nn/functional.py +53 -6
  149. mindspore/mint/nn/layer/_functions.py +163 -294
  150. mindspore/mint/nn/layer/activation.py +8 -6
  151. mindspore/mint/nn/layer/conv.py +140 -104
  152. mindspore/mint/nn/layer/normalization.py +11 -25
  153. mindspore/mint/optim/adam.py +19 -18
  154. mindspore/mint/optim/adamw.py +14 -8
  155. mindspore/mint/optim/sgd.py +5 -5
  156. mindspore/msobj140.dll +0 -0
  157. mindspore/mspdb140.dll +0 -0
  158. mindspore/mspdbcore.dll +0 -0
  159. mindspore/mspdbst.dll +0 -0
  160. mindspore/mspft140.dll +0 -0
  161. mindspore/msvcdis140.dll +0 -0
  162. mindspore/msvcp140_1.dll +0 -0
  163. mindspore/msvcp140_2.dll +0 -0
  164. mindspore/msvcp140_atomic_wait.dll +0 -0
  165. mindspore/msvcp140_codecvt_ids.dll +0 -0
  166. mindspore/nn/cell.py +491 -623
  167. mindspore/nn/grad/cell_grad.py +11 -12
  168. mindspore/nn/layer/activation.py +36 -36
  169. mindspore/nn/layer/basic.py +74 -77
  170. mindspore/nn/layer/channel_shuffle.py +4 -4
  171. mindspore/nn/layer/combined.py +4 -2
  172. mindspore/nn/layer/conv.py +117 -110
  173. mindspore/nn/layer/dense.py +9 -7
  174. mindspore/nn/layer/embedding.py +50 -52
  175. mindspore/nn/layer/image.py +38 -40
  176. mindspore/nn/layer/math.py +111 -112
  177. mindspore/nn/layer/normalization.py +56 -44
  178. mindspore/nn/layer/pooling.py +58 -63
  179. mindspore/nn/layer/rnn_cells.py +33 -33
  180. mindspore/nn/layer/rnns.py +56 -56
  181. mindspore/nn/layer/thor_layer.py +74 -73
  182. mindspore/nn/layer/transformer.py +11 -1
  183. mindspore/nn/learning_rate_schedule.py +20 -20
  184. mindspore/nn/loss/loss.py +79 -81
  185. mindspore/nn/optim/adam.py +4 -6
  186. mindspore/nn/optim/adasum.py +2 -2
  187. mindspore/nn/optim/asgd.py +2 -0
  188. mindspore/nn/optim/lamb.py +1 -3
  189. mindspore/nn/optim/optimizer.py +1 -1
  190. mindspore/nn/optim/tft_wrapper.py +2 -3
  191. mindspore/nn/optim/thor.py +2 -2
  192. mindspore/nn/probability/distribution/_utils/utils.py +2 -2
  193. mindspore/nn/probability/distribution/exponential.py +2 -1
  194. mindspore/nn/probability/distribution/poisson.py +2 -1
  195. mindspore/nn/sparse/sparse.py +3 -3
  196. mindspore/nn/wrap/cell_wrapper.py +73 -42
  197. mindspore/nn/wrap/grad_reducer.py +37 -52
  198. mindspore/nn/wrap/loss_scale.py +72 -74
  199. mindspore/numpy/array_creations.py +7 -7
  200. mindspore/numpy/fft.py +1 -1
  201. mindspore/numpy/math_ops.py +5 -5
  202. mindspore/numpy/utils_const.py +1 -1
  203. mindspore/opencv_core452.dll +0 -0
  204. mindspore/opencv_imgcodecs452.dll +0 -0
  205. mindspore/opencv_imgproc452.dll +0 -0
  206. mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
  207. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
  208. mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
  209. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  210. mindspore/{experimental/es/__init__.py → ops/_op_impl/cpu/joinedstr_op.py} +12 -6
  211. mindspore/ops/_vmap/vmap_array_ops.py +31 -13
  212. mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
  213. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +54 -13
  214. mindspore/ops/auto_generate/gen_extend_func.py +27 -145
  215. mindspore/ops/auto_generate/gen_ops_def.py +1027 -347
  216. mindspore/ops/auto_generate/gen_ops_prim.py +2341 -1117
  217. mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
  218. mindspore/ops/composite/__init__.py +10 -0
  219. mindspore/ops/composite/base.py +9 -5
  220. mindspore/ops/composite/multitype_ops/__init__.py +12 -1
  221. mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
  222. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  223. mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
  224. mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
  225. mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
  226. mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
  227. mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
  228. mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
  229. mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
  230. mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
  231. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
  232. mindspore/ops/function/__init__.py +4 -1
  233. mindspore/ops/function/_add_attr_func.py +11 -6
  234. mindspore/ops/function/array_func.py +19 -102
  235. mindspore/ops/function/debug_func.py +8 -5
  236. mindspore/ops/function/grad/grad_func.py +5 -13
  237. mindspore/ops/function/math_func.py +77 -572
  238. mindspore/ops/function/nn_func.py +46 -94
  239. mindspore/ops/function/other_func.py +4 -1
  240. mindspore/ops/function/random_func.py +44 -5
  241. mindspore/ops/function/vmap_func.py +2 -1
  242. mindspore/ops/functional.py +4 -4
  243. mindspore/ops/functional_overload.py +594 -18
  244. mindspore/ops/op_info_register.py +21 -0
  245. mindspore/ops/operations/__init__.py +16 -11
  246. mindspore/ops/operations/_custom_ops_utils.py +689 -34
  247. mindspore/ops/operations/_inner_ops.py +14 -18
  248. mindspore/ops/operations/_sequence_ops.py +1 -1
  249. mindspore/ops/operations/array_ops.py +5 -51
  250. mindspore/ops/operations/comm_ops.py +186 -41
  251. mindspore/ops/operations/custom_ops.py +303 -177
  252. mindspore/ops/operations/debug_ops.py +59 -4
  253. mindspore/ops/operations/image_ops.py +13 -13
  254. mindspore/ops/operations/manually_defined/ops_def.py +27 -28
  255. mindspore/ops/operations/math_ops.py +8 -9
  256. mindspore/ops/operations/nn_ops.py +8 -40
  257. mindspore/ops/primitive.py +9 -20
  258. mindspore/ops/tensor_method.py +63 -15
  259. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
  260. mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
  261. mindspore/ops_generate/api/functions_cc_generator.py +58 -10
  262. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
  263. mindspore/ops_generate/common/base_generator.py +14 -0
  264. mindspore/ops_generate/common/gen_constants.py +8 -3
  265. mindspore/ops_generate/common/gen_utils.py +0 -19
  266. mindspore/ops_generate/common/op_proto.py +11 -4
  267. mindspore/ops_generate/common/template.py +88 -11
  268. mindspore/ops_generate/gen_ops.py +1 -1
  269. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
  270. mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
  271. mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
  272. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
  273. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
  274. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
  275. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
  276. mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -16
  277. mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
  278. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
  279. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
  280. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
  281. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
  282. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
  283. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
  284. mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
  285. mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
  286. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
  287. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
  288. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
  289. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
  290. mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
  291. mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
  292. mindspore/parallel/_auto_parallel_context.py +16 -23
  293. mindspore/parallel/_cell_wrapper.py +113 -45
  294. mindspore/parallel/_parallel_serialization.py +4 -3
  295. mindspore/parallel/_ps_context.py +4 -6
  296. mindspore/parallel/_tensor.py +167 -12
  297. mindspore/parallel/_transformer/moe.py +1 -1
  298. mindspore/parallel/_transformer/transformer.py +17 -12
  299. mindspore/parallel/_utils.py +5 -11
  300. mindspore/parallel/auto_parallel.py +35 -14
  301. mindspore/parallel/checkpoint_convert.py +3 -3
  302. mindspore/parallel/checkpoint_transform.py +13 -7
  303. mindspore/parallel/cluster/process_entity/_api.py +88 -49
  304. mindspore/parallel/cluster/process_entity/_utils.py +95 -7
  305. mindspore/parallel/cluster/run.py +48 -7
  306. mindspore/parallel/function/__init__.py +8 -1
  307. mindspore/parallel/function/reshard_func.py +12 -12
  308. mindspore/parallel/nn/__init__.py +15 -2
  309. mindspore/parallel/nn/parallel_cell_wrapper.py +50 -14
  310. mindspore/parallel/nn/parallel_grad_reducer.py +7 -14
  311. mindspore/parallel/shard.py +10 -25
  312. mindspore/parallel/transform_safetensors.py +469 -174
  313. mindspore/pgodb140.dll +0 -0
  314. mindspore/pgort140.dll +0 -0
  315. mindspore/profiler/__init__.py +2 -1
  316. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
  317. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
  318. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
  319. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
  320. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  321. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
  322. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
  323. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
  324. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
  325. mindspore/profiler/analysis/task_manager.py +1 -1
  326. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
  327. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
  328. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +10 -9
  329. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +43 -23
  330. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
  331. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
  332. mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
  333. mindspore/profiler/common/constant.py +16 -0
  334. mindspore/profiler/common/msprof_cmd_tool.py +2 -2
  335. mindspore/profiler/common/path_manager.py +9 -0
  336. mindspore/profiler/common/profiler_context.py +50 -29
  337. mindspore/profiler/common/profiler_info.py +0 -16
  338. mindspore/profiler/common/profiler_meta_data.py +1 -0
  339. mindspore/profiler/common/profiler_op_analyse.py +239 -0
  340. mindspore/profiler/common/profiler_output_path.py +23 -8
  341. mindspore/profiler/common/profiler_parameters.py +128 -35
  342. mindspore/profiler/dynamic_profile/__init__.py +0 -0
  343. mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
  344. mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
  345. mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
  346. mindspore/profiler/dynamic_profiler.py +374 -338
  347. mindspore/profiler/envprofiler.py +42 -12
  348. mindspore/profiler/experimental_config.py +112 -7
  349. mindspore/profiler/mstx.py +33 -12
  350. mindspore/profiler/platform/__init__.py +2 -3
  351. mindspore/profiler/platform/cpu_profiler.py +10 -4
  352. mindspore/profiler/platform/npu_profiler.py +30 -20
  353. mindspore/profiler/profiler.py +218 -154
  354. mindspore/profiler/profiler_action_controller.py +65 -77
  355. mindspore/profiler/profiler_interface.py +2 -2
  356. mindspore/profiler/schedule.py +10 -4
  357. mindspore/rewrite/common/config.py +1 -0
  358. mindspore/rewrite/common/namer.py +1 -0
  359. mindspore/rewrite/common/namespace.py +1 -0
  360. mindspore/rewrite/node/node.py +31 -11
  361. mindspore/rewrite/parsers/assign_parser.py +1 -1
  362. mindspore/rewrite/symbol_tree/symbol_tree.py +2 -2
  363. mindspore/run_check/_check_version.py +7 -10
  364. mindspore/runtime/__init__.py +8 -6
  365. mindspore/runtime/event.py +10 -4
  366. mindspore/runtime/executor.py +87 -45
  367. mindspore/runtime/memory.py +31 -32
  368. mindspore/runtime/thread_bind_core.py +299 -165
  369. mindspore/safeguard/rewrite_obfuscation.py +12 -13
  370. mindspore/swresample-4.dll +0 -0
  371. mindspore/swscale-6.dll +0 -0
  372. mindspore/tbbmalloc.dll +0 -0
  373. mindspore/tinyxml2.dll +0 -0
  374. mindspore/train/_utils.py +17 -7
  375. mindspore/train/amp.py +43 -23
  376. mindspore/train/callback/__init__.py +5 -5
  377. mindspore/train/callback/_callback.py +2 -1
  378. mindspore/train/callback/_checkpoint.py +4 -14
  379. mindspore/train/callback/_flops_collector.py +11 -7
  380. mindspore/train/callback/_landscape.py +0 -1
  381. mindspore/train/callback/_train_fault_tolerance.py +98 -21
  382. mindspore/train/data_sink.py +15 -6
  383. mindspore/train/dataset_helper.py +14 -5
  384. mindspore/train/model.py +133 -69
  385. mindspore/train/serialization.py +168 -126
  386. mindspore/train/summary/summary_record.py +13 -2
  387. mindspore/train/train_thor/model_thor.py +2 -2
  388. mindspore/turbojpeg.dll +0 -0
  389. mindspore/utils/__init__.py +3 -2
  390. mindspore/utils/dryrun.py +0 -6
  391. mindspore/utils/runtime_execution_order_check.py +163 -77
  392. mindspore/utils/sdc_detect.py +68 -0
  393. mindspore/utils/utils.py +14 -17
  394. mindspore/vcmeta.dll +0 -0
  395. mindspore/vcruntime140.dll +0 -0
  396. mindspore/vcruntime140_1.dll +0 -0
  397. mindspore/version.py +1 -1
  398. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/METADATA +5 -4
  399. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/RECORD +403 -442
  400. mindspore/_deprecated/jit.py +0 -198
  401. mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
  402. mindspore/communication/_hccl_management.py +0 -297
  403. mindspore/experimental/es/embedding_service.py +0 -891
  404. mindspore/experimental/es/embedding_service_layer.py +0 -581
  405. mindspore/profiler/common/validator/__init__.py +0 -14
  406. mindspore/profiler/common/validator/validate_path.py +0 -84
  407. mindspore/profiler/parser/__init__.py +0 -14
  408. mindspore/profiler/parser/aicpu_data_parser.py +0 -272
  409. mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
  410. mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
  411. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
  412. mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
  413. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
  414. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
  415. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
  416. mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
  417. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
  418. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
  419. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
  420. mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
  421. mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
  422. mindspore/profiler/parser/ascend_flops_generator.py +0 -116
  423. mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
  424. mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
  425. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  426. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  427. mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
  428. mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
  429. mindspore/profiler/parser/ascend_op_generator.py +0 -334
  430. mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
  431. mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
  432. mindspore/profiler/parser/base_timeline_generator.py +0 -483
  433. mindspore/profiler/parser/container.py +0 -229
  434. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
  435. mindspore/profiler/parser/flops_parser.py +0 -531
  436. mindspore/profiler/parser/framework_enum.py +0 -111
  437. mindspore/profiler/parser/framework_parser.py +0 -464
  438. mindspore/profiler/parser/framework_struct.py +0 -61
  439. mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
  440. mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
  441. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
  442. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
  443. mindspore/profiler/parser/hccl_parser.py +0 -573
  444. mindspore/profiler/parser/hwts_log_parser.py +0 -122
  445. mindspore/profiler/parser/integrator.py +0 -526
  446. mindspore/profiler/parser/memory_usage_parser.py +0 -277
  447. mindspore/profiler/parser/minddata_analyzer.py +0 -800
  448. mindspore/profiler/parser/minddata_parser.py +0 -186
  449. mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
  450. mindspore/profiler/parser/op_intermediate_parser.py +0 -149
  451. mindspore/profiler/parser/optime_parser.py +0 -250
  452. mindspore/profiler/parser/profiler_info.py +0 -213
  453. mindspore/profiler/parser/step_trace_parser.py +0 -666
  454. mindspore/utils/hooks.py +0 -81
  455. /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
  456. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/WHEEL +0 -0
  457. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/entry_points.txt +0 -0
  458. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/top_level.txt +0 -0
The code changes below are excerpted from mindspore/ops/function/math_func.py (entry 237 in the list above).

@@ -56,12 +56,11 @@ from mindspore.ops.auto_generate import (minimum, maximum, mul, muls, sin, sinc,
     sum_ext_op, prod_ext_op, all, matrix_inverse_ext, atan2_ext, sign, acos_ext,
     acosh_ext, asin_ext, asinh_ext, atan_ext, tan, median_ext_op, median_dim_op,
     xlogy_op, xlogy_scalar_other_op, xlogy_scalar_self_op, trunc, histc_ext, roll,
-    bincount_ext, rotated_iou_op, cat, narrow, var_op, pow, pow_scalar_tensor_op,
+    bincount_ext, rotated_iou_op, cat, narrow, var_op, pow, inplace_erfinv_op,
     frac_ext, pow_tensor_scalar_op, not_equal_op, isinf, addmv_op, cdist,
-    addbmm_op, addmm_op, grouped_matmul_v2, transpose_ext, grouped_matmul_v4,
-    inplace_erfinv_op)
+    addbmm_op, addmm_op, pow_scalar_tensor_op)
 # 2
-
+from mindspore.ops.functional_overload import gmm
 # 3
 
 # 4
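
Note on the import churn above: gmm leaves the auto_generate import list and is re-exported from mindspore.ops.functional_overload, matching the removal of the Python gmm definition at the bottom of this diff. A quick sanity-check sketch, assuming a 2.7.0 install:

# gmm is now served by the C++-backed overload rather than the Python
# definition that math_func.py used to carry (see the last hunk below).
from mindspore.ops.functional_overload import gmm
print(callable(gmm))  # expected: True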
@@ -1219,7 +1218,7 @@ def logical_not(input):
 
     Examples:
         >>> import mindspore
-        >>> x = mindspore.tensor([True, False, True], mindspore.bool_)
+        >>> x = mindspore.tensor([True, False, True], mindspore.bool)
         >>> output = mindspore.ops.logical_not(x)
         >>> print(output)
         [False True False]
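
The examples above switch from the 2.6 spelling mindspore.bool_ to mindspore.bool. A minimal sketch, assuming the new alias resolves to the boolean dtype on 2.7.0:

import mindspore

# mindspore.bool is the 2.7.0 docs spelling of the boolean dtype
# (written mindspore.bool_ in 2.6); behavior is otherwise unchanged.
x = mindspore.tensor([True, False, True], mindspore.bool)
print(mindspore.ops.logical_not(x))  # [False  True False]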
@@ -1251,23 +1250,23 @@ def logical_or(input, other):
 
     Examples:
         >>> import mindspore
-        >>> x = mindspore.tensor([True, False, True], mindspore.bool_)
-        >>> y = mindspore.tensor([True, True, False], mindspore.bool_)
+        >>> x = mindspore.tensor([True, False, True], mindspore.bool)
+        >>> y = mindspore.tensor([True, True, False], mindspore.bool)
         >>> output = mindspore.ops.logical_or(x, y)
         >>> print(output)
         [ True True True]
-        >>> x = mindspore.tensor(1, mindspore.bool_)
-        >>> y = mindspore.tensor(0, mindspore.bool_)
+        >>> x = mindspore.tensor(1, mindspore.bool)
+        >>> y = mindspore.tensor(0, mindspore.bool)
         >>> output = mindspore.ops.logical_or(x, y)
         >>> print(output)
         True
         >>> x = True
-        >>> y = mindspore.tensor(0, mindspore.bool_)
+        >>> y = mindspore.tensor(0, mindspore.bool)
         >>> output = mindspore.ops.logical_or(x, y)
         >>> print(output)
         True
         >>> x = True
-        >>> y = mindspore.tensor([True, False], mindspore.bool_)
+        >>> y = mindspore.tensor([True, False], mindspore.bool)
         >>> output = mindspore.ops.logical_or(x, y)
         >>> print(output)
         [True True]
@@ -1299,23 +1298,23 @@ def logical_and(input, other):
 
     Examples:
         >>> import mindspore
-        >>> x = mindspore.tensor([True, False, True], mindspore.bool_)
-        >>> y = mindspore.tensor([True, True, False], mindspore.bool_)
+        >>> x = mindspore.tensor([True, False, True], mindspore.bool)
+        >>> y = mindspore.tensor([True, True, False], mindspore.bool)
         >>> output = mindspore.ops.logical_and(x, y)
         >>> print(output)
         [ True False False]
-        >>> x = mindspore.tensor(1, mindspore.bool_)
-        >>> y = mindspore.tensor(0, mindspore.bool_)
+        >>> x = mindspore.tensor(1, mindspore.bool)
+        >>> y = mindspore.tensor(0, mindspore.bool)
         >>> output = mindspore.ops.logical_and(x, y)
         >>> print(output)
         False
         >>> x = True
-        >>> y = mindspore.tensor(0, mindspore.bool_)
+        >>> y = mindspore.tensor(0, mindspore.bool)
         >>> output = mindspore.ops.logical_and(x, y)
         >>> print(output)
         False
         >>> x = True
-        >>> y = mindspore.tensor([True, False], mindspore.bool_)
+        >>> y = mindspore.tensor([True, False], mindspore.bool)
         >>> output = mindspore.ops.logical_and(x, y)
         >>> print(output)
         [True False]
@@ -1783,10 +1782,10 @@ def pow_ext(input, exponent):
     Args:
         input (Union[Tensor, Number]): The first input is a Number or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         exponent (Union[Tensor, Number]): The second input is a Number or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
 
     Returns:
         Tensor, the shape is the same as the one after broadcasting,
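
Only the dtype link text changes here; pow_ext still broadcasts its two inputs. A small illustrative sketch using the public ops.pow:

import mindspore
from mindspore import ops

# A (2, 3) base with a scalar exponent: the result keeps the broadcast
# shape (2, 3), as the Returns section above describes.
x = mindspore.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
print(ops.pow(x, 2))  # element-wise squares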
@@ -3219,7 +3218,7 @@ def approximate_equal(x, y, tolerance=1e-5):
         >>> import mindspore
         >>> mindspore.ops.approximate_equal(mindspore.tensor([1e6, 2e6, float("inf"), float("-inf"), float("nan")]),
         ...                                 mindspore.tensor([1e6, 2e7, float("inf"), float("-inf"), float("nan")]))
-        Tensor(shape=[6], dtype=Bool, value= [ True, False, False, False, False])
+        Tensor(shape=[5], dtype=Bool, value= [ True, False, False, False, False])
         >>>
         >>> mindspore.ops.approximate_equal(mindspore.tensor([1e6, 2e6, 3e6]),
         ...                                 mindspore.tensor([1.00001e6, 2.00002e6, 3.00009e6]), tolerance=1e3)
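
The corrected shape=[5] matches the five input elements: approximate_equal is element-wise, returning |x - y| < tolerance per position (so the inf and nan positions compare False). Reproducing the second docstring example in miniature:

import mindspore

# Differences are 10 and 20 here, both below tolerance=1e3.
x = mindspore.tensor([1e6, 2e6])
y = mindspore.tensor([1.00001e6, 2.00002e6])
print(mindspore.ops.approximate_equal(x, y, tolerance=1e3))  # [ True  True]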
@@ -4860,7 +4859,7 @@ def addmv(input, mat, vec, *, beta=1, alpha=1):
 
 
 def addmv_ext(input, mat, vec, *, beta=1, alpha=1):
-    """
+    r"""
     Performs a matrix-vector product of `mat` and `vec`, and add the input vector `input` to the final result.
 
     If `mat` is a tensor of size :math:`(N, M)` , `vec` is a 1-D tensor of size :math:`M` , then `input` must be
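
For reference, addmv_ext follows the usual addmv contract, out = beta * input + alpha * (mat @ vec); a sketch against the public ops.addmv visible in the hunk header above:

import mindspore
from mindspore import ops

mat = mindspore.tensor([[1.0, 2.0], [3.0, 4.0]])  # (N, M) = (2, 2)
vec = mindspore.tensor([1.0, 1.0])                # (M,)
inp = mindspore.tensor([0.5, 0.5])                # broadcastable with (N,)
print(ops.addmv(inp, mat, vec, beta=1, alpha=1))  # [3.5 7.5]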
@@ -5150,6 +5149,18 @@ def bernoulli_ext(input, *, generator=None):
     return bernoulli_ext_(input, seed, offset)
 
 
+def bernoulli_(input, p=0.5, *, generator=None):
+    r"""
+    bernoulli_(input, p=0.5, *, generator=None) -> Tensor
+
+    In-place version of :func:`mindspore.ops.bernoulli_ext`.
+    """
+    if generator is None:
+        generator = default_generator
+    seed, offset = generator._step(generator_step_)  # pylint: disable=protected-access
+    return ops.functional_overload.bernoulli_(input, p, seed, offset)
+
+
 def bessel_i1(x):
     r"""
     Computes the first order modified Bessel function of the first kind for each element input.
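
Usage sketch for the new in-place helper; the module path is an assumption based on the file being patched (math_func.py, reachable as ops.function.math_func per the gmm docstring later in this diff):

import mindspore
from mindspore.ops.function import math_func

# Each element of x is overwritten in place with 1 with probability p.
x = mindspore.ops.zeros((2, 3))
x = math_func.bernoulli_(x, p=0.3)  # returns the mutated tensor
print(x)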
@@ -5622,7 +5633,7 @@ def dstack(tensors):
         1-D or 2-D tensors must have the same shape.
 
     Args:
-        tensors (Union(List[Tensor], Tuple[Tensor])): The list of tensors or tuple of tensors.
+        tensors (Union(List[Tensor], tuple[Tensor])): The list of tensors or tuple of tensors.
 
     Returns:
         Tensor
@@ -5783,7 +5794,7 @@ def _diff_helper(input, n, dim):
     is_bool = (input.dtype == mstype.bool_)
     result = input
 
-    for i in range(n):  # pylint: disable=unused-variable
+    for _ in range(n):  # pylint: disable=unused-variable
         if is_bool:
             result = logical_xor(narrow(result, dim, 1, out_len), narrow(result, dim, 0, out_len))
         else:
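
Behavior is unchanged here (the loop variable was unused); for boolean input each pass of the loop is an XOR of the two length-(n-1) windows, and a pairwise subtraction otherwise. For example:

import mindspore

x = mindspore.tensor([True, False, False, True])
# XOR of [False, False, True] with [True, False, False]:
print(mindspore.ops.diff(x))  # [ True False  True]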
@@ -6626,7 +6637,7 @@ def amin(input, axis=None, keepdims=False, *, initial=None, where=None):
         >>> # case 4: Use "where" to include only specific elements in computing the minimum.
         >>> where = mindspore.tensor([[1, 0, 1, 0],
         ...                           [0, 0, 1, 1],
-        ...                           [1, 1, 1, 0]], dtype=mindspore.bool_)
+        ...                           [1, 1, 1, 0]], dtype=mindspore.bool)
         >>> mindspore.ops.amin(input, axis=1, keepdims=True, initial=0, where=where)
         Tensor(shape=[3, 1], dtype=Int64, value=
         [[ 0],
@@ -6708,7 +6719,7 @@ def amax(input, axis=None, keepdims=False, *, initial=None, where=None):
         >>> # case 4: Use "where" to include only specific elements in computing the maximum.
         >>> where = mindspore.tensor([[0, 0, 1, 0],
         ...                           [0, 0, 1, 1],
-        ...                           [1, 1, 1, 0]], dtype=mindspore.bool_)
+        ...                           [1, 1, 1, 0]], dtype=mindspore.bool)
         >>> mindspore.ops.amax(input, axis=1, keepdims=True, initial=0, where=where)
         Tensor(shape=[3, 1], dtype=Int64, value=
         [[4],
@@ -8441,7 +8452,7 @@ def matmul(input, other):
         >>> input = mindspore.ops.arange(24, dtype=mindspore.float32).reshape(2, 3, 4)
         >>> other = mindspore.ops.arange(20, dtype=mindspore.float32).reshape(4, 5)
         >>> output = mindspore.ops.matmul(input, other)
-        >>> print(>>> output)
+        >>> print(output)
         [[[ 70, 76, 82, 88, 94],
          [ 190, 212, 234, 256, 278],
          [ 310, 348, 386, 424, 462]],
@@ -9161,10 +9172,10 @@ def remainder_ext(input, other):
         input (Union[Tensor, numbers.Number, bool]): The dividend is a numbers.Number or
             a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, numbers.Number, bool]): The divisor is a numbers.Number or
-            a bool or a tensor whose data type is number or bool\_ when the dividend is a tensor.
-            When the dividend is Scalar, the divisor must be a Tensor whose data type is number or bool\_.
+            a bool or a tensor whose data type is number or bool when the dividend is a tensor.
+            When the dividend is Scalar, the divisor must be a Tensor whose data type is number or bool.
 
     Returns:
         Tensor, with dtype promoted and shape broadcasted.
@@ -9631,12 +9642,10 @@ def _einsum_convert_num_to_char(num):
     """For einsum, convert number into char."""
     if [num] == [Ellipsis]:
         return '...'
-    # pylint: disable=chained-comparison
-    if num >= 0 and num < 26:
-        return chr(num + ord('A'))
-    # pylint: disable=chained-comparison
-    if num >= 26 and num < 52:
-        return chr(num - 26 + ord('a'))
+    if 0 <= num < 26:
+        return chr(num + 65)
+    if 26 <= num < 52:
+        return chr(num + 71)
     raise ValueError(f"For Einsum, the number in sublist should be in range [0, 52), but got {num}")
 
 
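The rewritten branches are numerically identical to the removed ord()-based ones, since 65 == ord('A') and 71 == ord('a') - 26; 0..25 still map to A..Z and 26..51 to a..z. A quick self-contained check:

for num in range(52):
    new = chr(num + 65) if num < 26 else chr(num + 71)
    old = chr(num + ord('A')) if num < 26 else chr(num - 26 + ord('a'))
    assert new == old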
@@ -9738,33 +9747,19 @@ def einsum(equation, *operands):
     return _get_cache_prim(P.Einsum)(equation)(operands)
 
 
-def _einsum_convert_sublist_to_label(num, ell_num=False):
-    """Convert sublist to label."""
-    if num == Ellipsis or ell_num and num == 52:
-        return '...'
-    if 0 <= num < 26:
-        return chr(num + ord('A'))
-    if 26 <= num < 52:
-        return chr(num + ord('a') - 26)
-    raise ValueError(
-        f'For einsum, the number in sublist must be in range [0, 52), but got {num}')
-
-
-def _einsum_convert_label_to_index(label):
-    """Convert label to index."""
-    label_num = ord(label)
-    if ord('A') <= label_num <= ord('Z'):
-        return label_num - ord('A')
-    if ord('a') <= label_num <= ord('z'):
-        return label_num - ord('a') + 26
-    if label_num == ord('.'):
-        return 52
-    raise ValueError(
-        f'For einsum, the label in equation must be in [a-zA-Z] or ., but got {label}')
-
-
-def _einsum_convert_sublist(equation, *operands):
+def _einsum_convert_sublist(equation, operands):
     """Convert the sublist to an equation operand if the received input is a sublist format."""
+    def _einsum_convert_sublist_to_label(num, ell_num=False):
+        """Convert sublist to label."""
+        if num == Ellipsis or ell_num and num == 52:
+            return '...'
+        if 0 <= num < 26:
+            return chr(num + ord('A'))
+        if 26 <= num < 52:
+            return chr(num + ord('a') - 26)
+        raise ValueError(
+            f'For einsum, the number in sublist must be in range [0, 52), but got {num}')
+
     if isinstance(equation, Tensor):
         equation_tmp = ''
         for i, lst in enumerate(operands):
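
The converter handles einsum's sublist calling convention, where integer labels stand in for subscript letters (0..25 -> A..Z, 26..51 -> a..z, Ellipsis -> '...'). An illustrative call, assuming the mint entry point accepts the sublist form as before:

import mindspore
from mindspore import mint, ops

# Sublist spelling of "AB,BC->AC".
a = ops.arange(6, dtype=mindspore.float32).reshape(2, 3)
b = ops.arange(12, dtype=mindspore.float32).reshape(3, 4)
print(mint.einsum(a, [0, 1], b, [1, 2], [0, 2]).shape)  # (2, 4)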
@@ -9789,331 +9784,6 @@ def _einsum_convert_sublist(equation, *operands):
     return equation, operands
 
 
-def _einsum_check_inputargs(equation, operands):
-    """Check equation and operands."""
-    if not isinstance(equation, str):
-        raise TypeError(
-            f"For einsum, 'equation' must be a str, but got {type(equation)}.")
-    for operand in operands:
-        if not isinstance(operand, Tensor):
-            raise TypeError(
-                f"For einsum, members of 'operands' must be Tensor, but got {type(operand)}.")
-
-
-@constexpr
-def _einsum_parse_equation(equation):
-    """Parse equation."""
-    l_equation = ''
-    r_equation = ''
-    equation = equation.replace(' ', '')
-
-    if '->' in equation:
-        l_equation, r_equation = equation.split('->', 1)
-        if l_equation == '':
-            raise ValueError(
-                'For einsum, equation must contain characters to the left fo the arrow.')
-    else:
-        l_equation = equation
-
-    if ',' in l_equation:
-        l_equationlst = l_equation.split(",")
-    else:
-        l_equationlst = [l_equation]
-
-    l_equationlst = []
-
-    for subequation in l_equation.split(','):
-        if '.' in subequation and ('...' not in subequation or subequation.count('.') != 3):
-            raise ValueError(f"For einsum, an ellipsis in the equation must include three continuous \'.\', "
-                             f"and can only be found once.")
-        subequation_lst = [_einsum_convert_label_to_index(label) for label in subequation.replace('...', '.')]
-        l_equationlst.append(subequation_lst)
-
-    if "." in r_equation and ('...' not in r_equation or r_equation.count('.') != 3):
-        raise ValueError(f"For einsum, an ellipsis in the equation must include three continuous \'.\', "
-                         f"and can only be found once.")
-    r_equationlst = [_einsum_convert_label_to_index(label) for label in r_equation.replace('...', '.')]
-
-    return l_equationlst, r_equationlst, ('->' in equation)
-
-
-def _einsum_parse_labels(l_equationlst, operands):
-    """Parse left script of equation."""
-    align_rank = 0
-    max_labels = 53
-    ellipsis_dimnum = 0
-    labels_count = [0] * max_labels
-
-    if len(operands) != len(l_equationlst):
-        raise ValueError(f"For einsum, 'operands' is not equal to specified in the 'equation', "
-                         f"but got {len(operands)} and {len(l_equationlst)}.")
-
-    for idx, sub_equ in enumerate(l_equationlst):
-        start_dim = 0
-        label_num = 0
-        operand_shape = list(operands[idx].shape)
-        for label in sub_equ:
-            dim_num = 1
-            label_num += 1
-            end_dim = start_dim + 1
-
-            # Label is ellipsis
-            if label == 52:
-                end_dim = len(operand_shape) - len(sub_equ) + label_num
-                dim_num = end_dim - start_dim
-                if ellipsis_dimnum != 0 and ellipsis_dimnum != dim_num:
-                    raise ValueError(f"For einsum, an ellipsis in 'equation' can only represent the same numbers of "
-                                     f"dimensions in 'operands'.")
-                ellipsis_dimnum = dim_num
-            if labels_count[label] == 0:
-                align_rank += dim_num
-            labels_count[label] += 1
-            start_dim += dim_num
-        if label_num != len(sub_equ) or start_dim != len(operand_shape):
-            raise ValueError(f"For einsum, the numbers of labels specified in the 'equation' does not match "
-                             f"'operands[{idx}]'.")
-    return ellipsis_dimnum, labels_count, align_rank
-
-
-def _einsum_infer_output(r_equationlst, arrow_exist, ellipsis_dimnum, labels_count):
-    """Parse right script of equation and infer output shape."""
-    idx = 0
-    idle_idx = -1
-    output_rank = 0
-    labels_perm_idx = [idle_idx] * 53
-
-    if arrow_exist:
-        for label in r_equationlst:
-            if labels_count[label] != 0:
-                if labels_perm_idx[label] != idle_idx:
-                    raise ValueError(f"For einsum, '{_einsum_convert_sublist_to_label(label, True)}' or {label} in "
-                                     f"sublist format has appears more than once in output subscript.")
-                dimnum = 1
-                if label == 52:
-                    dimnum = ellipsis_dimnum
-                labels_perm_idx[label] = idx
-                output_rank += dimnum
-                idx += dimnum
-            else:
-                raise ValueError(f"For einsum, the label to the right of arrow in the 'equation' must appear on "
-                                 f"left, but '{_einsum_convert_sublist_to_label(label, True)}' does not.")
-    else:
-        if labels_count[52] != 0:
-            output_rank += ellipsis_dimnum
-            labels_perm_idx[52] = idx
-            idx += ellipsis_dimnum
-        for label, count in enumerate(labels_count):
-            if count == 1:
-                output_rank += 1
-                labels_perm_idx[label] = idx
-                idx += 1
-
-    for label, count in enumerate(labels_count):
-        if count != 0 and labels_perm_idx[label] == idle_idx:
-            labels_perm_idx[label] = idx
-            idx += 1
-
-    return output_rank, labels_perm_idx
-
-
-def _einsum_adjust_operands(operands, l_equationlst, ellipsis_dimnum, labels_perm_idx, align_rank):
-    """Align operands to output as possible."""
-    # Unsqueeze miss dimensions to make all operands has same rank, compute diagonal if operand has same label.
-    # Then use _labels_perm_idx to transpose all operands to align dimensions with output.
-    adjust_operands = []
-    for idx, operand in enumerate(operands):
-        idle_dim = -1
-        align_axis = [idle_dim] * align_rank
-        label_dims = [idle_dim] * 53
-        dim = 0
-
-        for label in l_equationlst[idx]:
-            if label_dims[label] != idle_dim:
-                operand = ops.diagonal(operand, 0, label_dims[label], dim)
-                diag_perm = []
-                diag_dim = 0
-                for i in range(len(operand.shape)):
-                    if i == label_dims[label]:
-                        diag_perm.append(len(operand.shape) - 1)
-                    else:
-                        diag_perm.append(diag_dim)
-                        diag_dim += 1
-                operand = permute(operand, tuple(diag_perm))
-            else:
-                label_dims[label] = dim
-                if label == 52:
-                    for ell_idx in range(ellipsis_dimnum):
-                        align_axis[labels_perm_idx[label] + ell_idx] = dim
-                        dim += 1
-                else:
-                    align_axis[labels_perm_idx[label]] = dim
-                    dim += 1
-        if len(operand.shape) < align_rank:
-            for i, axis in enumerate(align_axis):
-                if axis == idle_dim:
-                    align_axis[i] = dim
-                    dim += 1
-            missing_dims = [1] * (align_rank - len(operand.shape))
-            operand_shape = list(operand.shape) + missing_dims
-            operand = ops.reshape(operand, operand_shape)
-        operand = permute(operand, tuple(align_axis))
-        adjust_operands.append(operand)
-    return adjust_operands
-
-
-def _einsum_find_dimlastop(align_rank, operands, adjust_operands):
-    """Find dim last operand."""
-    dim_last_op = [0] * align_rank
-    has_zero_dim = False
-    for dim in range(align_rank):
-        broadcast_dim = adjust_operands[0].shape[dim]
-        for idx in range(1, len(adjust_operands)):
-            other_dim = adjust_operands[idx].shape[dim]
-            if broadcast_dim != other_dim and broadcast_dim != 1 and other_dim != 1:
-                err_msg = "For einsum, operands do not broadcast after align to output [shapes :origin -> adjust]:"
-                for i in range(len(operands)):
-                    err_msg += f" {operands[i].shape} -> {adjust_operands[i].shape}"
-                raise ValueError(err_msg)
-            if other_dim != 1:
-                dim_last_op[dim] = idx
-                broadcast_dim = other_dim
-        has_zero_dim = has_zero_dim or broadcast_dim == 0
-    return dim_last_op, has_zero_dim
-
-
-def _einsum_multiplication(sum_dims, l_tensor, r_tensor):
-    """Compute bmm for einsum."""
-    batch_dims = []
-    lonly_dims = []
-    ronly_dims = []
-    batch_size = 1
-    lonly_size = 1
-    ronly_size = 1
-    sum_size = 1
-
-    l_shape = l_tensor.shape
-    r_shape = r_tensor.shape
-
-    # Compute sum if dim is in sum_dims and get shapes for bmm
-    for i in range(len(l_shape)):
-        sum_l = l_shape[i] > 1
-        sum_r = r_shape[i] > 1
-        if i in sum_dims:
-            if sum_l and sum_r:
-                sum_size *= l_shape[i]
-            elif sum_l:
-                l_tensor = ops.auto_generate.sum_ext(l_tensor, i, True)
-            elif sum_r:
-                r_tensor = ops.auto_generate.sum_ext(r_tensor, i, True)
-        elif sum_l and sum_r:
-            batch_dims.append(i)
-            batch_size *= l_shape[i]
-        elif sum_l:
-            lonly_dims.append(i)
-            lonly_size *= l_shape[i]
-        else:
-            ronly_dims.append(i)
-            ronly_size *= r_shape[i]
-
-    # Compute the einsum bmm operators pipeline.
-    # The whole operators pipeline is transpose(in) -> reshape(in) -> bmm(in) -> reshape(out) -> transpose(out).
-    l_reshape_shape = (batch_size, lonly_size, sum_size)
-    r_reshape_shape = (batch_size, sum_size, ronly_size)
-
-    out_reshape_shape = [l_shape[dim] for dim in batch_dims]
-    out_reshape_shape += [l_shape[dim] for dim in lonly_dims]
-    out_reshape_shape += [1 for _ in sum_dims]
-    out_reshape_shape += [r_shape[dim] for dim in ronly_dims]
-
-    l_perm_axis = batch_dims + lonly_dims + sum_dims + ronly_dims
-    r_perm_axis = batch_dims + sum_dims + ronly_dims + lonly_dims
-    out_perm_axis = [-1] * len(out_reshape_shape)
-
-    out_dim = 0
-    for idx in range(len(l_perm_axis)):
-        out_perm_axis[l_perm_axis[idx]] = out_dim
-        out_dim += 1
-
-    l_tensor = permute(l_tensor, tuple(l_perm_axis))
-    l_tensor = ops.reshape(l_tensor, l_reshape_shape)
-
-    r_tensor = permute(r_tensor, tuple(r_perm_axis))
-    r_tensor = ops.reshape(r_tensor, r_reshape_shape)
-
-    output = bmm_ext(l_tensor, r_tensor)
-    output = ops.reshape(output, out_reshape_shape)
-    output = permute(output, tuple(out_perm_axis))
-
-    output_origin_shape = output.shape
-    output_squeeze_shape = []
-    for dim in range(len(output_origin_shape)):
-        if dim not in sum_dims:
-            output_squeeze_shape.append(output_origin_shape[dim])
-
-    return ops.reshape(output, output_squeeze_shape)
-
-
-def _einsum(equation, operands):
-    '''Einsum main process'''
-    _l_equationlst, _r_equationlst, _arrow_exist = _einsum_parse_equation(
-        equation)
-    _ellipsis_dimnum, _labels_count, _align_rank = _einsum_parse_labels(
-        _l_equationlst, operands)
-    _output_rank, _labels_perm_idx = _einsum_infer_output(
-        _r_equationlst, _arrow_exist, _ellipsis_dimnum, _labels_count)
-    _adjust_operands = _einsum_adjust_operands(operands, _l_equationlst, _ellipsis_dimnum, _labels_perm_idx,
-                                               _align_rank)
-    _dim_last_op, _has_zero_dim = _einsum_find_dimlastop(
-        _align_rank, operands, _adjust_operands)
-    _result = _adjust_operands[0]
-
-    # Fast path if operands has zero dim.
-    if _has_zero_dim:
-        output_shape = []
-        for dim in range(_output_rank):
-            output_shape.append(_adjust_operands[_dim_last_op[dim]].shape[dim])
-        return ops.auto_generate.zeros(output_shape, dtype=_result.dtype)
-
-    # Sum or squeeze dimensions that is 1 for all rest operands.
-    _reduce_dim = _output_rank
-    for dim in range(_output_rank, _align_rank):
-        if _dim_last_op[dim] == 0:
-            if _result.shape[_reduce_dim] == 1:
-                _result = ops.auto_generate.pyboost_inner_prim.squeeze_impl(_result, _reduce_dim)
-            else:
-                _result = ops.auto_generate.sum_ext(_result, _reduce_dim)
-        else:
-            _reduce_dim += 1
-
-    # Compute multiplication if operands are more than two.
-    for i in range(1, len(_adjust_operands)):
-        operand = _adjust_operands[i]
-        dim = _output_rank
-        sum_dims = []
-        for j in range(_output_rank, _align_rank):
-            if _dim_last_op[j] < i:
-                operand = ops.auto_generate.pyboost_inner_prim.squeeze_impl(operand, dim)
-            elif _dim_last_op[j] == i:
-                if _result.shape[dim] == 1:
-                    operand = ops.auto_generate.sum_ext(operand, dim)
-                    _result = ops.auto_generate.pyboost_inner_prim.squeeze_impl(_result, dim)
-                else:
-                    sum_dims.append(dim)
-                    dim += 1
-            else:
-                dim += 1
-
-        if sum_dims == []:
-            _result = mul_ext(_result, operand)
-        elif len(sum_dims) == len(_result.shape):
-            _result = ops.auto_generate.dot(ops.auto_generate.flatten_ext(_result),
-                                            ops.auto_generate.flatten_ext(operand))
-        else:
-            _result = _einsum_multiplication(sum_dims, _result, operand)
-
-    return _result
-
-
 def einsum_ext(equation, *operands):
     r"""
     According to the Einstein summation Convention (Einsum),
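
This hunk removes the pure-Python einsum fallback, which lowered each pairwise contraction to a transpose -> reshape -> batched-matmul -> reshape pipeline (see _einsum_multiplication above); 2.7.0 delegates to the C++ overload instead. A NumPy sketch of the lowering idea for "ij,jk->ik" with a trivial batch axis:

import numpy as np

a = np.arange(6.0).reshape(2, 3)
b = np.arange(12.0).reshape(3, 4)
out = (a.reshape(1, 2, 3) @ b.reshape(1, 3, 4)).reshape(2, 4)  # bmm form
assert np.allclose(out, np.einsum('ij,jk->ik', a, b))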
@@ -10208,14 +9878,9 @@ def einsum_ext(equation, *operands):
          [3. 6.]
          [4. 8.]]
     """
-    _equation, _operands = _einsum_convert_sublist(equation, *operands)
-    _einsum_check_inputargs(_equation, _operands)
-
-    for operand in _operands:
-        if ops.is_sequence_shape_unknown(operand.shape) or ops.is_sequence_value_unknown(operand.shape):
-            raise ValueError(f"For einsum, the element of 'operands' can't be dynamic shape or dynamic rank.")
+    _equation, _operands = _einsum_convert_sublist(equation, operands)
 
-    return _einsum(_equation, _operands)
+    return ops.functional_overload.einsum(_equation, _operands)
 
 
 def cumprod(input, dim, dtype=None):
@@ -10524,23 +10189,23 @@ def logical_xor(input, other):
 
     Examples:
         >>> import mindspore
-        >>> x = mindspore.tensor([True, False, True], mindspore.bool_)
-        >>> y = mindspore.tensor([True, True, False], mindspore.bool_)
+        >>> x = mindspore.tensor([True, False, True], mindspore.bool)
+        >>> y = mindspore.tensor([True, True, False], mindspore.bool)
         >>> output = mindspore.ops.logical_xor(x, y)
        >>> print(output)
        [False True True]
-        >>> x = mindspore.tensor(1, mindspore.bool_)
-        >>> y = mindspore.tensor(0, mindspore.bool_)
+        >>> x = mindspore.tensor(1, mindspore.bool)
+        >>> y = mindspore.tensor(0, mindspore.bool)
        >>> output = mindspore.ops.logical_xor(x, y)
        >>> print(output)
        True
        >>> x = True
-        >>> y = mindspore.tensor(0, mindspore.bool_)
+        >>> y = mindspore.tensor(0, mindspore.bool)
        >>> output = mindspore.ops.logical_xor(x, y)
        >>> print(output)
        True
        >>> x = True
-        >>> y = mindspore.tensor([True, False], mindspore.bool_)
+        >>> y = mindspore.tensor([True, False], mindspore.bool)
        >>> output = mindspore.ops.logical_xor(x, y)
        >>> print(output)
        [False True]
@@ -10999,7 +10664,7 @@ def _canonicalize_fft_shape_and_dim(input, shape, dim):
 
 
 def as_strided(x, shape=None, strides=None):
-    n = np.dtype(mstype.dtype_to_nptype(x.dtype)).itemsize
+    n = np.dtype(mstype._dtype_to_nptype(x.dtype)).itemsize  # pylint:disable=protected-access
     strides = tuple(np.array(strides) * n)
     if x.dtype == mstype.bfloat16:
         return Tensor(np.lib.stride_tricks.as_strided(x.float().asnumpy(), shape, strides, False, True), dtype=x.dtype)
@@ -11050,7 +10715,7 @@ def _permute_input(input, input_dim, ret_dim):
         (dim_permute_a if not is_transformed_dim[i] else dim_permute_b).append(value)
 
     # strides
-    type_size = np.dtype(mstype.dtype_to_nptype(input.dtype)).itemsize
+    type_size = np.dtype(mstype._dtype_to_nptype(input.dtype)).itemsize  # pylint:disable=protected-access
     input_strides = [int(x / type_size) for x in input.strides]
 
     def cmp(x, y):
@@ -11163,7 +10828,7 @@ def _handle_fftwithsize_output(out, input_dim, batch_dims, dim_permute, out_size
     for i in range(batch_dims, input_dim):
         out_strides[dim_permute[i]] = out.strides[1 + (i - batch_dims)]
 
-    type_size = np.dtype(mstype.dtype_to_nptype(out.dtype)).itemsize
+    type_size = np.dtype(mstype._dtype_to_nptype(out.dtype)).itemsize  # pylint:disable=protected-access
     if out.shape != out_sizes or out.strides != out_strides:
         out = as_strided(out, out_sizes, [int(i / type_size) for i in out_strides])
     return out
@@ -11541,7 +11206,7 @@ def count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):
             Default ``()`` , which counts all non-zero elements.
         keep_dims (bool, optional): Whether to maintain dimensions specified by `axis`.
             Default ``False`` , don't keep these dimensions.
-        dtype (Union[Number, mindspore.bool\_], optional): The data type returned.
+        dtype (Union[Number, mindspore.bool], optional): The data type returned.
             Default ``mstype.int32`` .
 
 
@@ -12430,11 +12095,11 @@ def mul_ext(input, other):
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
             a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
 
     Returns:
         Tensor, the shape is the same as the one after broadcasting,
@@ -12466,184 +12131,24 @@ def mul_ext(input, other):
     return mul(input, other)
 
 
-def _for_each_transpose(inputs):
-    inputs_t = []
-    for input_i in inputs:
-        input_i_t = transpose_ext(input_i, -1, -2)
-        inputs_t.append(input_i_t)
-    return inputs_t
-
-
-def _is_transposed(input_tensor):
-    dim = input_tensor.dim()
-    if dim < 2 or dim > 3:
-        raise ValueError("input tensor of _is_transposed should be either 2- or 3-dimensional.")
-    input_shape = input_tensor.shape
-    input_strides = input_tensor.stride()
-    if input_strides[-2] == 1 and input_strides[-1] == input_shape[-2]:
-        return True
-    return False
-
-
-def gmm(x, weight, *, bias=None, group_list=None, group_type=0):
+def gmm_backward(grad, x, weight, group_list=None, group_list_type=0):
     r"""
-    Grouping matrix multiplication.
-
-    .. warning::
-        - This is an experimental API that is subject to change or deletion.
-        - `group_type` must be constant.
-
-    .. note::
-        - When `group_type` is 2, `weight` must be a non-continuous tensor after transpose.
-        - Only when `group_type` is 0 and `bias` is None, the reverse derivative is supported,
-          which is implemented by the function gmm_backward.
-
-    Args:
-        x (tuple[Tensor]): The first tensors to be multiplied.
-        weight (tuple[Tensor]): The second tensors to be multiplied.
-
-    Keyword Args:
-        bias (tuple[Tensor], optional): Biases added to outputs. In the training scenario,
-            the bias only supoorts None. Default: ``None`` .
-
-        group_list (Union[list[int], tuple(int)], optional): Represents the index of
-            the different groups on the grouping axis. It must be a non-negative ascending
-            sequence . Default: ``None`` .
-
-            If `group_type` is 0, the last element in `group_list` should be equal to the
-            first dimension of the tensor in `x` .
-
-            If `group_type` is 2, the last element in `group_list` should be equal to the
-            second dimension of the tensor in `x` .
-
-        group_type (int, optional): Represents the dim that need to be grouped. Default: ``0`` .
-            For example, :math: `C[m,n] = A[m,k] \times B[k,n]`.
-
-            If `group_type` is 0, it means that the m-axis is grouped, where tensors in `x`
-            should be 2-D, tensors in `weight` should be 3-D, and the tensors of result would
-            be 2-D.
-
-            If `group_type` is 2, it means that the k-axis is grouped, where each tensor in `x`
-            and `weight` should be 2-D, and the tensors of result would be 3-D.
-
-    Returns:
-        tuple[Tensor], the results of grouping matrix multiplication.
-
-    Raises:
-        TypeError: If `group_type` is not a int.
-        ValueError: If `group_type` is invalid.
-        ValueError: If the length of `x` or `weight` is not 1.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.random.uniform(0,1, (10, 20)).astype(np.float32))
-        >>> weight = Tensor(np.random.uniform(0,1, (4, 20, 8)).astype(np.float32))
-        >>> group_list = [2, 6, 8, 10]
-        >>> y = ops.function.math_func.gmm([x,], [weight,], group_list=group_list)
-        >>> print(y[0].shape)
-        >>> [10, 8]
-    """
-    return grouped_matmul_v2(x, weight, bias=bias, group_list=group_list,
-                             split_item=3, group_type=group_type)
-
-
-def gmm_backward(grad, x, weight, *, group_list=None):
-    r"""
-    the grad of gmm
+    the grad of ops.function.math_func.gmm
     """
-    gradients = ops.auto_generate.gmm_backward(grad, x, weight, group_list)
+    gradients = ops.functional_overload.gmm_backward(grad, x, weight, group_list, group_list_type)
     dx = gradients[:len(x)]
     dw = gradients[-len(weight):]
     db = []
     return dx, dw, db
 
 
-def gmm_v2(x, weight, *, bias=None, group_list=None, group_type=0, group_list_type=0):
-    r"""
-    Grouping matrix multiplication.
-
-    .. warning::
-        - This is an experimental API that is subject to change or deletion.
-        - `group_type` must be constant.
-
-    .. note::
-        - When `group_type` is 2, the tensors in `weight` must be non-continuous tensors after
-          transpose.
-        - Only when `group_type` is 0 and `bias` is None, the reverse derivative is supported,
-          which is implemented by the function gmm_v2_backward.
-
-    Args:
-        x (tuple[Tensor]): The first tensors to be multiplied.
-        weight (tuple[Tensor]): The second tensors to be multiplied.
-
-    Keyword Args:
-        bias (tuple[Tensor], optional): Biases added to outputs. In the training scenario,
-            the bias only supoorts None. Default: ``None`` .
-
-        group_list (Tensor, optional): Represents the index of the different groups on
-            the grouping axis. Supported dtypes: int64. Default: ``None`` .
-
-            If `group_list_type` is 0, it must be a non-negative ascending sequence.
-            And when `group_type` is 0, the last element in `group_list` should be equal to
-            the first dimension of the tensor in `x` . When `group_type` is 2, the last element
-            in `group_list` should be equal to the second dimension of the tensor in `x` .
-
-            If `group_list_type` is 1, the value in `group_list` are the size of each group.
-
-        group_type (int, optional): Represents the axes that need to be grouped. For example,
-            :math: `C[m,n] = A[m,k] \times B[k,n]`. Default: ``0`` .
-
-            If `group_type` is 0, it means that the m-axis is grouped, where tensors in `x`
-            should be 2-D, tensors in `weight` should be 3-D, and the tensors of result would be
-            2-D.
-
-            If `group_type` is 2, it means that the k-axis is grouped, where each tensor in `x`
-            and `weight` should be 2-D, and the tensors of result would be 3-D.
-
-        group_list_type (int, optional): If it's 0, the value in `group_list` are the cumsum
-            result of the size of each group. If it's 1, the value in `group_list` are the size
-            of each group.
-
-    Returns:
-        tuple[Tensor], the results of grouping matrix multiplication.
-
-    Raises:
-        TypeError: If `group_type` is not a int.
-        ValueError: If `group_type` is invalid.
-        ValueError: If the length of `x` or `weight` is not 1.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.random.uniform(0,1, (10, 20)).astype(np.float32))
-        >>> weight = Tensor(np.random.uniform(0,1, (4, 20, 8)).astype(np.float32))
-        >>> group_list = Tensor([2, 4, 2, 2])
-        >>> y = ops.function.math_func.gmm_v2([x,], [weight,], group_list=group_list, group_list_type=1)
-        >>> print(y[0].shape)
-        >>> [10, 8]
-    """
-    return grouped_matmul_v4(x, weight, bias=bias, group_list=group_list, split_item=3,
-                             group_type=group_type, group_list_type=group_list_type, act_type=0)
-
-
-def gmm_v2_backward(grad, x, weight, *, group_list=None, group_list_type=0):
+def gmm_backward_fusion(grad, weight, group_list=None, group_list_type=0):
     r"""
-    the grad of gmm_v2
+    the grad of ops.function.math_func.gmm, only dx
     """
-    gradients = ops.auto_generate.gmm_v2_backward(grad, x, weight, group_list, group_list_type)
-    dx = gradients[:len(x)]
-    dw = gradients[-len(weight):]
+    dx = ops.functional_overload.gmm_backward_fusion(grad, weight, group_list, group_list_type)
+    dw = []
     db = []
-
     return dx, dw, db
 
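For reference, the grouped-matmul semantics that the deleted gmm/gmm_v2 docstrings describe (m-axis grouping, with `group_list` holding either cumulative indices or per-group sizes) can be sketched in NumPy; the names here are illustrative stand-ins, not the MindSpore kernel:

    import numpy as np

    def gmm_reference(x, weight, group_list, group_list_type=0):
        """NumPy sketch: split rows of x along the m-axis and multiply each
        slice with its own weight matrix (the group_type == 0 case only)."""
        if group_list_type == 1:                  # sizes -> cumulative indices
            group_list = np.cumsum(group_list).tolist()
        out, start = [], 0
        for i, end in enumerate(group_list):
            out.append(x[start:end] @ weight[i])  # [g_i, k] @ [k, n] -> [g_i, n]
            start = end
        return np.concatenate(out, axis=0)        # [m, n], m = last index

    x = np.random.uniform(0, 1, (10, 20)).astype(np.float32)
    weight = np.random.uniform(0, 1, (4, 20, 8)).astype(np.float32)
    print(gmm_reference(x, weight, [2, 6, 8, 10]).shape)                    # (10, 8)
    print(gmm_reference(x, weight, [2, 4, 2, 2], group_list_type=1).shape)  # (10, 8)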