mindspore: 2.6.0-cp310-cp310-win_amd64.whl → 2.7.0-cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (455)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +2 -2
  5. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +42 -11
  9. mindspore/_extends/builtin_operations.py +3 -3
  10. mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
  11. mindspore/_extends/optimize/cell_utils.py +96 -0
  12. mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
  13. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  14. mindspore/_extends/parse/__init__.py +3 -3
  15. mindspore/_extends/parse/compile_config.py +44 -22
  16. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -2
  17. mindspore/_extends/parse/parser.py +64 -83
  18. mindspore/_extends/parse/resources.py +39 -0
  19. mindspore/_extends/parse/standard_method.py +47 -14
  20. mindspore/_extends/parse/trope.py +8 -1
  21. mindspore/_extends/pijit/__init__.py +1 -2
  22. mindspore/_extends/pijit/pijit_func_white_list.py +2 -5
  23. mindspore/amp.py +4 -22
  24. mindspore/atlprov.dll +0 -0
  25. mindspore/avcodec-59.dll +0 -0
  26. mindspore/avdevice-59.dll +0 -0
  27. mindspore/avfilter-8.dll +0 -0
  28. mindspore/avformat-59.dll +0 -0
  29. mindspore/avutil-57.dll +0 -0
  30. mindspore/boost/adasum.py +1 -1
  31. mindspore/boost/boost_cell_wrapper.py +4 -4
  32. mindspore/c1.dll +0 -0
  33. mindspore/c1xx.dll +0 -0
  34. mindspore/c2.dll +0 -0
  35. mindspore/common/__init__.py +43 -12
  36. mindspore/common/_grad_function.py +2 -1
  37. mindspore/common/_pijit_context.py +28 -7
  38. mindspore/common/_stub_tensor.py +1 -209
  39. mindspore/common/_tensor_cpp_method.py +1 -1
  40. mindspore/common/_tensor_docs.py +177 -52
  41. mindspore/common/_utils.py +9 -1
  42. mindspore/common/api.py +338 -208
  43. mindspore/common/dtype.py +108 -57
  44. mindspore/common/dump.py +11 -16
  45. mindspore/common/dynamic_shape/__init__.py +0 -0
  46. mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +17 -23
  47. mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
  48. mindspore/common/file_system.py +59 -9
  49. mindspore/common/generator.py +2 -3
  50. mindspore/common/hook_handle.py +33 -5
  51. mindspore/common/jit_config.py +1 -1
  52. mindspore/common/jit_trace.py +84 -105
  53. mindspore/common/np_dtype.py +3 -3
  54. mindspore/common/parameter.py +27 -29
  55. mindspore/common/recompute.py +5 -7
  56. mindspore/common/sparse_tensor.py +0 -3
  57. mindspore/common/symbol.py +0 -1
  58. mindspore/common/tensor.py +84 -133
  59. mindspore/communication/_comm_helper.py +46 -4
  60. mindspore/communication/management.py +79 -7
  61. mindspore/context.py +47 -38
  62. mindspore/dataset/__init__.py +1 -1
  63. mindspore/dataset/audio/transforms.py +1 -1
  64. mindspore/dataset/core/config.py +38 -4
  65. mindspore/dataset/engine/datasets.py +350 -322
  66. mindspore/dataset/engine/datasets_user_defined.py +69 -23
  67. mindspore/dataset/engine/iterators.py +2 -2
  68. mindspore/dataset/engine/obs/config_loader.py +2 -2
  69. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
  70. mindspore/dataset/transforms/c_transforms.py +2 -2
  71. mindspore/dataset/transforms/py_transforms.py +7 -3
  72. mindspore/dataset/transforms/transforms.py +10 -6
  73. mindspore/dataset/vision/__init__.py +1 -1
  74. mindspore/dataset/vision/py_transforms.py +8 -8
  75. mindspore/dataset/vision/transforms.py +17 -5
  76. mindspore/dataset/vision/utils.py +632 -21
  77. mindspore/dataset/vision/validators.py +1 -0
  78. mindspore/device_context/ascend/device.py +1 -1
  79. mindspore/device_context/ascend/op_tuning.py +35 -1
  80. mindspore/device_context/gpu/__init__.py +2 -2
  81. mindspore/device_context/gpu/device.py +1 -1
  82. mindspore/device_context/gpu/op_precision.py +4 -2
  83. mindspore/device_context/gpu/op_tuning.py +6 -3
  84. mindspore/device_manager.py +16 -9
  85. mindspore/dnnl.dll +0 -0
  86. mindspore/dpcmi.dll +0 -0
  87. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +5 -4
  88. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  89. mindspore/experimental/optim/adadelta.py +13 -20
  90. mindspore/experimental/optim/adagrad.py +15 -22
  91. mindspore/experimental/optim/adam.py +17 -24
  92. mindspore/experimental/optim/adamax.py +14 -22
  93. mindspore/experimental/optim/adamw.py +28 -34
  94. mindspore/experimental/optim/asgd.py +15 -25
  95. mindspore/experimental/optim/lr_scheduler.py +27 -45
  96. mindspore/experimental/optim/nadam.py +14 -24
  97. mindspore/experimental/optim/optimizer.py +13 -23
  98. mindspore/experimental/optim/radam.py +18 -24
  99. mindspore/experimental/optim/rmsprop.py +14 -25
  100. mindspore/experimental/optim/rprop.py +15 -26
  101. mindspore/experimental/optim/sgd.py +9 -19
  102. mindspore/hal/__init__.py +4 -4
  103. mindspore/hal/contiguous_tensors_handle.py +2 -2
  104. mindspore/hal/memory.py +1 -0
  105. mindspore/include/api/cell.h +65 -5
  106. mindspore/include/api/cfg.h +24 -7
  107. mindspore/include/api/context.h +1 -0
  108. mindspore/include/api/delegate.h +10 -2
  109. mindspore/include/api/dual_abi_helper.h +100 -19
  110. mindspore/include/api/graph.h +14 -1
  111. mindspore/include/api/kernel.h +16 -3
  112. mindspore/include/api/kernel_api.h +9 -1
  113. mindspore/include/api/metrics/accuracy.h +9 -0
  114. mindspore/include/api/model.h +8 -1
  115. mindspore/include/api/model_group.h +4 -0
  116. mindspore/include/api/model_parallel_runner.h +2 -0
  117. mindspore/include/api/status.h +48 -10
  118. mindspore/include/api/types.h +8 -3
  119. mindspore/include/c_api/model_c.h +0 -58
  120. mindspore/include/c_api/tensor_c.h +0 -26
  121. mindspore/include/dataset/constants.h +9 -0
  122. mindspore/include/dataset/vision_ascend.h +1 -1
  123. mindspore/jpeg62.dll +0 -0
  124. mindspore/mindrecord/tools/cifar10.py +61 -11
  125. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
  126. mindspore/mindspore_backend_common.dll +0 -0
  127. mindspore/mindspore_backend_manager.dll +0 -0
  128. mindspore/mindspore_common.dll +0 -0
  129. mindspore/mindspore_core.dll +0 -0
  130. mindspore/mindspore_cpu_res_manager.dll +0 -0
  131. mindspore/mindspore_dump.dll +0 -0
  132. mindspore/mindspore_frontend.dll +0 -0
  133. mindspore/mindspore_glog.dll +0 -0
  134. mindspore/mindspore_memory_pool.dll +0 -0
  135. mindspore/mindspore_ms_backend.dll +0 -0
  136. mindspore/mindspore_ops.dll +0 -0
  137. mindspore/mindspore_ops_host.dll +0 -0
  138. mindspore/mindspore_ops_kernel_common.dll +0 -0
  139. mindspore/mindspore_profiler.dll +0 -0
  140. mindspore/mindspore_pyboost.dll +0 -0
  141. mindspore/mindspore_pynative.dll +0 -0
  142. mindspore/mindspore_res_manager.dll +0 -0
  143. mindspore/mindspore_runtime_pipeline.dll +0 -0
  144. mindspore/mint/__init__.py +4 -44
  145. mindspore/mint/distributed/__init__.py +5 -0
  146. mindspore/mint/distributed/distributed.py +425 -19
  147. mindspore/mint/nn/__init__.py +1 -1
  148. mindspore/mint/nn/functional.py +53 -6
  149. mindspore/mint/nn/layer/_functions.py +163 -294
  150. mindspore/mint/nn/layer/activation.py +8 -6
  151. mindspore/mint/nn/layer/conv.py +125 -101
  152. mindspore/mint/nn/layer/normalization.py +11 -25
  153. mindspore/mint/optim/adam.py +19 -18
  154. mindspore/mint/optim/adamw.py +14 -8
  155. mindspore/mint/optim/sgd.py +5 -5
  156. mindspore/msobj140.dll +0 -0
  157. mindspore/mspdb140.dll +0 -0
  158. mindspore/mspdbcore.dll +0 -0
  159. mindspore/mspdbst.dll +0 -0
  160. mindspore/mspft140.dll +0 -0
  161. mindspore/msvcdis140.dll +0 -0
  162. mindspore/msvcp140_1.dll +0 -0
  163. mindspore/msvcp140_2.dll +0 -0
  164. mindspore/msvcp140_atomic_wait.dll +0 -0
  165. mindspore/msvcp140_codecvt_ids.dll +0 -0
  166. mindspore/nn/cell.py +488 -620
  167. mindspore/nn/grad/cell_grad.py +11 -12
  168. mindspore/nn/layer/activation.py +36 -36
  169. mindspore/nn/layer/basic.py +74 -77
  170. mindspore/nn/layer/channel_shuffle.py +4 -4
  171. mindspore/nn/layer/combined.py +4 -2
  172. mindspore/nn/layer/conv.py +86 -85
  173. mindspore/nn/layer/dense.py +9 -7
  174. mindspore/nn/layer/embedding.py +50 -52
  175. mindspore/nn/layer/image.py +38 -40
  176. mindspore/nn/layer/math.py +111 -112
  177. mindspore/nn/layer/normalization.py +56 -44
  178. mindspore/nn/layer/pooling.py +58 -63
  179. mindspore/nn/layer/rnn_cells.py +33 -33
  180. mindspore/nn/layer/rnns.py +56 -56
  181. mindspore/nn/layer/thor_layer.py +74 -73
  182. mindspore/nn/layer/transformer.py +11 -1
  183. mindspore/nn/learning_rate_schedule.py +20 -20
  184. mindspore/nn/loss/loss.py +79 -81
  185. mindspore/nn/optim/adam.py +2 -4
  186. mindspore/nn/optim/adasum.py +2 -2
  187. mindspore/nn/optim/lamb.py +1 -3
  188. mindspore/nn/optim/optimizer.py +1 -1
  189. mindspore/nn/optim/tft_wrapper.py +2 -3
  190. mindspore/nn/optim/thor.py +2 -2
  191. mindspore/nn/probability/distribution/_utils/utils.py +2 -2
  192. mindspore/nn/probability/distribution/exponential.py +2 -1
  193. mindspore/nn/probability/distribution/poisson.py +2 -1
  194. mindspore/nn/sparse/sparse.py +3 -3
  195. mindspore/nn/wrap/cell_wrapper.py +73 -42
  196. mindspore/nn/wrap/grad_reducer.py +37 -52
  197. mindspore/nn/wrap/loss_scale.py +72 -74
  198. mindspore/numpy/array_creations.py +7 -7
  199. mindspore/numpy/fft.py +1 -1
  200. mindspore/numpy/math_ops.py +1 -1
  201. mindspore/numpy/utils_const.py +1 -1
  202. mindspore/opencv_core452.dll +0 -0
  203. mindspore/opencv_imgcodecs452.dll +0 -0
  204. mindspore/opencv_imgproc452.dll +0 -0
  205. mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
  206. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
  207. mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
  208. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  209. mindspore/{experimental/es/__init__.py → ops/_op_impl/cpu/joinedstr_op.py} +12 -6
  210. mindspore/ops/_vmap/vmap_array_ops.py +6 -13
  211. mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
  212. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +29 -10
  213. mindspore/ops/auto_generate/gen_extend_func.py +5 -55
  214. mindspore/ops/auto_generate/gen_ops_def.py +753 -273
  215. mindspore/ops/auto_generate/gen_ops_prim.py +1687 -958
  216. mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
  217. mindspore/ops/composite/__init__.py +10 -0
  218. mindspore/ops/composite/base.py +9 -5
  219. mindspore/ops/composite/multitype_ops/__init__.py +12 -1
  220. mindspore/ops/composite/multitype_ops/_compile_utils.py +132 -108
  221. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  222. mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
  223. mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
  224. mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
  225. mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
  226. mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
  227. mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
  228. mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
  229. mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
  230. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
  231. mindspore/ops/function/__init__.py +4 -1
  232. mindspore/ops/function/_add_attr_func.py +11 -6
  233. mindspore/ops/function/array_func.py +17 -100
  234. mindspore/ops/function/debug_func.py +8 -5
  235. mindspore/ops/function/grad/grad_func.py +5 -13
  236. mindspore/ops/function/math_func.py +65 -399
  237. mindspore/ops/function/nn_func.py +44 -61
  238. mindspore/ops/function/other_func.py +4 -1
  239. mindspore/ops/function/random_func.py +31 -4
  240. mindspore/ops/functional.py +2 -3
  241. mindspore/ops/functional_overload.py +486 -18
  242. mindspore/ops/op_info_register.py +21 -0
  243. mindspore/ops/operations/__init__.py +5 -2
  244. mindspore/ops/operations/_custom_ops_utils.py +675 -8
  245. mindspore/ops/operations/_inner_ops.py +14 -18
  246. mindspore/ops/operations/_sequence_ops.py +1 -1
  247. mindspore/ops/operations/array_ops.py +4 -50
  248. mindspore/ops/operations/comm_ops.py +186 -41
  249. mindspore/ops/operations/custom_ops.py +244 -175
  250. mindspore/ops/operations/debug_ops.py +55 -4
  251. mindspore/ops/operations/image_ops.py +13 -13
  252. mindspore/ops/operations/manually_defined/ops_def.py +27 -28
  253. mindspore/ops/operations/math_ops.py +8 -9
  254. mindspore/ops/operations/nn_ops.py +6 -7
  255. mindspore/ops/primitive.py +9 -20
  256. mindspore/ops/tensor_method.py +52 -11
  257. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
  258. mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
  259. mindspore/ops_generate/api/functions_cc_generator.py +58 -10
  260. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
  261. mindspore/ops_generate/common/base_generator.py +14 -0
  262. mindspore/ops_generate/common/gen_constants.py +7 -2
  263. mindspore/ops_generate/common/gen_utils.py +0 -19
  264. mindspore/ops_generate/common/op_proto.py +11 -4
  265. mindspore/ops_generate/common/template.py +88 -11
  266. mindspore/ops_generate/gen_ops.py +1 -1
  267. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
  268. mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
  269. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
  270. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
  271. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
  272. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
  273. mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -16
  274. mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
  275. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
  276. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
  277. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
  278. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
  279. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
  280. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
  281. mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
  282. mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
  283. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
  284. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
  285. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
  286. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
  287. mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
  288. mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
  289. mindspore/parallel/_auto_parallel_context.py +9 -17
  290. mindspore/parallel/_cell_wrapper.py +106 -40
  291. mindspore/parallel/_parallel_serialization.py +4 -3
  292. mindspore/parallel/_ps_context.py +4 -6
  293. mindspore/parallel/_tensor.py +167 -12
  294. mindspore/parallel/_transformer/moe.py +1 -1
  295. mindspore/parallel/_transformer/transformer.py +17 -12
  296. mindspore/parallel/_utils.py +5 -11
  297. mindspore/parallel/auto_parallel.py +33 -12
  298. mindspore/parallel/checkpoint_convert.py +3 -3
  299. mindspore/parallel/checkpoint_transform.py +5 -1
  300. mindspore/parallel/cluster/process_entity/_api.py +88 -49
  301. mindspore/parallel/cluster/process_entity/_utils.py +95 -7
  302. mindspore/parallel/cluster/run.py +48 -7
  303. mindspore/parallel/function/__init__.py +8 -1
  304. mindspore/parallel/function/reshard_func.py +7 -6
  305. mindspore/parallel/nn/__init__.py +15 -2
  306. mindspore/parallel/nn/parallel_cell_wrapper.py +50 -14
  307. mindspore/parallel/nn/parallel_grad_reducer.py +7 -14
  308. mindspore/parallel/shard.py +9 -23
  309. mindspore/parallel/transform_safetensors.py +468 -174
  310. mindspore/pgodb140.dll +0 -0
  311. mindspore/pgort140.dll +0 -0
  312. mindspore/profiler/__init__.py +2 -1
  313. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
  314. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
  315. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +3 -0
  316. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
  317. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  318. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
  319. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
  320. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
  321. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
  322. mindspore/profiler/analysis/task_manager.py +1 -1
  323. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
  324. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
  325. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +10 -9
  326. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +43 -23
  327. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
  328. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
  329. mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
  330. mindspore/profiler/common/constant.py +16 -0
  331. mindspore/profiler/common/msprof_cmd_tool.py +2 -2
  332. mindspore/profiler/common/path_manager.py +9 -0
  333. mindspore/profiler/common/profiler_context.py +50 -29
  334. mindspore/profiler/common/profiler_info.py +0 -16
  335. mindspore/profiler/common/profiler_meta_data.py +1 -0
  336. mindspore/profiler/common/profiler_op_analyse.py +239 -0
  337. mindspore/profiler/common/profiler_output_path.py +23 -8
  338. mindspore/profiler/common/profiler_parameters.py +128 -35
  339. mindspore/profiler/dynamic_profile/__init__.py +0 -0
  340. mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
  341. mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
  342. mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
  343. mindspore/profiler/dynamic_profiler.py +374 -338
  344. mindspore/profiler/envprofiler.py +42 -12
  345. mindspore/profiler/experimental_config.py +112 -7
  346. mindspore/profiler/mstx.py +33 -12
  347. mindspore/profiler/platform/__init__.py +2 -3
  348. mindspore/profiler/platform/cpu_profiler.py +10 -4
  349. mindspore/profiler/platform/npu_profiler.py +30 -20
  350. mindspore/profiler/profiler.py +218 -154
  351. mindspore/profiler/profiler_action_controller.py +65 -77
  352. mindspore/profiler/profiler_interface.py +2 -2
  353. mindspore/profiler/schedule.py +10 -4
  354. mindspore/rewrite/common/config.py +1 -0
  355. mindspore/rewrite/common/namer.py +1 -0
  356. mindspore/rewrite/common/namespace.py +1 -0
  357. mindspore/rewrite/node/node.py +31 -11
  358. mindspore/rewrite/parsers/assign_parser.py +1 -1
  359. mindspore/rewrite/symbol_tree/symbol_tree.py +2 -2
  360. mindspore/run_check/_check_version.py +7 -10
  361. mindspore/runtime/__init__.py +8 -6
  362. mindspore/runtime/event.py +10 -4
  363. mindspore/runtime/executor.py +87 -45
  364. mindspore/runtime/memory.py +22 -30
  365. mindspore/runtime/thread_bind_core.py +299 -165
  366. mindspore/safeguard/rewrite_obfuscation.py +12 -13
  367. mindspore/swresample-4.dll +0 -0
  368. mindspore/swscale-6.dll +0 -0
  369. mindspore/tbbmalloc.dll +0 -0
  370. mindspore/tinyxml2.dll +0 -0
  371. mindspore/train/_utils.py +9 -5
  372. mindspore/train/amp.py +43 -23
  373. mindspore/train/callback/__init__.py +5 -5
  374. mindspore/train/callback/_callback.py +2 -1
  375. mindspore/train/callback/_checkpoint.py +4 -14
  376. mindspore/train/callback/_flops_collector.py +11 -7
  377. mindspore/train/callback/_landscape.py +0 -1
  378. mindspore/train/callback/_train_fault_tolerance.py +72 -18
  379. mindspore/train/data_sink.py +15 -6
  380. mindspore/train/dataset_helper.py +14 -5
  381. mindspore/train/model.py +49 -47
  382. mindspore/train/serialization.py +168 -126
  383. mindspore/train/summary/summary_record.py +13 -2
  384. mindspore/train/train_thor/model_thor.py +2 -2
  385. mindspore/turbojpeg.dll +0 -0
  386. mindspore/utils/__init__.py +3 -2
  387. mindspore/utils/dryrun.py +0 -6
  388. mindspore/utils/runtime_execution_order_check.py +162 -78
  389. mindspore/utils/sdc_detect.py +68 -0
  390. mindspore/utils/utils.py +14 -17
  391. mindspore/vcmeta.dll +0 -0
  392. mindspore/vcruntime140.dll +0 -0
  393. mindspore/vcruntime140_1.dll +0 -0
  394. mindspore/version.py +1 -1
  395. {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/METADATA +5 -4
  396. {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/RECORD +400 -439
  397. mindspore/_deprecated/jit.py +0 -198
  398. mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
  399. mindspore/communication/_hccl_management.py +0 -297
  400. mindspore/experimental/es/embedding_service.py +0 -891
  401. mindspore/experimental/es/embedding_service_layer.py +0 -581
  402. mindspore/profiler/common/validator/__init__.py +0 -14
  403. mindspore/profiler/common/validator/validate_path.py +0 -84
  404. mindspore/profiler/parser/__init__.py +0 -14
  405. mindspore/profiler/parser/aicpu_data_parser.py +0 -272
  406. mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
  407. mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
  408. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
  409. mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
  410. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
  411. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
  412. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
  413. mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
  414. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
  415. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
  416. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
  417. mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
  418. mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
  419. mindspore/profiler/parser/ascend_flops_generator.py +0 -116
  420. mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
  421. mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
  422. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  423. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  424. mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
  425. mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
  426. mindspore/profiler/parser/ascend_op_generator.py +0 -334
  427. mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
  428. mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
  429. mindspore/profiler/parser/base_timeline_generator.py +0 -483
  430. mindspore/profiler/parser/container.py +0 -229
  431. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
  432. mindspore/profiler/parser/flops_parser.py +0 -531
  433. mindspore/profiler/parser/framework_enum.py +0 -111
  434. mindspore/profiler/parser/framework_parser.py +0 -464
  435. mindspore/profiler/parser/framework_struct.py +0 -61
  436. mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
  437. mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
  438. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
  439. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
  440. mindspore/profiler/parser/hccl_parser.py +0 -573
  441. mindspore/profiler/parser/hwts_log_parser.py +0 -122
  442. mindspore/profiler/parser/integrator.py +0 -526
  443. mindspore/profiler/parser/memory_usage_parser.py +0 -277
  444. mindspore/profiler/parser/minddata_analyzer.py +0 -800
  445. mindspore/profiler/parser/minddata_parser.py +0 -186
  446. mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
  447. mindspore/profiler/parser/op_intermediate_parser.py +0 -149
  448. mindspore/profiler/parser/optime_parser.py +0 -250
  449. mindspore/profiler/parser/profiler_info.py +0 -213
  450. mindspore/profiler/parser/step_trace_parser.py +0 -666
  451. mindspore/utils/hooks.py +0 -81
  452. /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
  453. {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/WHEEL +0 -0
  454. {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/entry_points.txt +0 -0
  455. {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/top_level.txt +0 -0
@@ -30,7 +30,6 @@ from mindspore.ops.primitive import prim_attr_register, Primitive, PrimitiveWith
 from mindspore._checkparam import check_hook_fn
 from mindspore.ops import operations as P
 
-
 SUMMARY_TENSOR_CACHE = []
 
 
@@ -310,6 +309,8 @@ class TensorDump(Primitive):
         """Initialize TensorDump."""
         if security.enable_security():
             raise ValueError('The TensorDump is not supported, please without `-s on` and recompile source.')
+        if input_output not in ['in', 'out']:
+            raise ValueError(f"The 'input_output' argument should be one of ['in', 'out'], but got: {input_output}")
         self.add_prim_attr("side_effect_io", True)
         self.add_prim_attr("channel_name", "ms_tensor_dump")
 
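With the added check, the primitive now rejects a bad `input_output` value at construction time instead of failing later in the dump pipeline. A minimal sketch of the new behavior (assuming a build without `-s on`; the argument name follows the check above, and 'both' is a deliberately invalid placeholder):

    import mindspore.ops as ops

    dump = ops.TensorDump(input_output='out')  # 'in' and 'out' are the only accepted values
    try:
        ops.TensorDump(input_output='both')    # hypothetical invalid value
    except ValueError as err:
        print(err)  # The 'input_output' argument should be one of ['in', 'out'], but got: both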
@@ -463,15 +464,65 @@ class InsertGradientOf(Primitive):
         self.f = f
 
 
+class DumpGradient(Primitive):
+    """
+    The `DumpGradient` Primitive is a hook, used to dump dout which pass to `x`.
+
+    Inputs:
+        - **path** (str) - The path of the file to be saved.
+        - **x** (Tensor) - Input Tensor of any dimension.
+        - **input_output** (str) - support value should be one of ['in', 'out'].
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore as ms
+        >>> from mindspore import ops
+        >>> from mindspore import Tensor
+        >>> ms.set_context(mode=ms.PYNATIVE_MODE)
+        >>> ms.set_device(device_target="Ascend")
+        >>> dg = ops.DumpGradient()
+        >>> def dout_dump_test(x, y):
+        ...     x = dg("x_dout.npy", x, 'out')
+        ...     print(f"x value is {x}")
+        ...     z = x * y
+        ...     return z
+        >>> ms_grad = ms.grad(dout_dump_test, grad_position=(0,1))
+        >>> x_grad, y_grad = ms_grad(Tensor(1, ms.float32), Tensor(2, ms.float32))
+        >>> print(f"x grad is {x_grad}, y_grad is {y_grad}")
+        >>> x_grad_npy = np.load("x_dout.npy")
+        >>> print(f"load x_grad from npy, x_grad is {x_grad_npy}")
+        x value is 1.0
+        x grad is 2.0, y grad is 1.0
+        load x_grad from npy, x_grad is array(2., dtype=float32)
+    """
+
+    @prim_attr_register
+    def __init__(self):
+        pass
+
+    def __call__(self, path, x, input_output):
+        def _dump_hook(dout):
+            P.TensorDump()(path, dout)
+            return dout
+        x = P.InsertGradientOf(_dump_hook)(x)
+        return x
+
+
 class Morph(PrimitiveWithInfer):
     """
     The `Morph` Primitive is used to encapsulate a user-defined function `fn`, allowing it to be used as a custom
     Primitive.
-    The primary application scenario of the `Morph` Primitive is in the auto-parallel case after `GRAPH_MODE` mode,
-    where collective communication operators are used within the user-defined `fn` to implement custom parallel
-    computation logic, especially in scenarios where `fn` involves dynamic shapes.
+
+    The `Morph` Primitive is primarily designed for custom graph optimization in GRAPH mode. For example, it supports
+    encapsulation of irregular collective communications (such as :func:`mindspore.ops.AlltoAllV`) in distributed
+    auto-parallel training scenarios.
+
     When the `Morph` Primitive is applied to inputs, it is actually the encapsulated user-defined function `fn` that is
     applied to the inputs.
+
     The main difference between the `Morph` Primitive and :func:`mindspore.ops.Custom` is that the former is expanded
     and replaced by the user-defined `fn` before automatic differentiation, so there is no need to implement a backward
     function.
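As its `__call__` body shows, the new `DumpGradient` primitive is a thin wrapper over two primitives that already existed in 2.6.0; a hand-rolled equivalent (a sketch, not part of the release) looks like:

    from mindspore import ops

    def dump_dout(path, x):
        # Register a hook on `x` that dumps the incoming gradient to `path`
        # via TensorDump, then passes it through unchanged.
        def _hook(dout):
            ops.TensorDump()(path, dout)
            return dout
        return ops.InsertGradientOf(_hook)(x)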
@@ -102,19 +102,19 @@ class AdjustContrastv2(Primitive):
     ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-    >>> images = Tensor([[[1.0, 2.0, 3.0],
-    ...                   [4.0, 5.0, 6.0]],
-    ...                  [[7.0, 8.0, 9.0],
-    ...                   [10.0, 11.0, 12.0]]], mstype.float32)
-    >>> contrast_factor = Tensor(2., mstype.float32)
-    >>> adjustcontrastv2 = AdjustContrastv2()
-    >>> output = adjustcontrastv2(images, contrast_factor)
-    >>> print(output)
-    [[[-3.5 -2.5 -1.5]
-      [ 2.5  3.5  4.5]]
-    <BLANKLINE>
-     [[ 8.5  9.5 10.5]
-      [14.5 15.5 16.5]]]
+        >>> images = Tensor([[[1.0, 2.0, 3.0],
+        ...                   [4.0, 5.0, 6.0]],
+        ...                  [[7.0, 8.0, 9.0],
+        ...                   [10.0, 11.0, 12.0]]], mstype.float32)
+        >>> contrast_factor = Tensor(2., mstype.float32)
+        >>> adjustcontrastv2 = AdjustContrastv2()
+        >>> output = adjustcontrastv2(images, contrast_factor)
+        >>> print(output)
+        [[[-3.5 -2.5 -1.5]
+          [ 2.5  3.5  4.5]]
+        <BLANKLINE>
+         [[ 8.5  9.5 10.5]
+          [14.5 15.5 16.5]]]
     """
 
     @prim_attr_register
@@ -26,7 +26,6 @@ from mindspore.ops._primitive_cache import _get_cache_prim
 from mindspore.ops._utils import arg_handler as handler
 from mindspore.ops._utils.arg_dtype_cast import DtypeToEnum
 from mindspore.common import Tensor, CSRTensor, COOTensor
-from mindspore.common._stub_tensor import _convert_stub
 from mindspore._c_expression import typing
 from mindspore._c_expression import TensorPy as Tensor_
 from mindspore._c_expression import pyboost_cast, pyboost_tile, pyboost_zeros, pyboost_ones, pyboost_type_as
@@ -978,7 +977,7 @@ class ScalarToTensor(PrimitiveWithInfer):
     def __call__(self, x, dtype=mstype.float32):
         validator.check_value_type("x", x, [bool, int, float], self.name)
         validator.check_subclass("dtype", dtype, mstype.number, self.name)
-        data_type = mstype.dtype_to_nptype(dtype)
+        data_type = mstype._dtype_to_nptype(dtype)  # pylint:disable=protected-access
         return Tensor(np.array(x, data_type), dtype=dtype)
 
 
@@ -1057,8 +1056,8 @@ class Tile(Primitive):
     def __call__(self, input, dims):
         # Add for jit context.
         if jit_context() and jit_context().compiled:
-            return None
-        res = _convert_stub(pyboost_tile(self, [input, dims]))
+            return jit_context().default_output()
+        res = pyboost_tile(self, [input, dims])
         # Add for jit context.
         if jit_context():
             if validator.is_stub_tensor(res):
@@ -1066,7 +1065,6 @@ class Tile(Primitive):
             return jit_context().run_op(self, res, input, dims)
         return res
 
-    # pylint: disable=missing-docstring
     def check_elim(self, *args):
         base_tensor, dims = args
         if not isinstance(base_tensor, Tensor):
@@ -1151,7 +1149,7 @@ def scalar_cast(input_x, input_y):
     Args:
         input_x (scalar): The input scalar.
         input_y (mindspore.dtype): The type to be cast. Only constant value is allowed.
-            The value should only be mindspore.int64, mindspore.float64, or mindspore.bool\_.
+            The value should only be mindspore.int64, mindspore.float64, or mindspore.bool.
 
     Returns:
         Scalar, the type is the same as the python type corresponding to `input_y`.
@@ -1230,11 +1228,11 @@ class Cast(Primitive):
     def __call__(self, input_x, dtype):
         # Add for jit context.
         if jit_context() and jit_context().compiled:
-            return None
+            return jit_context().default_output()
         should_elim, output = self.check_elim(input_x, dtype)
         if should_elim:
             return output
-        res = _convert_stub(pyboost_cast(self, [input_x, dtype_to_type_id('Cast', 'dtype', dtype)]))
+        res = pyboost_cast(self, [input_x, dtype_to_type_id('Cast', 'dtype', dtype)])
         # Add for jit context.
         if jit_context():
             if validator.is_stub_tensor(res):
@@ -1293,7 +1291,7 @@ class TypeAs(Primitive):
     def __call__(self, input, other):
         if input.dtype == other.dtype:
             return input
-        return _convert_stub(pyboost_type_as(self, [input, other]))
+        return pyboost_type_as(self, [input, other])
 
 
 def to_sequence(val):
@@ -1716,7 +1714,7 @@ def infer_value_for_Arange(start, end, step, dtype=None):
     if has_float:
         np_dtype = np.float32
     else:
-        np_dtype = mstype.dtype_to_nptype(typing.type_id_to_type(dtype))
+        np_dtype = mstype._dtype_to_nptype(typing.type_id_to_type(dtype))  # pylint:disable=protected-access
     return Tensor(np.arange(start, end, step, dtype=np_dtype))
 
 
@@ -1740,7 +1738,7 @@ def _infer_value_for_ReduceExtand(input_x, axis, keep_dims, dtype, prim_name):
     else:
         axis = tuple(range(len(value.shape)))
     if dtype is not None:
-        np_dtype = mstype.dtype_to_nptype(typing.type_id_to_type(dtype))
+        np_dtype = mstype._dtype_to_nptype(typing.type_id_to_type(dtype))  # pylint:disable=protected-access
         value = np_reduce_extand_func(value, axis, dtype=np_dtype, keepdims=keep_dims)
     else:
         value = np_reduce_extand_func(value, axis, keepdims=keep_dims)
@@ -1773,7 +1771,7 @@ def infer_value_for_Cast(x, dst_type_enum=None):
     if x is None or dst_type_enum is None:
         return None
     dst_type = typing.type_id_to_type(dst_type_enum)
-    src_type = mstype.get_py_obj_dtype(x)
+    src_type = mstype._get_py_obj_dtype(x)  # pylint:disable=protected-access
     validator.check_subclass("input_x", src_type, [mstype.tensor_type, mstype.number], "Cast")
     validator.check_subclass("type", dst_type, mstype.number, "Cast")
 
@@ -1783,7 +1781,7 @@ def infer_value_for_Cast(x, dst_type_enum=None):
         dst_type = dst_type.element_type()
 
     value = None
-    np_dst_type = mstype.dtype_to_nptype(dst_type)
+    np_dst_type = mstype._dtype_to_nptype(dst_type)  # pylint:disable=protected-access
     if isinstance(x, (int, float)):
         value = Tensor(np.array(x).astype(np_dst_type), dtype=dst_type)
     else:
@@ -2070,9 +2068,9 @@ class Ones(Primitive):
     def __call__(self, size, type=None):
         # Add for jit context.
         if jit_context() and jit_context().compiled:
-            return None
-        res = _convert_stub(pyboost_ones(self, [size, type if type is None \
-            else handler.dtype_to_type_id('Ones', 'type', type)]))
+            return jit_context().default_output()
+        res = pyboost_ones(self, [size, type if type is None \
+            else handler.dtype_to_type_id('Ones', 'type', type)])
         # Add for jit context.
         if jit_context():
             if validator.is_stub_tensor(res):
@@ -2130,9 +2128,9 @@ class Zeros(Primitive):
     def __call__(self, size, type=None):
         # Add for jit context.
         if jit_context() and jit_context().compiled:
-            return None
-        res = _convert_stub(pyboost_zeros(self, [size, type if type is None else \
-            handler.dtype_to_type_id('Zeros', 'type', type)]))
+            return jit_context().default_output()
+        res = pyboost_zeros(self, [size, type if type is None else \
+            handler.dtype_to_type_id('Zeros', 'type', type)])
         # Add for jit context.
         if jit_context():
             if validator.is_stub_tensor(res):
@@ -2217,7 +2215,8 @@ def flash_attention_score(query, key, value, head_num, real_shift=None, drop_mas
         keep_prob (double, optional): The keep probability of dropout. Value range is (0.0, 1.0]. When `keep_prob`
             is 1.0, `drop_mask` should be None.
            Default: ``1.0``.
-        scalar_value (double, optional): The scale factor of score. Generally, the value is 1.0 / (D ** 0.5).
+        scalar_value (double, optional): The scale value indicating the scale coefficient, which is used as the
+            scalar of Muls in the calculation. Generally, the value is 1.0 / (D ** 0.5).
             Default: ``1.0``.
         pre_tokens (int, optional): Parameter for sparse computation, represents how many tokens are counted forward.
             When `sparse_mode` is set to 1, 2, 3, or 5, this parameter does not take effect.
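For reference, the `1.0 / (D ** 0.5)` that the reworded docstring recommends is the standard attention scaling over the head dimension; for example:

    import math

    head_dim = 128
    scalar_value = 1.0 / math.sqrt(head_dim)  # = 0.08838834764831845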
@@ -2599,8 +2598,8 @@ def fused_infer_attention_score(query, key, value, *, pse_shift=None, atten_mask
           taking exp, and then the sum is computed to obtain softmax_sum. Finally, the log of softmax_sum is taken,
           and softmax_max is added to obtain softmax_lse. The softmax_lse is only calculated when softmax_lse_flag
           is True, and the shape would be :math:`(B, N, Q\_S, 1)`. If softmax_lse_flag is False, then a tensor with
-          shape :math:`(1)` filled with zeros would be returned. In graph mode with JitConfig set to O2, please ensure
-          that the softmax_lse_flag is enabled before using softmax_lse; otherwise, an exception will occur.
+          shape :math:`(1)` filled with zeros would be returned. In GE backend, please ensure that the softmax_lse_flag
+          is enabled before using softmax_lse; otherwise, an exception will occur.
 
     Constraints:
         - Full Inference Scenario (Q_S > 1):
@@ -2840,8 +2839,8 @@ class WhileLoop(Primitive):
             while cond_func(val):
                 val = loop_func(val)
         except Exception as e:
-            raise ValueError("Invalid loop_func, please check input arguments and \
-                return value, error info: {}".format(e))
+            raise ValueError(f"Invalid loop_func, please check input arguments and "
+                             f"return value, error info: {e}")
         return val
 
 
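The Python fallback above also documents the calling convention: `WhileLoop()(cond_func, loop_func, init_val)` repeatedly applies `loop_func` while `cond_func(val)` holds, then returns the final value. A minimal sketch exercising exactly that fallback path:

    from mindspore import ops

    # 1 -> 4 -> 7 -> 10; stops once cond_func is False, so result is 10.
    result = ops.WhileLoop()(lambda v: v < 10, lambda v: v + 3, 1)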
@@ -2936,8 +2935,8 @@ class Scan(Primitive):
                 ys.append(y)
                 i = i + 1
         except Exception as e:
-            raise ValueError("Invalid loop_func, please check input arguments and \
-                return value, error info: {}".format(e))
+            raise ValueError(f"Invalid loop_func, please check input arguments and "
+                             f"return value, error info: {e}")
         return carry, ys
 
 
@@ -3012,6 +3011,6 @@ class ForiLoop(Primitive):
             for i in range(lower, upper):
                 val = loop_func(i, val)
         except Exception as e:
-            raise ValueError("Invalid loop_func, please check input arguments and \
-                return value, error info: {}".format(e))
+            raise ValueError(f"Invalid loop_func, please check input arguments and "
+                             f"return value, error info: {e}")
         return val
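`ForiLoop` follows the same pattern with an explicit index: `ForiLoop()(lower, upper, loop_func, init_val)` threads `val` through `loop_func(i, val)` for `i` in `[lower, upper)`. A sketch mirroring the fallback above:

    from mindspore import ops

    # 0 + 0 + 1 + 2 + 3 + 4 = 10
    total = ops.ForiLoop()(0, 5, lambda i, v: v + i, 0)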
@@ -882,7 +882,7 @@ class Sub(_MathBinaryOp):
     Note:
         - When the two inputs have different shapes, they must be able to broadcast to a common shape.
         - The two inputs can not be bool type at the same time,
-          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
+          [True, Tensor(True), Tensor(np.array([True]))] are all considered bool type.
         - The two inputs comply with the implicit type conversion rules to make the data types
           consistent.
 
@@ -890,7 +890,7 @@ class Sub(_MathBinaryOp):
         - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
           a bool or a tensor whose data type is
           `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-          `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+          `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         - **y** (Union[Tensor, number.Number, bool]) - The second input, when the first input is a Tensor,
           the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
 
@@ -1231,7 +1231,7 @@ class Heaviside(Primitive):
             0, & \text { if x }<0 \\
             \text { values, } & \text { if x }==0 \\
             1, & \text { if x }>0
-            \end{array}\right
+            \end{array}\right.
 
     .. warning::
         This is an experimental API that is subject to change or deletion.
@@ -1289,10 +1289,10 @@ class DivNoNan(Primitive):
         - **x1** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
           a bool or a tensor whose data type is
           `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-          `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+          `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         - **x2** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or
-          a bool when the first input is a bool or a tensor whose data type is number or bool\_.
-          When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
+          a bool when the first input is a bool or a tensor whose data type is number or bool.
+          When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
 
     Outputs:
         Tensor, the shape is the same as the one after broadcasting,
@@ -3354,9 +3354,8 @@ class ComplexAbs(Primitive):
 
     Examples:
         >>> import mindspore
-        >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.asarray(np.complex(3+4j)), mindspore.complex64)
+        >>> x = Tensor(3+4j, mindspore.complex64)
         >>> complex_abs = ops.ComplexAbs()
         >>> output = complex_abs(x)
         >>> print(output)
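This doctest fix matters on current NumPy: the `np.complex` alias was deprecated in NumPy 1.20 and removed in 1.24, so the old example no longer runs; the tensor is now built from a plain Python complex. The updated usage, for reference:

    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(3 + 4j, mindspore.complex64)
    print(ops.ComplexAbs()(x))  # 5.0, since |3+4j| = sqrt(3**2 + 4**2)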
@@ -3630,7 +3629,7 @@ class MatrixSolveLs(Primitive):
         TypeError: If `l2_regularizer` is not float64.
         TypeError: If `fast` is not bool.
         ValueError: If dimensions of `matrix` or `rhs` is less than 2.
-        ValueError: If shape of `matrix` dose not match the shape of `rhs`.
+        ValueError: If shape of `matrix` does not match the shape of `rhs`.
 
     Supported Platforms:
         ``CPU``
@@ -32,7 +32,6 @@ from mindspore.ops.primitive import PrimitiveWithCheck
 from mindspore.ops.primitive import prim_attr_register
 from mindspore.run_check._check_version import AscendEnvChecker
 from mindspore._c_expression import pyboost_all_finite
-from mindspore.common._stub_tensor import _convert_stub
 from ..auto_generate import (CeLU, Flatten, LogSoftmax, LogSoftmaxExt, GLU, ReLU, ReLU6, Dense, Tanh,
                              Elu, Sigmoid, Softmax, SoftplusExt, HSwish, HSigmoid, AvgPool, BiasAdd,
                              NLLLoss, OneHot, GeLU, FastGeLU, PReLU, RmsNorm, IncreFlashAttention, MSELossExt,
@@ -42,7 +41,7 @@ from ..auto_generate import (CeLU, Flatten, LogSoftmax, LogSoftmaxExt, GLU, ReLU
                              UpsampleNearest3D, UpsampleTrilinear3D,
                              SoftMarginLoss, UpsampleBilinear2D, UpsampleLinear1D,
                              BinaryCrossEntropy, BCEWithLogitsLoss, SoftShrink, AdaptiveMaxPool2D,
-                             SmoothL1Loss)
+                             SmoothL1Loss, KvScaleCache)
 from .manually_defined import BatchNorm
 
 
@@ -7120,8 +7119,8 @@ class Conv3DTranspose(Primitive):
         self.format = validator.check_string(data_format, ['NCDHW'], 'format', self.name)
         self.add_prim_attr('data_format', self.format)
 
-        self.output_padding = _check_3d_int_or_tuple('output_padding', output_padding, self.name,
-                                                     allow_five=False, ret_five=True, greater_zero=False)
+        self.output_padding = _check_3d_int_or_tuple('output_padding', output_padding, self.name, allow_five=False,
+                                                     ret_five=True, greater_zero=False, pad_value=0)
         output_padding_ = (self.output_padding[2], self.output_padding[3], self.output_padding[4])
         if self.pad_mode != 'pad' and output_padding_ != (0, 0, 0):
             raise ValueError(f"For '{self.name}', the 'output_padding' must be zero or (0, 0, 0) "
@@ -7371,8 +7370,8 @@ class ApplyAdagradDA(Primitive):
         >>> global_step = Tensor(2, mstype.int32)
         >>> output = net(grad, lr, l1, l2, global_step)
         >>> print(output)
-        [[-0.00073906, -0.00136889],
-        [-0.00059699, -0.00142478]]
+        [[-0.00073906 -0.00136889]
+         [-0.00059699 -0.00142478]]
     """
 
     __mindspore_signature__ = (
@@ -9258,4 +9257,4 @@ class AllFinite(Primitive):
                              "in the current environment does not support AllFinite.")
 
     def __call__(self, *args):
-        return _convert_stub(pyboost_all_finite(self, args))
+        return pyboost_all_finite(self, args)
@@ -170,10 +170,13 @@ class Primitive(Primitive_):
                 raise TypeError(f'The element of strategy must be tuple/Layout type, but got:{type(in_ele)}')
             if isinstance(in_ele, tuple):
                 for in_value in in_ele:
-                    if not isinstance(in_value, int) and self.name not in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY:
+                    if not isinstance(in_value, int) and self.name not in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY \
+                            and not self.attrs.get("self_define_shard", False):
                         raise TypeError(f'The {log_info}: {strategy} of {self.name} is not valid,'
                                         f' the value of strategy must be int type, but got:{type(in_value)}')
-                    if isinstance(in_value, Layout) and (self.name in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY):
+                    if isinstance(in_value, Layout) and (
+                            self.name in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY or self.attrs.get("self_define_shard",
+                                                                                             False)):
                         is_layout.append(True)
                         continue
                 is_layout.append(False)
@@ -315,7 +318,7 @@ class Primitive(Primitive_):
         out_is_layout = self._check_shard_strategy(out_strategy, "out_strategy")
         is_layout = in_is_layout if in_is_layout is not None else out_is_layout
         if out_is_layout is not None and is_layout != out_is_layout and \
-                self.name not in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY:
+                self.name not in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY and not self.attrs.get("self_define_shard", False):
             raise ValueError(f'The in_strategy type must equal to the out_strategy type, '
                              f'one using tuple(tuple) and the other using tuple(Layout) is not allowed.')
 
@@ -409,12 +412,6 @@ class Primitive(Primitive_):
             return output
         return _run_op(self, self.name, args)
 
-    def __getstate__(self):
-        return self.__dict__
-
-    def __setstate__(self, d):
-        self.__dict__.update(d)
-
     def __deepcopy__(self, memo):
         return type(self)(**self.init_attrs)
 
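The deleted `__getstate__`/`__setstate__` pair only restated pickle's default behavior (capture and restore `self.__dict__`), so removing it should be behavior-preserving. A minimal illustration of that default, using a plain class rather than a real primitive:

    import pickle

    class Demo:
        def __init__(self):
            self.attr = 1

    # Without any __getstate__/__setstate__, pickle round-trips __dict__ itself.
    restored = pickle.loads(pickle.dumps(Demo()))
    assert restored.attr == 1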
@@ -461,7 +458,7 @@ class Primitive(Primitive_):
 
         - If the computation involves something like randomization or global variable, the equivalence
           is not guaranteed currently.
-        - Not supported in pynative mode
+        - Should only be used in Graph mode or in gradient functions that are decorated by @jit.
 
     Args:
         mode (bool): Specifies whether the primitive is recomputed. Default: ``True`` .
@@ -469,7 +466,7 @@ class Primitive(Primitive_):
     Examples:
         >>> import numpy as np
         >>> import mindspore as ms
-        >>> from mindspore import Tensor, ops, nn
+        >>> from mindspore import Tensor, ops, nn, jit
        >>> class NetRecompute(nn.Cell):
         ...     def __init__(self):
         ...         super(NetRecompute,self).__init__()
@@ -484,6 +481,7 @@ class Primitive(Primitive_):
         ...         super(GradNet,self).__init__()
         ...         self.network = network
         ...         self.grad = ops.GradOperation()
+        ...     @jit
         ...     def construct(self, x):
         ...         g_out = self.grad(self.network)(x)
         ...         return g_out
@@ -495,8 +493,6 @@ class Primitive(Primitive_):
        >>> print(a)
        [0. 0.5]
        """
-        if context.get_context("mode") == context.PYNATIVE_MODE:
-            raise TypeError("Recompute is not supported in pynative mode currently.")
         Validator.check_bool(mode)
         self.add_prim_attr("recompute", mode)
         return self
@@ -513,8 +509,6 @@ class Primitive(Primitive_):
         Args:
             backward_prefetch(Union[str, int]): Specifies whether the activation is prefetched in backward pass.
         """
-        if context.get_context("mode") == context.PYNATIVE_MODE:
-            raise ValueError("Offload is not supported in pynative mode currently.")
         self.add_prim_attr("offload", True)
         if isinstance(backward_prefetch, str):
             Validator.check_string(backward_prefetch, ['Auto'], 'backward_prefetch', 'Primitive._offload')
@@ -554,10 +548,6 @@ class Primitive(Primitive_):
         Validator.check_non_negative_int(rank_id, "rank_id", "Primitive.place")
         Validator.check_string(role, "MS_WORKER", "role", "Primitive.place")
 
-        if context.get_context("mode") == context.PYNATIVE_MODE:
-            raise RuntimeError("You are calling Primitive.place in pynative mode."
-                               "It's only supported in graph mode. Please switch to graph mode.")
-
         # Get the execution context and check whether calling of this 'place' method is valid.
         # This is because placing operators to arbitrary processes while other distributed training mode
         # is enabled is very unpredictable and may cause fatal error.
@@ -1032,7 +1022,6 @@ def _run_op(obj, op_name, args):
     res = _pynative_executor.run_op_async(obj, op_name, args)
     # Add for jit context.
     if jit_context():
-        # todo support TensorPy
         return jit_context().run_op(obj, res, *args)
     return res
 
@@ -21,11 +21,10 @@ from mindspore.ops import operations as P
 from mindspore.ops import functional as F
 from mindspore.ops.composite.multitype_ops import _compile_utils as utils
 from mindspore.ops.composite.multitype_ops._compile_utils import (
-    sequence_to_tensor, _tensor_sub, _tensor_pow, _tensor_div, _tensor_floordiv
+    sequence_to_tensor, _tensor_sub, _tensor_pow, _tensor_div, _tensor_floordiv, _tensor_mod
 )
 from mindspore.ops.auto_generate.gen_ops_prim import (
-    inplace_scatter_src_op, inplace_scatter_src_reduce_op, inplace_scatter_value_op, inplace_scatter_value_reduce_op,
-    inplace_copy_op
+    inplace_scatter_src_op, inplace_scatter_src_reduce_op, inplace_scatter_value_op, inplace_scatter_value_reduce_op
 )
 from mindspore.ops.auto_generate.gen_ops_prim import (
     floor_div_op, floor_div_scalar_op
@@ -442,6 +441,9 @@ from mindspore.ops.auto_generate.gen_ops_prim import inplace_exp_op
 # 1030 log_
 from mindspore.ops.auto_generate.gen_ops_prim import inplace_log_op
 
+# 1031 masked_scatter
+from mindspore.ops.auto_generate import masked_scatter
+
 from .._checkparam import check_axis_in_range
 from ..ops.composite.multitype_ops import _compile_utils as compile_utils
 
@@ -503,16 +505,16 @@ def tensor_allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False):
 
 
 # 8 any
-def tensor_any(x, axis=None, keep_dims=False):
+def reduce_tensor_any(x, axis=None, keep_dims=False):
     if axis is None:
         axis = ()
     return any(x, axis, keep_dims)
 
 
-def deprecated_tensor_any(x, dim=None, keepdim=False):
+def tensor_any(input, dim=None, keepdim=False):
     if dim is None:
         dim = ()
-    return any(x, dim, keepdim)
+    return any(input, dim, keepdim)
 
 
 # 9 arctan2
@@ -948,6 +950,10 @@ def deprecated_tensor_remainder(input, divisor):
     return remainder(input, divisor)
 
 
+def deprecated_tensor_mod(input, other):
+    return _tensor_mod(input, other)
+
+
 # 86 repeat
 def tensor_repeat(input, *repeats):
     raise RuntimeError("'repeat' is not supported on this device.")
@@ -1449,14 +1455,10 @@ def tensor_atanh(input):
     return F.atanh(input)
 
 
-def tensor_copy_(input, src):
+def tensor_copy_(input, src, non_blocking=False):
     raise ValueError("should not come here for copy_ method")
 
 
-def deprecated_tensor_copy_(input, src, non_blocking=False):
-    return inplace_copy_op(input, src)
-
-
 def tensor_tan(input):
     return F.tan(input)
 
@@ -1514,6 +1516,16 @@ def tensor_empty(*size, dtype=None, device=None):
         "This is a function for empty not should be called. Please check the implementation.")
 
 
+def tensor_empty_like(input, *, dtype=None, device=None):
+    raise NotImplementedError(
+        "This is a function for empty_like should not be called. Please check the implementation.")
+
+
+def tensor_new_empty(input, size, *, dtype=None, device=None):
+    raise NotImplementedError(
+        "This is a function for new_empty should not be called. Please check the implementation.")
+
+
 def deprecated_tensor_logaddexp(input, other):
     return F.logaddexp(input, other)
 
@@ -1744,6 +1756,10 @@ def deprecated_tensor_diag(input):
     return F.diag(input)
 
 
+def deprecated_einsum(equation, operands):
+    raise NotImplementedError('einsum only supports Ascend.')
+
+
 # 916 index_add
 @constexpr
 def _check_index_add_alpha(alpha):
@@ -1808,6 +1824,8 @@ def tensor_inplace_sub(input, other, *, alpha=1):
         return sub(input, other)
     return sub_ext(input, other, alpha=alpha)
 
+def tensor_new_full(input, size, fill_value, *, dtype=None):
+    raise NotImplementedError("new_full method support Ascend only")
 
 def tensor_div_empty_(input, other, rounding_mode=None):
     raise ValueError("should not come here for div_ method.")
@@ -1840,6 +1858,14 @@ def all_gather_matmul(
     raise NotImplementedError('all_gather_matmul only supports Ascend.')
 
 
+def conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
+    raise NotImplementedError('conv3d only supports Ascend.')
+
+
+def tensor_remainder_(input, other):
+    return _tensor_mod(input, other)
+
+
 def tensor_floor_divide_(input, other):
     return _tensor_floordiv(input, other)
 
@@ -1884,12 +1910,27 @@ def tensor_gelu(input, *, approximate):
     return gelu(input, approximate)
 
 
+def tensor_bernoulli_(input, p, seed, offset):
+    raise RuntimeError("'bernoulli_' is not supported on this device.")
+
+
 def deprecated_pixel_shuffle(input, upscale_factor):
     return F.pixel_shuffle(input, upscale_factor)
 
 
+def tensor_quant_matmul(x1, x2, scale, *, offset=None, pertoken_scale=None, bias=None, output_dtype=None,
+                        x1_dtype=None, x2_dtype=None, pertoken_scale_dtype=None, scale_dtype=None, group_sizes=None):
+    r"""
+    For details, please refer to :func:`mindspore.ops.auto_generate.quant_matmul`.
+    """
+    raise NotImplementedError('quant_matmul only supports Ascend.')
+
+
 def tensor_gmm(x, weight, *, bias=None, group_list=None, group_type=0, group_list_type=0):
     raise NotImplementedError("gmm has not been implemented by python.")
 
 def raise_func(*args, **kwargs):
     raise NotImplementedError("this func has not been implemented.")
+
+def tensor_masked_scatter(input, mask, source):
+    return masked_scatter(input, mask, source)
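These `tensor_*` stubs form the per-device fallback table for Tensor methods; the new `tensor_masked_scatter` entry dispatches to the generated `masked_scatter` kernel instead of raising. At the Tensor-method level that corresponds to something like (a sketch; actual platform support follows the dispatch table above):

    import mindspore as ms
    from mindspore import Tensor

    x = Tensor([1.0, 2.0, 3.0], ms.float32)
    mask = Tensor([True, False, True])
    src = Tensor([7.0, 8.0], ms.float32)
    y = x.masked_scatter(mask, src)  # consumes src where mask is True: [7.0, 2.0, 8.0]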
@@ -53,7 +53,7 @@ class CppCreatePrimInstanceHelperGenerator(BaseGenerator):
         """
         py_arg_default = self.generate_op_arg_default_value(op_protos)
         py_labels = self.generate_op_labels(op_protos)
-        res_str = (template.PY_LICENSE_STR + py_arg_default + py_labels)
+        res_str = template.PY_LICENSE_STR + py_arg_default + py_labels
 
         save_path = os.path.join(work_path, K.PY_AUTO_GEN_PATH)
         file_name = "cpp_create_prim_instance_helper.py"