mindspore 2.5.0__cp311-cp311-win_amd64.whl → 2.6.0__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic. Click here for more details.

Files changed (493)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +6 -4
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -33
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parse/__init__.py +6 -7
  14. mindspore/_extends/parse/compile_config.py +19 -0
  15. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
  16. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  17. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  18. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  19. mindspore/_extends/parse/parser.py +25 -194
  20. mindspore/_extends/parse/resources.py +1 -5
  21. mindspore/_extends/parse/standard_method.py +109 -75
  22. mindspore/_extends/pijit/__init__.py +2 -2
  23. mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
  24. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  25. mindspore/_extends/utils.py +1 -1
  26. mindspore/amp.py +4 -4
  27. mindspore/atlprov.dll +0 -0
  28. mindspore/avcodec-59.dll +0 -0
  29. mindspore/avdevice-59.dll +0 -0
  30. mindspore/avfilter-8.dll +0 -0
  31. mindspore/avformat-59.dll +0 -0
  32. mindspore/avutil-57.dll +0 -0
  33. mindspore/boost/__init__.py +2 -2
  34. mindspore/boost/base.py +3 -7
  35. mindspore/boost/boost_cell_wrapper.py +2 -2
  36. mindspore/c1.dll +0 -0
  37. mindspore/c1xx.dll +0 -0
  38. mindspore/c2.dll +0 -0
  39. mindspore/common/__init__.py +4 -3
  40. mindspore/common/_grad_function.py +56 -0
  41. mindspore/common/_pijit_context.py +14 -5
  42. mindspore/common/_register_for_tensor.py +1 -1
  43. mindspore/common/_stub_tensor.py +5 -10
  44. mindspore/common/_tensor_cpp_method.py +1 -1
  45. mindspore/common/_tensor_docs.py +2014 -3386
  46. mindspore/common/api.py +386 -355
  47. mindspore/common/auto_dynamic_shape.py +41 -44
  48. mindspore/common/dtype.py +5 -2
  49. mindspore/common/dump.py +7 -5
  50. mindspore/common/file_system.py +3 -0
  51. mindspore/common/generator.py +3 -0
  52. mindspore/common/hook_handle.py +5 -3
  53. mindspore/common/initializer.py +10 -6
  54. mindspore/common/jit_begin_end.py +94 -0
  55. mindspore/common/jit_config.py +6 -1
  56. mindspore/common/jit_context.py +76 -0
  57. mindspore/common/jit_trace.py +378 -0
  58. mindspore/common/lazy_inline.py +2 -2
  59. mindspore/common/mutable.py +5 -4
  60. mindspore/common/parameter.py +106 -39
  61. mindspore/common/seed.py +2 -2
  62. mindspore/common/sparse_tensor.py +23 -17
  63. mindspore/common/tensor.py +332 -714
  64. mindspore/communication/__init__.py +7 -5
  65. mindspore/communication/_comm_helper.py +47 -2
  66. mindspore/communication/comm_func.py +70 -53
  67. mindspore/communication/management.py +83 -17
  68. mindspore/context.py +228 -571
  69. mindspore/dataset/__init__.py +44 -20
  70. mindspore/dataset/audio/__init__.py +2 -8
  71. mindspore/dataset/audio/transforms.py +3 -17
  72. mindspore/dataset/core/config.py +3 -3
  73. mindspore/dataset/engine/cache_client.py +1 -1
  74. mindspore/dataset/engine/datasets.py +102 -120
  75. mindspore/dataset/engine/datasets_audio.py +22 -22
  76. mindspore/dataset/engine/datasets_standard_format.py +43 -24
  77. mindspore/dataset/engine/datasets_text.py +78 -85
  78. mindspore/dataset/engine/datasets_user_defined.py +109 -77
  79. mindspore/dataset/engine/datasets_vision.py +111 -108
  80. mindspore/dataset/engine/iterators.py +5 -3
  81. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  82. mindspore/dataset/engine/samplers.py +279 -57
  83. mindspore/dataset/engine/serializer_deserializer.py +2 -1
  84. mindspore/dataset/engine/validators.py +10 -0
  85. mindspore/dataset/text/__init__.py +7 -6
  86. mindspore/dataset/text/transforms.py +6 -5
  87. mindspore/dataset/text/utils.py +3 -3
  88. mindspore/dataset/transforms/__init__.py +0 -9
  89. mindspore/dataset/transforms/transforms.py +3 -3
  90. mindspore/dataset/utils/browse_dataset.py +1 -1
  91. mindspore/dataset/vision/__init__.py +2 -9
  92. mindspore/dataset/vision/transforms.py +202 -158
  93. mindspore/dataset/vision/utils.py +7 -5
  94. mindspore/device_context/ascend/op_debug.py +60 -1
  95. mindspore/device_context/ascend/op_tuning.py +0 -4
  96. mindspore/device_manager.py +39 -3
  97. mindspore/dnnl.dll +0 -0
  98. mindspore/dpcmi.dll +0 -0
  99. mindspore/experimental/es/embedding_service.py +35 -27
  100. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -2
  101. mindspore/experimental/map_parameter.py +4 -4
  102. mindspore/experimental/optim/adadelta.py +22 -26
  103. mindspore/experimental/optim/adagrad.py +4 -4
  104. mindspore/experimental/optim/adam.py +4 -0
  105. mindspore/experimental/optim/adamax.py +4 -4
  106. mindspore/experimental/optim/adamw.py +4 -0
  107. mindspore/experimental/optim/asgd.py +1 -1
  108. mindspore/experimental/optim/lr_scheduler.py +40 -22
  109. mindspore/experimental/optim/radam.py +5 -5
  110. mindspore/experimental/optim/rprop.py +1 -1
  111. mindspore/experimental/optim/sgd.py +1 -1
  112. mindspore/hal/contiguous_tensors_handle.py +6 -10
  113. mindspore/hal/device.py +55 -81
  114. mindspore/hal/event.py +38 -55
  115. mindspore/hal/memory.py +115 -147
  116. mindspore/hal/stream.py +81 -125
  117. mindspore/include/dataset/constants.h +7 -4
  118. mindspore/include/dataset/execute.h +2 -2
  119. mindspore/jpeg62.dll +0 -0
  120. mindspore/log.py +40 -2
  121. mindspore/mindrecord/__init__.py +20 -7
  122. mindspore/mindspore_backend_common.dll +0 -0
  123. mindspore/mindspore_backend_manager.dll +0 -0
  124. mindspore/mindspore_common.dll +0 -0
  125. mindspore/mindspore_core.dll +0 -0
  126. mindspore/mindspore_dump.dll +0 -0
  127. mindspore/mindspore_frontend.dll +0 -0
  128. mindspore/mindspore_glog.dll +0 -0
  129. mindspore/mindspore_memory_pool.dll +0 -0
  130. mindspore/mindspore_ms_backend.dll +0 -0
  131. mindspore/mindspore_ops.dll +0 -0
  132. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  133. mindspore/mindspore_ops_kernel_common.dll +0 -0
  134. mindspore/mindspore_profiler.dll +0 -0
  135. mindspore/mindspore_pyboost.dll +0 -0
  136. mindspore/mindspore_pynative.dll +0 -0
  137. mindspore/mindspore_res_manager.dll +0 -0
  138. mindspore/mindspore_runtime_pipeline.dll +0 -0
  139. mindspore/mint/__init__.py +133 -702
  140. mindspore/mint/distributed/__init__.py +5 -1
  141. mindspore/mint/distributed/distributed.py +198 -113
  142. mindspore/mint/linalg/__init__.py +2 -0
  143. mindspore/mint/nn/__init__.py +280 -18
  144. mindspore/mint/nn/functional.py +282 -64
  145. mindspore/mint/nn/layer/__init__.py +4 -0
  146. mindspore/mint/nn/layer/_functions.py +7 -3
  147. mindspore/mint/nn/layer/activation.py +120 -13
  148. mindspore/mint/nn/layer/conv.py +234 -28
  149. mindspore/mint/nn/layer/normalization.py +15 -16
  150. mindspore/mint/nn/layer/padding.py +1 -1
  151. mindspore/mint/nn/layer/pooling.py +66 -1
  152. mindspore/mint/optim/__init__.py +2 -1
  153. mindspore/mint/optim/sgd.py +171 -0
  154. mindspore/msobj140.dll +0 -0
  155. mindspore/mspdb140.dll +0 -0
  156. mindspore/mspdbcore.dll +0 -0
  157. mindspore/mspdbst.dll +0 -0
  158. mindspore/mspft140.dll +0 -0
  159. mindspore/msvcdis140.dll +0 -0
  160. mindspore/msvcp140_1.dll +0 -0
  161. mindspore/msvcp140_2.dll +0 -0
  162. mindspore/msvcp140_atomic_wait.dll +0 -0
  163. mindspore/msvcp140_codecvt_ids.dll +0 -0
  164. mindspore/nn/__init__.py +4 -1
  165. mindspore/nn/cell.py +1253 -179
  166. mindspore/nn/layer/activation.py +23 -21
  167. mindspore/nn/layer/basic.py +22 -16
  168. mindspore/nn/layer/container.py +1 -1
  169. mindspore/nn/layer/conv.py +53 -42
  170. mindspore/nn/layer/embedding.py +9 -8
  171. mindspore/nn/layer/normalization.py +48 -42
  172. mindspore/nn/layer/pooling.py +75 -31
  173. mindspore/nn/layer/transformer.py +11 -10
  174. mindspore/nn/learning_rate_schedule.py +4 -2
  175. mindspore/nn/loss/loss.py +27 -19
  176. mindspore/nn/optim/ada_grad.py +6 -5
  177. mindspore/nn/optim/adadelta.py +9 -7
  178. mindspore/nn/optim/adafactor.py +1 -1
  179. mindspore/nn/optim/adam.py +18 -14
  180. mindspore/nn/optim/adamax.py +8 -7
  181. mindspore/nn/optim/adasum.py +5 -5
  182. mindspore/nn/optim/asgd.py +3 -1
  183. mindspore/nn/optim/ftrl.py +11 -9
  184. mindspore/nn/optim/lamb.py +1 -1
  185. mindspore/nn/optim/lazyadam.py +12 -10
  186. mindspore/nn/optim/momentum.py +7 -6
  187. mindspore/nn/optim/optimizer.py +2 -2
  188. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  189. mindspore/nn/optim/rmsprop.py +13 -12
  190. mindspore/nn/optim/rprop.py +9 -7
  191. mindspore/nn/optim/sgd.py +9 -6
  192. mindspore/nn/optim/tft_wrapper.py +5 -2
  193. mindspore/nn/probability/bijector/bijector.py +17 -11
  194. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  195. mindspore/nn/probability/bijector/invert.py +2 -2
  196. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  197. mindspore/nn/probability/bijector/softplus.py +3 -2
  198. mindspore/nn/probability/distribution/beta.py +3 -3
  199. mindspore/nn/probability/distribution/categorical.py +1 -1
  200. mindspore/nn/probability/distribution/cauchy.py +4 -2
  201. mindspore/nn/probability/distribution/exponential.py +6 -7
  202. mindspore/nn/probability/distribution/gamma.py +2 -2
  203. mindspore/nn/probability/distribution/gumbel.py +2 -2
  204. mindspore/nn/probability/distribution/half_normal.py +5 -3
  205. mindspore/nn/probability/distribution/logistic.py +5 -3
  206. mindspore/nn/probability/distribution/poisson.py +1 -1
  207. mindspore/nn/probability/distribution/uniform.py +5 -3
  208. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  209. mindspore/nn/reinforcement/tensor_array.py +1 -1
  210. mindspore/nn/wrap/__init__.py +6 -6
  211. mindspore/nn/wrap/cell_wrapper.py +178 -117
  212. mindspore/nn/wrap/grad_reducer.py +45 -36
  213. mindspore/nn/wrap/loss_scale.py +3 -3
  214. mindspore/numpy/array_creations.py +3 -3
  215. mindspore/numpy/array_ops.py +1 -1
  216. mindspore/numpy/utils.py +1 -2
  217. mindspore/numpy/utils_const.py +1 -2
  218. mindspore/opencv_core452.dll +0 -0
  219. mindspore/opencv_imgcodecs452.dll +0 -0
  220. mindspore/opencv_imgproc452.dll +0 -0
  221. mindspore/ops/__init__.py +3 -2
  222. mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
  223. mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
  224. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  225. mindspore/ops/_register_for_op.py +0 -11
  226. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  227. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
  228. mindspore/ops/_vmap/vmap_array_ops.py +32 -6
  229. mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
  230. mindspore/ops/_vmap/vmap_math_ops.py +4 -7
  231. mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
  232. mindspore/ops/auto_generate/__init__.py +4 -3
  233. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +127 -52
  234. mindspore/ops/auto_generate/gen_extend_func.py +286 -208
  235. mindspore/ops/auto_generate/gen_ops_def.py +2783 -2335
  236. mindspore/ops/auto_generate/gen_ops_prim.py +8992 -2686
  237. mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
  238. mindspore/ops/composite/__init__.py +2 -1
  239. mindspore/ops/composite/base.py +19 -24
  240. mindspore/ops/composite/math_ops.py +6 -16
  241. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  242. mindspore/ops/composite/multitype_ops/_compile_utils.py +4 -5
  243. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  244. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  245. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  246. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  247. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  248. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  249. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  250. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  251. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  252. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  253. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  254. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  255. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  256. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  257. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  258. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  259. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  260. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  261. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  262. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  263. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  264. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  265. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  266. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  267. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  268. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
  269. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  270. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  271. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  272. mindspore/ops/function/__init__.py +28 -2
  273. mindspore/ops/function/_add_attr_func.py +58 -0
  274. mindspore/ops/function/array_func.py +1631 -2347
  275. mindspore/ops/function/clip_func.py +38 -45
  276. mindspore/ops/function/debug_func.py +36 -44
  277. mindspore/ops/function/grad/__init__.py +1 -0
  278. mindspore/ops/function/grad/grad_func.py +104 -71
  279. mindspore/ops/function/image_func.py +1 -1
  280. mindspore/ops/function/linalg_func.py +46 -78
  281. mindspore/ops/function/math_func.py +3024 -3855
  282. mindspore/ops/function/nn_func.py +678 -274
  283. mindspore/ops/function/other_func.py +159 -1
  284. mindspore/ops/function/parameter_func.py +17 -30
  285. mindspore/ops/function/random_func.py +216 -361
  286. mindspore/ops/function/reshard_func.py +4 -70
  287. mindspore/ops/function/sparse_func.py +3 -3
  288. mindspore/ops/function/sparse_unary_func.py +5 -5
  289. mindspore/ops/function/spectral_func.py +25 -58
  290. mindspore/ops/function/vmap_func.py +26 -18
  291. mindspore/ops/functional.py +8 -5
  292. mindspore/ops/functional_overload.py +655 -4
  293. mindspore/ops/op_info_register.py +32 -244
  294. mindspore/ops/operations/__init__.py +21 -14
  295. mindspore/ops/operations/_custom_ops_utils.py +235 -0
  296. mindspore/ops/operations/_grad_ops.py +1 -10
  297. mindspore/ops/operations/_inner_ops.py +5 -76
  298. mindspore/ops/operations/_ms_kernel.py +4 -10
  299. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  300. mindspore/ops/operations/_scalar_ops.py +3 -2
  301. mindspore/ops/operations/_sequence_ops.py +1 -1
  302. mindspore/ops/operations/_tensor_array.py +1 -1
  303. mindspore/ops/operations/array_ops.py +39 -24
  304. mindspore/ops/operations/comm_ops.py +150 -107
  305. mindspore/ops/operations/custom_ops.py +287 -32
  306. mindspore/ops/operations/debug_ops.py +119 -16
  307. mindspore/ops/operations/inner_ops.py +1 -1
  308. mindspore/ops/operations/linalg_ops.py +1 -58
  309. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  310. mindspore/ops/operations/manually_defined/ops_def.py +746 -79
  311. mindspore/ops/operations/math_ops.py +21 -18
  312. mindspore/ops/operations/nn_ops.py +67 -224
  313. mindspore/ops/operations/other_ops.py +62 -9
  314. mindspore/ops/operations/random_ops.py +13 -7
  315. mindspore/ops/operations/reshard_ops.py +1 -1
  316. mindspore/ops/operations/sparse_ops.py +2 -2
  317. mindspore/ops/primitive.py +43 -32
  318. mindspore/ops/tensor_method.py +243 -17
  319. mindspore/ops_generate/__init__.py +0 -5
  320. mindspore/ops_generate/aclnn/__init__.py +0 -0
  321. mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
  322. mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
  323. mindspore/ops_generate/api/__init__.py +0 -0
  324. mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
  325. mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
  326. mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
  327. mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
  328. mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
  329. mindspore/ops_generate/api/gen_api.py +103 -0
  330. mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
  331. mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
  332. mindspore/ops_generate/common/__init__.py +0 -0
  333. mindspore/ops_generate/common/gen_constants.py +91 -0
  334. mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
  335. mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
  336. mindspore/ops_generate/{template.py → common/template.py} +96 -84
  337. mindspore/ops_generate/gen_ops.py +23 -325
  338. mindspore/ops_generate/op_def/__init__.py +0 -0
  339. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  340. mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
  341. mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -10
  342. mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
  343. mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
  344. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  345. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  346. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  347. mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
  348. mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
  349. mindspore/ops_generate/pyboost/__init__.py +0 -0
  350. mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
  351. mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
  352. mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
  353. mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
  354. mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
  355. mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
  356. mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
  357. mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
  358. mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
  359. mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
  360. mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
  361. mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
  362. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
  363. mindspore/ops_generate/resources/__init__.py +0 -0
  364. mindspore/ops_generate/resources/resource_list.py +30 -0
  365. mindspore/ops_generate/resources/resource_loader.py +36 -0
  366. mindspore/ops_generate/resources/resource_manager.py +64 -0
  367. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  368. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  369. mindspore/parallel/__init__.py +6 -2
  370. mindspore/parallel/_auto_parallel_context.py +140 -12
  371. mindspore/parallel/_cell_wrapper.py +132 -15
  372. mindspore/parallel/_parallel_serialization.py +95 -4
  373. mindspore/parallel/_ps_context.py +1 -1
  374. mindspore/parallel/_recovery_context.py +7 -2
  375. mindspore/parallel/_tensor.py +142 -18
  376. mindspore/parallel/_utils.py +198 -25
  377. mindspore/parallel/algo_parameter_config.py +3 -3
  378. mindspore/parallel/auto_parallel.py +732 -0
  379. mindspore/parallel/checkpoint_convert.py +159 -0
  380. mindspore/parallel/checkpoint_transform.py +658 -37
  381. mindspore/parallel/cluster/process_entity/_api.py +151 -19
  382. mindspore/parallel/cluster/run.py +1 -1
  383. mindspore/parallel/function/__init__.py +24 -0
  384. mindspore/parallel/function/reshard_func.py +258 -0
  385. mindspore/parallel/nn/__init__.py +25 -0
  386. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  387. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  388. mindspore/parallel/parameter_broadcast.py +24 -13
  389. mindspore/parallel/shard.py +137 -62
  390. mindspore/parallel/transform_safetensors.py +288 -95
  391. mindspore/pgodb140.dll +0 -0
  392. mindspore/pgort140.dll +0 -0
  393. mindspore/profiler/__init__.py +9 -5
  394. mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
  395. mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
  396. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
  397. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +25 -0
  398. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  399. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
  400. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
  401. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
  402. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
  403. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
  404. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
  405. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
  406. mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
  407. mindspore/profiler/common/constant.py +12 -0
  408. mindspore/profiler/common/msprof_cmd_tool.py +42 -23
  409. mindspore/profiler/common/path_manager.py +24 -0
  410. mindspore/profiler/common/profiler_context.py +26 -2
  411. mindspore/profiler/common/profiler_meta_data.py +74 -0
  412. mindspore/profiler/common/profiler_parameters.py +59 -18
  413. mindspore/profiler/common/profiler_path_manager.py +66 -7
  414. mindspore/profiler/dynamic_profiler.py +112 -79
  415. mindspore/profiler/envprofiler.py +26 -1
  416. mindspore/profiler/experimental_config.py +197 -0
  417. mindspore/profiler/mstx.py +57 -14
  418. mindspore/profiler/platform/npu_profiler.py +33 -7
  419. mindspore/profiler/profiler.py +541 -45
  420. mindspore/profiler/profiler_action_controller.py +1 -1
  421. mindspore/profiler/profiler_interface.py +4 -0
  422. mindspore/profiler/schedule.py +57 -22
  423. mindspore/rewrite/api/node.py +15 -13
  424. mindspore/rewrite/api/symbol_tree.py +1 -1
  425. mindspore/run_check/_check_version.py +25 -14
  426. mindspore/run_check/run_check.py +1 -1
  427. mindspore/runtime/__init__.py +2 -2
  428. mindspore/runtime/executor.py +40 -11
  429. mindspore/runtime/memory.py +37 -13
  430. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  431. mindspore/swresample-4.dll +0 -0
  432. mindspore/swscale-6.dll +0 -0
  433. mindspore/tbbmalloc.dll +0 -0
  434. mindspore/tinyxml2.dll +0 -0
  435. mindspore/train/__init__.py +8 -8
  436. mindspore/train/_utils.py +43 -9
  437. mindspore/train/amp.py +1 -1
  438. mindspore/train/callback/__init__.py +2 -2
  439. mindspore/train/callback/_callback.py +2 -16
  440. mindspore/train/callback/_checkpoint.py +24 -40
  441. mindspore/train/callback/_cluster_monitor.py +14 -18
  442. mindspore/train/callback/_flops_collector.py +2 -3
  443. mindspore/train/callback/_history.py +7 -4
  444. mindspore/train/callback/_lambda_callback.py +2 -2
  445. mindspore/train/callback/_landscape.py +0 -3
  446. mindspore/train/callback/_loss_monitor.py +2 -1
  447. mindspore/train/callback/_on_request_exit.py +6 -5
  448. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  449. mindspore/train/callback/_summary_collector.py +8 -13
  450. mindspore/train/callback/_time_monitor.py +2 -1
  451. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -105
  452. mindspore/train/data_sink.py +25 -2
  453. mindspore/train/dataset_helper.py +4 -5
  454. mindspore/train/loss_scale_manager.py +8 -7
  455. mindspore/train/metrics/accuracy.py +3 -3
  456. mindspore/train/metrics/confusion_matrix.py +9 -9
  457. mindspore/train/metrics/error.py +3 -3
  458. mindspore/train/metrics/hausdorff_distance.py +4 -4
  459. mindspore/train/metrics/mean_surface_distance.py +3 -3
  460. mindspore/train/metrics/metric.py +0 -12
  461. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  462. mindspore/train/metrics/precision.py +8 -6
  463. mindspore/train/metrics/recall.py +9 -9
  464. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  465. mindspore/train/mind_ir_pb2.py +19 -12
  466. mindspore/train/model.py +262 -127
  467. mindspore/train/serialization.py +246 -988
  468. mindspore/train/summary/_summary_adapter.py +2 -2
  469. mindspore/train/summary/summary_record.py +1 -1
  470. mindspore/turbojpeg.dll +0 -0
  471. mindspore/utils/__init__.py +3 -2
  472. mindspore/utils/dryrun.py +4 -2
  473. mindspore/utils/hooks.py +81 -0
  474. mindspore/utils/runtime_execution_order_check.py +2 -0
  475. mindspore/utils/utils.py +138 -4
  476. mindspore/vcmeta.dll +0 -0
  477. mindspore/vcruntime140.dll +0 -0
  478. mindspore/vcruntime140_1.dll +0 -0
  479. mindspore/version.py +1 -1
  480. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/METADATA +2 -1
  481. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/RECORD +485 -440
  482. mindspore/_install_custom.py +0 -43
  483. mindspore/common/_register_for_adapter.py +0 -74
  484. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  485. mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
  486. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  487. mindspore/ops_generate/gen_constants.py +0 -190
  488. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  489. mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
  490. /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
  491. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
  492. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +0 -0
  493. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,36 @@
1
+ # Copyright 2025 Huawei Technologies Co., Ltd
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ============================================================================
15
+
16
+ """Module of base class for resource loader."""
17
+
18
+ from abc import ABC, abstractmethod
19
+ from typing import Dict
20
+
21
+ from .resource_list import ResourceType
22
+
23
+
24
class ResourceLoader(ABC):
    """Abstract base class for resource loaders.

    A concrete loader knows how to produce one or more resources and
    reports them as a mapping keyed by :class:`ResourceType`.
    """

    @abstractmethod
    def load(self) -> Dict[ResourceType, object]:
        """Load the resource(s) this loader is responsible for.

        Returns:
            Dict[ResourceType, object]: Mapping from each resource type to
            the corresponding loaded resource object.
        """
        raise NotImplementedError
@@ -0,0 +1,64 @@
1
+ # Copyright 2025 Huawei Technologies Co., Ltd
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ============================================================================
15
+
16
+ """Module managing resource."""
17
+
18
+ from common.op_proto import OpProtoLoader, DeprecatedOpProtoLoader, FuncOpProtoLoader
19
+ from api.op_api_proto import OpApiProtoLoader
20
+
21
+ from .resource_loader import ResourceLoader
22
+ from .resource_list import ResourceType
23
+ from .yaml_loader import OpDocYamlLoader, TensorMethodDocYamlLoader, MintFuncDocYamlLoader
24
+
25
+
26
class ResourceManager:
    """Registry of loaded resources, keyed by resource type."""

    def __init__(self):
        # Maps ResourceType -> loaded resource object; filled by
        # register_resource().
        self.resource_map = {}

    def register_resource(self, loader: ResourceLoader) -> None:
        """Run *loader* and merge everything it produced into the registry."""
        loaded = loader.load()
        self.resource_map.update(loaded)

    def get_resource(self, type: ResourceType) -> object:
        """Return the resource registered under *type*.

        Raises:
            ValueError: If no resource of that type has been registered.
        """
        # NOTE: the parameter shadows the builtin `type`; kept as-is for
        # keyword-argument compatibility with existing callers.
        if type not in self.resource_map:
            raise ValueError(f"Resource '{type.name}' not registered")
        return self.resource_map[type]
47
+
48
+
49
def prepare_resources() -> ResourceManager:
    """Build a :class:`ResourceManager` pre-populated with all generator resources."""
    resource_mgr = ResourceManager()
    # These loaders are independent of one another; register them in bulk.
    independent_loaders = (
        OpProtoLoader(),
        DeprecatedOpProtoLoader(),
        FuncOpProtoLoader(),
        OpDocYamlLoader(),
        TensorMethodDocYamlLoader(),
        MintFuncDocYamlLoader(),
    )
    for loader in independent_loaders:
        resource_mgr.register_resource(loader)
    # OpApiProtoLoader consumes the three op-proto resources registered
    # above, so it must come last.
    resource_mgr.register_resource(OpApiProtoLoader(
        resource_mgr.get_resource(ResourceType.OP_PROTO),
        resource_mgr.get_resource(ResourceType.DEPRECATED_OP_PROTO),
        resource_mgr.get_resource(ResourceType.FUNC_OP_PROTO)))
    return resource_mgr
@@ -0,0 +1,88 @@
1
+ # Copyright 2025 Huawei Technologies Co., Ltd
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ============================================================================
15
+
16
+ """Module loading ops yaml."""
17
+
18
+ import os
19
+ from typing import Sequence, Union
20
+
21
+ from common.gen_utils import safe_load_yaml_from_dir
22
+ import common.gen_constants as K
23
+
24
+ from .resource_loader import ResourceLoader
25
+ from .resource_list import ResourceType
26
+
27
+
28
class YamlLoader(ResourceLoader):
    """
    YamlLoader is a utility class for loading yaml files.
    """
    def __init__(self, resource_type: ResourceType, yaml_path: Union[Sequence[str], str]):
        """
        Initialize YamlLoader.

        Args:
            resource_type (ResourceType): The type of the resource.
            yaml_path (Union[Sequence[str], str]): The path(s) to the yaml directory or directories.
        """
        self.type = resource_type
        # Normalize to a list so load() can always iterate uniformly.
        if isinstance(yaml_path, str):
            self.yaml_path = [yaml_path]
        else:
            self.yaml_path = yaml_path

    def load(self) -> dict:
        """
        Load yaml files.

        Returns:
            dict: A one-entry mapping from this loader's ResourceType to the merged yaml dict.

        Raises:
            ValueError: If any configured yaml path is not an existing directory.
        """
        # Validate every directory up front so no work is done on a bad config.
        for yaml_path in self.yaml_path:
            if not os.path.isdir(yaml_path):
                raise ValueError(f"yaml path '{yaml_path}' not found")

        # Later directories override duplicate keys from earlier ones.
        yaml_dict = {}
        for yaml_path in self.yaml_path:
            yaml_dict.update(safe_load_yaml_from_dir(yaml_path))

        return {self.type: yaml_dict}
62
+
63
+
64
class OpDocYamlLoader(YamlLoader):
    """
    Loads the op primitive doc yaml files from the op definition "doc" directory.
    """
    def __init__(self):
        doc_dir = os.path.join(K.WORK_DIR, K.MS_OP_DEF_YAML_PATH, "doc")
        super().__init__(ResourceType.OP_DOC_YAML, doc_dir)
71
+
72
+
73
class TensorMethodDocYamlLoader(YamlLoader):
    """
    Loads the tensor method doc yaml files from the configured doc directory.
    """
    def __init__(self):
        doc_dir = os.path.join(K.WORK_DIR, K.MS_TENSOR_METHOD_DOC_YAML_PATH)
        super().__init__(ResourceType.TENSOR_METHOD_DOC_YAML, doc_dir)
80
+
81
+
82
class MintFuncDocYamlLoader(YamlLoader):
    """
    Loads the mint func doc yaml files from the configured doc directory.
    """
    def __init__(self):
        doc_dir = os.path.join(K.WORK_DIR, K.MS_MINT_FUNC_DOC_YAML_PATH)
        super().__init__(ResourceType.MINT_FUNC_DOC_YAML, doc_dir)
@@ -0,0 +1,122 @@
1
+ # Copyright 2025 Huawei Technologies Co., Ltd
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ============================================================================
15
+ """
16
+ Generates mindspore/ccsrc/pybind_api/ir/tensor_py.cc which includes the CPython Tensor APIs.
17
+ """
18
+
19
+ import os
20
+ import common.gen_constants as K
21
+ from common.gen_utils import save_file
22
+ import common.template as template
23
+ from common.template import Template
24
+ from common.base_generator import BaseGenerator
25
+ from pyboost import pyboost_utils
26
+
27
class TensorPyCppGenerator(BaseGenerator):
    """
    This class is responsible for generating mindspore/ccsrc/pybind_api/ir/tensor_register/
    auto_generate/tensor_py_gen.cc (and the matching tensor_py_gen.h).
    """
    def __init__(self):
        # C++ source/header skeletons that the generated definitions are spliced into.
        self.TENSOR_PY_CC_TEMPLATE = template.TENSOR_PY_CC_TEMPLATE
        self.TENSOR_PY_H_TEMPLATE = template.TENSOR_PY_H_TEMPLATE
        # One macro invocation per API; the trailing '\' continues the macro list.
        self.cpy_wrapper_template = Template(" DEFINE_TENSOR_METHOD_CPYWRAPPER(${pascal_api_name}) \\")
        # PyMethodDef entry registering the wrapper on the CPython Tensor type.
        self.tensor_api_def_template = Template(
            '{"${snake_api_name}"'
            ', (PyCFunction)TensorMethod${pascal_api_name}_CPyWrapper, METH_VARARGS | METH_KEYWORDS},'
        )
        # pybind11 binding that exposes the same wrapper on the stub tensor class.
        self.stubtensor_api_def_template = Template(
            'py::cpp_function TensorMethod${snake_api_name}_wrapper(\n'
            ' [](const py::object& self, const py::args& args, const py::kwargs& kwargs) {\n'
            ' return TensorMethod${pascal_api_name}(self, args, kwargs);\n'
            ' },\n'
            ' py::is_method(stubTensorClass)\n'
            ');\n'
            'stubTensorClass.attr("${snake_api_name}") = TensorMethod${snake_api_name}_wrapper;'
        )

    def generate(self, work_path, tensor_method_protos, alias_func_mapping):
        """
        Generates the content for the helper file and saves it to the specified path.

        Args:
            work_path (str): The directory where the generated file will be saved.
            tensor_method_protos (dict): A dict mapping from Tensor func API names to their proto lists.
            alias_func_mapping (dict): A dictionary mapping function name to its alias function names.

        Returns:
            None
        """
        wrapper_defs = []
        tensor_api_defs = []
        stubtensor_api_defs = []
        for api_name in tensor_method_protos:
            pascal_api_name = pyboost_utils.format_func_api_name(api_name)
            wrapper_defs.append(self.cpy_wrapper_template.replace(pascal_api_name=pascal_api_name))
            # The primary name and every alias share one C++ wrapper; each exposed
            # name still needs its own Tensor / stub-Tensor registration entry.
            for snake_api_name in (api_name, *alias_func_mapping.get(api_name, ())):
                self._append_api_defs(snake_api_name, pascal_api_name,
                                      tensor_api_defs, stubtensor_api_defs)

        # Guard against an empty proto dict (the original indexed [-1] unconditionally).
        if wrapper_defs:
            # delete the ' \' for the last wrapper macro definition
            wrapper_defs[-1] = wrapper_defs[-1][:-2]

        file_str = self.TENSOR_PY_CC_TEMPLATE.replace(
            tensor_api_defs=tensor_api_defs,
            stubtensor_api_defs=stubtensor_api_defs
        )
        save_file(
            os.path.join(work_path, K.TENSOR_PY_CC_PATH),
            "tensor_py_gen.cc",
            file_str
        )

        file_str = self.TENSOR_PY_H_TEMPLATE.replace(CPyWrapper_defs=wrapper_defs)
        save_file(
            os.path.join(work_path, K.TENSOR_PY_CC_PATH),
            "tensor_py_gen.h",
            file_str
        )

    def _append_api_defs(self, snake_api_name, pascal_api_name, tensor_api_defs, stubtensor_api_defs):
        """Append the Tensor and stub-Tensor registration entries for one exposed name."""
        tensor_api_defs.append(
            self.tensor_api_def_template.replace(
                snake_api_name=snake_api_name,
                pascal_api_name=pascal_api_name
            )
        )
        stubtensor_api_defs.append(
            self.stubtensor_api_def_template.replace(
                snake_api_name=snake_api_name,
                pascal_api_name=pascal_api_name
            )
        )
117
+
118
+ def _format_api_name(api_name):
119
+ has_suffix = api_name.endswith("_")
120
+ parts = api_name.strip("_").split("_")
121
+ formatted_api_name = "".join(part.capitalize() for part in parts)
122
+ return formatted_api_name + '_' if has_suffix else formatted_api_name
@@ -19,7 +19,9 @@ from mindspore.parallel.algo_parameter_config import get_algo_parameters, reset_
19
19
  set_algo_parameters
20
20
  from mindspore.parallel.checkpoint_transform import rank_list_for_transform, transform_checkpoint_by_rank, \
21
21
  transform_checkpoints, merge_pipeline_strategys, sync_pipeline_shared_parameters, \
22
- load_segmented_checkpoints, set_op_strategy_config
22
+ load_segmented_checkpoints, set_op_strategy_config, load_distributed_checkpoint, \
23
+ merge_sliced_parameter, restore_group_info_list, build_searched_strategy
24
+ from mindspore.parallel.checkpoint_convert import rank_list_for_convert, convert_checkpoint_by_rank, convert_checkpoints
23
25
  from mindspore.parallel.parameter_broadcast import parameter_broadcast
24
26
  from mindspore.parallel.shard import shard, Layout
25
27
  from mindspore.parallel.transform_safetensors import unified_safetensors
@@ -27,4 +29,6 @@ from mindspore.parallel.transform_safetensors import unified_safetensors
27
29
  __all__ = ["set_algo_parameters", "reset_algo_parameters", "get_algo_parameters", "rank_list_for_transform",
28
30
  "transform_checkpoint_by_rank", "transform_checkpoints", "merge_pipeline_strategys", "shard",
29
31
  "sync_pipeline_shared_parameters", "Layout", "parameter_broadcast", "load_segmented_checkpoints",
30
- "unified_safetensors", "set_op_strategy_config"]
32
+ "unified_safetensors", "load_distributed_checkpoint", "merge_sliced_parameter", "restore_group_info_list",
33
+ "build_searched_strategy", "set_op_strategy_config", "rank_list_for_convert",
34
+ "convert_checkpoint_by_rank", "convert_checkpoints"]
@@ -1,4 +1,4 @@
1
- # Copyright 2020-2023 Huawei Technologies Co., Ltd
1
+ # Copyright 2020-2025 Huawei Technologies Co., Ltd
2
2
  #
3
3
  # Licensed under the Apache License, Version 2.0 (the "License");
4
4
  # you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@ from mindspore import context
21
21
  import mindspore.log as logger
22
22
  from mindspore.parallel._dp_allreduce_fusion import _set_fusion_strategy_by_idx, _set_fusion_strategy_by_size
23
23
  from mindspore.parallel._ps_context import _is_role_pserver
24
+ from mindspore.parallel.shard import Layout
24
25
  from mindspore._c_expression import AutoParallelContext
25
26
  from mindspore._checkparam import args_type_check
26
27
  from mindspore import _checkparam as Validator
@@ -63,6 +64,7 @@ class _ParallelOptimizerConfig:
63
64
  GRADIENT_ACCUMULATION_SHARD = "gradient_accumulation_shard"
64
65
  PARALLEL_OPTIMIZER_THRESHOLD = "parallel_optimizer_threshold"
65
66
  OPTIMIZER_WEIGHT_SHARD_SIZE = "optimizer_weight_shard_size"
67
+ OPTIMIZER_LEVEL = "optimizer_level"
66
68
 
67
69
 
68
70
  class _PipelineConfig:
@@ -77,6 +79,8 @@ class _PipelineScheduler:
77
79
  PIPELINE_1F1B = "1f1b"
78
80
  PIPELINE_GPIPE = "gpipe"
79
81
  PIPELINE_SEQPIPE = "seqpipe"
82
+ PIPELINE_SEQVPP = "seqvpp"
83
+ PIPELINE_SEQSMARTVPP = "seqsmartvpp"
80
84
 
81
85
 
82
86
  class _AutoParallelContext:
@@ -100,6 +104,7 @@ class _AutoParallelContext:
100
104
  def __init__(self):
101
105
  self._context_handle = AutoParallelContext.get_instance()
102
106
  self._dataset_strategy_using_str = True
107
+ self._dataset_layout = None
103
108
 
104
109
  def check_context_handle(self):
105
110
  """
@@ -441,6 +446,9 @@ class _AutoParallelContext:
441
446
  raise ValueError("The context configuration parameter 'parallel_mode' only support 'stand_alone', "
442
447
  "'data_parallel', 'hybrid_parallel', 'semi_auto_parallel' and 'auto_parallel', "
443
448
  "but got the value : {}.".format(parallel_mode))
449
+ if run_mode == context.ParallelMode.DATA_PARALLEL and self.get_enable_parallel_optimizer():
450
+ logger.warning("'enable_parallel_optimizer' is not suggested in 'data_parallel' mode, "
451
+ "consider using 'semi_auto_parallel' or 'auto_parallel' mode.")
444
452
 
445
453
  def get_parallel_mode(self):
446
454
  """Get parallel mode."""
@@ -585,6 +593,9 @@ class _AutoParallelContext:
585
593
  if not isinstance(dataset_strategy, tuple):
586
594
  raise TypeError("For 'set_auto_parallel_context', the argument 'dataset_strategy' "
587
595
  "must be str or tuple type, but got the type : {}.".format(type(dataset_strategy)))
596
+ if dataset_strategy and isinstance(dataset_strategy[0], Layout):
597
+ self._set_dataset_strategy_layout(dataset_strategy)
598
+ return
588
599
  for ele in dataset_strategy:
589
600
  if not isinstance(ele, tuple):
590
601
  raise TypeError("For 'set_auto_parallel_context', the element of argument "
@@ -599,8 +610,36 @@ class _AutoParallelContext:
599
610
  self._dataset_strategy_using_str = False
600
611
  self._context_handle.set_dataset_strategy(dataset_strategy)
601
612
 
613
+ def _set_dataset_strategy_layout(self, dataset_strategy):
614
+ """set dataset layout to c++ by using pybind."""
615
+ dataset_devmat = []
616
+ dataset_tensormap = []
617
+ dataset_alias_name = []
618
+ self._dataset_layout = dataset_strategy
619
+ for ele in dataset_strategy:
620
+ if not isinstance(ele, Layout):
621
+ raise TypeError(f"All the dataset_strategy elements should be Layout, but got {type(ele)}")
622
+ layout_to_dict = ele.to_dict()
623
+ dataset_devmat.append(layout_to_dict["device_matrix"])
624
+ dataset_alias_name.append(layout_to_dict["alias_name"])
625
+ if layout_to_dict["interleaved_parallel"]:
626
+ raise ValueError("For dataset_strategy, layout does not support interleaved_parallel")
627
+ tensor_map = []
628
+ for value in layout_to_dict["tensor_map"]:
629
+ if isinstance(value, tuple):
630
+ tensor_map.append(value)
631
+ elif isinstance(value, int):
632
+ tensor_map.append((value,))
633
+ else:
634
+ raise TypeError(f"value in tensor map must be tuple or int, but got {type(value)}")
635
+ dataset_tensormap.append(tuple(tensor_map))
636
+ self._context_handle.set_dataset_layout(dataset_devmat, dataset_tensormap, dataset_alias_name)
637
+
638
+
602
639
  def get_dataset_strategy(self):
603
640
  """Get dataset sharding strategy."""
641
+ if self._dataset_layout is not None:
642
+ return self._dataset_layout
604
643
  self.check_context_handle()
605
644
  if self._dataset_strategy_using_str:
606
645
  if self._context_handle.get_full_batch():
@@ -888,6 +927,9 @@ class _AutoParallelContext:
888
927
  "the argument 'enable_parallel_optimizer' must be bool, but got the type : {}."
889
928
  .format(type(enable_parallel_optimizer)))
890
929
  self._context_handle.set_enable_parallel_optimizer(enable_parallel_optimizer)
930
+ if enable_parallel_optimizer and self.get_parallel_mode() == context.ParallelMode.DATA_PARALLEL:
931
+ logger.warning("'enable_parallel_optimizer' is not suggested in 'data_parallel' mode, "
932
+ "consider using 'semi_auto_parallel' or 'auto_parallel' mode.")
891
933
 
892
934
  def set_force_fp32_communication(self, force_fp32_communication):
893
935
  """
@@ -918,7 +960,7 @@ class _AutoParallelContext:
918
960
 
919
961
  - pipeline_interleave(bool): Setting true enable interleave scheduler for pipeline parallelism. This
920
962
  scheduler requires more memory but less bubble.
921
- - pipeline_scheduler(string): There are two choices, "1f1b" and "gpipe". default is "1f1b"
963
+ - pipeline_scheduler(str): There are two choices, "1f1b" and "gpipe". default is "1f1b"
922
964
 
923
965
  - 1f1b: It requires less memory and bubble ratio, for it run backward pass when corresponding forward pass
924
966
  finished.
@@ -954,7 +996,9 @@ class _AutoParallelContext:
954
996
 
955
997
  Validator.check_string(pipeline_config[pp_scheduler], [_PipelineScheduler.PIPELINE_1F1B,
956
998
  _PipelineScheduler.PIPELINE_GPIPE,
957
- _PipelineScheduler.PIPELINE_SEQPIPE])
999
+ _PipelineScheduler.PIPELINE_SEQPIPE,
1000
+ _PipelineScheduler.PIPELINE_SEQVPP,
1001
+ _PipelineScheduler.PIPELINE_SEQSMARTVPP])
958
1002
  if not pipeline_config[pp_interleave] and pipeline_config[pp_scheduler] != _PipelineScheduler.PIPELINE_1F1B:
959
1003
  raise ValueError(f"When pipeline_interleave is False, {pp_scheduler} is not supported")
960
1004
 
@@ -994,19 +1038,21 @@ class _AutoParallelContext:
994
1038
  shape[n] \* size(dtype). Non-negative. Unit: KB. Default: 64.
995
1039
  - optimizer_weight_shard_size(int): Set the optimizer weight shard group size if you want to specific the
996
1040
  maximum group size across devices when the parallel optimizer is
997
- enabled. The numerical range can be (0, device_num]. Default value
998
- is -1, which means the optimizer weight shard group size will
999
- the data parallel group of each parameter. Default -1.
1000
-
1041
+ enabled. The numerical range can be (0, device_num] or -1. If pipeline
1042
+ parallelism is enabled, the numerical range is (0, device_num/stage]
1043
+ or -1. Default value is -1, which means the optimizer weight shard
1044
+ group size will be equal to the data parallel group of each parameter.
1001
1045
  """
1002
1046
  self.check_context_handle()
1003
1047
  grad_shard_name = _ParallelOptimizerConfig.GRADIENT_ACCUMULATION_SHARD
1004
1048
  threshold_name = _ParallelOptimizerConfig.PARALLEL_OPTIMIZER_THRESHOLD
1005
1049
  optimizer_weight_shard_size_name = _ParallelOptimizerConfig.OPTIMIZER_WEIGHT_SHARD_SIZE
1050
+ optimizer_level_name = _ParallelOptimizerConfig.OPTIMIZER_LEVEL
1006
1051
 
1007
1052
  for config_name in parallel_optimizer_config:
1008
1053
  unknown_config = []
1009
- if config_name not in [grad_shard_name, threshold_name, optimizer_weight_shard_size_name]:
1054
+ if config_name not in [grad_shard_name, threshold_name, optimizer_weight_shard_size_name,
1055
+ optimizer_level_name]:
1010
1056
  unknown_config.append(config_name)
1011
1057
 
1012
1058
  if unknown_config:
@@ -1017,6 +1063,10 @@ class _AutoParallelContext:
1017
1063
  parallel_optimizer_config[grad_shard_name], grad_shard_name, grad_shard_name)
1018
1064
  self._context_handle.set_grad_accumulation_shard(
1019
1065
  parallel_optimizer_config[grad_shard_name])
1066
+ if optimizer_level_name in parallel_optimizer_config \
1067
+ and parallel_optimizer_config[optimizer_level_name] != "level2":
1068
+ raise ValueError(f"The optimizer_level is set as {parallel_optimizer_config[optimizer_level_name]}, "
1069
+ "thus cannot set grad_accumulation_shard as True.")
1020
1070
 
1021
1071
  if threshold_name in parallel_optimizer_config:
1022
1072
  Validator.check_non_negative_int(
@@ -1026,8 +1076,23 @@ class _AutoParallelContext:
1026
1076
 
1027
1077
  if optimizer_weight_shard_size_name in parallel_optimizer_config:
1028
1078
  value = parallel_optimizer_config[optimizer_weight_shard_size_name]
1029
- Validator.check_positive_int(value)
1030
- self.set_optimizer_weight_shard_size(value)
1079
+ if value != -1:
1080
+ Validator.check_positive_int(value, prim_name="optimizer_weight_shard_size")
1081
+ self.set_optimizer_weight_shard_size(value)
1082
+
1083
+ if optimizer_level_name in parallel_optimizer_config:
1084
+ optimizer_level = parallel_optimizer_config[optimizer_level_name]
1085
+ if optimizer_level not in ["level1", "level2", "level3"]:
1086
+ raise ValueError("Optimizer level should in ['level1', 'level2', 'level3'], but got {}"
1087
+ .format(optimizer_level))
1088
+
1089
+ if self._context_handle.get_grad_accumulation_shard() and optimizer_level != "level2":
1090
+ raise ValueError("The grad_accumulation shard is set, thus cannot set optimizer_level != 'level2'")
1091
+ if optimizer_level == "level2":
1092
+ self._context_handle.set_grad_accumulation_shard(True)
1093
+ if optimizer_level == "level3":
1094
+ self._context_handle.set_zero3(True)
1095
+ self._context_handle.set_grad_accumulation_shard(False)
1031
1096
 
1032
1097
  def get_grad_accumulation_shard(self):
1033
1098
  """Get grad accumulation shard."""
@@ -1136,6 +1201,7 @@ class _AutoParallelContext:
1136
1201
  self.check_context_handle()
1137
1202
  self._context_handle.reset()
1138
1203
  _ParallelFusionConfig.reset()
1204
+ self._dataset_layout = None
1139
1205
 
1140
1206
  def _check_and_default_group(self, group):
1141
1207
  """Validate the given group, if group is empty, returns a default fusion group"""
@@ -1245,6 +1311,36 @@ class _AutoParallelContext:
1245
1311
  self.set_enable_all_gather_fusion(openstate)
1246
1312
  self.set_enable_reduce_scatter_fusion(openstate)
1247
1313
 
1314
    def set_auto_parallel_new_interface(self, auto_parallel_new_interface):
        """
        Set AutoParallel(cell) new interface flag.

        Forwards the flag to the C++ AutoParallelContext singleton.

        Args:
            auto_parallel_new_interface (bool): Mark whether to use the new interface.
        """
        self.check_context_handle()
        self._context_handle.set_auto_parallel_new_interface(auto_parallel_new_interface)
1323
+
1324
    def get_auto_parallel_new_interface(self):
        """Get the auto_parallel_new_interface flag from the C++ context."""
        self.check_context_handle()
        return self._context_handle.get_auto_parallel_new_interface()
1328
+
1329
    def set_init_param_in_compile(self, init_param_in_compile):
        """
        Set flag marking whether to init parameters in compiling process.

        Forwards the flag to the C++ AutoParallelContext singleton.

        Args:
            init_param_in_compile (bool): Mark whether to init parameters in compiling process.
        """
        self.check_context_handle()
        self._context_handle.set_init_param_in_compile(init_param_in_compile)
1338
+
1339
    def get_init_param_in_compile(self):
        """Get the init_param_in_compile flag from the C++ context."""
        self.check_context_handle()
        return self._context_handle.get_init_param_in_compile()
1343
+
1248
1344
  _AUTO_PARALLEL_CONTEXT = None
1249
1345
 
1250
1346
 
@@ -1295,7 +1391,10 @@ _set_auto_parallel_context_func_map = {
1295
1391
  "comm_fusion": auto_parallel_context().set_comm_fusion,
1296
1392
  "dump_local_norm": auto_parallel_context().set_dump_local_norm,
1297
1393
  "dump_local_norm_path": auto_parallel_context().set_dump_local_norm_path,
1298
- "dump_device_local_norm": auto_parallel_context().set_dump_device_local_norm}
1394
+ "dump_device_local_norm": auto_parallel_context().set_dump_device_local_norm,
1395
+ "auto_parallel_new_interface": auto_parallel_context().set_auto_parallel_new_interface,
1396
+ "init_param_in_compile": auto_parallel_context().set_init_param_in_compile}
1397
+
1299
1398
 
1300
1399
  _get_auto_parallel_context_func_map = {
1301
1400
  "device_num": auto_parallel_context().get_device_num,
@@ -1330,7 +1429,9 @@ _get_auto_parallel_context_func_map = {
1330
1429
  "full_batch_is_set": auto_parallel_context().get_full_batch_is_set,
1331
1430
  "dump_local_norm": auto_parallel_context().get_dump_local_norm,
1332
1431
  "dump_local_norm_path": auto_parallel_context().get_dump_local_norm_path,
1333
- "dump_device_local_norm": auto_parallel_context().get_dump_device_local_norm}
1432
+ "dump_device_local_norm": auto_parallel_context().get_dump_device_local_norm,
1433
+ "auto_parallel_new_interface": auto_parallel_context().get_auto_parallel_new_interface,
1434
+ "init_param_in_compile": auto_parallel_context().get_init_param_in_compile}
1334
1435
 
1335
1436
 
1336
1437
  @args_type_check(device_num=int, global_rank=int, gradients_mean=bool, gradient_fp32_sync=bool,
@@ -1472,6 +1573,33 @@ def _get_auto_parallel_context(attr_key):
1472
1573
  return get_func()
1473
1574
 
1474
1575
 
1576
def _get_all_auto_parallel_context():
    """get auto parallel context before reset"""
    context_values = {}
    pipeline_config = {}
    for key, getter in _get_auto_parallel_context_func_map.items():
        # Pipeline scheduling options are collected separately: they must be
        # restored through the dedicated "pipeline_config" setter.
        if key in ("pipeline_interleave", "pipeline_scheduler"):
            pipeline_config[key] = getter()
        else:
            context_values[key] = getter()
    return context_values, pipeline_config
1588
+
1589
+
1590
def _recover_auto_parallel_context(context_value_map, pp_config):
    """set auto parallel context after transformation"""
    # set the same auto parallel context after transform
    from mindspore.context import reset_auto_parallel_context
    reset_auto_parallel_context()
    for key, value in context_value_map.items():
        # list is empty or full_batch_is_set is not needed to set
        skip = key == "full_batch_is_set" or (isinstance(value, list) and not value)
        if skip:
            continue
        _set_auto_parallel_context_func_map[key](value)
    _set_auto_parallel_context_func_map["pipeline_config"](pp_config)
1601
+
1602
+
1475
1603
  def _reset_auto_parallel_context():
1476
1604
  """
1477
1605
  Reset auto parallel context attributes to the default values: