mindspore-2.5.0-cp311-cp311-win_amd64.whl → mindspore-2.6.0-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of mindspore might be problematic.

Files changed (493)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +6 -4
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -33
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parse/__init__.py +6 -7
  14. mindspore/_extends/parse/compile_config.py +19 -0
  15. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
  16. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  17. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  18. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  19. mindspore/_extends/parse/parser.py +25 -194
  20. mindspore/_extends/parse/resources.py +1 -5
  21. mindspore/_extends/parse/standard_method.py +109 -75
  22. mindspore/_extends/pijit/__init__.py +2 -2
  23. mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
  24. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  25. mindspore/_extends/utils.py +1 -1
  26. mindspore/amp.py +4 -4
  27. mindspore/atlprov.dll +0 -0
  28. mindspore/avcodec-59.dll +0 -0
  29. mindspore/avdevice-59.dll +0 -0
  30. mindspore/avfilter-8.dll +0 -0
  31. mindspore/avformat-59.dll +0 -0
  32. mindspore/avutil-57.dll +0 -0
  33. mindspore/boost/__init__.py +2 -2
  34. mindspore/boost/base.py +3 -7
  35. mindspore/boost/boost_cell_wrapper.py +2 -2
  36. mindspore/c1.dll +0 -0
  37. mindspore/c1xx.dll +0 -0
  38. mindspore/c2.dll +0 -0
  39. mindspore/common/__init__.py +4 -3
  40. mindspore/common/_grad_function.py +56 -0
  41. mindspore/common/_pijit_context.py +14 -5
  42. mindspore/common/_register_for_tensor.py +1 -1
  43. mindspore/common/_stub_tensor.py +5 -10
  44. mindspore/common/_tensor_cpp_method.py +1 -1
  45. mindspore/common/_tensor_docs.py +2014 -3386
  46. mindspore/common/api.py +386 -355
  47. mindspore/common/auto_dynamic_shape.py +41 -44
  48. mindspore/common/dtype.py +5 -2
  49. mindspore/common/dump.py +7 -5
  50. mindspore/common/file_system.py +3 -0
  51. mindspore/common/generator.py +3 -0
  52. mindspore/common/hook_handle.py +5 -3
  53. mindspore/common/initializer.py +10 -6
  54. mindspore/common/jit_begin_end.py +94 -0
  55. mindspore/common/jit_config.py +6 -1
  56. mindspore/common/jit_context.py +76 -0
  57. mindspore/common/jit_trace.py +378 -0
  58. mindspore/common/lazy_inline.py +2 -2
  59. mindspore/common/mutable.py +5 -4
  60. mindspore/common/parameter.py +106 -39
  61. mindspore/common/seed.py +2 -2
  62. mindspore/common/sparse_tensor.py +23 -17
  63. mindspore/common/tensor.py +332 -714
  64. mindspore/communication/__init__.py +7 -5
  65. mindspore/communication/_comm_helper.py +47 -2
  66. mindspore/communication/comm_func.py +70 -53
  67. mindspore/communication/management.py +83 -17
  68. mindspore/context.py +228 -571
  69. mindspore/dataset/__init__.py +44 -20
  70. mindspore/dataset/audio/__init__.py +2 -8
  71. mindspore/dataset/audio/transforms.py +3 -17
  72. mindspore/dataset/core/config.py +3 -3
  73. mindspore/dataset/engine/cache_client.py +1 -1
  74. mindspore/dataset/engine/datasets.py +102 -120
  75. mindspore/dataset/engine/datasets_audio.py +22 -22
  76. mindspore/dataset/engine/datasets_standard_format.py +43 -24
  77. mindspore/dataset/engine/datasets_text.py +78 -85
  78. mindspore/dataset/engine/datasets_user_defined.py +109 -77
  79. mindspore/dataset/engine/datasets_vision.py +111 -108
  80. mindspore/dataset/engine/iterators.py +5 -3
  81. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  82. mindspore/dataset/engine/samplers.py +279 -57
  83. mindspore/dataset/engine/serializer_deserializer.py +2 -1
  84. mindspore/dataset/engine/validators.py +10 -0
  85. mindspore/dataset/text/__init__.py +7 -6
  86. mindspore/dataset/text/transforms.py +6 -5
  87. mindspore/dataset/text/utils.py +3 -3
  88. mindspore/dataset/transforms/__init__.py +0 -9
  89. mindspore/dataset/transforms/transforms.py +3 -3
  90. mindspore/dataset/utils/browse_dataset.py +1 -1
  91. mindspore/dataset/vision/__init__.py +2 -9
  92. mindspore/dataset/vision/transforms.py +202 -158
  93. mindspore/dataset/vision/utils.py +7 -5
  94. mindspore/device_context/ascend/op_debug.py +60 -1
  95. mindspore/device_context/ascend/op_tuning.py +0 -4
  96. mindspore/device_manager.py +39 -3
  97. mindspore/dnnl.dll +0 -0
  98. mindspore/dpcmi.dll +0 -0
  99. mindspore/experimental/es/embedding_service.py +35 -27
  100. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -2
  101. mindspore/experimental/map_parameter.py +4 -4
  102. mindspore/experimental/optim/adadelta.py +22 -26
  103. mindspore/experimental/optim/adagrad.py +4 -4
  104. mindspore/experimental/optim/adam.py +4 -0
  105. mindspore/experimental/optim/adamax.py +4 -4
  106. mindspore/experimental/optim/adamw.py +4 -0
  107. mindspore/experimental/optim/asgd.py +1 -1
  108. mindspore/experimental/optim/lr_scheduler.py +40 -22
  109. mindspore/experimental/optim/radam.py +5 -5
  110. mindspore/experimental/optim/rprop.py +1 -1
  111. mindspore/experimental/optim/sgd.py +1 -1
  112. mindspore/hal/contiguous_tensors_handle.py +6 -10
  113. mindspore/hal/device.py +55 -81
  114. mindspore/hal/event.py +38 -55
  115. mindspore/hal/memory.py +115 -147
  116. mindspore/hal/stream.py +81 -125
  117. mindspore/include/dataset/constants.h +7 -4
  118. mindspore/include/dataset/execute.h +2 -2
  119. mindspore/jpeg62.dll +0 -0
  120. mindspore/log.py +40 -2
  121. mindspore/mindrecord/__init__.py +20 -7
  122. mindspore/mindspore_backend_common.dll +0 -0
  123. mindspore/mindspore_backend_manager.dll +0 -0
  124. mindspore/mindspore_common.dll +0 -0
  125. mindspore/mindspore_core.dll +0 -0
  126. mindspore/mindspore_dump.dll +0 -0
  127. mindspore/mindspore_frontend.dll +0 -0
  128. mindspore/mindspore_glog.dll +0 -0
  129. mindspore/mindspore_memory_pool.dll +0 -0
  130. mindspore/mindspore_ms_backend.dll +0 -0
  131. mindspore/mindspore_ops.dll +0 -0
  132. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  133. mindspore/mindspore_ops_kernel_common.dll +0 -0
  134. mindspore/mindspore_profiler.dll +0 -0
  135. mindspore/mindspore_pyboost.dll +0 -0
  136. mindspore/mindspore_pynative.dll +0 -0
  137. mindspore/mindspore_res_manager.dll +0 -0
  138. mindspore/mindspore_runtime_pipeline.dll +0 -0
  139. mindspore/mint/__init__.py +133 -702
  140. mindspore/mint/distributed/__init__.py +5 -1
  141. mindspore/mint/distributed/distributed.py +198 -113
  142. mindspore/mint/linalg/__init__.py +2 -0
  143. mindspore/mint/nn/__init__.py +280 -18
  144. mindspore/mint/nn/functional.py +282 -64
  145. mindspore/mint/nn/layer/__init__.py +4 -0
  146. mindspore/mint/nn/layer/_functions.py +7 -3
  147. mindspore/mint/nn/layer/activation.py +120 -13
  148. mindspore/mint/nn/layer/conv.py +234 -28
  149. mindspore/mint/nn/layer/normalization.py +15 -16
  150. mindspore/mint/nn/layer/padding.py +1 -1
  151. mindspore/mint/nn/layer/pooling.py +66 -1
  152. mindspore/mint/optim/__init__.py +2 -1
  153. mindspore/mint/optim/sgd.py +171 -0
  154. mindspore/msobj140.dll +0 -0
  155. mindspore/mspdb140.dll +0 -0
  156. mindspore/mspdbcore.dll +0 -0
  157. mindspore/mspdbst.dll +0 -0
  158. mindspore/mspft140.dll +0 -0
  159. mindspore/msvcdis140.dll +0 -0
  160. mindspore/msvcp140_1.dll +0 -0
  161. mindspore/msvcp140_2.dll +0 -0
  162. mindspore/msvcp140_atomic_wait.dll +0 -0
  163. mindspore/msvcp140_codecvt_ids.dll +0 -0
  164. mindspore/nn/__init__.py +4 -1
  165. mindspore/nn/cell.py +1253 -179
  166. mindspore/nn/layer/activation.py +23 -21
  167. mindspore/nn/layer/basic.py +22 -16
  168. mindspore/nn/layer/container.py +1 -1
  169. mindspore/nn/layer/conv.py +53 -42
  170. mindspore/nn/layer/embedding.py +9 -8
  171. mindspore/nn/layer/normalization.py +48 -42
  172. mindspore/nn/layer/pooling.py +75 -31
  173. mindspore/nn/layer/transformer.py +11 -10
  174. mindspore/nn/learning_rate_schedule.py +4 -2
  175. mindspore/nn/loss/loss.py +27 -19
  176. mindspore/nn/optim/ada_grad.py +6 -5
  177. mindspore/nn/optim/adadelta.py +9 -7
  178. mindspore/nn/optim/adafactor.py +1 -1
  179. mindspore/nn/optim/adam.py +18 -14
  180. mindspore/nn/optim/adamax.py +8 -7
  181. mindspore/nn/optim/adasum.py +5 -5
  182. mindspore/nn/optim/asgd.py +3 -1
  183. mindspore/nn/optim/ftrl.py +11 -9
  184. mindspore/nn/optim/lamb.py +1 -1
  185. mindspore/nn/optim/lazyadam.py +12 -10
  186. mindspore/nn/optim/momentum.py +7 -6
  187. mindspore/nn/optim/optimizer.py +2 -2
  188. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  189. mindspore/nn/optim/rmsprop.py +13 -12
  190. mindspore/nn/optim/rprop.py +9 -7
  191. mindspore/nn/optim/sgd.py +9 -6
  192. mindspore/nn/optim/tft_wrapper.py +5 -2
  193. mindspore/nn/probability/bijector/bijector.py +17 -11
  194. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  195. mindspore/nn/probability/bijector/invert.py +2 -2
  196. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  197. mindspore/nn/probability/bijector/softplus.py +3 -2
  198. mindspore/nn/probability/distribution/beta.py +3 -3
  199. mindspore/nn/probability/distribution/categorical.py +1 -1
  200. mindspore/nn/probability/distribution/cauchy.py +4 -2
  201. mindspore/nn/probability/distribution/exponential.py +6 -7
  202. mindspore/nn/probability/distribution/gamma.py +2 -2
  203. mindspore/nn/probability/distribution/gumbel.py +2 -2
  204. mindspore/nn/probability/distribution/half_normal.py +5 -3
  205. mindspore/nn/probability/distribution/logistic.py +5 -3
  206. mindspore/nn/probability/distribution/poisson.py +1 -1
  207. mindspore/nn/probability/distribution/uniform.py +5 -3
  208. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  209. mindspore/nn/reinforcement/tensor_array.py +1 -1
  210. mindspore/nn/wrap/__init__.py +6 -6
  211. mindspore/nn/wrap/cell_wrapper.py +178 -117
  212. mindspore/nn/wrap/grad_reducer.py +45 -36
  213. mindspore/nn/wrap/loss_scale.py +3 -3
  214. mindspore/numpy/array_creations.py +3 -3
  215. mindspore/numpy/array_ops.py +1 -1
  216. mindspore/numpy/utils.py +1 -2
  217. mindspore/numpy/utils_const.py +1 -2
  218. mindspore/opencv_core452.dll +0 -0
  219. mindspore/opencv_imgcodecs452.dll +0 -0
  220. mindspore/opencv_imgproc452.dll +0 -0
  221. mindspore/ops/__init__.py +3 -2
  222. mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
  223. mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
  224. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  225. mindspore/ops/_register_for_op.py +0 -11
  226. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  227. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
  228. mindspore/ops/_vmap/vmap_array_ops.py +32 -6
  229. mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
  230. mindspore/ops/_vmap/vmap_math_ops.py +4 -7
  231. mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
  232. mindspore/ops/auto_generate/__init__.py +4 -3
  233. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +127 -52
  234. mindspore/ops/auto_generate/gen_extend_func.py +286 -208
  235. mindspore/ops/auto_generate/gen_ops_def.py +2783 -2335
  236. mindspore/ops/auto_generate/gen_ops_prim.py +8992 -2686
  237. mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
  238. mindspore/ops/composite/__init__.py +2 -1
  239. mindspore/ops/composite/base.py +19 -24
  240. mindspore/ops/composite/math_ops.py +6 -16
  241. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  242. mindspore/ops/composite/multitype_ops/_compile_utils.py +4 -5
  243. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  244. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  245. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  246. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  247. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  248. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  249. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  250. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  251. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  252. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  253. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  254. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  255. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  256. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  257. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  258. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  259. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  260. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  261. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  262. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  263. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  264. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  265. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  266. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  267. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  268. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
  269. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  270. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  271. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  272. mindspore/ops/function/__init__.py +28 -2
  273. mindspore/ops/function/_add_attr_func.py +58 -0
  274. mindspore/ops/function/array_func.py +1631 -2347
  275. mindspore/ops/function/clip_func.py +38 -45
  276. mindspore/ops/function/debug_func.py +36 -44
  277. mindspore/ops/function/grad/__init__.py +1 -0
  278. mindspore/ops/function/grad/grad_func.py +104 -71
  279. mindspore/ops/function/image_func.py +1 -1
  280. mindspore/ops/function/linalg_func.py +46 -78
  281. mindspore/ops/function/math_func.py +3024 -3855
  282. mindspore/ops/function/nn_func.py +678 -274
  283. mindspore/ops/function/other_func.py +159 -1
  284. mindspore/ops/function/parameter_func.py +17 -30
  285. mindspore/ops/function/random_func.py +216 -361
  286. mindspore/ops/function/reshard_func.py +4 -70
  287. mindspore/ops/function/sparse_func.py +3 -3
  288. mindspore/ops/function/sparse_unary_func.py +5 -5
  289. mindspore/ops/function/spectral_func.py +25 -58
  290. mindspore/ops/function/vmap_func.py +26 -18
  291. mindspore/ops/functional.py +8 -5
  292. mindspore/ops/functional_overload.py +655 -4
  293. mindspore/ops/op_info_register.py +32 -244
  294. mindspore/ops/operations/__init__.py +21 -14
  295. mindspore/ops/operations/_custom_ops_utils.py +235 -0
  296. mindspore/ops/operations/_grad_ops.py +1 -10
  297. mindspore/ops/operations/_inner_ops.py +5 -76
  298. mindspore/ops/operations/_ms_kernel.py +4 -10
  299. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  300. mindspore/ops/operations/_scalar_ops.py +3 -2
  301. mindspore/ops/operations/_sequence_ops.py +1 -1
  302. mindspore/ops/operations/_tensor_array.py +1 -1
  303. mindspore/ops/operations/array_ops.py +39 -24
  304. mindspore/ops/operations/comm_ops.py +150 -107
  305. mindspore/ops/operations/custom_ops.py +287 -32
  306. mindspore/ops/operations/debug_ops.py +119 -16
  307. mindspore/ops/operations/inner_ops.py +1 -1
  308. mindspore/ops/operations/linalg_ops.py +1 -58
  309. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  310. mindspore/ops/operations/manually_defined/ops_def.py +746 -79
  311. mindspore/ops/operations/math_ops.py +21 -18
  312. mindspore/ops/operations/nn_ops.py +67 -224
  313. mindspore/ops/operations/other_ops.py +62 -9
  314. mindspore/ops/operations/random_ops.py +13 -7
  315. mindspore/ops/operations/reshard_ops.py +1 -1
  316. mindspore/ops/operations/sparse_ops.py +2 -2
  317. mindspore/ops/primitive.py +43 -32
  318. mindspore/ops/tensor_method.py +243 -17
  319. mindspore/ops_generate/__init__.py +0 -5
  320. mindspore/ops_generate/aclnn/__init__.py +0 -0
  321. mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
  322. mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
  323. mindspore/ops_generate/api/__init__.py +0 -0
  324. mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
  325. mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
  326. mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
  327. mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
  328. mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
  329. mindspore/ops_generate/api/gen_api.py +103 -0
  330. mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
  331. mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
  332. mindspore/ops_generate/common/__init__.py +0 -0
  333. mindspore/ops_generate/common/gen_constants.py +91 -0
  334. mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
  335. mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
  336. mindspore/ops_generate/{template.py → common/template.py} +96 -84
  337. mindspore/ops_generate/gen_ops.py +23 -325
  338. mindspore/ops_generate/op_def/__init__.py +0 -0
  339. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  340. mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
  341. mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -10
  342. mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
  343. mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
  344. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  345. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  346. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  347. mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
  348. mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
  349. mindspore/ops_generate/pyboost/__init__.py +0 -0
  350. mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
  351. mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
  352. mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
  353. mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
  354. mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
  355. mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
  356. mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
  357. mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
  358. mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
  359. mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
  360. mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
  361. mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
  362. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
  363. mindspore/ops_generate/resources/__init__.py +0 -0
  364. mindspore/ops_generate/resources/resource_list.py +30 -0
  365. mindspore/ops_generate/resources/resource_loader.py +36 -0
  366. mindspore/ops_generate/resources/resource_manager.py +64 -0
  367. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  368. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  369. mindspore/parallel/__init__.py +6 -2
  370. mindspore/parallel/_auto_parallel_context.py +140 -12
  371. mindspore/parallel/_cell_wrapper.py +132 -15
  372. mindspore/parallel/_parallel_serialization.py +95 -4
  373. mindspore/parallel/_ps_context.py +1 -1
  374. mindspore/parallel/_recovery_context.py +7 -2
  375. mindspore/parallel/_tensor.py +142 -18
  376. mindspore/parallel/_utils.py +198 -25
  377. mindspore/parallel/algo_parameter_config.py +3 -3
  378. mindspore/parallel/auto_parallel.py +732 -0
  379. mindspore/parallel/checkpoint_convert.py +159 -0
  380. mindspore/parallel/checkpoint_transform.py +658 -37
  381. mindspore/parallel/cluster/process_entity/_api.py +151 -19
  382. mindspore/parallel/cluster/run.py +1 -1
  383. mindspore/parallel/function/__init__.py +24 -0
  384. mindspore/parallel/function/reshard_func.py +258 -0
  385. mindspore/parallel/nn/__init__.py +25 -0
  386. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  387. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  388. mindspore/parallel/parameter_broadcast.py +24 -13
  389. mindspore/parallel/shard.py +137 -62
  390. mindspore/parallel/transform_safetensors.py +288 -95
  391. mindspore/pgodb140.dll +0 -0
  392. mindspore/pgort140.dll +0 -0
  393. mindspore/profiler/__init__.py +9 -5
  394. mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
  395. mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
  396. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
  397. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +25 -0
  398. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  399. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
  400. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
  401. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
  402. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
  403. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
  404. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
  405. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
  406. mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
  407. mindspore/profiler/common/constant.py +12 -0
  408. mindspore/profiler/common/msprof_cmd_tool.py +42 -23
  409. mindspore/profiler/common/path_manager.py +24 -0
  410. mindspore/profiler/common/profiler_context.py +26 -2
  411. mindspore/profiler/common/profiler_meta_data.py +74 -0
  412. mindspore/profiler/common/profiler_parameters.py +59 -18
  413. mindspore/profiler/common/profiler_path_manager.py +66 -7
  414. mindspore/profiler/dynamic_profiler.py +112 -79
  415. mindspore/profiler/envprofiler.py +26 -1
  416. mindspore/profiler/experimental_config.py +197 -0
  417. mindspore/profiler/mstx.py +57 -14
  418. mindspore/profiler/platform/npu_profiler.py +33 -7
  419. mindspore/profiler/profiler.py +541 -45
  420. mindspore/profiler/profiler_action_controller.py +1 -1
  421. mindspore/profiler/profiler_interface.py +4 -0
  422. mindspore/profiler/schedule.py +57 -22
  423. mindspore/rewrite/api/node.py +15 -13
  424. mindspore/rewrite/api/symbol_tree.py +1 -1
  425. mindspore/run_check/_check_version.py +25 -14
  426. mindspore/run_check/run_check.py +1 -1
  427. mindspore/runtime/__init__.py +2 -2
  428. mindspore/runtime/executor.py +40 -11
  429. mindspore/runtime/memory.py +37 -13
  430. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  431. mindspore/swresample-4.dll +0 -0
  432. mindspore/swscale-6.dll +0 -0
  433. mindspore/tbbmalloc.dll +0 -0
  434. mindspore/tinyxml2.dll +0 -0
  435. mindspore/train/__init__.py +8 -8
  436. mindspore/train/_utils.py +43 -9
  437. mindspore/train/amp.py +1 -1
  438. mindspore/train/callback/__init__.py +2 -2
  439. mindspore/train/callback/_callback.py +2 -16
  440. mindspore/train/callback/_checkpoint.py +24 -40
  441. mindspore/train/callback/_cluster_monitor.py +14 -18
  442. mindspore/train/callback/_flops_collector.py +2 -3
  443. mindspore/train/callback/_history.py +7 -4
  444. mindspore/train/callback/_lambda_callback.py +2 -2
  445. mindspore/train/callback/_landscape.py +0 -3
  446. mindspore/train/callback/_loss_monitor.py +2 -1
  447. mindspore/train/callback/_on_request_exit.py +6 -5
  448. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  449. mindspore/train/callback/_summary_collector.py +8 -13
  450. mindspore/train/callback/_time_monitor.py +2 -1
  451. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -105
  452. mindspore/train/data_sink.py +25 -2
  453. mindspore/train/dataset_helper.py +4 -5
  454. mindspore/train/loss_scale_manager.py +8 -7
  455. mindspore/train/metrics/accuracy.py +3 -3
  456. mindspore/train/metrics/confusion_matrix.py +9 -9
  457. mindspore/train/metrics/error.py +3 -3
  458. mindspore/train/metrics/hausdorff_distance.py +4 -4
  459. mindspore/train/metrics/mean_surface_distance.py +3 -3
  460. mindspore/train/metrics/metric.py +0 -12
  461. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  462. mindspore/train/metrics/precision.py +8 -6
  463. mindspore/train/metrics/recall.py +9 -9
  464. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  465. mindspore/train/mind_ir_pb2.py +19 -12
  466. mindspore/train/model.py +262 -127
  467. mindspore/train/serialization.py +246 -988
  468. mindspore/train/summary/_summary_adapter.py +2 -2
  469. mindspore/train/summary/summary_record.py +1 -1
  470. mindspore/turbojpeg.dll +0 -0
  471. mindspore/utils/__init__.py +3 -2
  472. mindspore/utils/dryrun.py +4 -2
  473. mindspore/utils/hooks.py +81 -0
  474. mindspore/utils/runtime_execution_order_check.py +2 -0
  475. mindspore/utils/utils.py +138 -4
  476. mindspore/vcmeta.dll +0 -0
  477. mindspore/vcruntime140.dll +0 -0
  478. mindspore/vcruntime140_1.dll +0 -0
  479. mindspore/version.py +1 -1
  480. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/METADATA +2 -1
  481. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/RECORD +485 -440
  482. mindspore/_install_custom.py +0 -43
  483. mindspore/common/_register_for_adapter.py +0 -74
  484. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  485. mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
  486. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  487. mindspore/ops_generate/gen_constants.py +0 -190
  488. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  489. mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
  490. /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
  491. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
  492. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +0 -0
  493. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
@@ -23,7 +23,7 @@ from mindspore import log as logger
  from mindspore.common import dtype as mstype
  from mindspore.ops import ReduceOp, cat
  from mindspore.common.tensor import Tensor
- from mindspore._c_expression import Tensor as Tensor_
+ from mindspore._c_expression import TensorPy as Tensor_
  from mindspore.ops.primitive import _primexpr
  from mindspore.communication._comm_helper import (
  _destroy_group_helper,
@@ -31,6 +31,8 @@ from mindspore.communication._comm_helper import (
  _get_size_helper,
  _get_backend,
  _get_group_ranks,
+ _is_available,
+ _is_initialized,
  )
  from mindspore.communication import (
  init,
@@ -44,7 +46,6 @@ from mindspore.communication import (
  from mindspore.communication.comm_func import (
  _deal_comm_outputs,
  _check_all_tensors,
- _contiguous,
  _check_all_tensor_same_dtype,
  _is_split_sizes_empty,
  _get_size,
@@ -74,6 +75,8 @@ _pickler = pickle.Pickler
  _unpickler = pickle.Unpickler
  BACKEND_HCCL = "hccl"
  BACKEND_MCCL = "mccl"
+ _GROPU_SIZE_CACHE = {}
+ _GROPU_RANK_CACHE = {}

  safe_builtins = {
  'range',
@@ -84,6 +87,24 @@ safe_builtins = {
  }


+ def get_cache_group_size(group=GlobalComm.WORLD_COMM_GROUP):
+ """get cache group size."""
+ global _GROPU_SIZE_CACHE
+ if group not in _GROPU_SIZE_CACHE:
+ _GROPU_SIZE_CACHE[group] = _get_size_helper(group)
+ group_size = _GROPU_SIZE_CACHE[group]
+ return group_size
+
+
+ def get_cache_group_rank(group=GlobalComm.WORLD_COMM_GROUP):
+ """get cache rank id."""
+ global _GROPU_RANK_CACHE
+ if group not in _GROPU_RANK_CACHE:
+ _GROPU_RANK_CACHE[group] = _get_rank_helper(group)
+ group_rank = _GROPU_RANK_CACHE[group]
+ return group_rank
+
+
  class RestrictedUnpickler(pickle.Unpickler):
  # Override find_class method.
  def find_class(self, module, name):
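The two helpers added above, `get_cache_group_size` and `get_cache_group_rank`, memoize the group size and rank per communication group in the module-level `_GROPU_SIZE_CACHE` / `_GROPU_RANK_CACHE` dictionaries, so the collective wrappers later in this file no longer call `_get_size_helper` / `_get_rank_helper` on every invocation. A minimal, self-contained sketch of the same memoization pattern; `query_backend`, `cached_group_size`, and the sample sizes are illustrative stand-ins, not MindSpore API:

    _SIZE_CACHE = {}

    def query_backend(group):
        # Stand-in for the expensive backend query (_get_size_helper in the diff above).
        return {"world": 8, "sub_group": 4}[group]

    def cached_group_size(group="world"):
        """Return the group size, querying the backend only once per group."""
        if group not in _SIZE_CACHE:
            _SIZE_CACHE[group] = query_backend(group)
        return _SIZE_CACHE[group]

    print(cached_group_size("world"))   # first call hits query_backend -> 8
    print(cached_group_size("world"))   # second call is served from the cache -> 8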
@@ -116,6 +137,68 @@ def _tensor_to_object(tensor, tensor_size):
  return restricted_loads(buf)


+ def is_available():
+ """
+ Checks if distributed module is available.
+
+ Note:
+ Always returns `True` because MindSpore always has distributed ability on all platforms.
+
+ Returns:
+ bool, whether this distributed module is available.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ .. note::
+ Before running the following examples, you need to configure the communication environment variables.
+
+ For Ascend devices, it is recommended to use the msrun startup method
+ without any third-party or configuration file dependencies.
+ Please see the `msrun start up
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
+ for more details.
+
+ >>> import mindspore as ms
+ >>> from mindspore.mint.distributed import is_available
+ >>> ms.set_device(device_target="Ascend")
+ >>> is_available()
+ True
+ """
+ return _is_available()
+
+
+ def is_initialized():
+ """
+ Checks if default process group has been initialized.
+
+ Returns:
+ bool, whether the default process group has been initialized.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ .. note::
+ Before running the following examples, you need to configure the communication environment variables.
+
+ For Ascend devices, it is recommended to use the msrun startup method
+ without any third-party or configuration file dependencies.
+ Please see the `msrun start up
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
+ for more details.
+
+ >>> import mindspore as ms
+ >>> from mindspore.mint.distributed import init_process_group, is_initialized
+ >>> ms.set_device(device_target="Ascend")
+ >>> init_process_group()
+ >>> print(is_initialized())
+ True
+ """
+ return _is_initialized()
+
+
  def init_process_group(backend="hccl",
  init_method=None,
  timeout=None,
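`is_available()` and `is_initialized()` are thin wrappers around the `_is_available` / `_is_initialized` helpers newly imported from `mindspore.communication._comm_helper` earlier in this diff. A hedged usage sketch combining them, based on the docstring examples above; the guard pattern itself is an illustration and requires a distributed Ascend job launched with msrun:

    import mindspore as ms
    from mindspore.mint.distributed import init_process_group, is_available, is_initialized

    ms.set_device(device_target="Ascend")
    # Only initialize the default process group once per process.
    if is_available() and not is_initialized():
        init_process_group()        # backend defaults to "hccl"
    print(is_initialized())         # True after initialization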
@@ -167,7 +250,7 @@ def init_process_group(backend="hccl",
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  >>> import mindspore as ms
@@ -231,7 +314,7 @@ def destroy_process_group(group=None):
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  >>> import mindspore as ms
@@ -281,7 +364,7 @@ def get_rank(group=None):
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  >>> import mindspore as ms
@@ -291,6 +374,8 @@ def get_rank(group=None):
  >>> rank_id = get_rank()
  >>> print(rank_id)
  >>> # the result is the rank_id in world_group
+ #rank 0: 0
+ #rank 1: 1
  """
  if group is None:
  group = GlobalComm.WORLD_COMM_GROUP
@@ -336,9 +421,11 @@ def get_world_size(group=None):
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

+ This example should be run with 8 devices.
+
  >>> import mindspore as ms
  >>> from mindspore.mint.distributed import init_process_group, get_world_size
  >>> ms.set_device(device_target="Ascend")
@@ -402,7 +489,7 @@ def new_group(ranks=None,
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  >>> import mindspore as ms
@@ -460,7 +547,7 @@ def get_backend(group=None):
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  >>> import mindspore as ms
@@ -518,7 +605,7 @@ def get_global_rank(group, group_rank):
  without any third-party or configuration file dependencies.

  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  This example should be run with 8 devices.
@@ -583,7 +670,7 @@ def get_group_rank(group, global_rank):
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  This example should be run with 8 devices.
@@ -644,7 +731,7 @@ def get_process_group_ranks(group=None):
  without any third-party or configuration file dependencies.

  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  This example should be run with 4 devices.
@@ -726,7 +813,7 @@ def all_reduce(tensor, op=ReduceOp.SUM, group=None, async_op=False):
  RuntimeError: If device target is invalid, or backend is invalid, or distributed initialization fails.

  Supported Platforms:
- ``Ascend``
+ ``Ascend`` ``CPU``

  Examples:
  .. note::
@@ -735,7 +822,7 @@ def all_reduce(tensor, op=ReduceOp.SUM, group=None, async_op=False):
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  This example should be run with 2 devices.
@@ -815,7 +902,7 @@ def all_gather_into_tensor(output_tensor, input_tensor, group=None, async_op=Fal
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  This example should be run with 2 devices.
@@ -855,7 +942,7 @@ def all_gather_into_tensor(output_tensor, input_tensor, group=None, async_op=Fal
  raise TypeError(
  f"The argument 'async_op' must be a bool, but got {type(async_op)}."
  )
- group_size = get_group_size(group)
+ group_size = get_cache_group_size(group)
  result = dist_comm_all_gather_into_tensor_op(
  output_tensor, input_tensor, group_size, group
  )
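From this point on, the call sites swap `get_group_size(group)` and `get_rank(group)` for the cached `get_cache_group_size(group)` / `get_cache_group_rank(group)` lookups. For context, a hedged sketch of calling `all_gather_into_tensor`, assuming a 2-device msrun job and the usual all-gather contract in which the output's first dimension equals the input's first dimension times the group size:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor
    from mindspore.mint.distributed import (
        init_process_group, get_world_size, all_gather_into_tensor,
    )

    ms.set_device(device_target="Ascend")
    init_process_group()
    world_size = get_world_size()

    input_tensor = Tensor(np.ones([2, 8]).astype(np.float32))
    output_tensor = Tensor(np.zeros([2 * world_size, 8]).astype(np.float32))
    # Every rank receives the rows contributed by all ranks, concatenated along dim 0.
    all_gather_into_tensor(output_tensor, input_tensor)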
@@ -902,7 +989,7 @@ def reduce_scatter_tensor(output, input, op=ReduceOp.SUM, group=None, async_op=F
902
989
  For Ascend devices, it is recommended to use the msrun startup method
903
990
  without any third-party or configuration file dependencies.
904
991
  Please see the `msrun start up
905
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
992
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
906
993
  for more details.
907
994
 
908
995
  This example should be run with 2 devices.
@@ -947,7 +1034,7 @@ def reduce_scatter_tensor(output, input, op=ReduceOp.SUM, group=None, async_op=F
947
1034
  raise TypeError(
948
1035
  f"The argument 'async_op' must be a bool, but got {type(async_op)}."
949
1036
  )
950
- rank_size = get_group_size(group)
1037
+ rank_size = get_cache_group_size(group)
951
1038
  result = dist_comm_reduce_scatter_tensor_op(output, input, rank_size, op, group)
952
1039
  _, handle = _deal_comm_outputs(result, async_op)
953
1040
  return handle
@@ -973,8 +1060,8 @@ def reduce(tensor, dst, op=ReduceOp.SUM, group=None, async_op=False):
973
1060
  async_op (bool, optional): Whether this operator should be an async operator. Default: ``False`` .
974
1061
 
975
1062
  Returns:
976
- CommHandle, CommHandle is an async work handle, if `async_op` is set to True.
977
- CommHandle will be None, when `async_op` is False.
1063
+ CommHandle, CommHandle is an async work handle, if `async_op` is set to ``True``.
1064
+ CommHandle will be None, when `async_op` is ``False``.
978
1065
 
979
1066
  Raises:
980
1067
  TypeError: If the type of `tensor` is not Tensor, any of `op` and `group` is not a str.
@@ -992,7 +1079,7 @@ def reduce(tensor, dst, op=ReduceOp.SUM, group=None, async_op=False):
992
1079
  without any third-party or configuration file dependencies.
993
1080
 
994
1081
  Please see the `msrun start up
995
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
1082
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
996
1083
  for more details.
997
1084
 
998
1085
  This example should be run with 4 devices.
@@ -1150,7 +1237,7 @@ def batch_isend_irecv(p2p_op_list):
1150
1237
  For Ascend devices, it is recommended to use the msrun startup method
1151
1238
  without any third-party or configuration file dependencies.
1152
1239
  Please see the `msrun start up
1153
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
1240
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
1154
1241
  for more details.
1155
1242
 
1156
1243
  This example should be run with 2 devices.
@@ -1263,7 +1350,7 @@ def scatter_tensor(output_tensor, input_tensor, src=0, group=None, async_op=Fals
1263
1350
  For Ascend devices, it is recommended to use the msrun startup method
1264
1351
  without any third-party or configuration file dependencies.
1265
1352
  Please see the `msrun start up
1266
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
1353
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
1267
1354
  for more details.
1268
1355
 
1269
1356
  This example should be run with 2 devices.
@@ -1304,8 +1391,8 @@ def scatter_tensor(output_tensor, input_tensor, src=0, group=None, async_op=Fals
1304
1391
  f"The argument 'async_op' must be a bool, but got {type(async_op)}."
1305
1392
  )
1306
1393
  src = get_group_rank_from_world_rank(src, group)
1307
- rank_size = get_group_size(group)
1308
- rank_id = get_rank(group)
1394
+ rank_size = get_cache_group_size(group)
1395
+ rank_id = get_cache_group_rank(group)
1309
1396
  output = dist_comm_scatter_tensor_op(
1310
1397
  output_tensor, input_tensor, rank_size, src, rank_id, group
1311
1398
  )
@@ -1352,7 +1439,7 @@ def gather_into_tensor(output_tensor, input_tensor, dst=0, group=None, async_op=
1352
1439
  For Ascend devices, it is recommended to use the msrun startup method
1353
1440
  without any third-party or configuration file dependencies.
1354
1441
  Please see the `msrun start up
1355
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
1442
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
1356
1443
  for more details.
1357
1444
 
1358
1445
  This example should be run with 2 devices.
@@ -1396,9 +1483,9 @@ def gather_into_tensor(output_tensor, input_tensor, dst=0, group=None, async_op=
1396
1483
  raise TypeError(
1397
1484
  f"The argument 'async_op' must be a bool, but got {type(async_op)}."
1398
1485
  )
1399
- group_size = get_group_size(group)
1486
+ group_size = get_cache_group_size(group)
1400
1487
  dst = get_group_rank_from_world_rank(dst, group)
1401
- rank_id = get_rank(group)
1488
+ rank_id = get_cache_group_rank(group)
1402
1489
  output = dist_comm_gather_into_tensor_op(
1403
1490
  output_tensor, input_tensor, group_size, dst, rank_id, group
1404
1491
  )
@@ -1442,7 +1529,7 @@ def broadcast(tensor, src, group=None, async_op=False):
1442
1529
  For Ascend devices, it is recommended to use the msrun startup method
1443
1530
  without any third-party or configuration file dependencies.
1444
1531
  Please see the `msrun start up
1445
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
1532
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
1446
1533
  for more details.
1447
1534
 
1448
1535
  This example should be run with 2 devices.
@@ -1474,8 +1561,9 @@ def broadcast(tensor, src, group=None, async_op=False):
1474
1561
  raise TypeError(
1475
1562
  f"The argument 'async_op' must be a bool, but got {type(async_op)}."
1476
1563
  )
1477
- rank = get_group_rank_from_world_rank(src, group)
1478
- output = dist_comm_broadcast_op(tensor, rank, group)
1564
+ src_rank = get_group_rank_from_world_rank(src, group)
1565
+ rank_id = get_cache_group_rank(group)
1566
+ output = dist_comm_broadcast_op(tensor, src_rank, rank_id, group)
1479
1567
  _, handle = _deal_comm_outputs(output, async_op)
1480
1568
  return handle
1481
1569
 
@@ -1501,7 +1589,7 @@ def barrier(group=None, async_op=False, device_ids=None):
1501
1589
  RuntimeError: If backend is invalid, or distributed initialization fails.
1502
1590
 
1503
1591
  Supported Platforms:
1504
- ``Ascend``
1592
+ ``Ascend`` ``CPU``
1505
1593
 
1506
1594
  Examples:
1507
1595
  .. note::
@@ -1510,7 +1598,7 @@ def barrier(group=None, async_op=False, device_ids=None):
1510
1598
  For Ascend devices, it is recommended to use the msrun startup method
1511
1599
  without any third-party or configuration file dependencies.
1512
1600
  Please see the `msrun start up
1513
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
1601
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
1514
1602
  for more details.
1515
1603
 
1516
1604
  This example should be run with 2 devices.
@@ -1520,6 +1608,8 @@ def barrier(group=None, async_op=False, device_ids=None):
1520
1608
  >>> # Launch 2 processes.
1521
1609
  >>> init_process_group()
1522
1610
  >>> barrier()
1611
+ >>> print("barrier finish!")
1612
+ barrier finish!
1523
1613
  """
1524
1614
  if group is None:
1525
1615
  group = GlobalComm.WORLD_COMM_GROUP
@@ -1557,7 +1647,7 @@ def send(tensor, dst=0, group=None, tag=0):
1557
1647
  ValueError: If the `dst` process rank id is same as the current process.
1558
1648
 
1559
1649
  Supported Platforms:
1560
- ``Ascend``
1650
+ ``Ascend`` ``CPU``
1561
1651
 
1562
1652
  Examples:
1563
1653
  .. note::
@@ -1566,7 +1656,7 @@ def send(tensor, dst=0, group=None, tag=0):
1566
1656
  For Ascend devices, it is recommended to use the msrun startup method
1567
1657
  without any third-party or configuration file dependencies.
1568
1658
  Please see the `msrun start up
1569
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
1659
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
1570
1660
  for more details.
1571
1661
 
1572
1662
  This example should be run with 2 devices.
@@ -1576,17 +1666,16 @@ def send(tensor, dst=0, group=None, tag=0):
1576
1666
  >>> from mindspore import Tensor
1577
1667
  >>> import numpy as np
1578
1668
  >>>
1579
- # Launch 2 processes.
1669
+ # Launch 2 processes, Process 0 sends the array to Process 1.
1580
1670
  >>> init_process_group()
1581
1671
  >>> this_rank = get_rank()
1582
- # Process 0 send the array to Process 1
1583
1672
  >>> if this_rank == 0:
1584
- >>> input_ = Tensor(np.ones([2, 8]).astype(np.float32))
1585
- >>> send(input_, 1)
1673
+ ... input_ = Tensor(np.ones([2, 8]).astype(np.float32))
1674
+ ... send(input_, 1)
1586
1675
  >>> if this_rank == 1:
1587
- >>> x = Tensor(np.zeros([2, 8]).astype(np.float32))
1588
- >>> out = recv(x, src=0)
1589
- >>> print(x)
1676
+ ... x = Tensor(np.zeros([2, 8]).astype(np.float32))
1677
+ ... out = recv(x, src=0)
1678
+ ... print(x)
1590
1679
  rank 1:
1591
1680
  [[1. 1. 1. 1. 1. 1. 1. 1.]
1592
1681
  [1. 1. 1. 1. 1. 1. 1. 1.]]
@@ -1602,7 +1691,7 @@ def send(tensor, dst=0, group=None, tag=0):
1602
1691
  "The argument 'group' must be type of string, "
1603
1692
  "but got 'group' type : {}.".format(type(group))
1604
1693
  )
1605
- if get_rank() == dst:
1694
+ if get_cache_group_rank() == dst:
1606
1695
  raise ValueError(
1607
1696
  "Invalid destination rank: destination rank should not be the same as "
1608
1697
  "the rank of the current process."
@@ -1626,7 +1715,7 @@ def recv(tensor, src=0, group=None, tag=0):
1626
1715
  group (str, optional): The communication group to work on. If ``None``, which means ``"hccl_world_group"`` in
1627
1716
  Ascend. Default: ``None``.
1628
1717
  tag (int, optional): A required integer identifying the send/recv message tag. The message will
1629
- be received by the Send op with the same "tag". Default: 0. It is a reserved parameter currently.
1718
+ be received by the Send op with the same "tag". Default: ``0``. It is a reserved parameter currently.
1630
1719
 
1631
1720
  Returns:
1632
1721
  int, If success, return ``0``.
@@ -1636,7 +1725,7 @@ def recv(tensor, src=0, group=None, tag=0):
1636
1725
  ValueError: If the rank ID of the process is greater than the rank size of the communication group.
1637
1726
 
1638
1727
  Supported Platforms:
1639
- ``Ascend``
1728
+ ``Ascend`` ``CPU``
1640
1729
 
1641
1730
  Examples:
1642
1731
  .. note::
@@ -1645,7 +1734,7 @@ def recv(tensor, src=0, group=None, tag=0):
1645
1734
  For Ascend devices, it is recommended to use the msrun startup method
1646
1735
  without any third-party or configuration file dependencies.
1647
1736
  Please see the `msrun start up
1648
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
1737
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
1649
1738
  for more details.
1650
1739
 
1651
1740
  This example should be run with 2 devices.
@@ -1655,17 +1744,16 @@ def recv(tensor, src=0, group=None, tag=0):
1655
1744
  >>> from mindspore import Tensor
1656
1745
  >>> import numpy as np
1657
1746
  >>>
1658
- # Launch 2 processes.
1747
+ # Launch 2 processes, Process 0 sends the array to Process 1.
1659
1748
  >>> init_process_group()
1660
1749
  >>> this_rank = get_rank()
1661
- # Process 0 send the array to Process 1
1662
1750
  >>> if this_rank == 0:
1663
- >>> input_ = Tensor(np.ones([2, 8]).astype(np.float32))
1664
- >>> send(input_, 1)
1751
+ ... input_ = Tensor(np.ones([2, 8]).astype(np.float32))
1752
+ ... send(input_, 1)
1665
1753
  >>> if this_rank == 1:
1666
- >>> x = Tensor(np.zeros([2, 8]).astype(np.float32))
1667
- >>> out = recv(x, src=0)
1668
- >>> print(x)
1754
+ ... x = Tensor(np.zeros([2, 8]).astype(np.float32))
1755
+ ... out = recv(x, src=0)
1756
+ ... print(x)
1669
1757
  rank 1:
1670
1758
  [[1. 1. 1. 1. 1. 1. 1. 1.]
1671
1759
  [1. 1. 1. 1. 1. 1. 1. 1.]]
@@ -1720,7 +1808,7 @@ def isend(tensor, dst=0, group=None, tag=0):
1720
1808
  For Ascend devices, it is recommended to use the msrun startup method
1721
1809
  without any third-party or configuration file dependencies.
1722
1810
  Please see the `msrun start up
1723
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
1811
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
1724
1812
  for more details.
1725
1813
 
1726
1814
  This example should be run with 2 devices.
@@ -1730,19 +1818,18 @@ def isend(tensor, dst=0, group=None, tag=0):
1730
1818
  >>> from mindspore import Tensor
1731
1819
  >>> import numpy as np
1732
1820
  >>>
1733
- # Launch 2 processes.
1821
+ # Launch 2 processes, Process 0 sends the array to Process 1.
1734
1822
  >>> init_process_group()
1735
1823
  >>> this_rank = get_rank()
1736
- # Process 0 send the array to Process 1
1737
1824
  >>> if this_rank == 0:
1738
- >>> input_ = Tensor(np.ones([2, 8]).astype(np.float32))
1739
- >>> handle = isend(input_, 1)
1740
- >>> handle.wait()
1825
+ ... input_ = Tensor(np.ones([2, 8]).astype(np.float32))
1826
+ ... handle = isend(input_, 1)
1827
+ ... handle.wait()
1741
1828
  >>> if this_rank == 1:
1742
- >>> x = Tensor(np.zeros([2, 8]).astype(np.float32))
1743
- >>> handle = irecv(x, src=0)
1744
- >>> handle.wait()
1745
- >>> print(x)
1829
+ ... x = Tensor(np.zeros([2, 8]).astype(np.float32))
1830
+ ... handle = irecv(x, src=0)
1831
+ ... handle.wait()
1832
+ ... print(x)
1746
1833
  rank 1:
1747
1834
  [[1. 1. 1. 1. 1. 1. 1. 1.]
1748
1835
  [1. 1. 1. 1. 1. 1. 1. 1.]]
@@ -1758,7 +1845,7 @@ def isend(tensor, dst=0, group=None, tag=0):
1758
1845
  "The argument 'group' must be type of string, "
1759
1846
  "but got 'group' type : {}.".format(type(group))
1760
1847
  )
1761
- if get_rank() == dst:
1848
+ if get_cache_group_rank() == dst:
1762
1849
  raise ValueError(
1763
1850
  "Invalid destination rank: destination rank should not be the same as "
1764
1851
  "the rank of the current process."
@@ -1802,7 +1889,7 @@ def irecv(tensor, src=0, group=None, tag=0):
1802
1889
  For Ascend devices, it is recommended to use the msrun startup method
1803
1890
  without any third-party or configuration file dependencies.
1804
1891
  Please see the `msrun start up
1805
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
1892
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
1806
1893
  for more details.
1807
1894
 
1808
1895
  This example should be run with 2 devices.
@@ -1812,19 +1899,18 @@ def irecv(tensor, src=0, group=None, tag=0):
1812
1899
  >>> from mindspore import Tensor
1813
1900
  >>> import numpy as np
1814
1901
  >>>
1815
- # Launch 2 processes.
1902
+ # Launch 2 processes, Process 0 sends the array to Process 1.
1816
1903
  >>> init_process_group()
1817
1904
  >>> this_rank = get_rank()
1818
- # Process 0 send the array to Process 1
1819
1905
  >>> if this_rank == 0:
1820
- >>> input_ = Tensor(np.ones([2, 8]).astype(np.float32))
1821
- >>> handle = isend(input_, 1)
1822
- >>> handle.wait()
1906
+ ... input_ = Tensor(np.ones([2, 8]).astype(np.float32))
1907
+ ... handle = isend(input_, 1)
1908
+ ... handle.wait()
1823
1909
  >>> if this_rank == 1:
1824
- >>> x = Tensor(np.zeros([2, 8]).astype(np.float32))
1825
- >>> handle = irecv(x, src=0)
1826
- >>> handle.wait()
1827
- >>> print(x)
1910
+ ... x = Tensor(np.zeros([2, 8]).astype(np.float32))
1911
+ ... handle = irecv(x, src=0)
1912
+ ... handle.wait()
1913
+ ... print(x)
1828
1914
  rank 1:
1829
1915
  [[1. 1. 1. 1. 1. 1. 1. 1.]
1830
1916
  [1. 1. 1. 1. 1. 1. 1. 1.]]
@@ -1880,7 +1966,7 @@ def all_to_all(output_tensor_list, input_tensor_list, group=None, async_op=False
1880
1966
  For Ascend devices, it is recommended to use the msrun startup method
1881
1967
  without any third-party or configuration file dependencies.
1882
1968
  Please see the `msrun start up
1883
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
1969
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
1884
1970
  for more details.
1885
1971
 
1886
1972
  This example should be run with 2 devices.
@@ -1893,11 +1979,11 @@ def all_to_all(output_tensor_list, input_tensor_list, group=None, async_op=False
1893
1979
  >>> init_process_group()
1894
1980
  >>> this_rank = get_rank()
1895
1981
  >>> if this_rank == 0:
1896
- >>> send_tensor_list = [Tensor(1.), Tensor([[2, 3], [4, 5.]])]
1897
- >>> recv_tensor_list = [Tensor((0), dtype=ms.float32), Tensor([0, 0.])]
1982
+ ... send_tensor_list = [Tensor(1.), Tensor([[2, 3], [4, 5.]])]
1983
+ ... recv_tensor_list = [Tensor((0), dtype=ms.float32), Tensor([0, 0.])]
1898
1984
  >>> if this_rank == 1:
1899
- >>> send_tensor_list = [Tensor([2, 2.]), Tensor([4, 5, 6, 7.])]
1900
- >>> recv_tensor_list = [Tensor([[0, 0.],[0, 0]]), Tensor([0, 0, 0, 0.])]
1985
+ ... send_tensor_list = [Tensor([2, 2.]), Tensor([4, 5, 6, 7.])]
1986
+ ... recv_tensor_list = [Tensor([[0, 0.],[0, 0]]), Tensor([0, 0, 0, 0.])]
1901
1987
  >>> handle = all_to_all(recv_tensor_list, send_tensor_list)
1902
1988
  >>> print(recv_tensor_list)
1903
1989
  rank 0:
@@ -1939,9 +2025,8 @@ def all_to_all(output_tensor_list, input_tensor_list, group=None, async_op=False
  recv_shape_list.append(tensor.shape)

  send_flatten_tensor = cat(send_flatten_tensor)
- send_flatten_tensor = _contiguous(send_flatten_tensor)

- rank_size = get_group_size(group)
+ rank_size = get_cache_group_size(group)
  output = dist_comm_all_to_all_v_op(
  output_tensor_list,
  send_flatten_tensor,
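The hunk above concatenates the flattened send tensors before the variable-sized exchange. A hypothetical helper (the names are illustrative, not the private functions used here) showing the bookkeeping involved: record each tensor's element count and shape so the flat receive buffer can later be split and reshaped.

from mindspore import Tensor
from mindspore.ops import cat

def flatten_for_all_to_all_v(tensor_list):
    """Flatten and concatenate tensors, keeping numels/shapes for the reverse split."""
    numels = [t.size for t in tensor_list]           # elements contributed per peer
    shapes = [t.shape for t in tensor_list]          # needed to reshape after receiving
    flat = cat([t.reshape(-1) for t in tensor_list])
    return flat, numels, shapes

flat, numels, shapes = flatten_for_all_to_all_v(
    [Tensor([[1., 2.], [3., 4.]]), Tensor([5., 6., 7.])])
print(flat.shape, numels, shapes)    # (7,), [4, 3], [(2, 2), (3,)]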
@@ -1958,7 +2043,7 @@ def _get_all_to_all_single_numel_list(tensor, output, output_split_sizes,
  input_split_sizes, group):
  """get numel list for all_to_all_single."""
  if _is_split_sizes_empty(input_split_sizes):
- _world_size = get_group_size(group)
+ _world_size = get_cache_group_size(group)
  if tensor.shape[0] % _world_size != 0:
  raise ValueError(
  "input shape at dim 0 must be divided by world_size, "
@@ -1967,7 +2052,7 @@ def _get_all_to_all_single_numel_list(tensor, output, output_split_sizes,
  _split_size = tensor.shape[0] // _world_size
  input_split_sizes = (_split_size,) * _world_size
  if _is_split_sizes_empty(output_split_sizes):
- _world_size = get_group_size(group)
+ _world_size = get_cache_group_size(group)
  shape_dim_0 = output.shape[0]

  if shape_dim_0 % _world_size != 0:
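When no split sizes are passed, the logic above falls back to an even split: the world size must divide dimension 0 exactly, and every rank then gets an equal slice. A plain-Python illustration of that rule (a hypothetical helper, not part of the module):

def default_split_sizes(dim0, world_size):
    """Even per-rank split of dimension 0, mirroring the divisibility check above."""
    if dim0 % world_size != 0:
        raise ValueError(
            "input shape at dim 0 must be divided by world_size, "
            f"got dim0={dim0} and world_size={world_size}")
    return (dim0 // world_size,) * world_size

print(default_split_sizes(6, 2))   # (3, 3)
print(default_split_sizes(9, 3))   # (3, 3, 3)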
@@ -2031,7 +2116,7 @@ def all_to_all_single(output,
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  This example should be run with 2 devices.
@@ -2046,15 +2131,15 @@ def all_to_all_single(output,
  >>> init_process_group()
  >>> this_rank = get_rank()
  >>> if this_rank == 0:
- >>> output = Tensor(np.zeros([3, 3]).astype(np.float32))
- >>> tensor = Tensor([[0, 1, 2.], [3, 4, 5], [6, 7, 8]])
- >>> result = all_to_all_single(output, tensor, [2, 1], [2, 1])
- >>> print(output)
+ ... output = Tensor(np.zeros([3, 3]).astype(np.float32))
+ ... tensor = Tensor([[0, 1, 2.], [3, 4, 5], [6, 7, 8]])
+ ... result = all_to_all_single(output, tensor, [2, 1], [2, 1])
+ ... print(output)
  >>> if this_rank == 1:
- >>> output = Tensor(np.zeros([2, 3]).astype(np.float32))
- >>> tensor = Tensor([[9, 10., 11], [12, 13, 14]])
- >>> result = all_to_all_single(output, tensor, [1, 1], [1, 1])
- >>> print(output)
+ ... output = Tensor(np.zeros([2, 3]).astype(np.float32))
+ ... tensor = Tensor([[9, 10., 11], [12, 13, 14]])
+ ... result = all_to_all_single(output, tensor, [1, 1], [1, 1])
+ ... print(output)
  rank 0:
  [[ 0. 1. 2.]
  [ 3. 4. 5.]
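The split sizes in the example above have to be mutually consistent: on every rank the input split must cover the whole input, the output split must cover the whole output buffer, and rank r's i-th output slice must match rank i's r-th input slice. A small plain-Python check of the numbers used in the doctest:

def check_local_splits(input_rows, output_rows, input_split, output_split):
    """Per-rank sanity check for all_to_all_single split sizes."""
    assert sum(input_split) == input_rows, "input_split_sizes must cover the input"
    assert sum(output_split) == output_rows, "output_split_sizes must cover the output buffer"

# rank 0: a 3-row input split as [2, 1], a 3-row output received as [2, 1]
check_local_splits(3, 3, [2, 1], [2, 1])
# rank 1: a 2-row input split as [1, 1], a 2-row output received as [1, 1]
check_local_splits(2, 2, [1, 1], [1, 1])
# cross-rank consistency: the 1 row rank 0 expects from rank 1 is the 1 row rank 1 sends to rank 0
rank0_out_split, rank1_in_split = [2, 1], [1, 1]
assert rank0_out_split[1] == rank1_in_split[0]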
@@ -2082,7 +2167,7 @@ def all_to_all_single(output,
  send_numel_list, recv_numel_list, _ = \
  _get_all_to_all_single_numel_list(input, output, output_split_sizes, input_split_sizes, group)
  _input = input.reshape(-1)
- rank_size = get_group_size(group)
+ rank_size = get_cache_group_size(group)
  result = dist_comm_all_to_all_v_single_op(
  output,
  _input,
@@ -2147,7 +2232,7 @@ def all_gather(tensor_list, tensor, group=None, async_op=False):
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  This example should be run with 2 devices.
@@ -2187,7 +2272,7 @@ def all_gather(tensor_list, tensor, group=None, async_op=False):
  raise TypeError(
  f"The argument 'async_op' must be a bool, but got {type(async_op)}."
  )
- group_size = get_group_size(group)
+ group_size = get_cache_group_size(group)
  _check_tensor_list(tensor_list, tensor, group_size)
  result = dist_comm_all_gather_op(tensor_list, tensor, group_size, group)
  _, handle = _deal_comm_outputs(result, async_op)
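A minimal all_gather usage sketch matching the check above: tensor_list must be preallocated with one tensor per group member, each shaped like the local contribution. The import path is assumed to be mindspore.mint.distributed; launch on 2 devices with msrun.

import numpy as np
from mindspore import Tensor
from mindspore.mint.distributed import init_process_group, get_rank, get_world_size, all_gather

init_process_group()
rank = get_rank()
world = get_world_size()
local = Tensor(np.full([2, 2], rank, dtype=np.float32))               # this rank's contribution
gathered = [Tensor(np.zeros([2, 2], dtype=np.float32)) for _ in range(world)]
all_gather(gathered, local)          # gathered[i] ends up holding rank i's block
print(gathered)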
@@ -2232,7 +2317,7 @@ def reduce_scatter(output, input_list, op=ReduceOp.SUM, group=None, async_op=Fal
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  This example should be run with 2 devices.
@@ -2275,7 +2360,7 @@ def reduce_scatter(output, input_list, op=ReduceOp.SUM, group=None, async_op=Fal
  raise TypeError(
  "For reduce_scatter, the input op value must be one of sum, prod, min, max"
  )
- rank_size = get_group_size(group)
+ rank_size = get_cache_group_size(group)
  _check_tensor_list(input_list, output, rank_size)
  result = dist_comm_reduce_scatter_op(output, input_list, rank_size, op, group)
  _, handle = _deal_comm_outputs(result, async_op)
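A minimal reduce_scatter usage sketch: input_list holds one chunk per rank, the chunks at index i are reduced (summed by default) across ranks, and rank i receives that reduced chunk in output. Import path assumed to be mindspore.mint.distributed; 2-device launch.

import numpy as np
from mindspore import Tensor
from mindspore.mint.distributed import init_process_group, get_rank, get_world_size, reduce_scatter

init_process_group()
rank = get_rank()
world = get_world_size()
# rank r contributes the value r + 1 in every chunk
input_list = [Tensor(np.full([4], rank + 1, dtype=np.float32)) for _ in range(world)]
output = Tensor(np.zeros([4], dtype=np.float32))
reduce_scatter(output, input_list)   # default op=ReduceOp.SUM
print(output)                        # with 2 ranks: [3. 3. 3. 3.] on every rank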
@@ -2322,7 +2407,7 @@ def scatter(tensor, scatter_list, src=0, group=None, async_op=False):
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  This example should be run with 2 devices.
@@ -2362,8 +2447,8 @@ def scatter(tensor, scatter_list, src=0, group=None, async_op=False):
  f"The argument 'async_op' must be a bool, but got {type(async_op)}."
  )
  src = get_group_rank_from_world_rank(src, group)
- rank_size = get_group_size(group)
- rank_id = get_rank(group)
+ rank_size = get_cache_group_size(group)
+ rank_id = get_cache_group_rank(group)
  if src == rank_id:
  _check_tensor_list(scatter_list, tensor, rank_size)
  output = dist_comm_scatter_op(tensor, scatter_list, rank_size, src, rank_id, group)
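A minimal scatter usage sketch: src is given as a world rank (it is converted to a group rank internally, as shown above), and scatter_list is only validated on the source rank. Import path assumed to be mindspore.mint.distributed; 2-device launch.

import numpy as np
from mindspore import Tensor
from mindspore.mint.distributed import init_process_group, get_rank, get_world_size, scatter

init_process_group()
rank = get_rank()
world = get_world_size()
out = Tensor(np.zeros([2], dtype=np.float32))
# built on every rank here, although only the source rank's list is checked above
scatter_list = [Tensor(np.full([2], i, dtype=np.float32)) for i in range(world)]
scatter(out, scatter_list, src=0)    # src is a world rank
print(out)                           # rank i should receive [i, i]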
@@ -2412,7 +2497,7 @@ def gather(tensor, gather_list, dst=0, group=None, async_op=False):
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  This example should be run with 2 devices.
@@ -2462,9 +2547,9 @@ def gather(tensor, gather_list, dst=0, group=None, async_op=False):
  )
  if not isinstance(async_op, bool):
  raise TypeError(f"The argument 'async_op' must be a bool, but got {type(async_op)}.")
- group_size = get_group_size(group)
+ group_size = get_cache_group_size(group)
  dst = get_group_rank_from_world_rank(dst, group)
- rank_id = get_rank(group)
+ rank_id = get_cache_group_rank(group)
  if dst == rank_id:
  _check_tensor_list(gather_list, tensor, group_size)
  output = dist_comm_gather_op(tensor, gather_list, group_size, dst, rank_id, group)
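A minimal gather usage sketch, mirroring scatter in the other direction: dst is a world rank, and gather_list is only required (and length-checked) on the destination rank. Import path assumed to be mindspore.mint.distributed; 2-device launch.

import numpy as np
from mindspore import Tensor
from mindspore.mint.distributed import init_process_group, get_rank, get_world_size, gather

init_process_group()
rank = get_rank()
world = get_world_size()
local = Tensor(np.full([2], rank, dtype=np.float32))
gather_list = [Tensor(np.zeros([2], dtype=np.float32)) for _ in range(world)]
gather(local, gather_list, dst=0)    # dst is a world rank
if rank == 0:
    print(gather_list)               # gather_list now holds rank 0's and rank 1's tensors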
@@ -2506,7 +2591,7 @@ def scatter_object_list(scatter_object_output_list, scatter_object_input_list, s
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  This example should be run with 2 devices.
@@ -2533,8 +2618,8 @@ def scatter_object_list(scatter_object_output_list, scatter_object_input_list, s
  raise TypeError(f"The scatter_object_output_list can not be empty.")
  if not isinstance(src, int):
  raise TypeError("For scatter_object_list, the src must be int")
- group_size = get_group_size(group)
- rank_id = get_rank()
+ group_size = get_cache_group_size(group)
+ rank_id = get_cache_group_rank()
  tensor_sizes = []
  tensor_list = []
  if rank_id == src:
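A minimal scatter_object_list usage sketch, under the assumption that (consistent with the non-empty check above) the output list is a placeholder whose first slot is filled in place with the object scattered to this rank, while the input list carries one picklable Python object per rank. Import path assumed to be mindspore.mint.distributed; 2-device launch.

from mindspore.mint.distributed import init_process_group, get_rank, scatter_object_list

init_process_group()
rank = get_rank()
scatter_in = [{"rank": 0, "msg": "hello"}, {"rank": 1, "msg": "world"}]  # one object per rank
scatter_out = [None]                         # non-empty placeholder, filled in place (assumed)
scatter_object_list(scatter_out, scatter_in, src=0)
print(scatter_out[0])                        # rank 0 sees the first object, rank 1 the second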
@@ -2598,7 +2683,7 @@ def gather_object(obj, object_gather_list=None, dst=0, group=None):
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  This example should be run with 2 devices.
@@ -2622,8 +2707,8 @@ def gather_object(obj, object_gather_list=None, dst=0, group=None):
  )
  if not isinstance(dst, int):
  raise TypeError("For gather_object, the dst must be int")
- group_size = get_group_size(group)
- rank_id = get_rank()
+ group_size = get_cache_group_size(group)
+ rank_id = get_cache_group_rank()
  if rank_id == dst:
  if not isinstance(object_gather_list, list) or len(object_gather_list) != group_size:
  raise TypeError(
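A minimal gather_object usage sketch matching the checks above: object_gather_list is only required on the destination rank, where it must be a list with exactly one slot per group member. Import path assumed to be mindspore.mint.distributed; 2-device launch.

from mindspore.mint.distributed import init_process_group, get_rank, get_world_size, gather_object

init_process_group()
rank = get_rank()
world = get_world_size()
obj = {"rank": rank}
gather_list = [None] * world if rank == 0 else None   # only the dst rank needs the list
gather_object(obj, gather_list, dst=0)
if rank == 0:
    print(gather_list)       # expected: [{'rank': 0}, {'rank': 1}]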
@@ -2677,7 +2762,7 @@ def broadcast_object_list(object_list, src=0, group=None, device=None):
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  This example should be run with 2 devices.
@@ -2687,7 +2772,7 @@ def broadcast_object_list(object_list, src=0, group=None, device=None):
  >>> rank = get_rank()
  >>> obj = ["test", 12, {1: 2}]
  >>> if rank == 1:
- >>> obj = [None, None, None]
+ ... obj = [None, None, None]
  >>> broadcast_object_list(obj)
  >>> print(obj)
  ['test', 12, {1: 2}]
@@ -2703,7 +2788,7 @@ def broadcast_object_list(object_list, src=0, group=None, device=None):
  raise TypeError("For broadcast_object_list, the src must be int")
  if not isinstance(object_list, list) or not object_list:
  raise TypeError(f"The object_list can not be empty.")
- rank_id = get_rank()
+ rank_id = get_cache_group_rank()
  tensor_sizes = []
  tensor_list = []
  size = 0
@@ -2758,7 +2843,7 @@ def all_gather_object(object_list, obj, group=None):
  For Ascend devices, it is recommended to use the msrun startup method
  without any third-party or configuration file dependencies.
  Please see the `msrun start up
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
  for more details.

  This example should be run with 2 devices.
@@ -2783,7 +2868,7 @@ def all_gather_object(object_list, obj, group=None):
  "For 'all_gather_object', the argument 'group' must be type of string, "
  "but got 'group' type : {}.".format(type(group))
  )
- group_size = get_group_size(group)
+ group_size = get_cache_group_size(group)
  if not isinstance(object_list, list) or len(object_list) != group_size:
  raise TypeError(
  f"The len of argument object_list must be equal to group rank size, but got {len(object_list)}."