mindspore-2.5.0-cp310-cp310-win_amd64.whl → mindspore-2.6.0-cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (493)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +6 -4
  5. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -33
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parse/__init__.py +6 -7
  14. mindspore/_extends/parse/compile_config.py +19 -0
  15. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
  16. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  17. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  18. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  19. mindspore/_extends/parse/parser.py +25 -194
  20. mindspore/_extends/parse/resources.py +1 -5
  21. mindspore/_extends/parse/standard_method.py +109 -75
  22. mindspore/_extends/pijit/__init__.py +2 -2
  23. mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
  24. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  25. mindspore/_extends/utils.py +1 -1
  26. mindspore/amp.py +4 -4
  27. mindspore/atlprov.dll +0 -0
  28. mindspore/avcodec-59.dll +0 -0
  29. mindspore/avdevice-59.dll +0 -0
  30. mindspore/avfilter-8.dll +0 -0
  31. mindspore/avformat-59.dll +0 -0
  32. mindspore/avutil-57.dll +0 -0
  33. mindspore/boost/__init__.py +2 -2
  34. mindspore/boost/base.py +3 -7
  35. mindspore/boost/boost_cell_wrapper.py +2 -2
  36. mindspore/c1.dll +0 -0
  37. mindspore/c1xx.dll +0 -0
  38. mindspore/c2.dll +0 -0
  39. mindspore/common/__init__.py +4 -3
  40. mindspore/common/_grad_function.py +56 -0
  41. mindspore/common/_pijit_context.py +14 -5
  42. mindspore/common/_register_for_tensor.py +1 -1
  43. mindspore/common/_stub_tensor.py +5 -10
  44. mindspore/common/_tensor_cpp_method.py +1 -1
  45. mindspore/common/_tensor_docs.py +2014 -3386
  46. mindspore/common/api.py +386 -355
  47. mindspore/common/auto_dynamic_shape.py +41 -44
  48. mindspore/common/dtype.py +5 -2
  49. mindspore/common/dump.py +7 -5
  50. mindspore/common/file_system.py +3 -0
  51. mindspore/common/generator.py +3 -0
  52. mindspore/common/hook_handle.py +5 -3
  53. mindspore/common/initializer.py +10 -6
  54. mindspore/common/jit_begin_end.py +94 -0
  55. mindspore/common/jit_config.py +6 -1
  56. mindspore/common/jit_context.py +76 -0
  57. mindspore/common/jit_trace.py +378 -0
  58. mindspore/common/lazy_inline.py +2 -2
  59. mindspore/common/mutable.py +5 -4
  60. mindspore/common/parameter.py +106 -39
  61. mindspore/common/seed.py +2 -2
  62. mindspore/common/sparse_tensor.py +23 -17
  63. mindspore/common/tensor.py +332 -714
  64. mindspore/communication/__init__.py +7 -5
  65. mindspore/communication/_comm_helper.py +47 -2
  66. mindspore/communication/comm_func.py +70 -53
  67. mindspore/communication/management.py +83 -17
  68. mindspore/context.py +228 -571
  69. mindspore/dataset/__init__.py +44 -20
  70. mindspore/dataset/audio/__init__.py +2 -8
  71. mindspore/dataset/audio/transforms.py +3 -17
  72. mindspore/dataset/core/config.py +3 -3
  73. mindspore/dataset/engine/cache_client.py +1 -1
  74. mindspore/dataset/engine/datasets.py +102 -120
  75. mindspore/dataset/engine/datasets_audio.py +22 -22
  76. mindspore/dataset/engine/datasets_standard_format.py +43 -24
  77. mindspore/dataset/engine/datasets_text.py +78 -85
  78. mindspore/dataset/engine/datasets_user_defined.py +109 -77
  79. mindspore/dataset/engine/datasets_vision.py +111 -108
  80. mindspore/dataset/engine/iterators.py +5 -3
  81. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  82. mindspore/dataset/engine/samplers.py +279 -57
  83. mindspore/dataset/engine/serializer_deserializer.py +2 -1
  84. mindspore/dataset/engine/validators.py +10 -0
  85. mindspore/dataset/text/__init__.py +7 -6
  86. mindspore/dataset/text/transforms.py +6 -5
  87. mindspore/dataset/text/utils.py +3 -3
  88. mindspore/dataset/transforms/__init__.py +0 -9
  89. mindspore/dataset/transforms/transforms.py +3 -3
  90. mindspore/dataset/utils/browse_dataset.py +1 -1
  91. mindspore/dataset/vision/__init__.py +2 -9
  92. mindspore/dataset/vision/transforms.py +202 -158
  93. mindspore/dataset/vision/utils.py +7 -5
  94. mindspore/device_context/ascend/op_debug.py +60 -1
  95. mindspore/device_context/ascend/op_tuning.py +0 -4
  96. mindspore/device_manager.py +39 -3
  97. mindspore/dnnl.dll +0 -0
  98. mindspore/dpcmi.dll +0 -0
  99. mindspore/experimental/es/embedding_service.py +35 -27
  100. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -2
  101. mindspore/experimental/map_parameter.py +4 -4
  102. mindspore/experimental/optim/adadelta.py +22 -26
  103. mindspore/experimental/optim/adagrad.py +4 -4
  104. mindspore/experimental/optim/adam.py +4 -0
  105. mindspore/experimental/optim/adamax.py +4 -4
  106. mindspore/experimental/optim/adamw.py +4 -0
  107. mindspore/experimental/optim/asgd.py +1 -1
  108. mindspore/experimental/optim/lr_scheduler.py +40 -22
  109. mindspore/experimental/optim/radam.py +5 -5
  110. mindspore/experimental/optim/rprop.py +1 -1
  111. mindspore/experimental/optim/sgd.py +1 -1
  112. mindspore/hal/contiguous_tensors_handle.py +6 -10
  113. mindspore/hal/device.py +55 -81
  114. mindspore/hal/event.py +38 -55
  115. mindspore/hal/memory.py +115 -147
  116. mindspore/hal/stream.py +81 -125
  117. mindspore/include/dataset/constants.h +7 -4
  118. mindspore/include/dataset/execute.h +2 -2
  119. mindspore/jpeg62.dll +0 -0
  120. mindspore/log.py +40 -2
  121. mindspore/mindrecord/__init__.py +20 -7
  122. mindspore/mindspore_backend_common.dll +0 -0
  123. mindspore/mindspore_backend_manager.dll +0 -0
  124. mindspore/mindspore_common.dll +0 -0
  125. mindspore/mindspore_core.dll +0 -0
  126. mindspore/mindspore_dump.dll +0 -0
  127. mindspore/mindspore_frontend.dll +0 -0
  128. mindspore/mindspore_glog.dll +0 -0
  129. mindspore/mindspore_memory_pool.dll +0 -0
  130. mindspore/mindspore_ms_backend.dll +0 -0
  131. mindspore/mindspore_ops.dll +0 -0
  132. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  133. mindspore/mindspore_ops_kernel_common.dll +0 -0
  134. mindspore/mindspore_profiler.dll +0 -0
  135. mindspore/mindspore_pyboost.dll +0 -0
  136. mindspore/mindspore_pynative.dll +0 -0
  137. mindspore/mindspore_res_manager.dll +0 -0
  138. mindspore/mindspore_runtime_pipeline.dll +0 -0
  139. mindspore/mint/__init__.py +133 -702
  140. mindspore/mint/distributed/__init__.py +5 -1
  141. mindspore/mint/distributed/distributed.py +198 -113
  142. mindspore/mint/linalg/__init__.py +2 -0
  143. mindspore/mint/nn/__init__.py +280 -18
  144. mindspore/mint/nn/functional.py +282 -64
  145. mindspore/mint/nn/layer/__init__.py +4 -0
  146. mindspore/mint/nn/layer/_functions.py +7 -3
  147. mindspore/mint/nn/layer/activation.py +120 -13
  148. mindspore/mint/nn/layer/conv.py +234 -28
  149. mindspore/mint/nn/layer/normalization.py +15 -16
  150. mindspore/mint/nn/layer/padding.py +1 -1
  151. mindspore/mint/nn/layer/pooling.py +66 -1
  152. mindspore/mint/optim/__init__.py +2 -1
  153. mindspore/mint/optim/sgd.py +171 -0
  154. mindspore/msobj140.dll +0 -0
  155. mindspore/mspdb140.dll +0 -0
  156. mindspore/mspdbcore.dll +0 -0
  157. mindspore/mspdbst.dll +0 -0
  158. mindspore/mspft140.dll +0 -0
  159. mindspore/msvcdis140.dll +0 -0
  160. mindspore/msvcp140_1.dll +0 -0
  161. mindspore/msvcp140_2.dll +0 -0
  162. mindspore/msvcp140_atomic_wait.dll +0 -0
  163. mindspore/msvcp140_codecvt_ids.dll +0 -0
  164. mindspore/nn/__init__.py +4 -1
  165. mindspore/nn/cell.py +1253 -179
  166. mindspore/nn/layer/activation.py +23 -21
  167. mindspore/nn/layer/basic.py +22 -16
  168. mindspore/nn/layer/container.py +1 -1
  169. mindspore/nn/layer/conv.py +53 -42
  170. mindspore/nn/layer/embedding.py +9 -8
  171. mindspore/nn/layer/normalization.py +48 -42
  172. mindspore/nn/layer/pooling.py +75 -31
  173. mindspore/nn/layer/transformer.py +11 -10
  174. mindspore/nn/learning_rate_schedule.py +4 -2
  175. mindspore/nn/loss/loss.py +27 -19
  176. mindspore/nn/optim/ada_grad.py +6 -5
  177. mindspore/nn/optim/adadelta.py +9 -7
  178. mindspore/nn/optim/adafactor.py +1 -1
  179. mindspore/nn/optim/adam.py +18 -14
  180. mindspore/nn/optim/adamax.py +8 -7
  181. mindspore/nn/optim/adasum.py +5 -5
  182. mindspore/nn/optim/asgd.py +3 -1
  183. mindspore/nn/optim/ftrl.py +11 -9
  184. mindspore/nn/optim/lamb.py +1 -1
  185. mindspore/nn/optim/lazyadam.py +12 -10
  186. mindspore/nn/optim/momentum.py +7 -6
  187. mindspore/nn/optim/optimizer.py +2 -2
  188. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  189. mindspore/nn/optim/rmsprop.py +13 -12
  190. mindspore/nn/optim/rprop.py +9 -7
  191. mindspore/nn/optim/sgd.py +9 -6
  192. mindspore/nn/optim/tft_wrapper.py +5 -2
  193. mindspore/nn/probability/bijector/bijector.py +17 -11
  194. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  195. mindspore/nn/probability/bijector/invert.py +2 -2
  196. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  197. mindspore/nn/probability/bijector/softplus.py +3 -2
  198. mindspore/nn/probability/distribution/beta.py +3 -3
  199. mindspore/nn/probability/distribution/categorical.py +1 -1
  200. mindspore/nn/probability/distribution/cauchy.py +4 -2
  201. mindspore/nn/probability/distribution/exponential.py +6 -7
  202. mindspore/nn/probability/distribution/gamma.py +2 -2
  203. mindspore/nn/probability/distribution/gumbel.py +2 -2
  204. mindspore/nn/probability/distribution/half_normal.py +5 -3
  205. mindspore/nn/probability/distribution/logistic.py +5 -3
  206. mindspore/nn/probability/distribution/poisson.py +1 -1
  207. mindspore/nn/probability/distribution/uniform.py +5 -3
  208. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  209. mindspore/nn/reinforcement/tensor_array.py +1 -1
  210. mindspore/nn/wrap/__init__.py +6 -6
  211. mindspore/nn/wrap/cell_wrapper.py +178 -117
  212. mindspore/nn/wrap/grad_reducer.py +45 -36
  213. mindspore/nn/wrap/loss_scale.py +3 -3
  214. mindspore/numpy/array_creations.py +3 -3
  215. mindspore/numpy/array_ops.py +1 -1
  216. mindspore/numpy/utils.py +1 -2
  217. mindspore/numpy/utils_const.py +1 -2
  218. mindspore/opencv_core452.dll +0 -0
  219. mindspore/opencv_imgcodecs452.dll +0 -0
  220. mindspore/opencv_imgproc452.dll +0 -0
  221. mindspore/ops/__init__.py +3 -2
  222. mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
  223. mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
  224. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  225. mindspore/ops/_register_for_op.py +0 -11
  226. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  227. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
  228. mindspore/ops/_vmap/vmap_array_ops.py +32 -6
  229. mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
  230. mindspore/ops/_vmap/vmap_math_ops.py +4 -7
  231. mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
  232. mindspore/ops/auto_generate/__init__.py +4 -3
  233. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +127 -52
  234. mindspore/ops/auto_generate/gen_extend_func.py +286 -208
  235. mindspore/ops/auto_generate/gen_ops_def.py +2783 -2335
  236. mindspore/ops/auto_generate/gen_ops_prim.py +8992 -2686
  237. mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
  238. mindspore/ops/composite/__init__.py +2 -1
  239. mindspore/ops/composite/base.py +19 -24
  240. mindspore/ops/composite/math_ops.py +6 -16
  241. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  242. mindspore/ops/composite/multitype_ops/_compile_utils.py +4 -5
  243. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  244. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  245. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  246. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  247. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  248. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  249. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  250. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  251. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  252. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  253. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  254. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  255. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  256. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  257. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  258. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  259. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  260. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  261. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  262. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  263. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  264. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  265. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  266. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  267. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  268. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
  269. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  270. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  271. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  272. mindspore/ops/function/__init__.py +28 -2
  273. mindspore/ops/function/_add_attr_func.py +58 -0
  274. mindspore/ops/function/array_func.py +1631 -2347
  275. mindspore/ops/function/clip_func.py +38 -45
  276. mindspore/ops/function/debug_func.py +36 -44
  277. mindspore/ops/function/grad/__init__.py +1 -0
  278. mindspore/ops/function/grad/grad_func.py +104 -71
  279. mindspore/ops/function/image_func.py +1 -1
  280. mindspore/ops/function/linalg_func.py +46 -78
  281. mindspore/ops/function/math_func.py +3024 -3855
  282. mindspore/ops/function/nn_func.py +678 -274
  283. mindspore/ops/function/other_func.py +159 -1
  284. mindspore/ops/function/parameter_func.py +17 -30
  285. mindspore/ops/function/random_func.py +216 -361
  286. mindspore/ops/function/reshard_func.py +4 -70
  287. mindspore/ops/function/sparse_func.py +3 -3
  288. mindspore/ops/function/sparse_unary_func.py +5 -5
  289. mindspore/ops/function/spectral_func.py +25 -58
  290. mindspore/ops/function/vmap_func.py +26 -18
  291. mindspore/ops/functional.py +8 -5
  292. mindspore/ops/functional_overload.py +655 -4
  293. mindspore/ops/op_info_register.py +32 -244
  294. mindspore/ops/operations/__init__.py +21 -14
  295. mindspore/ops/operations/_custom_ops_utils.py +235 -0
  296. mindspore/ops/operations/_grad_ops.py +1 -10
  297. mindspore/ops/operations/_inner_ops.py +5 -76
  298. mindspore/ops/operations/_ms_kernel.py +4 -10
  299. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  300. mindspore/ops/operations/_scalar_ops.py +3 -2
  301. mindspore/ops/operations/_sequence_ops.py +1 -1
  302. mindspore/ops/operations/_tensor_array.py +1 -1
  303. mindspore/ops/operations/array_ops.py +39 -24
  304. mindspore/ops/operations/comm_ops.py +150 -107
  305. mindspore/ops/operations/custom_ops.py +287 -32
  306. mindspore/ops/operations/debug_ops.py +119 -16
  307. mindspore/ops/operations/inner_ops.py +1 -1
  308. mindspore/ops/operations/linalg_ops.py +1 -58
  309. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  310. mindspore/ops/operations/manually_defined/ops_def.py +746 -79
  311. mindspore/ops/operations/math_ops.py +21 -18
  312. mindspore/ops/operations/nn_ops.py +67 -224
  313. mindspore/ops/operations/other_ops.py +62 -9
  314. mindspore/ops/operations/random_ops.py +13 -7
  315. mindspore/ops/operations/reshard_ops.py +1 -1
  316. mindspore/ops/operations/sparse_ops.py +2 -2
  317. mindspore/ops/primitive.py +43 -32
  318. mindspore/ops/tensor_method.py +243 -17
  319. mindspore/ops_generate/__init__.py +0 -5
  320. mindspore/ops_generate/aclnn/__init__.py +0 -0
  321. mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
  322. mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
  323. mindspore/ops_generate/api/__init__.py +0 -0
  324. mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
  325. mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
  326. mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
  327. mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
  328. mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
  329. mindspore/ops_generate/api/gen_api.py +103 -0
  330. mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
  331. mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
  332. mindspore/ops_generate/common/__init__.py +0 -0
  333. mindspore/ops_generate/common/gen_constants.py +91 -0
  334. mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
  335. mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
  336. mindspore/ops_generate/{template.py → common/template.py} +96 -84
  337. mindspore/ops_generate/gen_ops.py +23 -325
  338. mindspore/ops_generate/op_def/__init__.py +0 -0
  339. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  340. mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
  341. mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -10
  342. mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
  343. mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
  344. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  345. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  346. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  347. mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
  348. mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
  349. mindspore/ops_generate/pyboost/__init__.py +0 -0
  350. mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
  351. mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
  352. mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
  353. mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
  354. mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
  355. mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
  356. mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
  357. mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
  358. mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
  359. mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
  360. mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
  361. mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
  362. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
  363. mindspore/ops_generate/resources/__init__.py +0 -0
  364. mindspore/ops_generate/resources/resource_list.py +30 -0
  365. mindspore/ops_generate/resources/resource_loader.py +36 -0
  366. mindspore/ops_generate/resources/resource_manager.py +64 -0
  367. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  368. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  369. mindspore/parallel/__init__.py +6 -2
  370. mindspore/parallel/_auto_parallel_context.py +140 -12
  371. mindspore/parallel/_cell_wrapper.py +132 -15
  372. mindspore/parallel/_parallel_serialization.py +95 -4
  373. mindspore/parallel/_ps_context.py +1 -1
  374. mindspore/parallel/_recovery_context.py +7 -2
  375. mindspore/parallel/_tensor.py +142 -18
  376. mindspore/parallel/_utils.py +198 -25
  377. mindspore/parallel/algo_parameter_config.py +3 -3
  378. mindspore/parallel/auto_parallel.py +732 -0
  379. mindspore/parallel/checkpoint_convert.py +159 -0
  380. mindspore/parallel/checkpoint_transform.py +658 -37
  381. mindspore/parallel/cluster/process_entity/_api.py +151 -19
  382. mindspore/parallel/cluster/run.py +1 -1
  383. mindspore/parallel/function/__init__.py +24 -0
  384. mindspore/parallel/function/reshard_func.py +258 -0
  385. mindspore/parallel/nn/__init__.py +25 -0
  386. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  387. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  388. mindspore/parallel/parameter_broadcast.py +24 -13
  389. mindspore/parallel/shard.py +137 -62
  390. mindspore/parallel/transform_safetensors.py +288 -95
  391. mindspore/pgodb140.dll +0 -0
  392. mindspore/pgort140.dll +0 -0
  393. mindspore/profiler/__init__.py +9 -5
  394. mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
  395. mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
  396. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
  397. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +25 -0
  398. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  399. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
  400. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
  401. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
  402. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
  403. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
  404. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
  405. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
  406. mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
  407. mindspore/profiler/common/constant.py +12 -0
  408. mindspore/profiler/common/msprof_cmd_tool.py +42 -23
  409. mindspore/profiler/common/path_manager.py +24 -0
  410. mindspore/profiler/common/profiler_context.py +26 -2
  411. mindspore/profiler/common/profiler_meta_data.py +74 -0
  412. mindspore/profiler/common/profiler_parameters.py +59 -18
  413. mindspore/profiler/common/profiler_path_manager.py +66 -7
  414. mindspore/profiler/dynamic_profiler.py +112 -79
  415. mindspore/profiler/envprofiler.py +26 -1
  416. mindspore/profiler/experimental_config.py +197 -0
  417. mindspore/profiler/mstx.py +57 -14
  418. mindspore/profiler/platform/npu_profiler.py +33 -7
  419. mindspore/profiler/profiler.py +541 -45
  420. mindspore/profiler/profiler_action_controller.py +1 -1
  421. mindspore/profiler/profiler_interface.py +4 -0
  422. mindspore/profiler/schedule.py +57 -22
  423. mindspore/rewrite/api/node.py +15 -13
  424. mindspore/rewrite/api/symbol_tree.py +1 -1
  425. mindspore/run_check/_check_version.py +25 -14
  426. mindspore/run_check/run_check.py +1 -1
  427. mindspore/runtime/__init__.py +2 -2
  428. mindspore/runtime/executor.py +40 -11
  429. mindspore/runtime/memory.py +37 -13
  430. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  431. mindspore/swresample-4.dll +0 -0
  432. mindspore/swscale-6.dll +0 -0
  433. mindspore/tbbmalloc.dll +0 -0
  434. mindspore/tinyxml2.dll +0 -0
  435. mindspore/train/__init__.py +8 -8
  436. mindspore/train/_utils.py +43 -9
  437. mindspore/train/amp.py +1 -1
  438. mindspore/train/callback/__init__.py +2 -2
  439. mindspore/train/callback/_callback.py +2 -16
  440. mindspore/train/callback/_checkpoint.py +24 -40
  441. mindspore/train/callback/_cluster_monitor.py +14 -18
  442. mindspore/train/callback/_flops_collector.py +2 -3
  443. mindspore/train/callback/_history.py +7 -4
  444. mindspore/train/callback/_lambda_callback.py +2 -2
  445. mindspore/train/callback/_landscape.py +0 -3
  446. mindspore/train/callback/_loss_monitor.py +2 -1
  447. mindspore/train/callback/_on_request_exit.py +6 -5
  448. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  449. mindspore/train/callback/_summary_collector.py +8 -13
  450. mindspore/train/callback/_time_monitor.py +2 -1
  451. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -105
  452. mindspore/train/data_sink.py +25 -2
  453. mindspore/train/dataset_helper.py +4 -5
  454. mindspore/train/loss_scale_manager.py +8 -7
  455. mindspore/train/metrics/accuracy.py +3 -3
  456. mindspore/train/metrics/confusion_matrix.py +9 -9
  457. mindspore/train/metrics/error.py +3 -3
  458. mindspore/train/metrics/hausdorff_distance.py +4 -4
  459. mindspore/train/metrics/mean_surface_distance.py +3 -3
  460. mindspore/train/metrics/metric.py +0 -12
  461. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  462. mindspore/train/metrics/precision.py +8 -6
  463. mindspore/train/metrics/recall.py +9 -9
  464. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  465. mindspore/train/mind_ir_pb2.py +19 -12
  466. mindspore/train/model.py +262 -127
  467. mindspore/train/serialization.py +246 -988
  468. mindspore/train/summary/_summary_adapter.py +2 -2
  469. mindspore/train/summary/summary_record.py +1 -1
  470. mindspore/turbojpeg.dll +0 -0
  471. mindspore/utils/__init__.py +3 -2
  472. mindspore/utils/dryrun.py +4 -2
  473. mindspore/utils/hooks.py +81 -0
  474. mindspore/utils/runtime_execution_order_check.py +2 -0
  475. mindspore/utils/utils.py +138 -4
  476. mindspore/vcmeta.dll +0 -0
  477. mindspore/vcruntime140.dll +0 -0
  478. mindspore/vcruntime140_1.dll +0 -0
  479. mindspore/version.py +1 -1
  480. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/METADATA +2 -1
  481. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/RECORD +485 -440
  482. mindspore/_install_custom.py +0 -43
  483. mindspore/common/_register_for_adapter.py +0 -74
  484. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  485. mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
  486. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  487. mindspore/ops_generate/gen_constants.py +0 -190
  488. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  489. mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
  490. /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
  491. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
  492. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +0 -0
  493. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
--- a/mindspore/communication/__init__.py
+++ b/mindspore/communication/__init__.py
@@ -20,18 +20,20 @@ Note that the APIs in the following list need to preset communication environmen
 For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
 without any third-party or configuration file dependencies.
 Please see the `msrun start up
-<https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 """
 
 from mindspore.communication.management import GlobalComm, init, release, get_rank, \
     get_group_size, get_world_rank_from_group_rank, \
-    get_group_rank_from_world_rank, create_group, HCCL_WORLD_COMM_GROUP, NCCL_WORLD_COMM_GROUP, \
-    MCCL_WORLD_COMM_GROUP, get_local_rank, get_local_rank_size, destroy_group, get_process_group_ranks
+    get_group_rank_from_world_rank, create_group, get_comm_name, \
+    HCCL_WORLD_COMM_GROUP, NCCL_WORLD_COMM_GROUP, MCCL_WORLD_COMM_GROUP, get_local_rank, \
+    get_local_rank_size, destroy_group, get_process_group_ranks
 
 
 __all__ = [
     "GlobalComm", "init", "release", "get_rank", "get_group_size", "get_world_rank_from_group_rank",
-    "get_group_rank_from_world_rank", "create_group", "HCCL_WORLD_COMM_GROUP", "NCCL_WORLD_COMM_GROUP",
-    "MCCL_WORLD_COMM_GROUP", "get_local_rank", "get_local_rank_size", "destroy_group", "get_process_group_ranks"
+    "get_group_rank_from_world_rank", "create_group", "get_comm_name",
+    "HCCL_WORLD_COMM_GROUP", "NCCL_WORLD_COMM_GROUP", "MCCL_WORLD_COMM_GROUP", "get_local_rank",
+    "get_local_rank_size", "destroy_group", "get_process_group_ranks"
 ]
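The user-visible change in this file is the new `get_comm_name` export. A minimal usage sketch, assuming a distributed job launched with msrun; the `group` argument mirrors the `_get_comm_name_helper(group)` signature added later in this diff:

    from mindspore.communication import init, get_comm_name, GlobalComm

    init()  # requires a launched communication environment, e.g. via msrun
    # Returns the backend's inner communicator name for the given group.
    print(get_comm_name(GlobalComm.WORLD_COMM_GROUP))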
--- a/mindspore/communication/_comm_helper.py
+++ b/mindspore/communication/_comm_helper.py
@@ -178,8 +178,17 @@ def check_parameter_available(func):
         Wrapper. If not available, raise Error.
     """
     def wrapper(*args, **kargs):
-        if not GlobalComm.INITED:
-            raise RuntimeError("Distributed Communication has not been inited")
+        # The functions in this list return a 0 or 1 default value in standalone mode,
+        # i.e. when the 'init' method has not been called.
+        standalone_bypass_check_func_list = [
+            "_get_rank_helper",
+            "_get_local_rank_helper",
+            "_get_size_helper",
+            "_get_local_size_helper"
+        ]
+        if not GlobalComm.INITED and func.__name__ not in standalone_bypass_check_func_list:
+            raise RuntimeError("Distributed Communication has not been inited. "
+                               "You can't invoke this interface yet. Please call `init()` method first.")
         group = None
         if "group" in kargs.keys():
             group = kargs.get("group")
@@ -264,6 +273,11 @@ def _get_rank_helper(group):
     if _check_bypass_rank_id_and_size():
         rank_id = 0
         return rank_id
+    if not GlobalComm.INITED:
+        # If 'RANK_ID' is not set, return 0 as default value.
+        logger.info("You are invoking this interface without calling `init` method. "
+                    "Return 'RANK_ID' env value instead. If 'RANK_ID' is not set, return 0 as default value.")
+        return int(os.getenv("RANK_ID", "0"))
     if _hccl_test():
         return hccl.get_rank_id(group)
     rank_id = CollectiveManager.get_instance().get_rank_id(group)
@@ -288,6 +302,11 @@ def _get_local_rank_helper(group):
     if _check_bypass_rank_id_and_size():
         local_rank_id = 0
         return local_rank_id
+    if not GlobalComm.INITED:
+        # If 'LOCAL_RANK' env is not set, return 0 as default value.
+        logger.info("You are invoking this interface without calling `init` method. "
+                    "Return 'LOCAL_RANK' env value instead. If 'LOCAL_RANK' is not set, return 0 as default value.")
+        return int(os.getenv("LOCAL_RANK", "0"))
     if _hccl_test():
         return hccl.get_local_rank_id(group)
     rank_id = CollectiveManager.get_instance().get_local_rank_id(group)
@@ -312,6 +331,11 @@ def _get_size_helper(group):
     if _check_bypass_rank_id_and_size():
         size = 1
         return size
+    if not GlobalComm.INITED:
+        # If 'RANK_SIZE' env is not set, return 1 as default value.
+        logger.info("You are invoking this interface without calling `init` method. "
+                    "Return 'RANK_SIZE' env value instead. If 'RANK_SIZE' is not set, return 1 as default value.")
+        return int(os.getenv("RANK_SIZE", "1"))
     if _hccl_test():
         return hccl.get_rank_size(group)
     size = CollectiveManager.get_instance().get_group_size(group)
@@ -333,6 +357,15 @@ def _get_local_size_helper(group):
     Returns:
         Integer. The local rank size where the calling process is being within specified group.
     """
+    if _check_bypass_rank_id_and_size():
+        size = 1
+        return size
+    if not GlobalComm.INITED:
+        # If 'LOCAL_RANK_SIZE' env is not set, return 1 as default value.
+        logger.info("You are invoking this interface without calling `init` method. "
+                    "Return 'LOCAL_RANK_SIZE' env value instead. If 'LOCAL_RANK_SIZE' is not set, "
+                    "return 1 as default value.")
+        return int(os.getenv("LOCAL_RANK_SIZE", "1"))
     size = CollectiveManager.get_instance().get_local_group_size(group)
     return size
 
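Taken together, the four helper hunks above relax the `init()` requirement for rank/size queries: instead of raising `RuntimeError`, they now log an info message and fall back to environment variables. A sketch of the new standalone behavior, assuming the public `get_rank`/`get_group_size` wrappers route through these helpers as the bypass list suggests (env variable names come from the hunks; the values below are hypothetical):

    import os
    from mindspore.communication import get_rank, get_group_size

    # No init() call: since 2.6.0 these read env vars instead of raising.
    os.environ["RANK_ID"] = "3"    # hypothetical values for illustration
    os.environ["RANK_SIZE"] = "8"
    print(get_rank())        # 3 (defaults to 0 when RANK_ID is unset)
    print(get_group_size())  # 8 (defaults to 1 when RANK_SIZE is unset)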
@@ -501,6 +534,18 @@ def _destroy_group_helper(group):
     CollectiveManager.get_instance().destroy_group(group)
 
 
+@check_parameter_available
+def _get_comm_name_helper(group):
+    """
+    The helper to get inner_comm_name.
+
+    Args:
+        group (str): The user communication group.
+
+    """
+    return CollectiveManager.get_instance().get_comm_name(group)
+
+
 def _get_group_map():
     """Get the group map"""
     return CollectiveManager.get_instance().get_group_map()
--- a/mindspore/communication/comm_func.py
+++ b/mindspore/communication/comm_func.py
@@ -20,7 +20,7 @@ from mindspore.communication import GlobalComm, get_group_rank_from_world_rank,
 from mindspore.communication.management import _get_group
 from mindspore.communication._comm_helper import _get_group_rank_from_world_rank_from_cache_helper
 from mindspore.common.tensor import Tensor
-from mindspore._c_expression import Tensor as Tensor_
+from mindspore._c_expression import TensorPy as Tensor_
 from mindspore.ops import ReduceOp, cat
 from mindspore.ops._primitive_cache import _get_cache_prim
 from mindspore.ops.primitive import _primexpr
@@ -30,7 +30,7 @@ from mindspore.ops.auto_generate.gen_ops_prim import (inner_comm_all_reduce_op,
 from mindspore._c_expression import CommHandle as CommHandle_
 from mindspore._c_expression.typing import Type
 from mindspore import jit_class
-from mindspore.common.api import _pynative_executor
+import mindspore as ms
 
 __all__ = [
     'all_reduce',
@@ -63,6 +63,12 @@ class CommHandle(CommHandle_):
     handles will be created using Python.
     """
 
+    def __init__(self, handle=None, exec_sync=False):
+        super(CommHandle, self).__init__()
+        self.handle = handle
+        self.exec_sync = exec_sync
+
+
     def wait(self):
         r"""
         The wait for asynchronous handles will not take effect for handles created on the Python side.
@@ -80,6 +86,10 @@ class CommHandle(CommHandle_):
         [[2. 2. 2. 2. 2. 2. 2. 2.]
          [2. 2. 2. 2. 2. 2. 2. 2.]]
         """
+        if self.handle:
+            self.handle.wait()
+        if self.exec_sync:
+            ms.runtime.synchronize()
 
 
 default_handle = CommHandle()
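With these two hunks, a Python-side `CommHandle` can wrap a native handle and optionally force a device-wide sync on `wait()`; together with the `_deal_comm_outputs` change further below, `async_op=True` calls now hand back this Python wrapper rather than the raw native handle. A sketch of the calling pattern, assuming two devices launched with msrun (`all_reduce` appears in the `__all__` list above):

    import numpy as np
    import mindspore as ms
    from mindspore import communication as comm

    comm.init()
    x = ms.Tensor(np.ones([2, 8]).astype(np.float32))
    out, handle = comm.comm_func.all_reduce(x, async_op=True)
    handle.wait()  # delegates to the wrapped native handle; syncs if exec_sync is set
    print(out)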
@@ -220,7 +230,7 @@ def all_reduce(tensor, op=ReduceOp.SUM, group=GlobalComm.WORLD_COMM_GROUP, async
         For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
         without any third-party or configuration file dependencies.
         Please see the `msrun start up
-        <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
         for more details.
 
         This example should be run with 2 devices.
@@ -285,7 +295,7 @@ def all_gather_into_tensor(tensor, group=GlobalComm.WORLD_COMM_GROUP, async_op=F
         For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
         without any third-party or configuration file dependencies.
         Please see the `msrun start up
-        <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
         for more details.
 
         This example should be run with 2 devices.
@@ -355,7 +365,7 @@ def reduce_scatter_tensor(tensor, op=ReduceOp.SUM, group=GlobalComm.WORLD_COMM_G
         For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
         without any third-party or configuration file dependencies.
         Please see the `msrun start up
-        <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
         for more details.
 
         This example should be run with 2 devices.
@@ -424,7 +434,7 @@ def reduce(tensor, dst, op=ReduceOp.SUM, group=GlobalComm.WORLD_COMM_GROUP):
         without any third-party or configuration file dependencies.
 
         Please see the `msrun start up
-        <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
         for more details.
 
         This example should be run with 4 devices.
@@ -561,7 +571,7 @@ def batch_isend_irecv(p2p_op_list):
         For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
         without any third-party or configuration file dependencies.
         Please see the `msrun start up
-        <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
         for more details.
 
         This example should be run with 2 devices.
@@ -679,7 +689,7 @@ def scatter_tensor(tensor, src=0, group=GlobalComm.WORLD_COMM_GROUP):
         For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
         without any third-party or configuration file dependencies.
         Please see the `msrun start up
-        <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
         for more details.
 
         This example should be run with 2 devices.
@@ -744,7 +754,7 @@ def gather_into_tensor(tensor, dst=0, group=GlobalComm.WORLD_COMM_GROUP):
         For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
         without any third-party or configuration file dependencies.
         Please see the `msrun start up
-        <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
         for more details.
 
         This example should be run with 2 devices.
@@ -805,7 +815,7 @@ def broadcast(tensor, src=0, group=GlobalComm.WORLD_COMM_GROUP):
         For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
         without any third-party or configuration file dependencies.
         Please see the `msrun start up
-        <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
         for more details.
 
         This example should be run with 2 devices.
@@ -819,6 +829,7 @@ def broadcast(tensor, src=0, group=GlobalComm.WORLD_COMM_GROUP):
         >>> comm.init()
         >>> data = ms.Tensor(np.arange(8).reshape([2, 4]).astype(np.float32))
         >>> out = comm.comm_func.broadcast(tensor=data, src=0)
+        >>> print(out)
         [[0. 1. 2. 3.]
          [4. 5. 6. 7.]]
 
@@ -858,7 +869,7 @@ def barrier(group=GlobalComm.WORLD_COMM_GROUP):
         For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
         without any third-party or configuration file dependencies.
         Please see the `msrun start up
-        <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
         for more details.
 
         This example should be run with 2 devices.
@@ -869,6 +880,8 @@ def barrier(group=GlobalComm.WORLD_COMM_GROUP):
         >>> # Launch 2 processes.
         >>> comm.init()
         >>> comm.comm_func.barrier()
+        >>> print("barrier finish!")
+        barrier finish!
 
     Tutorial Examples:
         - `Distributed Set Communication Primitives - Barrier
@@ -888,9 +901,9 @@ def _deal_comm_outputs(output, async_op, exec_sync=False):
         if not async_op:
             output[1].wait()
             if exec_sync:
-                _pynative_executor.sync()
+                ms.runtime.synchronize()
             return (output[0], None)
-        return output
+        return (output[0], CommHandle(output[1], exec_sync))
 
     if not async_op:
         return (output, None)
@@ -926,7 +939,7 @@ def send(tensor, dst=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
         For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
         without any third-party or configuration file dependencies.
         Please see the `msrun start up
-        <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
         for more details.
 
         This example should be run with 2 devices.
@@ -946,12 +959,13 @@ def send(tensor, dst=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
         >>>
         >>>
         >>> if rank < size / 2:
-        >>>     _x = ms.Tensor(x)
-        >>>     send(_x, rank + size // 2)
-        >>> else:
-        >>>     _x2 = ms.Tensor(x2)
-        >>>     output = recv(_x2, rank - size // 2)
-        >>>     print(output)
+        ...     _x = ms.Tensor(x)
+        ...     send(_x, rank + size // 2)
+        ... else:
+        ...     _x2 = ms.Tensor(x2)
+        ...     output = recv(_x2, rank - size // 2)
+        ...     print(output)
+        rank1:
         [[0.01 0.01]
          [0.01 0.01]]
         """
@@ -1000,7 +1014,7 @@ def recv(tensor, src=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
         For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
         without any third-party or configuration file dependencies.
         Please see the `msrun start up
-        <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
         for more details.
 
         This example should be run with 2 devices.
@@ -1020,12 +1034,13 @@ def recv(tensor, src=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
         >>>
         >>>
         >>> if rank < size / 2:
-        >>>     _x = ms.Tensor(x)
-        >>>     send(_x, rank + size // 2)
-        >>> else:
-        >>>     _x2 = ms.Tensor(x2)
-        >>>     output = recv(_x2, rank - size // 2)
-        >>>     print(output)
+        ...     _x = ms.Tensor(x)
+        ...     send(_x, rank + size // 2)
+        ... else:
+        ...     _x2 = ms.Tensor(x2)
+        ...     output = recv(_x2, rank - size // 2)
+        ...     print(output)
+        rank1:
         [[0.01 0.01]
          [0.01 0.01]]
         """
@@ -1075,7 +1090,7 @@ def isend(tensor, dst=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
         For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
         without any third-party or configuration file dependencies.
         Please see the `msrun start up
-        <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
         for more details.
 
         This example should be run with 2 devices.
@@ -1095,13 +1110,14 @@ def isend(tensor, dst=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
         >>>
         >>>
         >>> if rank < size / 2:
-        >>>     _x = ms.Tensor(x)
-        >>>     isend(_x, rank + size // 2)
-        >>> else:
-        >>>     _x2 = ms.Tensor(x2)
-        >>>     output, handle = irecv(_x2, rank - size // 2)
-        >>>     handle.wait()
-        >>>     print(output)
+        ...     _x = ms.Tensor(x)
+        ...     isend(_x, rank + size // 2)
+        ... else:
+        ...     _x2 = ms.Tensor(x2)
+        ...     output, handle = irecv(_x2, rank - size // 2)
+        ...     handle.wait()
+        ...     print(output)
+        rank1:
         [[0.01 0.01]
          [0.01 0.01]]
         """
@@ -1153,7 +1169,7 @@ def irecv(tensor, src=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
         For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
         without any third-party or configuration file dependencies.
         Please see the `msrun start up
-        <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
         for more details.
 
         This example should be run with 2 devices.
@@ -1173,13 +1189,14 @@ def irecv(tensor, src=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
         >>>
         >>>
         >>> if rank < size / 2:
-        >>>     _x = ms.Tensor(x)
-        >>>     isend(_x, rank + size // 2)
-        >>> else:
-        >>>     _x2 = ms.Tensor(x2)
-        >>>     output, handle = irecv(_x2, rank - size // 2)
-        >>>     handle.wait()
-        >>>     print(output)
+        ...     _x = ms.Tensor(x)
+        ...     isend(_x, rank + size // 2)
+        ... else:
+        ...     _x2 = ms.Tensor(x2)
+        ...     output, handle = irecv(_x2, rank - size // 2)
+        ...     handle.wait()
+        ...     print(output)
+        rank1:
         [[0.01 0.01]
          [0.01 0.01]]
         """
@@ -1229,7 +1246,7 @@ def all_to_all_with_output_shape(output_shape_list, input_tensor_list, group=Non
         For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
         without any third-party or configuration file dependencies.
         Please see the `msrun start up
-        <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
         for more details.
 
         This example should be run with 2 devices.
@@ -1241,11 +1258,11 @@ def all_to_all_with_output_shape(output_shape_list, input_tensor_list, group=Non
         >>> comm.init()
         >>> this_rank = comm.get_rank()
         >>> if this_rank == 0:
-        >>>     send_tensor_list = [ms.Tensor(1.), ms.Tensor([[2, 3], [4, 5.]])]
-        >>>     recv_tensor_list = [(), (2,)]
+        ...     send_tensor_list = [ms.Tensor(1.), ms.Tensor([[2, 3], [4, 5.]])]
+        ...     recv_tensor_list = [(), (2,)]
         >>> if this_rank == 1:
-        >>>     send_tensor_list = [ms.Tensor([2, 2.]), ms.Tensor([4, 5, 6, 7.])]
-        >>>     recv_tensor_list = [(2, 2), (4,)]
+        ...     send_tensor_list = [ms.Tensor([2, 2.]), ms.Tensor([4, 5, 6, 7.])]
+        ...     recv_tensor_list = [(2, 2), (4,)]
         >>> output, _ = comm.comm_func.all_to_all_with_output_shape(recv_tensor_list, send_tensor_list)
         >>> print(output)
         rank 0:
@@ -1280,7 +1297,6 @@ def all_to_all_with_output_shape(output_shape_list, input_tensor_list, group=Non
             recv_shape_list.append(_shape)
 
         send_flatten_tensor = cat(send_flatten_tensor)
-        send_flatten_tensor = _contiguous(send_flatten_tensor)
         group = GlobalComm.WORLD_COMM_GROUP if group is None else _get_group(group)
         global _GROPU_SIZE_CACHE
         if group not in _GROPU_SIZE_CACHE:
@@ -1345,7 +1361,8 @@ _ALL_TO_ALL_CACHE = {}
 def all_to_all_single_with_output_shape(output_shape, tensor, output_split_sizes=None,
                                         input_split_sizes=None, group=None, async_op=False):
     """
-    scatter and gather input with split size to/from all rank, and return result in a single tensor.
+    Based on the slice sizes given by the user, the input `tensor` is sliced and sent to the other
+    devices, and the sliced chunks received from the other devices are merged into an output Tensor.
 
     Note:
         'output_shape' and 'tensor' shape should match across ranks.
@@ -1365,8 +1382,8 @@ def all_to_all_single_with_output_shape(output_shape, tensor, output_split_sizes
 
     Returns:
         Tuple(Tensor, CommHandle), the output tensor is gathered concatenated from remote ranks.
-        If the numel of tensor gathered from remote is zero, it will return a Tensor will value 0,
-        which has no actual meanning. CommHandle is an async work handle, if `async_op` is set to True.
+        If the numel of tensor gathered from remote is zero, it will return a Tensor with shape `()`,
+        whose value has no actual meaning. CommHandle is an async work handle, if `async_op` is set to True.
         CommHandle will be None, when `async_op` is False.
 
     Raises:
@@ -1383,7 +1400,7 @@ def all_to_all_single_with_output_shape(output_shape, tensor, output_split_sizes
         For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
         without any third-party or configuration file dependencies.
         Please see the `msrun start up
-        <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+        <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
        for more details.
 
         This example should be run with 2 devices.
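Since the section cuts off inside this docstring, here is a brief usage sketch built only from the signature and return contract shown in the hunks above (two ranks, default equal splits; the values are hypothetical, not taken from the package's own doctest):

    import numpy as np
    import mindspore as ms
    from mindspore import communication as comm

    comm.init()  # run with 2 devices, e.g. via msrun
    # Each rank contributes one row per peer and receives one row from each.
    data = ms.Tensor(np.arange(8).reshape([2, 4]).astype(np.float32))
    out, handle = comm.comm_func.all_to_all_single_with_output_shape((2, 4), data)
    print(out.shape)  # (2, 4); handle is None because async_op defaults to False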