mindspore 2.5.0-cp311-cp311-win_amd64.whl → 2.6.0-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (493)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +6 -4
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -33
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parse/__init__.py +6 -7
  14. mindspore/_extends/parse/compile_config.py +19 -0
  15. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
  16. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  17. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  18. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  19. mindspore/_extends/parse/parser.py +25 -194
  20. mindspore/_extends/parse/resources.py +1 -5
  21. mindspore/_extends/parse/standard_method.py +109 -75
  22. mindspore/_extends/pijit/__init__.py +2 -2
  23. mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
  24. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  25. mindspore/_extends/utils.py +1 -1
  26. mindspore/amp.py +4 -4
  27. mindspore/atlprov.dll +0 -0
  28. mindspore/avcodec-59.dll +0 -0
  29. mindspore/avdevice-59.dll +0 -0
  30. mindspore/avfilter-8.dll +0 -0
  31. mindspore/avformat-59.dll +0 -0
  32. mindspore/avutil-57.dll +0 -0
  33. mindspore/boost/__init__.py +2 -2
  34. mindspore/boost/base.py +3 -7
  35. mindspore/boost/boost_cell_wrapper.py +2 -2
  36. mindspore/c1.dll +0 -0
  37. mindspore/c1xx.dll +0 -0
  38. mindspore/c2.dll +0 -0
  39. mindspore/common/__init__.py +4 -3
  40. mindspore/common/_grad_function.py +56 -0
  41. mindspore/common/_pijit_context.py +14 -5
  42. mindspore/common/_register_for_tensor.py +1 -1
  43. mindspore/common/_stub_tensor.py +5 -10
  44. mindspore/common/_tensor_cpp_method.py +1 -1
  45. mindspore/common/_tensor_docs.py +2014 -3386
  46. mindspore/common/api.py +386 -355
  47. mindspore/common/auto_dynamic_shape.py +41 -44
  48. mindspore/common/dtype.py +5 -2
  49. mindspore/common/dump.py +7 -5
  50. mindspore/common/file_system.py +3 -0
  51. mindspore/common/generator.py +3 -0
  52. mindspore/common/hook_handle.py +5 -3
  53. mindspore/common/initializer.py +10 -6
  54. mindspore/common/jit_begin_end.py +94 -0
  55. mindspore/common/jit_config.py +6 -1
  56. mindspore/common/jit_context.py +76 -0
  57. mindspore/common/jit_trace.py +378 -0
  58. mindspore/common/lazy_inline.py +2 -2
  59. mindspore/common/mutable.py +5 -4
  60. mindspore/common/parameter.py +106 -39
  61. mindspore/common/seed.py +2 -2
  62. mindspore/common/sparse_tensor.py +23 -17
  63. mindspore/common/tensor.py +332 -714
  64. mindspore/communication/__init__.py +7 -5
  65. mindspore/communication/_comm_helper.py +47 -2
  66. mindspore/communication/comm_func.py +70 -53
  67. mindspore/communication/management.py +83 -17
  68. mindspore/context.py +228 -571
  69. mindspore/dataset/__init__.py +44 -20
  70. mindspore/dataset/audio/__init__.py +2 -8
  71. mindspore/dataset/audio/transforms.py +3 -17
  72. mindspore/dataset/core/config.py +3 -3
  73. mindspore/dataset/engine/cache_client.py +1 -1
  74. mindspore/dataset/engine/datasets.py +102 -120
  75. mindspore/dataset/engine/datasets_audio.py +22 -22
  76. mindspore/dataset/engine/datasets_standard_format.py +43 -24
  77. mindspore/dataset/engine/datasets_text.py +78 -85
  78. mindspore/dataset/engine/datasets_user_defined.py +109 -77
  79. mindspore/dataset/engine/datasets_vision.py +111 -108
  80. mindspore/dataset/engine/iterators.py +5 -3
  81. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  82. mindspore/dataset/engine/samplers.py +279 -57
  83. mindspore/dataset/engine/serializer_deserializer.py +2 -1
  84. mindspore/dataset/engine/validators.py +10 -0
  85. mindspore/dataset/text/__init__.py +7 -6
  86. mindspore/dataset/text/transforms.py +6 -5
  87. mindspore/dataset/text/utils.py +3 -3
  88. mindspore/dataset/transforms/__init__.py +0 -9
  89. mindspore/dataset/transforms/transforms.py +3 -3
  90. mindspore/dataset/utils/browse_dataset.py +1 -1
  91. mindspore/dataset/vision/__init__.py +2 -9
  92. mindspore/dataset/vision/transforms.py +202 -158
  93. mindspore/dataset/vision/utils.py +7 -5
  94. mindspore/device_context/ascend/op_debug.py +60 -1
  95. mindspore/device_context/ascend/op_tuning.py +0 -4
  96. mindspore/device_manager.py +39 -3
  97. mindspore/dnnl.dll +0 -0
  98. mindspore/dpcmi.dll +0 -0
  99. mindspore/experimental/es/embedding_service.py +35 -27
  100. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -2
  101. mindspore/experimental/map_parameter.py +4 -4
  102. mindspore/experimental/optim/adadelta.py +22 -26
  103. mindspore/experimental/optim/adagrad.py +4 -4
  104. mindspore/experimental/optim/adam.py +4 -0
  105. mindspore/experimental/optim/adamax.py +4 -4
  106. mindspore/experimental/optim/adamw.py +4 -0
  107. mindspore/experimental/optim/asgd.py +1 -1
  108. mindspore/experimental/optim/lr_scheduler.py +40 -22
  109. mindspore/experimental/optim/radam.py +5 -5
  110. mindspore/experimental/optim/rprop.py +1 -1
  111. mindspore/experimental/optim/sgd.py +1 -1
  112. mindspore/hal/contiguous_tensors_handle.py +6 -10
  113. mindspore/hal/device.py +55 -81
  114. mindspore/hal/event.py +38 -55
  115. mindspore/hal/memory.py +115 -147
  116. mindspore/hal/stream.py +81 -125
  117. mindspore/include/dataset/constants.h +7 -4
  118. mindspore/include/dataset/execute.h +2 -2
  119. mindspore/jpeg62.dll +0 -0
  120. mindspore/log.py +40 -2
  121. mindspore/mindrecord/__init__.py +20 -7
  122. mindspore/mindspore_backend_common.dll +0 -0
  123. mindspore/mindspore_backend_manager.dll +0 -0
  124. mindspore/mindspore_common.dll +0 -0
  125. mindspore/mindspore_core.dll +0 -0
  126. mindspore/mindspore_dump.dll +0 -0
  127. mindspore/mindspore_frontend.dll +0 -0
  128. mindspore/mindspore_glog.dll +0 -0
  129. mindspore/mindspore_memory_pool.dll +0 -0
  130. mindspore/mindspore_ms_backend.dll +0 -0
  131. mindspore/mindspore_ops.dll +0 -0
  132. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  133. mindspore/mindspore_ops_kernel_common.dll +0 -0
  134. mindspore/mindspore_profiler.dll +0 -0
  135. mindspore/mindspore_pyboost.dll +0 -0
  136. mindspore/mindspore_pynative.dll +0 -0
  137. mindspore/mindspore_res_manager.dll +0 -0
  138. mindspore/mindspore_runtime_pipeline.dll +0 -0
  139. mindspore/mint/__init__.py +133 -702
  140. mindspore/mint/distributed/__init__.py +5 -1
  141. mindspore/mint/distributed/distributed.py +198 -113
  142. mindspore/mint/linalg/__init__.py +2 -0
  143. mindspore/mint/nn/__init__.py +280 -18
  144. mindspore/mint/nn/functional.py +282 -64
  145. mindspore/mint/nn/layer/__init__.py +4 -0
  146. mindspore/mint/nn/layer/_functions.py +7 -3
  147. mindspore/mint/nn/layer/activation.py +120 -13
  148. mindspore/mint/nn/layer/conv.py +234 -28
  149. mindspore/mint/nn/layer/normalization.py +15 -16
  150. mindspore/mint/nn/layer/padding.py +1 -1
  151. mindspore/mint/nn/layer/pooling.py +66 -1
  152. mindspore/mint/optim/__init__.py +2 -1
  153. mindspore/mint/optim/sgd.py +171 -0
  154. mindspore/msobj140.dll +0 -0
  155. mindspore/mspdb140.dll +0 -0
  156. mindspore/mspdbcore.dll +0 -0
  157. mindspore/mspdbst.dll +0 -0
  158. mindspore/mspft140.dll +0 -0
  159. mindspore/msvcdis140.dll +0 -0
  160. mindspore/msvcp140_1.dll +0 -0
  161. mindspore/msvcp140_2.dll +0 -0
  162. mindspore/msvcp140_atomic_wait.dll +0 -0
  163. mindspore/msvcp140_codecvt_ids.dll +0 -0
  164. mindspore/nn/__init__.py +4 -1
  165. mindspore/nn/cell.py +1253 -179
  166. mindspore/nn/layer/activation.py +23 -21
  167. mindspore/nn/layer/basic.py +22 -16
  168. mindspore/nn/layer/container.py +1 -1
  169. mindspore/nn/layer/conv.py +53 -42
  170. mindspore/nn/layer/embedding.py +9 -8
  171. mindspore/nn/layer/normalization.py +48 -42
  172. mindspore/nn/layer/pooling.py +75 -31
  173. mindspore/nn/layer/transformer.py +11 -10
  174. mindspore/nn/learning_rate_schedule.py +4 -2
  175. mindspore/nn/loss/loss.py +27 -19
  176. mindspore/nn/optim/ada_grad.py +6 -5
  177. mindspore/nn/optim/adadelta.py +9 -7
  178. mindspore/nn/optim/adafactor.py +1 -1
  179. mindspore/nn/optim/adam.py +18 -14
  180. mindspore/nn/optim/adamax.py +8 -7
  181. mindspore/nn/optim/adasum.py +5 -5
  182. mindspore/nn/optim/asgd.py +3 -1
  183. mindspore/nn/optim/ftrl.py +11 -9
  184. mindspore/nn/optim/lamb.py +1 -1
  185. mindspore/nn/optim/lazyadam.py +12 -10
  186. mindspore/nn/optim/momentum.py +7 -6
  187. mindspore/nn/optim/optimizer.py +2 -2
  188. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  189. mindspore/nn/optim/rmsprop.py +13 -12
  190. mindspore/nn/optim/rprop.py +9 -7
  191. mindspore/nn/optim/sgd.py +9 -6
  192. mindspore/nn/optim/tft_wrapper.py +5 -2
  193. mindspore/nn/probability/bijector/bijector.py +17 -11
  194. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  195. mindspore/nn/probability/bijector/invert.py +2 -2
  196. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  197. mindspore/nn/probability/bijector/softplus.py +3 -2
  198. mindspore/nn/probability/distribution/beta.py +3 -3
  199. mindspore/nn/probability/distribution/categorical.py +1 -1
  200. mindspore/nn/probability/distribution/cauchy.py +4 -2
  201. mindspore/nn/probability/distribution/exponential.py +6 -7
  202. mindspore/nn/probability/distribution/gamma.py +2 -2
  203. mindspore/nn/probability/distribution/gumbel.py +2 -2
  204. mindspore/nn/probability/distribution/half_normal.py +5 -3
  205. mindspore/nn/probability/distribution/logistic.py +5 -3
  206. mindspore/nn/probability/distribution/poisson.py +1 -1
  207. mindspore/nn/probability/distribution/uniform.py +5 -3
  208. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  209. mindspore/nn/reinforcement/tensor_array.py +1 -1
  210. mindspore/nn/wrap/__init__.py +6 -6
  211. mindspore/nn/wrap/cell_wrapper.py +178 -117
  212. mindspore/nn/wrap/grad_reducer.py +45 -36
  213. mindspore/nn/wrap/loss_scale.py +3 -3
  214. mindspore/numpy/array_creations.py +3 -3
  215. mindspore/numpy/array_ops.py +1 -1
  216. mindspore/numpy/utils.py +1 -2
  217. mindspore/numpy/utils_const.py +1 -2
  218. mindspore/opencv_core452.dll +0 -0
  219. mindspore/opencv_imgcodecs452.dll +0 -0
  220. mindspore/opencv_imgproc452.dll +0 -0
  221. mindspore/ops/__init__.py +3 -2
  222. mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
  223. mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
  224. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  225. mindspore/ops/_register_for_op.py +0 -11
  226. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  227. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
  228. mindspore/ops/_vmap/vmap_array_ops.py +32 -6
  229. mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
  230. mindspore/ops/_vmap/vmap_math_ops.py +4 -7
  231. mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
  232. mindspore/ops/auto_generate/__init__.py +4 -3
  233. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +127 -52
  234. mindspore/ops/auto_generate/gen_extend_func.py +286 -208
  235. mindspore/ops/auto_generate/gen_ops_def.py +2783 -2335
  236. mindspore/ops/auto_generate/gen_ops_prim.py +8992 -2686
  237. mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
  238. mindspore/ops/composite/__init__.py +2 -1
  239. mindspore/ops/composite/base.py +19 -24
  240. mindspore/ops/composite/math_ops.py +6 -16
  241. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  242. mindspore/ops/composite/multitype_ops/_compile_utils.py +4 -5
  243. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  244. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  245. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  246. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  247. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  248. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  249. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  250. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  251. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  252. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  253. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  254. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  255. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  256. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  257. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  258. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  259. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  260. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  261. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  262. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  263. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  264. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  265. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  266. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  267. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  268. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
  269. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  270. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  271. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  272. mindspore/ops/function/__init__.py +28 -2
  273. mindspore/ops/function/_add_attr_func.py +58 -0
  274. mindspore/ops/function/array_func.py +1631 -2347
  275. mindspore/ops/function/clip_func.py +38 -45
  276. mindspore/ops/function/debug_func.py +36 -44
  277. mindspore/ops/function/grad/__init__.py +1 -0
  278. mindspore/ops/function/grad/grad_func.py +104 -71
  279. mindspore/ops/function/image_func.py +1 -1
  280. mindspore/ops/function/linalg_func.py +46 -78
  281. mindspore/ops/function/math_func.py +3024 -3855
  282. mindspore/ops/function/nn_func.py +678 -274
  283. mindspore/ops/function/other_func.py +159 -1
  284. mindspore/ops/function/parameter_func.py +17 -30
  285. mindspore/ops/function/random_func.py +216 -361
  286. mindspore/ops/function/reshard_func.py +4 -70
  287. mindspore/ops/function/sparse_func.py +3 -3
  288. mindspore/ops/function/sparse_unary_func.py +5 -5
  289. mindspore/ops/function/spectral_func.py +25 -58
  290. mindspore/ops/function/vmap_func.py +26 -18
  291. mindspore/ops/functional.py +8 -5
  292. mindspore/ops/functional_overload.py +655 -4
  293. mindspore/ops/op_info_register.py +32 -244
  294. mindspore/ops/operations/__init__.py +21 -14
  295. mindspore/ops/operations/_custom_ops_utils.py +235 -0
  296. mindspore/ops/operations/_grad_ops.py +1 -10
  297. mindspore/ops/operations/_inner_ops.py +5 -76
  298. mindspore/ops/operations/_ms_kernel.py +4 -10
  299. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  300. mindspore/ops/operations/_scalar_ops.py +3 -2
  301. mindspore/ops/operations/_sequence_ops.py +1 -1
  302. mindspore/ops/operations/_tensor_array.py +1 -1
  303. mindspore/ops/operations/array_ops.py +39 -24
  304. mindspore/ops/operations/comm_ops.py +150 -107
  305. mindspore/ops/operations/custom_ops.py +287 -32
  306. mindspore/ops/operations/debug_ops.py +119 -16
  307. mindspore/ops/operations/inner_ops.py +1 -1
  308. mindspore/ops/operations/linalg_ops.py +1 -58
  309. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  310. mindspore/ops/operations/manually_defined/ops_def.py +746 -79
  311. mindspore/ops/operations/math_ops.py +21 -18
  312. mindspore/ops/operations/nn_ops.py +67 -224
  313. mindspore/ops/operations/other_ops.py +62 -9
  314. mindspore/ops/operations/random_ops.py +13 -7
  315. mindspore/ops/operations/reshard_ops.py +1 -1
  316. mindspore/ops/operations/sparse_ops.py +2 -2
  317. mindspore/ops/primitive.py +43 -32
  318. mindspore/ops/tensor_method.py +243 -17
  319. mindspore/ops_generate/__init__.py +0 -5
  320. mindspore/ops_generate/aclnn/__init__.py +0 -0
  321. mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
  322. mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
  323. mindspore/ops_generate/api/__init__.py +0 -0
  324. mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
  325. mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
  326. mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
  327. mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
  328. mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
  329. mindspore/ops_generate/api/gen_api.py +103 -0
  330. mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
  331. mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
  332. mindspore/ops_generate/common/__init__.py +0 -0
  333. mindspore/ops_generate/common/gen_constants.py +91 -0
  334. mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
  335. mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
  336. mindspore/ops_generate/{template.py → common/template.py} +96 -84
  337. mindspore/ops_generate/gen_ops.py +23 -325
  338. mindspore/ops_generate/op_def/__init__.py +0 -0
  339. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  340. mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
  341. mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -10
  342. mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
  343. mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
  344. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  345. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  346. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  347. mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
  348. mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
  349. mindspore/ops_generate/pyboost/__init__.py +0 -0
  350. mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
  351. mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
  352. mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
  353. mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
  354. mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
  355. mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
  356. mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
  357. mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
  358. mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
  359. mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
  360. mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
  361. mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
  362. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
  363. mindspore/ops_generate/resources/__init__.py +0 -0
  364. mindspore/ops_generate/resources/resource_list.py +30 -0
  365. mindspore/ops_generate/resources/resource_loader.py +36 -0
  366. mindspore/ops_generate/resources/resource_manager.py +64 -0
  367. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  368. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  369. mindspore/parallel/__init__.py +6 -2
  370. mindspore/parallel/_auto_parallel_context.py +140 -12
  371. mindspore/parallel/_cell_wrapper.py +132 -15
  372. mindspore/parallel/_parallel_serialization.py +95 -4
  373. mindspore/parallel/_ps_context.py +1 -1
  374. mindspore/parallel/_recovery_context.py +7 -2
  375. mindspore/parallel/_tensor.py +142 -18
  376. mindspore/parallel/_utils.py +198 -25
  377. mindspore/parallel/algo_parameter_config.py +3 -3
  378. mindspore/parallel/auto_parallel.py +732 -0
  379. mindspore/parallel/checkpoint_convert.py +159 -0
  380. mindspore/parallel/checkpoint_transform.py +658 -37
  381. mindspore/parallel/cluster/process_entity/_api.py +151 -19
  382. mindspore/parallel/cluster/run.py +1 -1
  383. mindspore/parallel/function/__init__.py +24 -0
  384. mindspore/parallel/function/reshard_func.py +258 -0
  385. mindspore/parallel/nn/__init__.py +25 -0
  386. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  387. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  388. mindspore/parallel/parameter_broadcast.py +24 -13
  389. mindspore/parallel/shard.py +137 -62
  390. mindspore/parallel/transform_safetensors.py +288 -95
  391. mindspore/pgodb140.dll +0 -0
  392. mindspore/pgort140.dll +0 -0
  393. mindspore/profiler/__init__.py +9 -5
  394. mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
  395. mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
  396. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
  397. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +25 -0
  398. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  399. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
  400. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
  401. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
  402. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
  403. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
  404. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
  405. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
  406. mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
  407. mindspore/profiler/common/constant.py +12 -0
  408. mindspore/profiler/common/msprof_cmd_tool.py +42 -23
  409. mindspore/profiler/common/path_manager.py +24 -0
  410. mindspore/profiler/common/profiler_context.py +26 -2
  411. mindspore/profiler/common/profiler_meta_data.py +74 -0
  412. mindspore/profiler/common/profiler_parameters.py +59 -18
  413. mindspore/profiler/common/profiler_path_manager.py +66 -7
  414. mindspore/profiler/dynamic_profiler.py +112 -79
  415. mindspore/profiler/envprofiler.py +26 -1
  416. mindspore/profiler/experimental_config.py +197 -0
  417. mindspore/profiler/mstx.py +57 -14
  418. mindspore/profiler/platform/npu_profiler.py +33 -7
  419. mindspore/profiler/profiler.py +541 -45
  420. mindspore/profiler/profiler_action_controller.py +1 -1
  421. mindspore/profiler/profiler_interface.py +4 -0
  422. mindspore/profiler/schedule.py +57 -22
  423. mindspore/rewrite/api/node.py +15 -13
  424. mindspore/rewrite/api/symbol_tree.py +1 -1
  425. mindspore/run_check/_check_version.py +25 -14
  426. mindspore/run_check/run_check.py +1 -1
  427. mindspore/runtime/__init__.py +2 -2
  428. mindspore/runtime/executor.py +40 -11
  429. mindspore/runtime/memory.py +37 -13
  430. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  431. mindspore/swresample-4.dll +0 -0
  432. mindspore/swscale-6.dll +0 -0
  433. mindspore/tbbmalloc.dll +0 -0
  434. mindspore/tinyxml2.dll +0 -0
  435. mindspore/train/__init__.py +8 -8
  436. mindspore/train/_utils.py +43 -9
  437. mindspore/train/amp.py +1 -1
  438. mindspore/train/callback/__init__.py +2 -2
  439. mindspore/train/callback/_callback.py +2 -16
  440. mindspore/train/callback/_checkpoint.py +24 -40
  441. mindspore/train/callback/_cluster_monitor.py +14 -18
  442. mindspore/train/callback/_flops_collector.py +2 -3
  443. mindspore/train/callback/_history.py +7 -4
  444. mindspore/train/callback/_lambda_callback.py +2 -2
  445. mindspore/train/callback/_landscape.py +0 -3
  446. mindspore/train/callback/_loss_monitor.py +2 -1
  447. mindspore/train/callback/_on_request_exit.py +6 -5
  448. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  449. mindspore/train/callback/_summary_collector.py +8 -13
  450. mindspore/train/callback/_time_monitor.py +2 -1
  451. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -105
  452. mindspore/train/data_sink.py +25 -2
  453. mindspore/train/dataset_helper.py +4 -5
  454. mindspore/train/loss_scale_manager.py +8 -7
  455. mindspore/train/metrics/accuracy.py +3 -3
  456. mindspore/train/metrics/confusion_matrix.py +9 -9
  457. mindspore/train/metrics/error.py +3 -3
  458. mindspore/train/metrics/hausdorff_distance.py +4 -4
  459. mindspore/train/metrics/mean_surface_distance.py +3 -3
  460. mindspore/train/metrics/metric.py +0 -12
  461. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  462. mindspore/train/metrics/precision.py +8 -6
  463. mindspore/train/metrics/recall.py +9 -9
  464. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  465. mindspore/train/mind_ir_pb2.py +19 -12
  466. mindspore/train/model.py +262 -127
  467. mindspore/train/serialization.py +246 -988
  468. mindspore/train/summary/_summary_adapter.py +2 -2
  469. mindspore/train/summary/summary_record.py +1 -1
  470. mindspore/turbojpeg.dll +0 -0
  471. mindspore/utils/__init__.py +3 -2
  472. mindspore/utils/dryrun.py +4 -2
  473. mindspore/utils/hooks.py +81 -0
  474. mindspore/utils/runtime_execution_order_check.py +2 -0
  475. mindspore/utils/utils.py +138 -4
  476. mindspore/vcmeta.dll +0 -0
  477. mindspore/vcruntime140.dll +0 -0
  478. mindspore/vcruntime140_1.dll +0 -0
  479. mindspore/version.py +1 -1
  480. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/METADATA +2 -1
  481. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/RECORD +485 -440
  482. mindspore/_install_custom.py +0 -43
  483. mindspore/common/_register_for_adapter.py +0 -74
  484. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  485. mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
  486. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  487. mindspore/ops_generate/gen_constants.py +0 -190
  488. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  489. mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
  490. /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
  491. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
  492. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +0 -0
  493. {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
mindspore/parallel/shard.py

@@ -15,17 +15,77 @@
 """shard"""
 
 import copy
+import numpy as np
 import mindspore as ms
 from mindspore import log as logger
 from mindspore._c_expression import Shard_
 
 
-class Layout:
+class _DistributedTensorInfo:
+    """
+    Describe the distributed information of a tensor.
+
+    Args:
+        distributed_info (Union[Layout, DeviceMesh]): The distributed information of a tensor.
+
+    Raises:
+        TypeError: If `distributed_info` is not a Layout type.
+
+    Examples:
+        >>> from mindspore import _DistributedTensorInfo, Layout
+        >>> layout = Layout((2, 2), ("dp", "mp"))
+        >>> src_layout = layout("dp", "mp")
+        >>> distributed_info = _DistributedTensorInfo(src_layout)
+        >>> print(distributed_info.sharding_strategy)
+        [2, 2]
     """
-    Parallel layout describes the detailed sharding information.
 
-    For more detailed information, refer to the file `Higher-order Operator-level Parallelism
-    <https://www.mindspore.cn/docs/en/master/model_train/parallel/advanced_operator_parallel.html>`_.
+    def __init__(self, distributed_info):
+        if isinstance(distributed_info, Layout):
+            self._layout = distributed_info
+            self._distributed_info = distributed_info
+        else:
+            raise TypeError(
+                f"DistributedTensorInfo only supports Layout or DeviceMesh as input, but got {type(distributed_info)}")
+        self._sharding_strategy = None
+
+    @property
+    def layout(self):
+        """return layout of current tensor"""
+        return self._layout
+
+    @property
+    def distributed_info(self):
+        """return the distributed info, it depends on user's input"""
+        return self._distributed_info
+
+    @property
+    def sharding_strategy(self):
+        """return the sharding strategy of current tensor"""
+        if self._sharding_strategy is None:
+            layout_info = self._layout.to_dict()
+            device_matrix = layout_info["device_matrix"]
+            tensor_map = layout_info["tensor_map"]
+            sharding_strategy = []
+            for map_value in tensor_map:
+                if isinstance(map_value, (tuple, list)):
+                    shard_size = 1
+                    for value in map_value:
+                        if value != -1:
+                            shard_size *= device_matrix[len(device_matrix) - value - 1]
+                    sharding_strategy.append(shard_size)
+                else:
+                    if map_value != -1:
+                        sharding_strategy.append(device_matrix[len(device_matrix) - map_value - 1])
+                    else:
+                        sharding_strategy.append(1)
+            self._sharding_strategy = sharding_strategy
+        return self._sharding_strategy
+
+
+class Layout:
+    """
+    Topological abstraction describing cluster devices for tensor slice placement on the cluster.
 
     Note:
         - It is valid only in semi auto parallel or auto parallel mode.
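The hunk above (from mindspore/parallel/shard.py, item 389 in the file list) derives a per-dimension sharding strategy from a layout's device_matrix and tensor_map. As a reading aid only, not part of the package, here is a minimal standalone sketch of the same index arithmetic, using the device_matrix and tensor_map implied by the docstring example in the added code:

# Reading aid only: mirrors the index arithmetic of the new sharding_strategy
# property above; plain Python, no MindSpore required.
def sharding_strategy(device_matrix, tensor_map):
    """Per-dimension shard sizes from a device matrix and tensor map."""
    strategy = []
    for map_value in tensor_map:
        if isinstance(map_value, (tuple, list)):
            # A dimension mapped to several device axes is split by their product.
            shard_size = 1
            for value in map_value:
                if value != -1:
                    shard_size *= device_matrix[len(device_matrix) - value - 1]
            strategy.append(shard_size)
        elif map_value != -1:
            # tensor_map indexes device axes from the right, hence the reversal.
            strategy.append(device_matrix[len(device_matrix) - map_value - 1])
        else:
            strategy.append(1)  # -1 means the dimension is not sharded
    return strategy

# Layout((2, 2), ("dp", "mp")) called as layout("dp", "mp") corresponds to
# device_matrix (2, 2) and tensor_map (1, 0), giving [2, 2] as in the
# docstring example above.
print(sharding_strategy((2, 2), (1, 0)))  # [2, 2]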
@@ -38,28 +98,35 @@ class Layout:
         alias_name (tuple): The alias name for each axis of device_matrix, its length should be equal to that of device_matrix and its element type is string.
             When using "interleaved_parallel" as an alias name, the tensor would be split into multiple
             copies on the corresponding partition dimension on a single card.
+        rank_list (list, optional): Data is allocated to the device according to rank_list. Default: ``None``.
+
     Raises:
         TypeError: `device_matrix` is not a tuple type.
         TypeError: `alias_name` is not a tuple type.
+        TypeError: 'rank_list' is not a list type.
         ValueError: `device_matrix` length is not equal to `alias_name` length.
         TypeError: The element of `device_matrix` is not int type.
         TypeError: The element of `alias_name` is not a str type.
+        TypeError: The element of `rank_list` is not int type.
         ValueError: The element of `alias_name` is an empty str.
         ValueError: The element of `alias_name` is "None".
         ValueError: `alias_name` contains repeated element.
 
+    Supported Platforms:
+        ``Ascend``
+
     Examples:
-        >>> from mindspore import Layout
+        >>> from mindspore.parallel import Layout
         >>> layout = Layout((2, 2, 2), ("dp", "sp", "mp"))
         >>> layout0 = layout("dp", "mp")
         >>> print(layout0.to_dict())
-        {"device_matrix": (2, 2, 2), "tensor_map": (2, 0), "interleaved_parallel": False}
-        >>> # Total device num is 4, but split the tensor in local device into two copies.
+        {'device_matrix': (2, 2, 2), 'tensor_map': (2, 0), 'interleaved_parallel': False,
+        'alias_name': {'dp', 'sp', 'mp'}, 'rank_list': [0, 1, 2, 3, 4, 5, 6, 7]}
         >>> layout = Layout((2, 2, 2), ("dp", "sp", "interleaved_parallel"))
         >>> layout1 = layout(("dp", "interleaved_parallel"), "sp")
     """
 
-    def __init__(self, device_matrix, alias_name):
+    def __init__(self, device_matrix, alias_name, rank_list=None):
         if not isinstance(device_matrix, tuple):
             raise TypeError(f'device_matrix must be tuple type, but got:{type(device_matrix)}')
         if not isinstance(alias_name, tuple):
@@ -85,6 +152,20 @@ class Layout:
         self._device_shape = device_matrix
         self._alias_name = alias_name
         self._tensor_map = None
+        self._rank_list = list(range(np.prod(np.array(self._device_shape))))
+        if rank_list is not None:
+            if not isinstance(rank_list, list):
+                raise TypeError(f"The rank_list should be a list, but got {type(rank_list).__name__}.")
+            for in_ele in rank_list:
+                if not isinstance(in_ele, int):
+                    raise TypeError(f"The element of rank_list should be int, but got {type(in_ele).__name__}.")
+            if len(np.array(rank_list).shape) != 1:
+                raise ValueError(
+                    f"The rank_list should be a 1-D list, but got {len(np.array(rank_list).shape)}-D list.")
+            if len(rank_list) != np.prod(np.array(self._device_shape)):
+                raise ValueError(f"The length of rank_list should be equal to the product of device_matrix, "
+                                 f"but got {len(rank_list)} and {np.prod(np.array(self._device_shape))}.")
+            self._rank_list = rank_list
 
     def __call__(self, *tensor_map):
         self._tensor_map = ()
@@ -125,8 +206,8 @@ class Layout:
             raise ValueError("The tensor_map of layout is None")
         interleaved_parallel = "interleaved_parallel" in self._alias_name
         return {"device_matrix": self._device_shape, "tensor_map": self._tensor_map,
-                "interleaved_parallel": interleaved_parallel, "alias_name": self._alias_name}
-
+                "interleaved_parallel": interleaved_parallel, "alias_name": self._alias_name,
+                "rank_list": self._rank_list}
 
 
 class Shard(Shard_):
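The two hunks above add an optional rank_list argument to Layout and surface it through to_dict(). A minimal usage sketch, assuming mindspore 2.6.0 and the mindspore.parallel.Layout import path used in the updated docstring (untested here):

# Sketch only, assuming mindspore 2.6.0 is installed.
from mindspore.parallel import Layout

# rank_list must be a flat list of ints whose length equals the product of
# device_matrix; here the four ranks of a 2 x 2 device matrix are reordered.
layout = Layout((2, 2), ("dp", "mp"), rank_list=[3, 2, 1, 0])
src_layout = layout("dp", "mp")   # bind a tensor map before calling to_dict()

info = src_layout.to_dict()
print(info["rank_list"])          # [3, 2, 1, 0]
print(info["device_matrix"], info["tensor_map"])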
@@ -144,18 +225,6 @@ class Shard(Shard_):
         self.level = None
 
     def __call__(self, fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascend", level=0):
-        parallel_mode = ms.context.get_auto_parallel_context("parallel_mode")
-        if parallel_mode not in ("auto_parallel", "semi_auto_parallel"):
-            raise AssertionError(
-                f"Cell shard only supports auto parallel and semi auto parallel.")
-        if ms.context.get_context("device_target") not in ("Ascend", "GPU"):
-            raise AssertionError(
-                f"'Shard' now only supports 'Ascend' and 'GPU'")
-        if parallel_mode == "auto_parallel" and \
-                ms.context.get_auto_parallel_context("search_mode") != "sharding_propagation":
-            raise AssertionError(f"'search_mode' must be 'sharding_propagation' for 'Shard' when the "
-                                 f"'parallel_mode' is 'auto_parallel.'")
-
         if not isinstance(in_strategy, tuple):
             raise TypeError(
                 f"For 'Shard', the 'in_strategy' should be a tuple, but got {type(in_strategy).__name__}.")
@@ -184,7 +253,8 @@ class Shard(Shard_):
                            "will be overwritten as False.")
             ms.set_algo_parameters(fully_use_devices=False)
 
-        if ms.context.get_auto_parallel_context("full_batch_is_set") is False:
+        if ms.context.get_auto_parallel_context("full_batch_is_set") is False and \
+                ms.context.get_context("mode") == ms.context.PYNATIVE_MODE:
             logger.warning("When calling the shard interface, "
                            "'dataset_strategy' or 'full_batch' is not manually set by the user, "
                            "and the 'dataset_strategy' will be set to 'full_batch'.")
@@ -196,13 +266,13 @@ class Shard(Shard_):
 
         if isinstance(fn, ms.nn.Cell):
             for param in fn.trainable_params():
-                param.is_in_shard = True
+                param.param_info.is_in_pynative_shard = True
 
         # Set parameter layout to corresponding parameter
         self._set_param_layout_into_parameter(fn, parameter_plan)
 
         def shard_fn(*args):
-            @ms.common.jit(hash_args=fn)
+            @ms.common.jit(hash_args=fn, backend="ms_backend")
             def after_shard(*args):
                 return shard_(fn, in_strategy, out_strategy, device, level)(*args)
 
@@ -293,7 +363,7 @@ class Shard(Shard_):
         for stra in strategy:
             if not isinstance(stra, (tuple, Layout)):
                 raise TypeError(
-                    f"The '{log_info}' should be a tuple(tuple(int)) or tuple(mindspore.Layout), "
+                    f"The '{log_info}' should be a tuple(tuple(int)) or tuple(mindspore.parallel.Layout), "
                     f"but got {type(stra).__name__}")
             if isinstance(stra, Layout):
                 strategy_set.add("layout")
@@ -315,7 +385,7 @@ class Shard(Shard_):
         for in_ele in layout:
             if not isinstance(in_ele, Layout):
                 raise TypeError(f"The {log_info} item should be a object of class Layout.")
-            layout_value += (in_ele.to_dict(),)
+            layout_value += ({k: v for k, v in in_ele.to_dict().items() if k != "rank_list"},)
         return layout_value
 
     def _check_tuple_strategy(self, dim_strategy):
@@ -326,8 +396,8 @@
 
 def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascend", level=0):
     """
-    Defining the input and output layouts of this cell and the parallel strategies of remaining ops will be
-    generated by sharding propagation. In PyNative mode, use this method to specify a Cell for distributed
+    Specify the input and output slicing strategy for a Cell or function.
+    In PyNative mode, use this method to specify a Cell for distributed
     execution in graph mode. In Graph mode, use this method to specify distribution strategy for a Cell,
     strategy for others will be set by sharding propagation.
     in_strategy and out_strategy define the input and output layout respectively.
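The rewritten summary above, together with the updated Examples in the hunks further down, moves the recommended entry point to mindspore.parallel.shard. A condensed sketch of the functional case described there (hedged: untested, and it assumes 8 devices and a distributed launch such as msrun so that the communication group size is greater than 1; a later hunk shows that shard now simply returns fn unchanged when the group size is 1):

# Condensed from the updated docstring Examples (Case 2) in the hunks below;
# untested sketch, requires 8 devices and a distributed launch.
import numpy as np
import mindspore as ms
from mindspore import Tensor
from mindspore.communication import init
from mindspore.parallel import shard

ms.set_context(mode=ms.GRAPH_MODE)
init()

def add(x, y):
    return x + y

# Split dim 0 four ways and dim 1 two ways for both inputs (4 * 2 = 8 slices).
sharded_add = shard(add, in_strategy=((4, 2), (4, 2)))

x = Tensor(np.ones((32, 10)), dtype=ms.float32)
y = Tensor(np.ones((32, 10)), dtype=ms.float32)
print(sharded_add(x, y).shape)  # (32, 10)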
@@ -337,33 +407,37 @@ def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascen
     The parallel strategies of remaining operators are derived from the strategy specified by the input and output.
 
     Note:
-        If ms.shard is called, the parallel mode in `set_auto_parallel_context` (parallel_mode) will be set to
-        "auto_parallel" and the search mode (search_mode) to "sharding_propagation".
-        If the input contain Parameter, its strategy should be set in `in_strategy`.
+        - It is valid only in semi auto parallel or auto parallel mode.
+          In other parallel modes, strategies set here will be ignored.
+        - If the input contain Parameter, its strategy should be set in `in_strategy`.
+        - This method currently does not support dynamic shapes.
 
     Args:
         fn (Union[Cell, Function]): Function to be executed in parallel.
-            Its arguments and return value must be Tensor or Parameter.
+            Its arguments and return value must be Tensor.
             If `fn` is a Cell with parameters, `fn` needs to be an instantiated object,
            otherwise its arguments cannot be accessed.
         in_strategy (tuple): Define the layout of inputs, each element of the tuple should be a tuple(int) or
-            tuple(mindspore.Layout).
+            tuple(mindspore.parallel.Layout).
             Tuple defines the layout of the corresponding input.
-        out_strategy (Union[tuple, None]): Define the layout of outputs similar with `in_strategy`.
-            It is not in use right now. Default: ``None`` .
-        parameter_plan (Union[dict, None]): Define the layout for the specified parameters. Each element in dict
+        out_strategy (Union[tuple, None], optional): Define the layout of outputs similar with `in_strategy`.
+            Default: ``None`` .
+        parameter_plan (Union[dict, None], optional): Define the layout for the specified parameters.
+            Each element in dict
             defines the layout of the parameter like "param_name: layout".
             The key is a parameter name of type 'str'.
-            The value is a 1-D integer tuple or a 1-D mindspore.Layout tuple,
+            The value is a 1-D integer tuple or a 1-D mindspore.parallel.Layout tuple,
             indicating the corresponding layout.
             If the parameter name is incorrect or the corresponding parameter
-            has been set, the parameter setting will be ignored.
+            has been set, the parameter setting will be ignored. Supported
+            only when `fn` is a Cell with parameters.
             Default: ``None`` .
-        device (string): Select a certain `device` target. It is not in use right now.
-            Support ["CPU", "GPU", "Ascend"]. Default: ``"Ascend"`` .
-        level (int): Option for parallel strategy infer algorithm, namely the object function, maximize computation
-            over communication ratio, maximize speed performance, minimize memory usage etc. It is not in
-            use right now. Support [0, 1, 2]. Default: ``0`` .
+        device (str, optional): Select a certain `device` target. It is not in use right now.
+            Support ["CPU", "GPU", "Ascend"]. Default: ``"Ascend"`` .
+        level (int, optional): Option for parallel strategy infer algorithm, namely the object function,
+            maximize computation
+            over communication ratio, maximize speed performance, minimize memory usage etc. It is not in
+            use right now. Support [0, 1, 2]. Default: ``0`` .
 
     Returns:
         Function, return the function that will be executed under auto parallel process.
@@ -373,26 +447,28 @@ def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascen
         AssertionError: If device_target it not "Ascend" or "GPU".
         TypeError: If `in_strategy` is not a tuple.
         TypeError: If `out_strategy` is not a tuple or None.
-        TypeError: If any element in `in_strategy` is not a tuple(int) or tuple(mindspore.Layout).
-        TypeError: If any element in `out_strategy` is not a tuple(int) or tuple(mindspore.Layout).
+        TypeError: If any element in `in_strategy` is not a tuple(int) or tuple(mindspore.parallel.Layout).
+        TypeError: If any element in `out_strategy` is not a tuple(int) or tuple(mindspore.parallel.Layout).
         TypeError: If `parameter_plan` is not a dict or None.
         TypeError: If any key in `parameter_plan` is not a str.
-        TypeError: If any value in `parameter_plan` is not a tuple(int) or a tuple(mindspore.Layout).
+        TypeError: If any value in `parameter_plan` is not a tuple(int) or a tuple(mindspore.parallel.Layout).
         TypeError: If `device` is not a str.
         TypeError: If `level` is not an integer.
 
     Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``Ascend``
 
     Examples:
         >>> import numpy as np
         >>> import mindspore as ms
-        >>> from mindspore import Tensor, nn
+        >>> from mindspore import Tensor, nn, ops
         >>> from mindspore.communication import init
+        >>> from mindspore.parallel import shard
+        >>> from mindspore.parallel import Layout
+        >>> from mindspore.nn.utils import no_init_parameters
+        >>> from mindspore.parallel.auto_parallel import AutoParallel
         >>> ms.set_context(mode=ms.GRAPH_MODE)
         >>> init()
-        >>> ms.set_auto_parallel_context(parallel_mode="auto_parallel", search_mode="sharding_propagation",
-        ...                              device_num=8)
         >>>
         >>> # Case 1: cell uses functional
         >>> class BasicBlock(nn.Cell):
@@ -404,7 +480,7 @@ def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascen
         >>> x = ops.abs(x)
         >>> return x + y
         >>> # shard a function with tuple(int) strategies
-        >>> self.shard_my_add = ms.shard(my_add, in_strategy=((2, 2), (1, 4)), out_strategy=((4, 1),))
+        >>> self.shard_my_add = shard(my_add, in_strategy=((2, 2), (1, 4)), out_strategy=((4, 1),))
         >>>
         >>> def construct(self, x, u):
         >>> x = self.gelu(x)
@@ -432,7 +508,7 @@ def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascen
         >>> super(Net, self).__init__()
         >>> # setting cell sharding strategy and parameter_plan by tuple(int)
         >>> self.layer_net1 = NetForward()
-        >>> self.layer_net1_shard = ms.shard(self.layer_net1, in_strategy=((4, 2), (2, 1)),
+        >>> self.layer_net1_shard = shard(self.layer_net1, in_strategy=((4, 2), (2, 1)),
         ...                                  parameter_plan={"self.layer_net1.block1.weight": (4, 1)})
         >>>
         >>> # setting cell sharding strategy and parameter_plan by tuple(ms.Layout)
@@ -440,7 +516,7 @@ def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascen
         >>> layout = Layout((4, 2, 1), ("dp", "mp", "sp"))
         >>> in_layout = (layout("dp", "mp"), layout("mp", "sp"))
         >>> param_layout = layout("dp", "sp")
-        >>> self.layer_net2_shard = ms.shard(self.layer_net2, in_strategy=in_layout,
+        >>> self.layer_net2_shard = shard(self.layer_net2, in_strategy=in_layout,
         ...                                  parameter_plan={"self.layer_net2.block2.weight": param_layout})
         >>> self.flatten = nn.Flatten()
         >>> self.layer1 = nn.Dense(64, 64)
@@ -458,26 +534,25 @@ def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascen
         >>> x = self.matmul(x, Tensor(np.ones(shape=(32, 32)), dtype=ms.float32))
         >>> return x
         >>>
-        >>> net = Net()
+        >>> with no_init_parameters():
+        >>>     net = Net()
         >>> x = Tensor(np.ones(shape=(64, 1, 8, 8)), dtype=ms.float32)
         >>> y = Tensor(np.ones(shape=(64, 1, 8, 8)), dtype=ms.float32)
-        >>> net(x, y)
+        >>> parallel_net = AutoParallel(net, parallel_mode='sharding_propagation')
+        >>> parallel_net(x, y)
         >>>
         >>> # Case 2: function uses functional sharding
         >>> def test_shard(x, y):
         ...     return x + y
         >>> x = Tensor(np.ones(shape=(32, 10)), dtype=ms.float32)
         >>> y = Tensor(np.ones(shape=(32, 10)), dtype=ms.float32)
-        >>> output = ms.shard(test_shard, in_strategy=((4, 2), (4, 2)))(x, y)
+        >>> output = shard(test_shard, in_strategy=((4, 2), (4, 2)))(x, y)
         >>> print(output.shape)
         (32, 10)
 
-    Tutorial Examples:
-        - `Functional Operator Sharding
-          <https://www.mindspore.cn/docs/en/master/model_train/parallel/shard_function_parallel.html>`_
-        - `mindspore.Layout
-          <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.Layout.html>`_
     """
+    if ms.communication.management.get_group_size() == 1:
+        return fn
     if not isinstance(fn, (ms.nn.Cell)):
         logger.warning("'fn' is not a mindspore.nn.Cell, and its definition cannot involve Parameter; "
                        "otherwise, the result may be incorrect."