mindspore 2.5.0__cp39-cp39-win_amd64.whl → 2.6.0rc1__cp39-cp39-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (491)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +6 -4
  5. mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -33
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parse/__init__.py +6 -7
  14. mindspore/_extends/parse/compile_config.py +19 -0
  15. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
  16. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  17. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  18. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  19. mindspore/_extends/parse/parser.py +24 -193
  20. mindspore/_extends/parse/resources.py +1 -5
  21. mindspore/_extends/parse/standard_method.py +97 -74
  22. mindspore/_extends/pijit/__init__.py +2 -2
  23. mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
  24. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  25. mindspore/_extends/utils.py +1 -1
  26. mindspore/amp.py +4 -4
  27. mindspore/atlprov.dll +0 -0
  28. mindspore/avcodec-59.dll +0 -0
  29. mindspore/avdevice-59.dll +0 -0
  30. mindspore/avfilter-8.dll +0 -0
  31. mindspore/avformat-59.dll +0 -0
  32. mindspore/avutil-57.dll +0 -0
  33. mindspore/boost/__init__.py +2 -2
  34. mindspore/boost/base.py +3 -7
  35. mindspore/boost/boost_cell_wrapper.py +2 -2
  36. mindspore/c1.dll +0 -0
  37. mindspore/c1xx.dll +0 -0
  38. mindspore/c2.dll +0 -0
  39. mindspore/common/__init__.py +4 -3
  40. mindspore/common/_grad_function.py +56 -0
  41. mindspore/common/_pijit_context.py +14 -5
  42. mindspore/common/_register_for_tensor.py +1 -1
  43. mindspore/common/_stub_tensor.py +5 -10
  44. mindspore/common/_tensor_cpp_method.py +1 -1
  45. mindspore/common/_tensor_docs.py +1915 -3287
  46. mindspore/common/api.py +341 -354
  47. mindspore/common/auto_dynamic_shape.py +41 -44
  48. mindspore/common/dtype.py +5 -2
  49. mindspore/common/dump.py +7 -5
  50. mindspore/common/file_system.py +3 -0
  51. mindspore/common/hook_handle.py +5 -3
  52. mindspore/common/initializer.py +10 -6
  53. mindspore/common/jit_begin_end.py +94 -0
  54. mindspore/common/jit_config.py +6 -1
  55. mindspore/common/jit_context.py +76 -0
  56. mindspore/common/jit_trace.py +378 -0
  57. mindspore/common/lazy_inline.py +2 -2
  58. mindspore/common/mutable.py +5 -4
  59. mindspore/common/parameter.py +106 -39
  60. mindspore/common/seed.py +2 -2
  61. mindspore/common/sparse_tensor.py +23 -17
  62. mindspore/common/tensor.py +297 -714
  63. mindspore/communication/__init__.py +7 -5
  64. mindspore/communication/_comm_helper.py +47 -2
  65. mindspore/communication/comm_func.py +70 -53
  66. mindspore/communication/management.py +83 -17
  67. mindspore/context.py +214 -560
  68. mindspore/dataset/__init__.py +44 -20
  69. mindspore/dataset/audio/__init__.py +2 -8
  70. mindspore/dataset/audio/transforms.py +3 -17
  71. mindspore/dataset/core/config.py +3 -3
  72. mindspore/dataset/engine/cache_client.py +1 -1
  73. mindspore/dataset/engine/datasets.py +102 -120
  74. mindspore/dataset/engine/datasets_audio.py +22 -22
  75. mindspore/dataset/engine/datasets_standard_format.py +43 -24
  76. mindspore/dataset/engine/datasets_text.py +78 -85
  77. mindspore/dataset/engine/datasets_user_defined.py +108 -76
  78. mindspore/dataset/engine/datasets_vision.py +111 -108
  79. mindspore/dataset/engine/iterators.py +5 -3
  80. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  81. mindspore/dataset/engine/samplers.py +279 -57
  82. mindspore/dataset/engine/serializer_deserializer.py +2 -1
  83. mindspore/dataset/engine/validators.py +10 -0
  84. mindspore/dataset/text/__init__.py +7 -6
  85. mindspore/dataset/text/transforms.py +6 -5
  86. mindspore/dataset/text/utils.py +3 -3
  87. mindspore/dataset/transforms/__init__.py +0 -9
  88. mindspore/dataset/transforms/transforms.py +3 -3
  89. mindspore/dataset/utils/browse_dataset.py +1 -1
  90. mindspore/dataset/vision/__init__.py +2 -9
  91. mindspore/dataset/vision/transforms.py +202 -158
  92. mindspore/dataset/vision/utils.py +7 -5
  93. mindspore/device_context/ascend/op_debug.py +60 -1
  94. mindspore/device_context/ascend/op_tuning.py +0 -4
  95. mindspore/device_manager.py +39 -3
  96. mindspore/dnnl.dll +0 -0
  97. mindspore/dpcmi.dll +0 -0
  98. mindspore/experimental/es/embedding_service.py +35 -27
  99. mindspore/experimental/map_parameter.py +4 -4
  100. mindspore/experimental/optim/adadelta.py +22 -26
  101. mindspore/experimental/optim/adagrad.py +4 -4
  102. mindspore/experimental/optim/adam.py +4 -0
  103. mindspore/experimental/optim/adamax.py +4 -4
  104. mindspore/experimental/optim/adamw.py +4 -0
  105. mindspore/experimental/optim/asgd.py +1 -1
  106. mindspore/experimental/optim/lr_scheduler.py +40 -22
  107. mindspore/experimental/optim/radam.py +5 -5
  108. mindspore/experimental/optim/rprop.py +1 -1
  109. mindspore/experimental/optim/sgd.py +1 -1
  110. mindspore/hal/contiguous_tensors_handle.py +6 -10
  111. mindspore/hal/device.py +55 -81
  112. mindspore/hal/event.py +38 -55
  113. mindspore/hal/memory.py +93 -144
  114. mindspore/hal/stream.py +81 -125
  115. mindspore/include/dataset/constants.h +7 -4
  116. mindspore/include/dataset/execute.h +2 -2
  117. mindspore/jpeg62.dll +0 -0
  118. mindspore/log.py +40 -2
  119. mindspore/mindrecord/__init__.py +20 -7
  120. mindspore/mindspore_backend_common.dll +0 -0
  121. mindspore/mindspore_backend_manager.dll +0 -0
  122. mindspore/mindspore_common.dll +0 -0
  123. mindspore/mindspore_core.dll +0 -0
  124. mindspore/mindspore_dump.dll +0 -0
  125. mindspore/mindspore_frontend.dll +0 -0
  126. mindspore/mindspore_glog.dll +0 -0
  127. mindspore/mindspore_memory_pool.dll +0 -0
  128. mindspore/mindspore_ms_backend.dll +0 -0
  129. mindspore/mindspore_ops.dll +0 -0
  130. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  131. mindspore/mindspore_ops_kernel_common.dll +0 -0
  132. mindspore/mindspore_profiler.dll +0 -0
  133. mindspore/mindspore_pyboost.dll +0 -0
  134. mindspore/mindspore_pynative.dll +0 -0
  135. mindspore/mindspore_res_manager.dll +0 -0
  136. mindspore/mindspore_runtime_pipeline.dll +0 -0
  137. mindspore/mint/__init__.py +131 -700
  138. mindspore/mint/distributed/__init__.py +5 -1
  139. mindspore/mint/distributed/distributed.py +194 -109
  140. mindspore/mint/linalg/__init__.py +2 -0
  141. mindspore/mint/nn/__init__.py +280 -18
  142. mindspore/mint/nn/functional.py +282 -64
  143. mindspore/mint/nn/layer/__init__.py +4 -0
  144. mindspore/mint/nn/layer/_functions.py +7 -3
  145. mindspore/mint/nn/layer/activation.py +120 -13
  146. mindspore/mint/nn/layer/conv.py +218 -24
  147. mindspore/mint/nn/layer/normalization.py +15 -16
  148. mindspore/mint/nn/layer/padding.py +1 -1
  149. mindspore/mint/nn/layer/pooling.py +66 -1
  150. mindspore/mint/optim/__init__.py +2 -1
  151. mindspore/mint/optim/sgd.py +171 -0
  152. mindspore/msobj140.dll +0 -0
  153. mindspore/mspdb140.dll +0 -0
  154. mindspore/mspdbcore.dll +0 -0
  155. mindspore/mspdbst.dll +0 -0
  156. mindspore/mspft140.dll +0 -0
  157. mindspore/msvcdis140.dll +0 -0
  158. mindspore/msvcp140_1.dll +0 -0
  159. mindspore/msvcp140_2.dll +0 -0
  160. mindspore/msvcp140_atomic_wait.dll +0 -0
  161. mindspore/msvcp140_codecvt_ids.dll +0 -0
  162. mindspore/nn/__init__.py +4 -1
  163. mindspore/nn/cell.py +1250 -176
  164. mindspore/nn/layer/activation.py +23 -21
  165. mindspore/nn/layer/basic.py +22 -16
  166. mindspore/nn/layer/container.py +1 -1
  167. mindspore/nn/layer/conv.py +22 -17
  168. mindspore/nn/layer/embedding.py +9 -8
  169. mindspore/nn/layer/normalization.py +48 -42
  170. mindspore/nn/layer/pooling.py +75 -31
  171. mindspore/nn/layer/transformer.py +11 -10
  172. mindspore/nn/learning_rate_schedule.py +4 -2
  173. mindspore/nn/loss/loss.py +27 -19
  174. mindspore/nn/optim/ada_grad.py +6 -5
  175. mindspore/nn/optim/adadelta.py +9 -7
  176. mindspore/nn/optim/adafactor.py +1 -1
  177. mindspore/nn/optim/adam.py +16 -12
  178. mindspore/nn/optim/adamax.py +8 -7
  179. mindspore/nn/optim/adasum.py +5 -5
  180. mindspore/nn/optim/asgd.py +1 -1
  181. mindspore/nn/optim/ftrl.py +11 -9
  182. mindspore/nn/optim/lamb.py +1 -1
  183. mindspore/nn/optim/lazyadam.py +12 -10
  184. mindspore/nn/optim/momentum.py +7 -6
  185. mindspore/nn/optim/optimizer.py +2 -2
  186. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  187. mindspore/nn/optim/rmsprop.py +13 -12
  188. mindspore/nn/optim/rprop.py +9 -7
  189. mindspore/nn/optim/sgd.py +9 -6
  190. mindspore/nn/optim/tft_wrapper.py +5 -2
  191. mindspore/nn/probability/bijector/bijector.py +17 -11
  192. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  193. mindspore/nn/probability/bijector/invert.py +2 -2
  194. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  195. mindspore/nn/probability/bijector/softplus.py +3 -2
  196. mindspore/nn/probability/distribution/beta.py +3 -3
  197. mindspore/nn/probability/distribution/categorical.py +1 -1
  198. mindspore/nn/probability/distribution/cauchy.py +4 -2
  199. mindspore/nn/probability/distribution/exponential.py +6 -7
  200. mindspore/nn/probability/distribution/gamma.py +2 -2
  201. mindspore/nn/probability/distribution/gumbel.py +2 -2
  202. mindspore/nn/probability/distribution/half_normal.py +5 -3
  203. mindspore/nn/probability/distribution/logistic.py +5 -3
  204. mindspore/nn/probability/distribution/poisson.py +1 -1
  205. mindspore/nn/probability/distribution/uniform.py +5 -3
  206. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  207. mindspore/nn/reinforcement/tensor_array.py +1 -1
  208. mindspore/nn/wrap/__init__.py +6 -6
  209. mindspore/nn/wrap/cell_wrapper.py +178 -117
  210. mindspore/nn/wrap/grad_reducer.py +45 -36
  211. mindspore/nn/wrap/loss_scale.py +3 -3
  212. mindspore/numpy/array_creations.py +3 -3
  213. mindspore/numpy/array_ops.py +1 -1
  214. mindspore/numpy/math_ops.py +4 -4
  215. mindspore/numpy/utils.py +1 -2
  216. mindspore/numpy/utils_const.py +1 -2
  217. mindspore/opencv_core452.dll +0 -0
  218. mindspore/opencv_imgcodecs452.dll +0 -0
  219. mindspore/opencv_imgproc452.dll +0 -0
  220. mindspore/ops/__init__.py +3 -2
  221. mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
  222. mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
  223. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  224. mindspore/ops/_register_for_op.py +0 -11
  225. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  226. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
  227. mindspore/ops/_vmap/vmap_array_ops.py +7 -6
  228. mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
  229. mindspore/ops/_vmap/vmap_math_ops.py +4 -7
  230. mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
  231. mindspore/ops/auto_generate/__init__.py +4 -3
  232. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +102 -49
  233. mindspore/ops/auto_generate/gen_extend_func.py +281 -135
  234. mindspore/ops/auto_generate/gen_ops_def.py +2574 -2326
  235. mindspore/ops/auto_generate/gen_ops_prim.py +8566 -2755
  236. mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
  237. mindspore/ops/composite/__init__.py +2 -1
  238. mindspore/ops/composite/base.py +19 -24
  239. mindspore/ops/composite/math_ops.py +6 -16
  240. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  241. mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -3
  242. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  243. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  244. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  245. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  246. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  247. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  248. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  249. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  250. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  251. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  252. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  253. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  254. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  255. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  256. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  257. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  258. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  259. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  260. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  261. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  262. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  263. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  264. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  265. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  266. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  267. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
  268. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  269. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  270. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  271. mindspore/ops/function/__init__.py +28 -2
  272. mindspore/ops/function/_add_attr_func.py +58 -0
  273. mindspore/ops/function/array_func.py +1629 -2345
  274. mindspore/ops/function/clip_func.py +38 -45
  275. mindspore/ops/function/debug_func.py +36 -44
  276. mindspore/ops/function/grad/__init__.py +1 -0
  277. mindspore/ops/function/grad/grad_func.py +104 -71
  278. mindspore/ops/function/image_func.py +1 -1
  279. mindspore/ops/function/linalg_func.py +46 -78
  280. mindspore/ops/function/math_func.py +3035 -3705
  281. mindspore/ops/function/nn_func.py +676 -241
  282. mindspore/ops/function/other_func.py +159 -1
  283. mindspore/ops/function/parameter_func.py +17 -30
  284. mindspore/ops/function/random_func.py +204 -361
  285. mindspore/ops/function/reshard_func.py +4 -70
  286. mindspore/ops/function/sparse_func.py +3 -3
  287. mindspore/ops/function/sparse_unary_func.py +5 -5
  288. mindspore/ops/function/spectral_func.py +25 -58
  289. mindspore/ops/function/vmap_func.py +24 -17
  290. mindspore/ops/functional.py +6 -4
  291. mindspore/ops/functional_overload.py +547 -4
  292. mindspore/ops/op_info_register.py +32 -244
  293. mindspore/ops/operations/__init__.py +10 -5
  294. mindspore/ops/operations/_custom_ops_utils.py +247 -0
  295. mindspore/ops/operations/_grad_ops.py +1 -10
  296. mindspore/ops/operations/_inner_ops.py +5 -76
  297. mindspore/ops/operations/_ms_kernel.py +4 -10
  298. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  299. mindspore/ops/operations/_scalar_ops.py +3 -2
  300. mindspore/ops/operations/_sequence_ops.py +1 -1
  301. mindspore/ops/operations/_tensor_array.py +1 -1
  302. mindspore/ops/operations/array_ops.py +37 -22
  303. mindspore/ops/operations/comm_ops.py +150 -107
  304. mindspore/ops/operations/custom_ops.py +221 -23
  305. mindspore/ops/operations/debug_ops.py +115 -16
  306. mindspore/ops/operations/inner_ops.py +1 -1
  307. mindspore/ops/operations/linalg_ops.py +1 -58
  308. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  309. mindspore/ops/operations/manually_defined/ops_def.py +746 -79
  310. mindspore/ops/operations/math_ops.py +21 -18
  311. mindspore/ops/operations/nn_ops.py +65 -191
  312. mindspore/ops/operations/other_ops.py +62 -9
  313. mindspore/ops/operations/random_ops.py +13 -7
  314. mindspore/ops/operations/reshard_ops.py +1 -1
  315. mindspore/ops/operations/sparse_ops.py +2 -2
  316. mindspore/ops/primitive.py +43 -32
  317. mindspore/ops/tensor_method.py +232 -13
  318. mindspore/ops_generate/__init__.py +0 -5
  319. mindspore/ops_generate/aclnn/__init__.py +0 -0
  320. mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
  321. mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
  322. mindspore/ops_generate/api/__init__.py +0 -0
  323. mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
  324. mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
  325. mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
  326. mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
  327. mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
  328. mindspore/ops_generate/api/gen_api.py +103 -0
  329. mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
  330. mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
  331. mindspore/ops_generate/common/__init__.py +0 -0
  332. mindspore/ops_generate/common/gen_constants.py +91 -0
  333. mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
  334. mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
  335. mindspore/ops_generate/{template.py → common/template.py} +96 -84
  336. mindspore/ops_generate/gen_ops.py +23 -325
  337. mindspore/ops_generate/op_def/__init__.py +0 -0
  338. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  339. mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
  340. mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -7
  341. mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
  342. mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
  343. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  344. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  345. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  346. mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
  347. mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
  348. mindspore/ops_generate/pyboost/__init__.py +0 -0
  349. mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
  350. mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
  351. mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
  352. mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
  353. mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
  354. mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
  355. mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
  356. mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
  357. mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
  358. mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
  359. mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
  360. mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
  361. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
  362. mindspore/ops_generate/resources/__init__.py +0 -0
  363. mindspore/ops_generate/resources/resource_list.py +30 -0
  364. mindspore/ops_generate/resources/resource_loader.py +36 -0
  365. mindspore/ops_generate/resources/resource_manager.py +64 -0
  366. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  367. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  368. mindspore/parallel/__init__.py +6 -2
  369. mindspore/parallel/_auto_parallel_context.py +133 -6
  370. mindspore/parallel/_cell_wrapper.py +130 -15
  371. mindspore/parallel/_parallel_serialization.py +95 -4
  372. mindspore/parallel/_ps_context.py +1 -1
  373. mindspore/parallel/_recovery_context.py +7 -2
  374. mindspore/parallel/_tensor.py +142 -18
  375. mindspore/parallel/_utils.py +198 -25
  376. mindspore/parallel/algo_parameter_config.py +3 -3
  377. mindspore/parallel/auto_parallel.py +732 -0
  378. mindspore/parallel/checkpoint_convert.py +159 -0
  379. mindspore/parallel/checkpoint_transform.py +656 -37
  380. mindspore/parallel/cluster/process_entity/_api.py +151 -19
  381. mindspore/parallel/cluster/run.py +1 -1
  382. mindspore/parallel/function/__init__.py +24 -0
  383. mindspore/parallel/function/reshard_func.py +259 -0
  384. mindspore/parallel/nn/__init__.py +25 -0
  385. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  386. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  387. mindspore/parallel/parameter_broadcast.py +24 -13
  388. mindspore/parallel/shard.py +137 -61
  389. mindspore/parallel/transform_safetensors.py +287 -95
  390. mindspore/pgodb140.dll +0 -0
  391. mindspore/pgort140.dll +0 -0
  392. mindspore/profiler/__init__.py +9 -5
  393. mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
  394. mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
  395. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
  396. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +22 -0
  397. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  398. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
  399. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
  400. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
  401. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
  402. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
  403. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
  404. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
  405. mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
  406. mindspore/profiler/common/constant.py +12 -0
  407. mindspore/profiler/common/msprof_cmd_tool.py +42 -23
  408. mindspore/profiler/common/path_manager.py +24 -0
  409. mindspore/profiler/common/profiler_context.py +26 -2
  410. mindspore/profiler/common/profiler_meta_data.py +74 -0
  411. mindspore/profiler/common/profiler_parameters.py +59 -18
  412. mindspore/profiler/common/profiler_path_manager.py +66 -7
  413. mindspore/profiler/dynamic_profiler.py +112 -79
  414. mindspore/profiler/envprofiler.py +26 -1
  415. mindspore/profiler/experimental_config.py +197 -0
  416. mindspore/profiler/mstx.py +57 -14
  417. mindspore/profiler/platform/npu_profiler.py +33 -7
  418. mindspore/profiler/profiler.py +541 -45
  419. mindspore/profiler/profiler_action_controller.py +1 -1
  420. mindspore/profiler/profiler_interface.py +4 -0
  421. mindspore/profiler/schedule.py +57 -22
  422. mindspore/rewrite/api/node.py +15 -13
  423. mindspore/rewrite/api/symbol_tree.py +1 -1
  424. mindspore/run_check/_check_version.py +25 -14
  425. mindspore/run_check/run_check.py +1 -1
  426. mindspore/runtime/__init__.py +2 -2
  427. mindspore/runtime/executor.py +40 -11
  428. mindspore/runtime/memory.py +25 -8
  429. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  430. mindspore/swresample-4.dll +0 -0
  431. mindspore/swscale-6.dll +0 -0
  432. mindspore/tbbmalloc.dll +0 -0
  433. mindspore/tinyxml2.dll +0 -0
  434. mindspore/train/__init__.py +8 -8
  435. mindspore/train/_utils.py +35 -7
  436. mindspore/train/amp.py +1 -1
  437. mindspore/train/callback/__init__.py +2 -2
  438. mindspore/train/callback/_callback.py +2 -16
  439. mindspore/train/callback/_checkpoint.py +24 -40
  440. mindspore/train/callback/_cluster_monitor.py +14 -18
  441. mindspore/train/callback/_flops_collector.py +2 -3
  442. mindspore/train/callback/_history.py +7 -4
  443. mindspore/train/callback/_lambda_callback.py +2 -2
  444. mindspore/train/callback/_landscape.py +0 -3
  445. mindspore/train/callback/_loss_monitor.py +2 -1
  446. mindspore/train/callback/_on_request_exit.py +6 -5
  447. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  448. mindspore/train/callback/_summary_collector.py +8 -13
  449. mindspore/train/callback/_time_monitor.py +2 -1
  450. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +179 -103
  451. mindspore/train/data_sink.py +25 -2
  452. mindspore/train/dataset_helper.py +4 -5
  453. mindspore/train/loss_scale_manager.py +8 -7
  454. mindspore/train/metrics/accuracy.py +3 -3
  455. mindspore/train/metrics/confusion_matrix.py +9 -9
  456. mindspore/train/metrics/error.py +3 -3
  457. mindspore/train/metrics/hausdorff_distance.py +4 -4
  458. mindspore/train/metrics/mean_surface_distance.py +3 -3
  459. mindspore/train/metrics/metric.py +0 -12
  460. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  461. mindspore/train/metrics/precision.py +8 -6
  462. mindspore/train/metrics/recall.py +9 -9
  463. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  464. mindspore/train/mind_ir_pb2.py +19 -12
  465. mindspore/train/model.py +176 -103
  466. mindspore/train/serialization.py +246 -988
  467. mindspore/train/summary/_summary_adapter.py +2 -2
  468. mindspore/train/summary/summary_record.py +1 -1
  469. mindspore/turbojpeg.dll +0 -0
  470. mindspore/utils/__init__.py +3 -2
  471. mindspore/utils/dryrun.py +4 -2
  472. mindspore/utils/hooks.py +81 -0
  473. mindspore/utils/utils.py +138 -4
  474. mindspore/vcmeta.dll +0 -0
  475. mindspore/vcruntime140.dll +0 -0
  476. mindspore/vcruntime140_1.dll +0 -0
  477. mindspore/version.py +1 -1
  478. {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +2 -1
  479. {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +483 -438
  480. mindspore/_install_custom.py +0 -43
  481. mindspore/common/_register_for_adapter.py +0 -74
  482. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  483. mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
  484. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  485. mindspore/ops_generate/gen_constants.py +0 -190
  486. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  487. mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
  488. /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
  489. {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
  490. {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +0 -0
  491. {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
@@ -15,17 +15,77 @@
 """shard"""
 
 import copy
+import numpy as np
 import mindspore as ms
 from mindspore import log as logger
 from mindspore._c_expression import Shard_
 
 
-class Layout:
+class _DistributedTensorInfo:
+    """
+    Describe the distributed information of a tensor.
+
+    Args:
+        distributed_info (Union[Layout, DeviceMesh]): The distributed information of a tensor.
+
+    Raises:
+        TypeError: If `distributed_info` is not a Layout type.
+
+    Examples:
+        >>> from mindspore import _DistributedTensorInfo, Layout
+        >>> layout = Layout((2, 2), ("dp", "mp"))
+        >>> src_layout = layout("dp", "mp")
+        >>> distributed_info = _DistributedTensorInfo(src_layout)
+        >>> print(distributed_info.sharding_strategy)
+        [2, 2]
     """
-    Parallel layout describes the detailed sharding information.
 
-    For more detailed information, refer to the file `Higher-order Operator-level Parallelism
-    <https://www.mindspore.cn/docs/en/master/model_train/parallel/advanced_operator_parallel.html>`_.
+    def __init__(self, distributed_info):
+        if isinstance(distributed_info, Layout):
+            self._layout = distributed_info
+            self._distributed_info = distributed_info
+        else:
+            raise TypeError(
+                f"DistributedTensorInfo only supports Layout or DeviceMesh as input, but got {type(distributed_info)}")
+        self._sharding_strategy = None
+
+    @property
+    def layout(self):
+        """return layout of current tensor"""
+        return self._layout
+
+    @property
+    def distributed_info(self):
+        """return the distributed info, it depends on user's input"""
+        return self._distributed_info
+
+    @property
+    def sharding_strategy(self):
+        """return the sharding strategy of current tensor"""
+        if self._sharding_strategy is None:
+            layout_info = self._layout.to_dict()
+            device_matrix = layout_info["device_matrix"]
+            tensor_map = layout_info["tensor_map"]
+            sharding_strategy = []
+            for map_value in tensor_map:
+                if isinstance(map_value, (tuple, list)):
+                    shard_size = 1
+                    for value in map_value:
+                        if value != -1:
+                            shard_size *= device_matrix[len(device_matrix) - value - 1]
+                    sharding_strategy.append(shard_size)
+                else:
+                    if map_value != -1:
+                        sharding_strategy.append(device_matrix[len(device_matrix) - map_value - 1])
+                    else:
+                        sharding_strategy.append(1)
+            self._sharding_strategy = sharding_strategy
+        return self._sharding_strategy
+
+
+class Layout:
+    """
+    Topological abstraction describing cluster devices for tensor slice placement on the cluster.
 
     Note:
         - It is valid only in semi auto parallel or auto parallel mode.
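
The sharding_strategy property added above reduces to a right-to-left lookup from each tensor_map entry into device_matrix. A standalone sketch of that mapping (hypothetical helper name and inputs, not code from the package):

    # Standalone sketch of the sharding_strategy derivation added in the hunk above.
    def derive_sharding_strategy(device_matrix, tensor_map):
        """Shard size per tensor dimension; -1 in tensor_map means the dimension is not split."""
        strategy = []
        for map_value in tensor_map:
            entries = map_value if isinstance(map_value, (tuple, list)) else (map_value,)
            shard_size = 1
            for value in entries:
                if value != -1:
                    # tensor_map indexes device_matrix from the right, hence the reversed lookup
                    shard_size *= device_matrix[len(device_matrix) - value - 1]
            strategy.append(shard_size)
        return strategy

    # Layout((2, 2), ("dp", "mp")) with layout("dp", "mp") yields device_matrix=(2, 2), tensor_map=(1, 0)
    print(derive_sharding_strategy((2, 2), (1, 0)))  # [2, 2], matching the docstring example above
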
@@ -38,28 +98,36 @@ class Layout:
         alias_name (tuple): The alias name for each axis of device_matrix, its length should be equal to the
             length of device_matrix and its element type is string.
             When using "interleaved_parallel" as an alias name, the tensor would be split into multiple
             copies on the corresponding partition dimension on a single card.
+        rank_list (list, optional): Data is allocated to the device according to rank_list. Default: ``None``.
+
     Raises:
         TypeError: `device_matrix` is not a tuple type.
         TypeError: `alias_name` is not a tuple type.
+        TypeError: 'rank_list' is not a list type.
         ValueError: `device_matrix` length is not equal to `alias_name` length.
         TypeError: The element of `device_matrix` is not int type.
         TypeError: The element of `alias_name` is not a str type.
+        TypeError: The element of `rank_list` is not int type.
         ValueError: The element of `alias_name` is an empty str.
         ValueError: The element of `alias_name` is "None".
         ValueError: `alias_name` contains repeated element.
 
+    Supported Platforms:
+        ``Ascend``
+
     Examples:
-        >>> from mindspore import Layout
+        >>> from mindspore.parallel import Layout
         >>> layout = Layout((2, 2, 2), ("dp", "sp", "mp"))
         >>> layout0 = layout("dp", "mp")
         >>> print(layout0.to_dict())
-        {"device_matrix": (2, 2, 2), "tensor_map": (2, 0), "interleaved_parallel": False}
+        {"device_matrix": (2, 2, 2), "tensor_map": (2, 0), "interleaved_parallel": False,
+        'alias_name': {'dp', 'sp', 'mp'}, "rank_list": [0, 1, 2, 3, 4, 5, 6, 7]}
         >>> # Total device num is 4, but split the tensor in local device into two copies.
         >>> layout = Layout((2, 2, 2), ("dp", "sp", "interleaved_parallel"))
         >>> layout1 = layout(("dp", "interleaved_parallel"), "sp")
     """
 
-    def __init__(self, device_matrix, alias_name):
+    def __init__(self, device_matrix, alias_name, rank_list=None):
         if not isinstance(device_matrix, tuple):
             raise TypeError(f'device_matrix must be tuple type, but got:{type(device_matrix)}')
         if not isinstance(alias_name, tuple):
@@ -85,6 +153,20 @@ class Layout:
         self._device_shape = device_matrix
         self._alias_name = alias_name
         self._tensor_map = None
+        self._rank_list = list(range(np.prod(np.array(self._device_shape))))
+        if rank_list is not None:
+            if not isinstance(rank_list, list):
+                raise TypeError(f"The rank_list should be a list, but got {type(rank_list).__name__}.")
+            for in_ele in rank_list:
+                if not isinstance(in_ele, int):
+                    raise TypeError(f"The element of rank_list should be int, but got {type(in_ele).__name__}.")
+            if len(np.array(rank_list).shape) != 1:
+                raise ValueError(
+                    f"The rank_list should be a 1-D list, but got {len(np.array(rank_list).shape)}-D list.")
+            if len(rank_list) != np.prod(np.array(self._device_shape)):
+                raise ValueError(f"The length of rank_list should be equal to the product of device_matrix, "
+                                 f"but got {len(rank_list)} and {np.prod(np.array(self._device_shape))}.")
+            self._rank_list = rank_list
 
     def __call__(self, *tensor_map):
         self._tensor_map = ()
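
A brief usage sketch for the new rank_list argument validated above (assumes a MindSpore 2.6.0rc1 environment; the reversed rank order is purely illustrative):

    from mindspore.parallel import Layout

    # rank_list must be a flat list of ints whose length equals prod(device_matrix), here 2*2*2 = 8
    layout = Layout((2, 2, 2), ("dp", "sp", "mp"), rank_list=[7, 6, 5, 4, 3, 2, 1, 0])
    layout0 = layout("dp", "mp")
    # to_dict() now also reports the rank_list (see the following hunk)
    print(layout0.to_dict()["rank_list"])  # [7, 6, 5, 4, 3, 2, 1, 0]
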
@@ -125,8 +207,8 @@
             raise ValueError("The tensor_map of layout is None")
         interleaved_parallel = "interleaved_parallel" in self._alias_name
         return {"device_matrix": self._device_shape, "tensor_map": self._tensor_map,
-                "interleaved_parallel": interleaved_parallel, "alias_name": self._alias_name}
-
+                "interleaved_parallel": interleaved_parallel, "alias_name": self._alias_name,
+                "rank_list": self._rank_list}
 
 
 class Shard(Shard_):
@@ -144,18 +226,6 @@ class Shard(Shard_):
         self.level = None
 
     def __call__(self, fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascend", level=0):
-        parallel_mode = ms.context.get_auto_parallel_context("parallel_mode")
-        if parallel_mode not in ("auto_parallel", "semi_auto_parallel"):
-            raise AssertionError(
-                f"Cell shard only supports auto parallel and semi auto parallel.")
-        if ms.context.get_context("device_target") not in ("Ascend", "GPU"):
-            raise AssertionError(
-                f"'Shard' now only supports 'Ascend' and 'GPU'")
-        if parallel_mode == "auto_parallel" and \
-                ms.context.get_auto_parallel_context("search_mode") != "sharding_propagation":
-            raise AssertionError(f"'search_mode' must be 'sharding_propagation' for 'Shard' when the "
-                                 f"'parallel_mode' is 'auto_parallel.'")
-
         if not isinstance(in_strategy, tuple):
             raise TypeError(
                 f"For 'Shard', the 'in_strategy' should be a tuple, but got {type(in_strategy).__name__}.")
@@ -184,7 +254,8 @@
                            "will be overwritten as False.")
             ms.set_algo_parameters(fully_use_devices=False)
 
-        if ms.context.get_auto_parallel_context("full_batch_is_set") is False:
+        if ms.context.get_auto_parallel_context("full_batch_is_set") is False and \
+                ms.context.get_context("mode") == ms.context.PYNATIVE_MODE:
             logger.warning("When calling the shard interface, "
                            "'dataset_strategy' or 'full_batch' is not manually set by the user, "
                            "and the 'dataset_strategy' will be set to 'full_batch'.")
@@ -196,13 +267,13 @@
 
         if isinstance(fn, ms.nn.Cell):
             for param in fn.trainable_params():
-                param.is_in_shard = True
+                param.param_info.is_in_pynative_shard = True
 
         # Set parameter layout to corresponding parameter
         self._set_param_layout_into_parameter(fn, parameter_plan)
 
         def shard_fn(*args):
-            @ms.common.jit(hash_args=fn)
+            @ms.common.jit(hash_args=fn, backend="ms_backend")
             def after_shard(*args):
                 return shard_(fn, in_strategy, out_strategy, device, level)(*args)
 
@@ -293,7 +364,7 @@
         for stra in strategy:
             if not isinstance(stra, (tuple, Layout)):
                 raise TypeError(
-                    f"The '{log_info}' should be a tuple(tuple(int)) or tuple(mindspore.Layout), "
+                    f"The '{log_info}' should be a tuple(tuple(int)) or tuple(mindspore.parallel.Layout), "
                     f"but got {type(stra).__name__}")
             if isinstance(stra, Layout):
                 strategy_set.add("layout")
@@ -315,7 +386,7 @@
         for in_ele in layout:
             if not isinstance(in_ele, Layout):
                 raise TypeError(f"The {log_info} item should be a object of class Layout.")
-            layout_value += (in_ele.to_dict(),)
+            layout_value += ({k: v for k, v in in_ele.to_dict().items() if k != "rank_list"},)
         return layout_value
 
     def _check_tuple_strategy(self, dim_strategy):
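
The change above keeps the new "rank_list" key out of the layout dictionaries handed to the underlying Shard_ primitive. The same filtering applied to a made-up dictionary:

    layout_dict = {"device_matrix": (2, 2, 2), "tensor_map": (2, 0),
                   "interleaved_parallel": False, "alias_name": ("dp", "sp", "mp"),
                   "rank_list": [0, 1, 2, 3, 4, 5, 6, 7]}
    filtered = {k: v for k, v in layout_dict.items() if k != "rank_list"}
    print(sorted(filtered))  # ['alias_name', 'device_matrix', 'interleaved_parallel', 'tensor_map']
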
@@ -326,8 +397,8 @@
 
 def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascend", level=0):
     """
-    Defining the input and output layouts of this cell and the parallel strategies of remaining ops will be
-    generated by sharding propagation. In PyNative mode, use this method to specify a Cell for distributed
+    Specify the input and output slicing strategy for a Cell or function.
+    In PyNative mode, use this method to specify a Cell for distributed
     execution in graph mode. In Graph mode, use this method to specify distribution strategy for a Cell,
     strategy for others will be set by sharding propagation.
     in_strategy and out_strategy define the input and output layout respectively.
@@ -337,33 +408,37 @@ def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascen
     The parallel strategies of remaining operators are derived from the strategy specified by the input and output.
 
     Note:
-        If ms.shard is called, the parallel mode in `set_auto_parallel_context` (parallel_mode) will be set to
-        "auto_parallel" and the search mode (search_mode) to "sharding_propagation".
-        If the input contain Parameter, its strategy should be set in `in_strategy`.
+        - If shard is called, the parallel mode in `set_auto_parallel_context` (parallel_mode) will be set to
+          "auto_parallel" and the search mode (search_mode) to "sharding_propagation".
+        - If the input contain Parameter, its strategy should be set in `in_strategy`.
+        - This method currently does not support dynamic shapes.
 
     Args:
         fn (Union[Cell, Function]): Function to be executed in parallel.
-            Its arguments and return value must be Tensor or Parameter.
+            Its arguments and return value must be Tensor.
             If `fn` is a Cell with parameters, `fn` needs to be an instantiated object,
             otherwise its arguments cannot be accessed.
         in_strategy (tuple): Define the layout of inputs, each element of the tuple should be a tuple(int) or
-            tuple(mindspore.Layout).
+            tuple(mindspore.parallel.Layout).
             Tuple defines the layout of the corresponding input.
-        out_strategy (Union[tuple, None]): Define the layout of outputs similar with `in_strategy`.
-            It is not in use right now. Default: ``None`` .
-        parameter_plan (Union[dict, None]): Define the layout for the specified parameters. Each element in dict
+        out_strategy (Union[tuple, None], optional): Define the layout of outputs similar with `in_strategy`.
+            Default: ``None`` .
+        parameter_plan (Union[dict, None], optional): Define the layout for the specified parameters.
+            Each element in dict
             defines the layout of the parameter like "param_name: layout".
             The key is a parameter name of type 'str'.
-            The value is a 1-D integer tuple or a 1-D mindspore.Layout tuple,
+            The value is a 1-D integer tuple or a 1-D mindspore.parallel.Layout tuple,
             indicating the corresponding layout.
            If the parameter name is incorrect or the corresponding parameter
-            has been set, the parameter setting will be ignored.
+            has been set, the parameter setting will be ignored. Supported
+            only when `fn` is a Cell with parameters.
             Default: ``None`` .
-        device (string): Select a certain `device` target. It is not in use right now.
-            Support ["CPU", "GPU", "Ascend"]. Default: ``"Ascend"`` .
-        level (int): Option for parallel strategy infer algorithm, namely the object function, maximize computation
-            over communication ratio, maximize speed performance, minimize memory usage etc. It is not in
-            use right now. Support [0, 1, 2]. Default: ``0`` .
+        device (str, optional): Select a certain `device` target. It is not in use right now.
+            Support ["CPU", "GPU", "Ascend"]. Default: ``"Ascend"`` .
+        level (int, optional): Option for parallel strategy infer algorithm, namely the object function,
+            maximize computation
+            over communication ratio, maximize speed performance, minimize memory usage etc. It is not in
+            use right now. Support [0, 1, 2]. Default: ``0`` .
 
     Returns:
         Function, return the function that will be executed under auto parallel process.
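
Condensed from the Examples updated in the hunks below, the 2.6-style invocation moves to mindspore.parallel.shard together with AutoParallel and no_init_parameters. A sketch under assumed conditions (an 8-rank Ascend launch; the Dense layer, shapes, and strategy are illustrative):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, nn
    from mindspore.communication import init
    from mindspore.parallel import shard, Layout
    from mindspore.nn.utils import no_init_parameters
    from mindspore.parallel.auto_parallel import AutoParallel

    class Net(nn.Cell):
        def __init__(self):
            super().__init__()
            self.dense = nn.Dense(10, 10)
            layout = Layout((4, 2), ("dp", "mp"))
            # shard is now imported from mindspore.parallel rather than called as ms.shard
            self.dense_shard = shard(self.dense, in_strategy=(layout("dp", "mp"),))

        def construct(self, x):
            return self.dense_shard(x)

    ms.set_context(mode=ms.GRAPH_MODE)
    init()
    with no_init_parameters():   # delay parameter initialization, as in the updated example
        net = Net()
    parallel_net = AutoParallel(net, parallel_mode='sharding_propagation')
    out = parallel_net(Tensor(np.ones((32, 10)), ms.float32))
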
@@ -373,26 +448,28 @@ def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascen
         AssertionError: If device_target it not "Ascend" or "GPU".
         TypeError: If `in_strategy` is not a tuple.
         TypeError: If `out_strategy` is not a tuple or None.
-        TypeError: If any element in `in_strategy` is not a tuple(int) or tuple(mindspore.Layout).
-        TypeError: If any element in `out_strategy` is not a tuple(int) or tuple(mindspore.Layout).
+        TypeError: If any element in `in_strategy` is not a tuple(int) or tuple(mindspore.parallel.Layout).
+        TypeError: If any element in `out_strategy` is not a tuple(int) or tuple(mindspore.parallel.Layout).
         TypeError: If `parameter_plan` is not a dict or None.
         TypeError: If any key in `parameter_plan` is not a str.
-        TypeError: If any value in `parameter_plan` is not a tuple(int) or a tuple(mindspore.Layout).
+        TypeError: If any value in `parameter_plan` is not a tuple(int) or a tuple(mindspore.parallel.Layout).
         TypeError: If `device` is not a str.
         TypeError: If `level` is not an integer.
 
     Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``Ascend``
 
     Examples:
         >>> import numpy as np
         >>> import mindspore as ms
-        >>> from mindspore import Tensor, nn
+        >>> from mindspore import Tensor, nn, ops
         >>> from mindspore.communication import init
+        >>> from mindspore.parallel import shard
+        >>> from mindspore.parallel import Layout
+        >>> from mindspore.nn.utils import no_init_parameters
+        >>> from mindspore.parallel.auto_parallel import AutoParallel
         >>> ms.set_context(mode=ms.GRAPH_MODE)
         >>> init()
-        >>> ms.set_auto_parallel_context(parallel_mode="auto_parallel", search_mode="sharding_propagation",
-        ...                              device_num=8)
         >>>
         >>> # Case 1: cell uses functional
         >>> class BasicBlock(nn.Cell):
@@ -404,7 +481,7 @@ def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascen
         >>> x = ops.abs(x)
         >>> return x + y
         >>> # shard a function with tuple(int) strategies
-        >>> self.shard_my_add = ms.shard(my_add, in_strategy=((2, 2), (1, 4)), out_strategy=((4, 1),))
+        >>> self.shard_my_add = shard(my_add, in_strategy=((2, 2), (1, 4)), out_strategy=((4, 1),))
         >>>
         >>> def construct(self, x, u):
         >>> x = self.gelu(x)
@@ -432,7 +509,7 @@ def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascen
         >>> super(Net, self).__init__()
         >>> # setting cell sharding strategy and parameter_plan by tuple(int)
         >>> self.layer_net1 = NetForward()
-        >>> self.layer_net1_shard = ms.shard(self.layer_net1, in_strategy=((4, 2), (2, 1)),
+        >>> self.layer_net1_shard = shard(self.layer_net1, in_strategy=((4, 2), (2, 1)),
         ...                                  parameter_plan={"self.layer_net1.block1.weight": (4, 1)})
         >>>
         >>> # setting cell sharding strategy and parameter_plan by tuple(ms.Layout)
@@ -440,7 +517,7 @@ def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascen
         >>> layout = Layout((4, 2, 1), ("dp", "mp", "sp"))
         >>> in_layout = (layout("dp", "mp"), layout("mp", "sp"))
         >>> param_layout = layout("dp", "sp")
-        >>> self.layer_net2_shard = ms.shard(self.layer_net2, in_strategy=in_layout,
+        >>> self.layer_net2_shard = shard(self.layer_net2, in_strategy=in_layout,
         ...                                  parameter_plan={"self.layer_net2.block2.weight": param_layout})
         >>> self.flatten = nn.Flatten()
         >>> self.layer1 = nn.Dense(64, 64)
@@ -458,26 +535,25 @@ def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascen
         >>> x = self.matmul(x, Tensor(np.ones(shape=(32, 32)), dtype=ms.float32))
         >>> return x
         >>>
-        >>> net = Net()
+        >>> with no_init_parameters():
+        >>>     net = Net()
         >>> x = Tensor(np.ones(shape=(64, 1, 8, 8)), dtype=ms.float32)
         >>> y = Tensor(np.ones(shape=(64, 1, 8, 8)), dtype=ms.float32)
-        >>> net(x, y)
+        >>> parallel_net = AutoParallel(net, parallel_mode='sharding_propagation')
+        >>> parallel_net(x, y)
         >>>
         >>> # Case 2: function uses functional sharding
         >>> def test_shard(x, y):
         ...     return x + y
         >>> x = Tensor(np.ones(shape=(32, 10)), dtype=ms.float32)
         >>> y = Tensor(np.ones(shape=(32, 10)), dtype=ms.float32)
-        >>> output = ms.shard(test_shard, in_strategy=((4, 2), (4, 2)))(x, y)
+        >>> output = shard(test_shard, in_strategy=((4, 2), (4, 2)))(x, y)
         >>> print(output.shape)
         (32, 10)
 
-    Tutorial Examples:
-        - `Functional Operator Sharding
-          <https://www.mindspore.cn/docs/en/master/model_train/parallel/shard_function_parallel.html>`_
-        - `mindspore.Layout
-          <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.Layout.html>`_
     """
+    if ms.communication.management.get_group_size() == 1:
+        return fn
     if not isinstance(fn, (ms.nn.Cell)):
         logger.warning("'fn' is not a mindspore.nn.Cell, and its definition cannot involve Parameter; "
                        "otherwise, the result may be incorrect.")