mindspore 2.3.0rc1__cp37-none-any.whl → 2.3.0rc2__cp37-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (316)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +1 -1
  3. mindspore/_akg/akg/utils/tbe_codegen_utils.py +13 -3
  4. mindspore/_c_dataengine.cpython-37m-aarch64-linux-gnu.so +0 -0
  5. mindspore/_c_expression.cpython-37m-aarch64-linux-gnu.so +0 -0
  6. mindspore/_checkparam.py +20 -0
  7. mindspore/_extends/parse/parser.py +1 -1
  8. mindspore/_extends/parse/standard_method.py +6 -5
  9. mindspore/_mindspore_offline_debug.cpython-37m-aarch64-linux-gnu.so +0 -0
  10. mindspore/amp.py +5 -5
  11. mindspore/bin/cache_admin +0 -0
  12. mindspore/bin/cache_server +0 -0
  13. mindspore/boost/boost_cell_wrapper.py +1 -1
  14. mindspore/boost/group_loss_scale_manager.py +1 -1
  15. mindspore/common/__init__.py +4 -2
  16. mindspore/common/_register_for_recompute.py +48 -0
  17. mindspore/common/_stub_tensor.py +1 -0
  18. mindspore/common/api.py +56 -4
  19. mindspore/common/dtype.py +5 -3
  20. mindspore/common/dump.py +2 -2
  21. mindspore/common/hook_handle.py +51 -4
  22. mindspore/common/initializer.py +1 -1
  23. mindspore/common/jit_config.py +17 -6
  24. mindspore/common/parameter.py +7 -2
  25. mindspore/common/recompute.py +247 -0
  26. mindspore/common/sparse_tensor.py +2 -2
  27. mindspore/common/symbol.py +1 -1
  28. mindspore/common/tensor.py +74 -36
  29. mindspore/communication/__init__.py +3 -3
  30. mindspore/communication/management.py +30 -30
  31. mindspore/context.py +28 -15
  32. mindspore/dataset/__init__.py +5 -5
  33. mindspore/dataset/audio/__init__.py +2 -2
  34. mindspore/dataset/audio/transforms.py +51 -51
  35. mindspore/dataset/callback/ds_callback.py +2 -2
  36. mindspore/dataset/engine/cache_client.py +1 -1
  37. mindspore/dataset/engine/datasets.py +3 -3
  38. mindspore/dataset/engine/datasets_audio.py +14 -14
  39. mindspore/dataset/engine/datasets_standard_format.py +3 -3
  40. mindspore/dataset/engine/datasets_text.py +38 -38
  41. mindspore/dataset/engine/datasets_user_defined.py +3 -3
  42. mindspore/dataset/engine/datasets_vision.py +68 -68
  43. mindspore/dataset/text/__init__.py +3 -3
  44. mindspore/dataset/text/transforms.py +26 -26
  45. mindspore/dataset/transforms/__init__.py +1 -1
  46. mindspore/dataset/vision/__init__.py +3 -3
  47. mindspore/dataset/vision/transforms.py +92 -92
  48. mindspore/dataset/vision/utils.py +1 -1
  49. mindspore/experimental/optim/adadelta.py +2 -2
  50. mindspore/experimental/optim/adagrad.py +2 -2
  51. mindspore/experimental/optim/adam.py +2 -2
  52. mindspore/experimental/optim/adamax.py +2 -2
  53. mindspore/experimental/optim/adamw.py +2 -2
  54. mindspore/experimental/optim/asgd.py +2 -2
  55. mindspore/experimental/optim/lr_scheduler.py +24 -20
  56. mindspore/experimental/optim/nadam.py +2 -2
  57. mindspore/experimental/optim/optimizer.py +1 -1
  58. mindspore/experimental/optim/radam.py +2 -2
  59. mindspore/experimental/optim/rmsprop.py +2 -2
  60. mindspore/experimental/optim/rprop.py +2 -2
  61. mindspore/experimental/optim/sgd.py +2 -2
  62. mindspore/hal/stream.py +2 -0
  63. mindspore/include/mindapi/base/types.h +5 -0
  64. mindspore/lib/libdnnl.so.2 +0 -0
  65. mindspore/lib/libmindspore.so +0 -0
  66. mindspore/lib/libmindspore_backend.so +0 -0
  67. mindspore/lib/libmindspore_common.so +0 -0
  68. mindspore/lib/libmindspore_core.so +0 -0
  69. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  70. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  71. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  72. mindspore/lib/libmindspore_shared_lib.so +0 -0
  73. mindspore/lib/libopencv_core.so.4.5 +0 -0
  74. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  75. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6 -6
  76. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  77. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  78. mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
  79. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  80. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
  81. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
  82. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
  83. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +101787 -98559
  84. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
  85. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
  86. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/base/op_register.h +2 -2
  87. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/params/mix.h +8 -1
  88. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/params/norm.h +5 -3
  89. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/params/reduce.h +2 -2
  90. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/backend/backend.h +3 -3
  91. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/backend/rtbackend.h +3 -3
  92. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/base/types.h +0 -1
  93. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/module/module.h +3 -3
  94. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/svector/svector.h +3 -2
  95. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
  96. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
  97. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/tiling/add_tiling.h +9 -9
  98. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +2 -6
  99. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb.h +2 -2
  100. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_base.h +460 -0
  101. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_bf16.h +217 -0
  102. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp16.h +116 -0
  103. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_tiling.h +16 -24
  104. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_value.h +27 -0
  105. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -4
  106. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/FlashAttentionScore_impl.h → flash_attention_score/flash_attention_score_impl.h} +2 -1
  107. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/bs_attention_tiling.h → flash_attention_score/flash_attention_score_tiling.h} +15 -19
  108. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/gelu/tiling/gelu_tiling.h +7 -9
  109. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/lccl/lccl_wrapper.h +58 -0
  110. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +19 -8
  111. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{matmul → matmul_common}/pp_matmul_common_tiling.h +18 -8
  112. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{matmul → matmul_common}/pp_matmul_info.h +7 -4
  113. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{matmul → matmul_common}/tiling_data.h +44 -6
  114. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_utils.h +65 -0
  115. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +10 -6
  116. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +4 -1
  117. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +41 -0
  118. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/PagedAttention_impl.h → paged_attention/paged_attention_impl.h} +1 -1
  119. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +63 -0
  120. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/add_param.h +2 -2
  121. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention_param.h → param/attention_param.h} +11 -2
  122. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +37 -0
  123. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +45 -0
  124. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache/reshape_and_cache_tiling.h +1 -2
  125. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm.h +23 -0
  126. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm_base.h +175 -0
  127. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm_normal.h +276 -0
  128. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm_split_d.h +280 -0
  129. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/tiling_data.h +35 -0
  130. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +45 -0
  131. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/kernel/sub_kernel.h +20 -0
  132. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +47 -0
  133. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_tiling.h +25 -0
  134. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +323 -23
  135. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/types.h +15 -4
  136. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +8 -0
  137. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
  138. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
  139. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
  140. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
  141. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
  142. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
  143. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
  144. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
  145. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
  146. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
  147. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
  148. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
  149. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
  150. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
  151. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
  152. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
  153. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
  154. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
  155. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
  156. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
  157. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
  158. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
  159. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
  160. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
  161. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
  162. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcal.h +22 -0
  163. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcal_comm.h +70 -0
  164. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcal_types.h +103 -0
  165. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lccl.h +47 -0
  166. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lccl_wrapper.h +58 -0
  167. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcoc.h +154 -0
  168. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
  169. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
  170. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  171. mindspore/log.py +2 -2
  172. mindspore/mint/__init__.py +457 -0
  173. mindspore/mint/nn/__init__.py +430 -0
  174. mindspore/mint/nn/functional.py +424 -0
  175. mindspore/mint/optim/__init__.py +24 -0
  176. mindspore/mint/optim/adamw.py +186 -0
  177. mindspore/multiprocessing/__init__.py +4 -0
  178. mindspore/nn/__init__.py +3 -0
  179. mindspore/nn/cell.py +51 -47
  180. mindspore/nn/extend/__init__.py +29 -0
  181. mindspore/nn/extend/basic.py +140 -0
  182. mindspore/nn/extend/embedding.py +143 -0
  183. mindspore/nn/extend/layer/__init__.py +27 -0
  184. mindspore/nn/extend/layer/normalization.py +107 -0
  185. mindspore/nn/extend/pooling.py +117 -0
  186. mindspore/nn/generator.py +297 -0
  187. mindspore/nn/layer/basic.py +109 -1
  188. mindspore/nn/layer/container.py +2 -2
  189. mindspore/nn/layer/conv.py +6 -6
  190. mindspore/nn/layer/embedding.py +1 -1
  191. mindspore/nn/layer/normalization.py +21 -43
  192. mindspore/nn/layer/padding.py +4 -0
  193. mindspore/nn/optim/ada_grad.py +2 -2
  194. mindspore/nn/optim/adadelta.py +1 -1
  195. mindspore/nn/optim/adafactor.py +1 -1
  196. mindspore/nn/optim/adam.py +7 -7
  197. mindspore/nn/optim/adamax.py +2 -2
  198. mindspore/nn/optim/adasum.py +2 -2
  199. mindspore/nn/optim/asgd.py +2 -2
  200. mindspore/nn/optim/ftrl.py +1 -1
  201. mindspore/nn/optim/lamb.py +3 -3
  202. mindspore/nn/optim/lars.py +1 -1
  203. mindspore/nn/optim/lazyadam.py +2 -2
  204. mindspore/nn/optim/momentum.py +2 -2
  205. mindspore/nn/optim/optimizer.py +2 -2
  206. mindspore/nn/optim/proximal_ada_grad.py +2 -2
  207. mindspore/nn/optim/rmsprop.py +2 -2
  208. mindspore/nn/optim/rprop.py +2 -2
  209. mindspore/nn/optim/sgd.py +2 -2
  210. mindspore/nn/optim/thor.py +2 -2
  211. mindspore/nn/wrap/cell_wrapper.py +9 -9
  212. mindspore/nn/wrap/grad_reducer.py +5 -5
  213. mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
  214. mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -2
  215. mindspore/ops/_vmap/vmap_math_ops.py +27 -8
  216. mindspore/ops/_vmap/vmap_nn_ops.py +66 -8
  217. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +73 -1
  218. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +12 -3
  219. mindspore/ops/auto_generate/gen_arg_handler.py +24 -0
  220. mindspore/ops/auto_generate/gen_extend_func.py +274 -0
  221. mindspore/ops/auto_generate/gen_ops_def.py +889 -22
  222. mindspore/ops/auto_generate/gen_ops_prim.py +3541 -253
  223. mindspore/ops/auto_generate/pyboost_inner_prim.py +282 -0
  224. mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -1
  225. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +9 -0
  226. mindspore/ops/extend/__init__.py +9 -1
  227. mindspore/ops/extend/array_func.py +134 -27
  228. mindspore/ops/extend/math_func.py +3 -3
  229. mindspore/ops/extend/nn_func.py +363 -2
  230. mindspore/ops/function/__init__.py +19 -2
  231. mindspore/ops/function/array_func.py +463 -439
  232. mindspore/ops/function/clip_func.py +7 -18
  233. mindspore/ops/function/grad/grad_func.py +5 -5
  234. mindspore/ops/function/linalg_func.py +4 -4
  235. mindspore/ops/function/math_func.py +260 -243
  236. mindspore/ops/function/nn_func.py +825 -62
  237. mindspore/ops/function/random_func.py +73 -4
  238. mindspore/ops/function/sparse_unary_func.py +1 -1
  239. mindspore/ops/function/vmap_func.py +1 -1
  240. mindspore/ops/functional.py +2 -2
  241. mindspore/ops/op_info_register.py +1 -31
  242. mindspore/ops/operations/__init__.py +2 -3
  243. mindspore/ops/operations/_grad_ops.py +2 -107
  244. mindspore/ops/operations/_inner_ops.py +5 -5
  245. mindspore/ops/operations/_sequence_ops.py +2 -2
  246. mindspore/ops/operations/array_ops.py +11 -233
  247. mindspore/ops/operations/comm_ops.py +32 -32
  248. mindspore/ops/operations/custom_ops.py +7 -89
  249. mindspore/ops/operations/manually_defined/ops_def.py +329 -4
  250. mindspore/ops/operations/math_ops.py +13 -163
  251. mindspore/ops/operations/nn_ops.py +9 -316
  252. mindspore/ops/operations/random_ops.py +1 -1
  253. mindspore/ops/operations/sparse_ops.py +3 -3
  254. mindspore/ops/primitive.py +2 -2
  255. mindspore/ops_generate/arg_dtype_cast.py +12 -3
  256. mindspore/ops_generate/arg_handler.py +24 -0
  257. mindspore/ops_generate/gen_ops_inner_prim.py +2 -0
  258. mindspore/ops_generate/gen_pyboost_func.py +13 -6
  259. mindspore/ops_generate/pyboost_utils.py +2 -17
  260. mindspore/parallel/__init__.py +3 -2
  261. mindspore/parallel/_auto_parallel_context.py +106 -1
  262. mindspore/parallel/_parallel_serialization.py +34 -2
  263. mindspore/parallel/_utils.py +16 -0
  264. mindspore/parallel/algo_parameter_config.py +4 -4
  265. mindspore/parallel/checkpoint_transform.py +249 -77
  266. mindspore/parallel/cluster/process_entity/_api.py +1 -1
  267. mindspore/parallel/parameter_broadcast.py +1 -1
  268. mindspore/parallel/shard.py +1 -1
  269. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +1 -0
  270. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +17 -5
  271. mindspore/profiler/parser/ascend_msprof_exporter.py +3 -3
  272. mindspore/profiler/parser/ascend_msprof_generator.py +10 -3
  273. mindspore/profiler/parser/ascend_op_generator.py +26 -9
  274. mindspore/profiler/parser/ascend_timeline_generator.py +7 -4
  275. mindspore/profiler/parser/profiler_info.py +11 -1
  276. mindspore/profiler/profiling.py +13 -5
  277. mindspore/rewrite/api/node.py +12 -12
  278. mindspore/rewrite/api/symbol_tree.py +11 -11
  279. mindspore/run_check/_check_version.py +1 -1
  280. mindspore/safeguard/rewrite_obfuscation.py +2 -2
  281. mindspore/train/amp.py +4 -4
  282. mindspore/train/anf_ir_pb2.py +8 -2
  283. mindspore/train/callback/_backup_and_restore.py +2 -2
  284. mindspore/train/callback/_callback.py +4 -4
  285. mindspore/train/callback/_checkpoint.py +2 -2
  286. mindspore/train/callback/_early_stop.py +2 -2
  287. mindspore/train/callback/_landscape.py +4 -4
  288. mindspore/train/callback/_loss_monitor.py +2 -2
  289. mindspore/train/callback/_on_request_exit.py +2 -2
  290. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  291. mindspore/train/callback/_summary_collector.py +2 -2
  292. mindspore/train/callback/_time_monitor.py +2 -2
  293. mindspore/train/dataset_helper.py +8 -3
  294. mindspore/train/loss_scale_manager.py +2 -2
  295. mindspore/train/metrics/metric.py +3 -3
  296. mindspore/train/mind_ir_pb2.py +22 -17
  297. mindspore/train/model.py +15 -15
  298. mindspore/train/serialization.py +18 -18
  299. mindspore/train/summary/summary_record.py +7 -7
  300. mindspore/train/train_thor/convert_utils.py +3 -3
  301. mindspore/version.py +1 -1
  302. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +1 -1
  303. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +307 -260
  304. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/tiling_data.h +0 -59
  305. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_bf16_BNSD_mix.o +0 -0
  306. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_bf16_BSH_mix.o +0 -0
  307. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_fp16_BNSD_mix.o +0 -0
  308. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_fp16_BSH_mix.o +0 -0
  309. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_bf16_BNSD_mix.o +0 -0
  310. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_bf16_BSH_mix.o +0 -0
  311. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_fp16_BNSD_mix.o +0 -0
  312. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_fp16_BSH_mix.o +0 -0
  313. /mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/bs_attention_mix_hwsync.h → flash_attention_score/kernel/flash_attention_score_mix_hwsync.h} +0 -0
  314. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
  315. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
  316. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,247 @@
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Defines other operators with functional form."""
+
+from collections import OrderedDict
+from types import MethodType
+from mindspore import log as logger
+from mindspore.nn.cell import Cell
+from mindspore import context
+from mindspore.common.tensor import Tensor
+from mindspore.ops.composite import GradOperation
+from mindspore.common._register_for_recompute import recompute_registry
+from mindspore.common.api import _pynative_executor
+from mindspore.nn.generator import get_rng_state, set_rng_state
+
+
+class _WrapCell(Cell):
+    """
+    The warp cell is used by recompute cell,
+    which can set mixed precision to warp cell
+    """
+
+    def __init__(self, function):
+        super(_WrapCell, self).__init__()
+        self.function = function
+
+    def construct(self, *args, **kwargs):
+        return self.function(*args, **kwargs)
+
+
+class _RecomputeCell(Cell):
+    """
+    Recompute cell, given the sub block, this cell will recompute the block, rather than
+    storing the intermediate activation computed in forward pass, we will recompute it in backward pass.
+    Note:
+        - RecomputeCell now only support pynative mode.
+        - When use recompute function, block object should not decorated by @jit.
+    """
+
+    def __init__(self, block):
+        """Initialize Recompute cell."""
+        super(_RecomputeCell, self).__init__()
+        self.args = []
+        self.kwargs = []
+        self.wrap_cell = _WrapCell(block)
+        self.net = block
+        self.internal_params = []
+        self.save_rng_state = False
+        self.cpu_rng_state = None
+        self._add_attr("is_cell_recompute", "True")
+        self.grad = GradOperation(get_all=True, get_by_list=True, sens_param=True)
+        self.init_mixed_precision_type(block)
+
+    def construct(self, *args, **kwargs):
+        _check_input_args_validate(self.net, args)
+        self.args.append(args)
+        self.kwargs.append(kwargs)
+        self.save_rng_state = kwargs.pop("save_rng_state", True)
+        if self.save_rng_state:
+            self.cpu_rng_state = get_rng_state()
+        return self.net(*args, **kwargs)
+
+    def bprop(self, *args):
+        """
+        Custom grad method for recompute
+        :param args:
+        :return: input grad and weight grads
+        """
+        grad_input = args[-1]
+        input_args = self.args[-1]
+        kwargs = self.kwargs[-1]
+        self.args.pop()
+        self.kwargs.pop()
+        if kwargs:
+            input_args = list(input_args) + list(kwargs.values())
+        try:
+            pre_rng_state = get_rng_state()
+            set_rng_state(*self.cpu_rng_state)
+            _pynative_executor.set_is_run_recompute(True)
+            grads = self.grad(self.net, self.internal_params)(*input_args, grad_input)
+            _pynative_executor.set_is_run_recompute(False)
+            set_rng_state(*pre_rng_state)
+        except Exception as err:
+            _pynative_executor.clear_res()
+            raise err
+        weights = OrderedDict()
+        input_grads = list(grads[0])
+        _padding_input_grads(input_args, input_grads)
+        for i, param in enumerate(self.internal_params):
+            weights[param] = grads[1][i]
+        return tuple(input_grads), weights
+
+    def init_mixed_precision_type(self, block):
+        """
+        init mix precision
+        :param block:
+        :return:
+        """
+        if isinstance(block, Cell):
+            # To avoid sub cell same name
+            block.check_names_and_refresh_name()
+            self.internal_params = block.trainable_params()
+            return
+        if isinstance(block, MethodType) and isinstance(block.__self__, Cell):
+            # To avoid sub cell same name
+            block.__self__.check_names_and_refresh_name()
+            self.internal_params = block.__self__.trainable_params()
+            self.wrap_cell.set_mixed_precision_type(block.__self__.get_mixed_precision_type())
+            self.net = self.wrap_cell
+        else:
+            raise TypeError("For Recompute cell, it not support FunctionType function, "
+                            "only support Cell object or MethodType function!")
+
+
+def _check_input_args_validate(block, args):
+    """
+    Check recompute input args validate
+    :param args:
+    :return:
+    """
+    if not any([isinstance(arg, Tensor) for arg in args]):
+        logger.warning("None of the inputs of function are tensors, which not need use recompute!")
+    for arg in args:
+        if isinstance(arg, (tuple, list)):
+            for data in arg:
+                if isinstance(data, Tensor):
+                    logger.info("For recompute block {}, tensor input in Tuple or list "
+                                "will not calculate grads!".format(block))
+                    break
+
+
+def _padding_input_grads(args, input_grads):
+    """
+    Padding input grads to same as input args
+    :param args:
+    :param input_grads:
+    :return:
+    """
+    for i, arg in enumerate(args):
+        if isinstance(arg, (list, tuple)):
+            if all([not isinstance(data, Tensor) for data in arg]):
+                input_grads.insert(i, None)
+            else:
+                # None is placeholder
+                grads = [None for data in arg]
+                input_grads.insert(i, grads)
+        elif not isinstance(arg, Tensor):
+            input_grads.insert(i, None)
+    if len(args) != len(input_grads):
+        raise ValueError("For recompute cell, the input grads size should be same as input args size: {}, "
+                         "but got {}".format(len(args), len(input_grads)))
+
+
+def _check_validation(block):
+    if not isinstance(block, Cell):
+        raise TypeError("Recompute function now only support block which inherited from Cell!")
+    if context.get_context("mode") != context.PYNATIVE_MODE:
+        raise AssertionError("Recompute function now only support pynative mode, you can use "
+                             "Cell.recompute() in graph mode.")
+    if block.construct.__code__.co_name == "staging_specialize":
+        logger.warning('Block\'s construct method decorated by @jit that recompute '
+                       'function will not come into effect.')
+
+
+def recompute(block, *args, **kwargs):
+    r"""
+    This function is used to reduce memory, when run block, rather than
+    storing the intermediate activation computed in forward pass, we will recompute it in backward pass.
+
+    Note:
+        - Recompute function only support block which inherited from Cell object.
+        - This function interface now only support pynative mode. you can use Cell.recompute interface
+          in graph mode.
+        - When use recompute function, block object should not decorated by @jit.
+
+    Args:
+        block (Cell): Block to be recompute.
+        args(tuple): Inputs for block object to run forward pass.
+        kwargs(dict): Optional input for recompute function.
+
+    Returns:
+        Same as return type of block.
+
+    Raises:
+        TypeError: If `block` is not Cell object.
+        AssertionError: If execute mode is not PYNATIVE_MODE.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore.nn as nn
+        >>> import mindspore.ops as ops
+        >>> from mindspore import Tensor, recompute
+        >>> class MyCell(nn.Cell):
+        ...     def __init__(self):
+        ...         super(MyCell, self).__init__(auto_prefix=False)
+        ...         self.conv = nn.Conv2d(2, 2, 2, has_bias=False, weight_init='ones')
+        ...         self.relu = ops.ReLU()
+        ...
+        ...     def construct(self, x):
+        ...         y = recompute(self.conv, x)
+        ...         return self.relu(y)
+        >>> inputs = Tensor(np.ones([2, 2, 2, 2]).astype(np.float32) * 2)
+        >>> my_net = MyCell()
+        >>> grad = ops.grad(my_net)(inputs)
+        >>> print(grad)
+        [[[[2. 4.]
+           [4. 8.]]
+          [[2. 4.]
+           [4. 8.]]]
+         [[[2. 4.]
+           [4. 8.]]
+          [[2. 4.]
+           [4. 8.]]]]
+    """
+
+    _check_validation(block)
+    return _RecomputeCell(block)(*args, **kwargs)
+
+
+def recompute_generator(block):
+    """
+    generator of recompute object.
+    :param block:
+    :return:
+    """
+    return _RecomputeCell(block)
+
+
+recompute_registry.register(recompute_generator)
+
+__all__ = ['recompute']
+__all__.sort()
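The hunk above lines up with the new mindspore/common/recompute.py entry (+247/-0) in the file list: a functional recompute API for PyNative mode. A minimal usage sketch, assuming PyNative mode and an illustrative Dense block that is not part of the diff:

import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor, recompute

ms.set_context(mode=ms.PYNATIVE_MODE)  # the functional recompute API only supports PyNative mode

class Block(nn.Cell):
    def __init__(self):
        super().__init__()
        self.dense = nn.Dense(4, 4)
        self.relu = nn.ReLU()

    def construct(self, x):
        # activations of self.dense are not kept from the forward pass;
        # they are recomputed when the backward pass needs them
        y = recompute(self.dense, x)
        return self.relu(y)

net = Block()
x = Tensor(np.ones((2, 4)).astype(np.float32))
grads = ms.grad(net)(x)

Per the implementation above, the CPU RNG state is captured by default (save_rng_state defaults to True), so random ops replay consistently during recomputation.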
@@ -226,7 +226,7 @@ class COOTensor(COOTensor_):
 
    Common arithmetic operations include: addition (+), subtraction (-), multiplication (*),
    and division (/). For details about operations supported by `COOTensor`, see
-    `operators <https://www.mindspore.cn/docs/en/r2.3.q1/note/static_graph_syntax_support.html#operators>`_.
+    `operators <https://www.mindspore.cn/docs/en/master/note/static_graph_syntax_support.html#operators>`_.
 
    .. warning::
        - This is an experimental API that is subject to change or deletion.
@@ -653,7 +653,7 @@ class CSRTensor(CSRTensor_):
 
    Common arithmetic operations include: addition (+), subtraction (-), multiplication (*),
    and division (/). For details about operations supported by `CSRTensor`, see
-    `operators <https://www.mindspore.cn/docs/en/r2.3.q1/note/static_graph_syntax_support.html#operators>`_.
+    `operators <https://www.mindspore.cn/docs/en/master/note/static_graph_syntax_support.html#operators>`_.
 
    .. warning::
        - This is an experimental API that is subjected to change.
@@ -22,7 +22,7 @@ class Symbol:
    Symbol is a data structure to indicate the symbolic info of shape.
 
    For dynamic shape networks, compared with only setting the unknown dimensions ( ``None`` ) in `Tensor` , providing
-    more symbolic shape info can help the framework better optimize the computation graph, to improve the performce of
+    more symbolic shape info can help the framework better optimize the computation graph, to improve the performance of
    network execution.
 
    Args:
@@ -27,12 +27,13 @@ from mindspore.common.seed import get_seed
 from mindspore import context
 from mindspore import log as logger
 from mindspore.common import dtype as mstype
+from mindspore.common.hook_handle import _TensorHookHandle
 
 from mindspore.common._utils import get_slice_num
 from mindspore.common._register_for_tensor import tensor_operator_registry
 from mindspore._c_expression import Tensor as Tensor_
 from mindspore import _checkparam as validator
-from mindspore._checkparam import check_is_number, is_stub_tensor
+from mindspore._checkparam import check_is_number, is_stub_tensor, check_hook_fn
 from mindspore._check_jit_forbidden_api import jit_forbidden_register
 from mindspore.common.symbol import Symbol
 
@@ -83,11 +84,11 @@ def tensor(input_data=None, dtype=None, shape=None, init=None, internal=False, c
    based on the `dtype` argument.
 
    Please refer to `Creating and Using Tensor
-    <https://www.mindspore.cn/docs/en/r2.3.q1/note/static_graph_syntax_support.html#mindspore-user-defined-data-types>`_ .
+    <https://www.mindspore.cn/docs/en/master/note/static_graph_syntax_support.html#mindspore-user-defined-data-types>`_ .
 
    The difference between it and the Tensor class is that it adds
    `Annotation
-    <https://www.mindspore.cn/docs/en/r2.3.q1/design/dynamic_graph_and_static_graph.html?#annotation-type>`_
+    <https://www.mindspore.cn/docs/en/master/design/dynamic_graph_and_static_graph.html?#annotation-type>`_
    which can prevent the generation of AnyType compared to the Tensor class.
 
    The arguments and return values are the same as the Tensor class. Also see: :class:`mindspore.Tensor`.
@@ -1226,6 +1227,54 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        return Tensor_.storage_offset(self)
 
+    def register_hook(self, hook_fn):
+        """
+        Registers a backward hook for tensor.
+
+        Note:
+            - The `register_backward_hook(hook_fn)` does not work in graph mode or functions decorated with 'jit'.
+            - The 'hook_fn' must be defined as the following code. `grad` is the gradient passed to the tensor,
+              which may be modified by returning a new output gradient.
+            - The 'hook_fn' should have the following signature:
+              hook_fn(grad) -> New output gradient, but can not return None or not set return value.
+
+        Args:
+            hook_fn (function): Python function. Tensor backward hook function.
+
+        Returns:
+            A handle corresponding to the `hook_fn` . The handle can be used to remove the added `hook_fn` by calling
+            `handle.remove()` .
+
+        Raises:
+            TypeError: If the `hook_fn` is not a function of python.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import mindspore as ms
+            >>> from mindspore import Tensor
+            >>> ms.set_context(mode=ms.PYNATIVE_MODE)
+            >>> def hook_fn(grad):
+            ...     return grad * 2
+            ...
+            >>> def hook_test(x, y):
+            ...     z = x * y
+            ...     z.register_hook(hook_fn)
+            ...     z = z * y
+            ...     return z
+            ...
+            >>> ms_grad = ms.grad(hook_test, grad_position=(0,1))
+            >>> output = ms_grad(Tensor(1, ms.float32), Tensor(2, ms.float32))
+            >>> print(output)
+            (Tensor(shape=[], dtype=Float32, value=8), Tensor(shape=[], dtype=Float32, value=6))
+        """
+        if not check_hook_fn("register_hook", hook_fn):
+            return _TensorHookHandle()
+        handle = _TensorHookHandle()
+        handle.id = Tensor_.register_hook(self, hook_fn)
+        return handle
+
    def flush_from_cache(self):
        """
        Flush cache data to host if tensor is cache enable.
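The docstring above already shows a basic hook; the sketch below only illustrates the handle that register_hook now returns, which can detach the hook again (PyNative mode assumed, values illustrative):

import mindspore as ms
from mindspore import Tensor

ms.set_context(mode=ms.PYNATIVE_MODE)

def double_grad(grad):
    return grad * 2              # must return the new gradient, never None

def forward(x, y):
    z = x * y
    handle = z.register_hook(double_grad)
    out = z * y
    # handle.remove() would detach double_grad again before the backward pass runs
    return out

grads = ms.grad(forward, grad_position=(0, 1))(Tensor(1, ms.float32), Tensor(2, ms.float32))
print(grads)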
@@ -1815,9 +1864,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
    def reverse(self, axis):
        """
-        For details, please refer to :func:`mindspore.ops.reverse`.
+        For details, please refer to :func:`mindspore.ops.flip`.
        """
-        return tensor_operator_registry.get('reverse')(self, axis)
+        return tensor_operator_registry.get('flip')(self, axis)
 
    def amax(self, axis=None, keepdims=False, *, initial=None, where=None):
        """
@@ -1839,11 +1888,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        return tensor_operator_registry.get("reverse_sequence")(self, seq_lengths, seq_dim, batch_dim)
 
-    def prod(self, axis=None, keep_dims=False):
+    def prod(self, axis=None, keep_dims=False, dtype=None):
        """
        For details, please refer to :func:`mindspore.ops.prod`.
        """
-        return tensor_operator_registry.get('prod')(self, axis, keep_dims)
+        return tensor_operator_registry.get('prod')(self, axis, keep_dims, dtype)
 
    def select(self, condition, y):
        r"""
@@ -2720,7 +2769,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            opt_shard_group(str): Optimizer shard group which is used in auto or semi auto parallel mode
                to get one shard of a parameter's slice. For more information about optimizer parallel, please refer to:
                `Optimizer Parallel
-                <https://www.mindspore.cn/tutorials/experts/en/r2.3.q1/parallel/optimizer_parallel.html>`_.
+                <https://www.mindspore.cn/tutorials/experts/en/master/parallel/optimizer_parallel.html>`_.
                Default: ``None``.
 
        Returns:
@@ -3306,14 +3355,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            >>> print(input_x.sum(axis=1))
            [10. 35.]
        """
-        if initial is not None and not isinstance(initial, (int, float, bool)):
-            raise TypeError(f"For Tensor.sum, initial must be int, float or bool, but got {type(initial)}.")
-        res = tensor_operator_registry.get("sum")(self, axis, keepdims)
-        if initial is not None:
-            res += initial
-        if dtype is not None:
-            res = res.astype(dtype)
-        return res
+        if initial is None:
+            return tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype)
+        return tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype) + initial
 
    def sum_to_size(self, *size):
        r"""
@@ -4393,14 +4437,15 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        return tensor_operator_registry.get('not_equal')(self, other)
 
-    def new_zeros(self, size, *, dtype=None):
+    def new_zeros(self, size, dtype=None):
        r"""
        Return a tensor of `size` filled with zeros.
 
-        Args:
-            size (Union[int, tuple, list]): An int, list or tuple of integers defining the output shape.
+        .. warning::
+            For argument `size`, Tensor type input will be deprecated in the future version.
 
-        Keyword Args:
+        Args:
+            size (Union[int, tuple, list, Tensor]): An int, list or tuple of integers defining the output shape.
            dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned tensor has
                thesame dtype as `self`. Default: ``None``.
 
@@ -4408,7 +4453,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            Tensor, the shape and dtype is defined above and filled with zeros.
 
        Raises:
-            TypeError: If `size` is not an int, list or tuple of integers.
+            TypeError: If `size` is neither an int nor an tuple/list/Tensor of int.
 
        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``
@@ -4423,20 +4468,17 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            [[0. 0.]
             [0. 0.]]
        """
-        validator.check_value_type('size', size, [list, int, tuple], 'Tensor.new_zeros')
-        if isinstance(size, list):
-            size = tuple(size)
-        _dtype = self.dtype if dtype is None else dtype
-        return tensor_operator_registry.get('zeros')(size, _dtype)
+        return tensor_operator_registry.get('zeros')(size, dtype)
 
-    def new_ones(self, size, *, dtype=None):
+    def new_ones(self, size, dtype=None):
        r"""
        Return a tensor of `size` filled with ones.
 
-        Args:
-            size (Union[int, tuple, list]): An int, list or tuple of integers defining the output shape.
+        .. warning::
+            For argument `size`, Tensor type input will be deprecated in the future version.
 
-        Keyword Args:
+        Args:
+            size (Union[int, tuple, list, Tensor]): An int, list or tuple of integers defining the output shape.
            dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned
                tensor has the same dtype as `self`. Default: ``None``.
 
@@ -4444,7 +4486,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            Tensor, the shape and dtype is defined above and filled with ones.
 
        Raises:
-            TypeError: If `size` is not an int, list or tuple of integers.
+            TypeError: If `size` is neither an int nor an tuple/list/Tensor of int.
 
        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``
@@ -4459,11 +4501,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            [[1. 1.]
             [1. 1.]]
        """
-        validator.check_value_type('size', size, [list, int, tuple], 'Tensor.new_zeros')
-        if isinstance(size, list):
-            size = tuple(size)
-        _dtype = self.dtype if dtype is None else dtype
-        return tensor_operator_registry.get('ones')(size, _dtype)
+        return tensor_operator_registry.get('ones')(size, dtype)
 
    def sign(self):
        r"""
@@ -19,14 +19,14 @@ Note that the APIs in the following list need to preset communication environmen
 
 For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
 Please see the `rank table Startup
-<https://www.mindspore.cn/tutorials/experts/en/r2.3.q1/parallel/rank_table.html>`_
+<https://www.mindspore.cn/tutorials/experts/en/master/parallel/rank_table.html>`_
 for more details.
 
 For the GPU devices, users need to prepare the host file and mpi, please see the `mpirun Startup
-<https://www.mindspore.cn/tutorials/experts/en/r2.3.q1/parallel/mpirun.html>`_ .
+<https://www.mindspore.cn/tutorials/experts/en/master/parallel/mpirun.html>`_ .
 
 For the CPU device, users need to write a dynamic cluster startup script, please see the `Dynamic Cluster Startup
-<https://www.mindspore.cn/tutorials/experts/en/r2.3.q1/parallel/dynamic_cluster.html>`_ .
+<https://www.mindspore.cn/tutorials/experts/en/master/parallel/dynamic_cluster.html>`_ .
 """
 
 from mindspore.communication.management import GlobalComm, init, release, get_rank, \
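Only the tutorial links change in this module docstring. For reference, a minimal sketch of the initialization flow it describes, assuming the communication environment (rank table, host file, or dynamic cluster variables) has already been prepared:

import mindspore as ms
from mindspore.communication import init, get_rank, get_group_size

ms.set_context(device_target="Ascend")   # or "GPU" / "CPU", matching the prepared environment
init()                                   # backend is chosen from the device target (HCCL on Ascend, NCCL on GPU)
print("rank:", get_rank(), "of", get_group_size())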