mindspore 2.3.0__cp39-none-any.whl → 2.3.0rc2__cp39-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic. See the advisory details for more information.

Files changed (423)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Third_Party_Open_Source_Software_Notice +0 -1512
  3. mindspore/__init__.py +1 -2
  4. mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
  5. mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
  6. mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
  7. mindspore/_checkparam.py +25 -5
  8. mindspore/_extends/graph_kernel/model/graph_parallel.py +1 -1
  9. mindspore/_extends/parse/__init__.py +2 -2
  10. mindspore/_extends/parse/compile_config.py +0 -29
  11. mindspore/_extends/parse/namespace.py +2 -2
  12. mindspore/_extends/parse/parser.py +5 -21
  13. mindspore/_extends/parse/resources.py +7 -5
  14. mindspore/_extends/parse/standard_method.py +59 -40
  15. mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
  16. mindspore/amp.py +5 -26
  17. mindspore/bin/cache_admin +0 -0
  18. mindspore/bin/cache_server +0 -0
  19. mindspore/boost/adasum.py +1 -1
  20. mindspore/boost/base.py +1 -1
  21. mindspore/boost/boost_cell_wrapper.py +1 -1
  22. mindspore/boost/grad_freeze.py +2 -2
  23. mindspore/boost/less_batch_normalization.py +6 -9
  24. mindspore/common/__init__.py +1 -8
  25. mindspore/common/_register_for_tensor.py +9 -8
  26. mindspore/common/api.py +65 -275
  27. mindspore/common/dtype.py +4 -8
  28. mindspore/common/dump.py +5 -2
  29. mindspore/common/jit_config.py +1 -1
  30. mindspore/common/lazy_inline.py +2 -14
  31. mindspore/common/parameter.py +15 -14
  32. mindspore/common/recompute.py +5 -20
  33. mindspore/common/sparse_tensor.py +6 -21
  34. mindspore/common/tensor.py +52 -100
  35. mindspore/communication/__init__.py +11 -6
  36. mindspore/communication/management.py +94 -92
  37. mindspore/context.py +18 -180
  38. mindspore/dataset/engine/datasets.py +46 -69
  39. mindspore/dataset/engine/datasets_user_defined.py +53 -72
  40. mindspore/dataset/engine/datasets_vision.py +2 -2
  41. mindspore/dataset/engine/queue.py +38 -56
  42. mindspore/dataset/engine/validators.py +5 -11
  43. mindspore/dataset/vision/__init__.py +5 -5
  44. mindspore/dataset/vision/c_transforms.py +5 -5
  45. mindspore/dataset/vision/py_transforms_util.py +1 -1
  46. mindspore/dataset/vision/transforms.py +46 -591
  47. mindspore/dataset/vision/utils.py +1 -121
  48. mindspore/dataset/vision/validators.py +3 -9
  49. mindspore/hal/__init__.py +1 -7
  50. mindspore/hal/device.py +1 -1
  51. mindspore/include/api/model.h +0 -3
  52. mindspore/include/dataset/vision.h +2 -54
  53. mindspore/include/mindapi/base/types.h +0 -1
  54. mindspore/lib/libdnnl.so.2 +0 -0
  55. mindspore/lib/libmindspore.so +0 -0
  56. mindspore/lib/libmindspore_backend.so +0 -0
  57. mindspore/lib/libmindspore_common.so +0 -0
  58. mindspore/lib/libmindspore_core.so +0 -0
  59. mindspore/lib/libmindspore_glog.so.0 +0 -0
  60. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  61. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  62. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  63. mindspore/lib/libmindspore_shared_lib.so +0 -0
  64. mindspore/lib/libmpi_adapter.so +0 -0
  65. mindspore/lib/libmpi_collective.so +0 -0
  66. mindspore/lib/libnnacl.so +0 -0
  67. mindspore/lib/libopencv_core.so.4.5 +0 -0
  68. mindspore/lib/libps_cache.so +0 -0
  69. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -35
  70. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  71. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  72. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  73. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +0 -72
  74. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  75. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/{aclnn_all_finite.h → aclnn_add_custom.h} +11 -9
  76. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +1 -1
  77. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +1 -1
  78. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
  79. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +12 -184
  80. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +15 -7
  81. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +15 -7
  82. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
  83. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
  84. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +31 -77
  85. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +31 -77
  86. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
  87. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  88. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +5 -4
  89. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
  90. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  91. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  92. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  93. mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
  94. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  95. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
  96. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
  97. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
  98. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +286 -275
  99. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
  100. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
  101. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
  102. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
  103. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -1
  104. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -1
  105. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -3
  106. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -5
  107. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_tiling.h +45 -1
  108. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -1
  109. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +4 -8
  110. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_tiling.h +4 -11
  111. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/kernel/flash_attention_score_mix_hwsync.h +0 -18
  112. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -6
  113. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +75 -1
  114. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/kernel/matmul.h +5 -5
  115. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +3 -18
  116. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_common_tiling.h +5 -5
  117. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_info.h +2 -2
  118. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_data.h +3 -36
  119. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/kernel/matmul_stridedslice_fusion.h +2 -2
  120. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +4 -22
  121. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +2 -16
  122. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +3 -1
  123. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +4 -5
  124. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +4 -9
  125. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +2 -5
  126. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -1
  127. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +4 -10
  128. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +12 -0
  129. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -1
  130. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -1
  131. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +1 -1
  132. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +2 -10
  133. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +1 -5
  134. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -1
  135. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -17
  136. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +7 -2
  137. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
  138. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
  139. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
  140. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
  141. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
  142. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
  143. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
  144. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
  145. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
  146. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
  147. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
  148. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
  149. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
  150. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
  151. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
  152. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
  153. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
  154. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
  155. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
  156. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
  157. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
  158. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
  159. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
  160. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
  161. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
  162. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
  163. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
  164. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  165. mindspore/mindrecord/filewriter.py +2 -2
  166. mindspore/mint/__init__.py +40 -720
  167. mindspore/mint/nn/__init__.py +7 -89
  168. mindspore/mint/nn/functional.py +16 -165
  169. mindspore/mint/optim/adamw.py +16 -15
  170. mindspore/nn/__init__.py +2 -0
  171. mindspore/nn/cell.py +98 -97
  172. mindspore/nn/extend/basic.py +2 -2
  173. mindspore/nn/extend/embedding.py +1 -1
  174. mindspore/nn/extend/layer/normalization.py +5 -7
  175. mindspore/nn/generator.py +297 -0
  176. mindspore/nn/layer/activation.py +3 -4
  177. mindspore/nn/layer/basic.py +16 -79
  178. mindspore/nn/layer/conv.py +8 -17
  179. mindspore/nn/layer/embedding.py +4 -1
  180. mindspore/nn/layer/math.py +1 -1
  181. mindspore/nn/layer/normalization.py +1 -1
  182. mindspore/nn/layer/pooling.py +0 -5
  183. mindspore/nn/layer/rnn_cells.py +2 -2
  184. mindspore/nn/loss/loss.py +19 -19
  185. mindspore/nn/optim/adasum.py +1 -1
  186. mindspore/nn/optim/sgd.py +2 -3
  187. mindspore/nn/probability/distribution/exponential.py +1 -1
  188. mindspore/nn/probability/distribution/geometric.py +1 -1
  189. mindspore/nn/probability/distribution/logistic.py +1 -1
  190. mindspore/nn/wrap/cell_wrapper.py +1 -25
  191. mindspore/nn/wrap/loss_scale.py +1 -24
  192. mindspore/numpy/array_ops.py +1 -5
  193. mindspore/numpy/dtypes.py +3 -3
  194. mindspore/numpy/math_ops.py +8 -8
  195. mindspore/ops/__init__.py +1 -1
  196. mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -75
  197. mindspore/ops/_vmap/vmap_array_ops.py +0 -27
  198. mindspore/ops/_vmap/vmap_math_ops.py +1 -29
  199. mindspore/ops/_vmap/vmap_nn_ops.py +18 -19
  200. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +8 -34
  201. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +9 -2
  202. mindspore/ops/auto_generate/gen_arg_handler.py +0 -26
  203. mindspore/ops/auto_generate/gen_extend_func.py +27 -603
  204. mindspore/ops/auto_generate/gen_ops_def.py +203 -993
  205. mindspore/ops/auto_generate/gen_ops_prim.py +402 -1946
  206. mindspore/ops/auto_generate/pyboost_inner_prim.py +20 -90
  207. mindspore/ops/composite/base.py +6 -3
  208. mindspore/ops/composite/math_ops.py +1 -1
  209. mindspore/ops/composite/multitype_ops/_compile_utils.py +17 -24
  210. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  211. mindspore/ops/extend/__init__.py +3 -2
  212. mindspore/ops/extend/array_func.py +51 -10
  213. mindspore/ops/extend/nn_func.py +78 -2
  214. mindspore/ops/function/__init__.py +13 -8
  215. mindspore/ops/function/array_func.py +179 -455
  216. mindspore/ops/function/clip_func.py +1 -1
  217. mindspore/ops/function/grad/grad_func.py +3 -3
  218. mindspore/ops/function/math_func.py +103 -117
  219. mindspore/ops/function/nn_func.py +163 -275
  220. mindspore/ops/function/other_func.py +2 -2
  221. mindspore/ops/function/random_func.py +69 -202
  222. mindspore/ops/function/sparse_func.py +4 -4
  223. mindspore/ops/functional.py +327 -332
  224. mindspore/ops/operations/__init__.py +3 -13
  225. mindspore/ops/operations/_grad_ops.py +27 -3
  226. mindspore/ops/operations/_inner_ops.py +356 -53
  227. mindspore/ops/operations/_rl_inner_ops.py +2 -2
  228. mindspore/ops/operations/_tensor_array.py +8 -8
  229. mindspore/ops/operations/array_ops.py +65 -82
  230. mindspore/ops/operations/comm_ops.py +93 -784
  231. mindspore/ops/operations/custom_ops.py +28 -51
  232. mindspore/ops/operations/debug_ops.py +4 -4
  233. mindspore/ops/operations/inner_ops.py +2 -2
  234. mindspore/ops/operations/manually_defined/ops_def.py +4 -304
  235. mindspore/ops/operations/math_ops.py +50 -3
  236. mindspore/ops/operations/nn_ops.py +247 -14
  237. mindspore/ops/operations/other_ops.py +3 -3
  238. mindspore/ops/operations/random_ops.py +1 -1
  239. mindspore/ops/operations/sparse_ops.py +1 -1
  240. mindspore/ops/primitive.py +8 -9
  241. mindspore/ops/silent_check.py +5 -5
  242. mindspore/ops_generate/arg_dtype_cast.py +9 -2
  243. mindspore/ops_generate/arg_handler.py +0 -26
  244. mindspore/ops_generate/gen_aclnn_implement.py +4 -1
  245. mindspore/ops_generate/gen_ops.py +4 -26
  246. mindspore/ops_generate/gen_pyboost_func.py +12 -41
  247. mindspore/ops_generate/gen_utils.py +0 -21
  248. mindspore/ops_generate/pyboost_utils.py +2 -7
  249. mindspore/ops_generate/template.py +0 -1
  250. mindspore/parallel/_auto_parallel_context.py +1 -21
  251. mindspore/parallel/_tensor.py +5 -0
  252. mindspore/parallel/_transformer/transformer.py +1 -1
  253. mindspore/parallel/_utils.py +1 -15
  254. mindspore/parallel/algo_parameter_config.py +3 -1
  255. mindspore/parallel/checkpoint_transform.py +9 -12
  256. mindspore/parallel/cluster/process_entity/_api.py +29 -28
  257. mindspore/parallel/cluster/process_entity/_utils.py +3 -13
  258. mindspore/parallel/cluster/run.py +16 -13
  259. mindspore/parallel/parameter_broadcast.py +2 -2
  260. mindspore/parallel/shard.py +17 -31
  261. mindspore/profiler/__init__.py +2 -3
  262. mindspore/profiler/common/util.py +2 -107
  263. mindspore/profiler/envprofiling.py +1 -1
  264. mindspore/profiler/parser/ascend_analysis/constant.py +21 -8
  265. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -82
  266. mindspore/profiler/parser/ascend_analysis/function_event.py +28 -43
  267. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +27 -49
  268. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +10 -15
  269. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +20 -25
  270. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +5 -5
  271. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +1 -10
  272. mindspore/profiler/parser/ascend_hccl_generator.py +1 -4
  273. mindspore/profiler/parser/ascend_msprof_exporter.py +22 -43
  274. mindspore/profiler/parser/ascend_timeline_generator.py +5 -7
  275. mindspore/profiler/parser/minddata_parser.py +3 -72
  276. mindspore/profiler/profiling.py +59 -176
  277. mindspore/rewrite/api/node.py +1 -1
  278. mindspore/rewrite/common/namespace.py +5 -5
  279. mindspore/rewrite/parsers/assign_parser.py +0 -2
  280. mindspore/rewrite/parsers/class_def_parser.py +4 -8
  281. mindspore/run_check/_check_version.py +1 -1
  282. mindspore/scipy/fft.py +3 -1
  283. mindspore/scipy/linalg.py +3 -2
  284. mindspore/scipy/ops.py +3 -5
  285. mindspore/scipy/optimize/__init__.py +2 -2
  286. mindspore/train/__init__.py +4 -4
  287. mindspore/train/anf_ir_pb2.py +2 -8
  288. mindspore/train/callback/__init__.py +2 -5
  289. mindspore/train/callback/_backup_and_restore.py +2 -2
  290. mindspore/train/callback/_checkpoint.py +16 -104
  291. mindspore/train/callback/_landscape.py +1 -1
  292. mindspore/train/callback/_time_monitor.py +1 -1
  293. mindspore/train/data_sink.py +4 -5
  294. mindspore/train/dataset_helper.py +20 -45
  295. mindspore/train/model.py +38 -266
  296. mindspore/train/serialization.py +105 -256
  297. mindspore/train/summary/_summary_adapter.py +1 -1
  298. mindspore/version.py +1 -1
  299. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
  300. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +303 -420
  301. mindspore/_extends/pijit/__init__.py +0 -23
  302. mindspore/_extends/pijit/pijit_func_white_list.py +0 -343
  303. mindspore/common/file_system.py +0 -48
  304. mindspore/common/generator.py +0 -260
  305. mindspore/common/no_inline.py +0 -54
  306. mindspore/common/np_dtype.py +0 -25
  307. mindspore/communication/comm_func.py +0 -1140
  308. mindspore/hal/memory.py +0 -326
  309. mindspore/lib/libavcodec.so.59 +0 -0
  310. mindspore/lib/libavdevice.so.59 +0 -0
  311. mindspore/lib/libavfilter.so.8 +0 -0
  312. mindspore/lib/libavformat.so.59 +0 -0
  313. mindspore/lib/libavutil.so.57 +0 -0
  314. mindspore/lib/libmindspore_np_dtype.so +0 -0
  315. mindspore/lib/libswresample.so.4 +0 -0
  316. mindspore/lib/libswscale.so.6 +0 -0
  317. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.cpp +0 -326
  318. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.py +0 -180
  319. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
  320. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o +0 -0
  321. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
  322. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o +0 -0
  323. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
  324. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o +0 -0
  325. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
  326. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
  327. mindspore/lib/plugin/ascend/custom_compiler/OWNERS +0 -12
  328. mindspore/lib/plugin/ascend/custom_compiler/setup.py +0 -255
  329. mindspore/lib/plugin/ascend/custom_compiler/start.sh +0 -26
  330. mindspore/lib/plugin/ascend/custom_compiler/template.json +0 -40
  331. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
  332. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -69
  333. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/base_type.h +0 -133
  334. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -32
  335. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -35
  336. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/tiling_info.h +0 -60
  337. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -37
  338. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
  339. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
  340. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_op.h +0 -34
  341. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_backoff_base.h +0 -62
  342. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -33
  343. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_ops.h +0 -88
  344. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_pa_op.h +0 -45
  345. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
  346. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -95
  347. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -84
  348. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -61
  349. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -224
  350. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
  351. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
  352. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
  353. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
  354. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
  355. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
  356. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
  357. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
  358. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
  359. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
  360. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
  361. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
  362. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
  363. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
  364. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
  365. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
  366. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
  367. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
  368. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
  369. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
  370. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
  371. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
  372. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
  373. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
  374. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
  375. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
  376. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
  377. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
  378. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
  379. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
  380. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
  381. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
  382. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
  383. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
  384. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
  385. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
  386. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_impl.h +0 -45
  387. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_tiling.h +0 -187
  388. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul.h +0 -245
  389. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_interface.h +0 -24
  390. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_utils.h +0 -111
  391. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/tiling_data.h +0 -54
  392. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
  393. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
  394. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
  395. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -364
  396. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -69
  397. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
  398. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -114
  399. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -98
  400. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.json +0 -19
  401. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.o +0 -0
  402. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aic_0.o +0 -0
  403. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aiv_0.o +0 -0
  404. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.json +0 -19
  405. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.o +0 -0
  406. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aic_0.o +0 -0
  407. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  408. mindspore/mint/linalg/__init__.py +0 -22
  409. mindspore/nn/layer/embedding_service.py +0 -531
  410. mindspore/nn/layer/embedding_service_layer.py +0 -393
  411. mindspore/ops/function/reshard_func.py +0 -102
  412. mindspore/ops/operations/_infer_ops.py +0 -19
  413. mindspore/ops/operations/reshard_ops.py +0 -53
  414. mindspore/profiler/common/process_pool.py +0 -41
  415. mindspore/profiler/common/singleton.py +0 -28
  416. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  417. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  418. mindspore/train/callback/_cluster_monitor.py +0 -201
  419. mindspore/train/callback/_flops_collector.py +0 -238
  420. mindspore/train/callback/_mindio_ttp.py +0 -443
  421. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
  422. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
  423. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
@@ -24,7 +24,6 @@ import numpy as np
24
24
  import mindspore as ms
25
25
  import mindspore.common.dtype as mstype
26
26
  from mindspore.ops import operations as P
27
- from mindspore.ops import functional as F
28
27
  from mindspore.ops.primitive import constexpr
29
28
  from mindspore.ops.primitive import _primexpr
30
29
  import mindspore.ops as ops
@@ -32,10 +31,9 @@ from mindspore.ops.operations._inner_ops import DynamicBroadcastTo
32
31
  from mindspore.ops.operations._sequence_ops import TupleToTensor
33
32
  from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
34
33
  from mindspore.ops.operations._sequence_ops import TensorToList
35
- from mindspore.ops.auto_generate import OnesLikeExt, ZerosLikeExt, FillScalar, FillTensor, Arange, Chunk, UniqueDim,\
36
- Unique2, SortExt, NonZero, NonZeroExt
34
+ from mindspore.ops.auto_generate import OnesLikeExt, ZerosLikeExt, FillScalar, FillTensor, Arange, Chunk
37
35
  from mindspore.ops.auto_generate.gen_ops_prim import SplitTensor
38
- from mindspore.ops.auto_generate.gen_ops_prim import SplitWithSize, RepeatInterleaveInt, RepeatInterleaveTensor
36
+ from mindspore.ops.auto_generate.gen_ops_prim import SplitWithSize, RepeatInterleave
39
37
 
40
38
  from mindspore.ops.operations.array_ops import (
41
39
  UniqueConsecutive,
@@ -53,8 +51,6 @@ from mindspore.ops.operations.array_ops import (
53
51
  Expand,
54
52
  Lstsq,
55
53
  Mvlgamma,
56
- Tril,
57
- Argmax,
58
54
  ArgMaxWithValue,
59
55
  ArgMinWithValue
60
56
  )
@@ -66,13 +62,12 @@ from mindspore._c_expression import Tensor as Tensor_
66
62
  from mindspore.ops._utils.utils import ms_arrange
67
63
 
68
64
  from mindspore.ops.auto_generate import cat, range, scatter_nd, deepcopy, masked_fill, diagonal, expand_dims, \
69
- flip, transpose, triu, unsorted_segment_sum, diag, gather, gather_d, gather_nd, reshape, \
65
+ nonzero, flip, transpose, tril, triu, unsorted_segment_sum, diag, gather, gather_d, gather_nd, reshape, \
70
66
  broadcast_to, strided_slice, ones, zeros, max_, min_, select
71
- from mindspore.ops.auto_generate.gen_ops_prim import scatter_add_ext_op, slice_ext_op
67
+ from mindspore.ops.auto_generate.gen_ops_prim import scatter_add_ext_op
72
68
  from mindspore.ops.operations.manually_defined import tile, rank, scalar_cast
73
69
 
74
70
  arg_max_with_value_ = ArgMaxWithValue()
75
- arg_min_with_value_ = ArgMinWithValue()
76
71
  batch_to_space_nd_v2_ = P.BatchToSpaceNDV2()
77
72
  cast_ = P.Cast()
78
73
  diag_ = P.Diag()
@@ -133,15 +128,9 @@ ones_like_ext_ = OnesLikeExt()
133
128
  zeros_like_ext_ = ZerosLikeExt()
134
129
  fill_scalar_ = FillScalar()
135
130
  fill_tensor_ = FillTensor()
136
- sort_ext_ = SortExt()
137
131
  arange_ = Arange()
138
132
  chunk_ = Chunk()
139
- repeat_interleave_int_ = RepeatInterleaveInt()
140
- repeat_interleave_tensor_ = RepeatInterleaveTensor()
141
- unique_dim_ = UniqueDim()
142
- unique2_ = Unique2()
143
- non_zero_ = NonZero()
144
- non_zero_ext_ = NonZeroExt()
133
+ repeat_interleave_ = RepeatInterleave()
145
134
 
146
135
 
147
136
  @_primexpr
@@ -286,10 +275,13 @@ def arange_ext(start=0, end=None, step=1, *, dtype=None):
286
275
  `step` up to but not including `end`.
287
276
 
288
277
  Args:
289
- start (Union[float, int], optional): The start of the interval. Default: ``0`` .
290
- end (Union[float, int], optional): The end of the interval, exclusive.
278
+ start (Union[float, int, Tensor], optional): The start of the interval.
279
+ If Tensor, the shape must be :math:`()` . Default: ``0`` .
280
+ end (Union[float, int, Tensor], optional): The end of the interval, exclusive.
281
+ If Tensor, the shape must be :math:`()`.
291
282
  Default: ``None`` . If ``None`` , it defaults to the value of `start`, and 0 is used as the starting value.
292
- step (Union[float, int], optional): The step size with which the array element increments. Default: ``1`` .
283
+ step (Union[float, int, Tensor], optional): Number that increments `start`.
284
+ If Tensor, the shape must be :math:`()`. Default: ``1`` .
293
285
 
294
286
  Keyword Args:
295
287
  dtype (mindspore.dtype, optional): The required data type of returned Tensor. Default: ``None`` .
@@ -300,10 +292,11 @@ def arange_ext(start=0, end=None, step=1, *, dtype=None):
300
292
  If `start`, `end`, and `step` contain at least one floating-point number, the dtype of output is float32.
301
293
 
302
294
  Returns:
303
- A 1-D Tensor, cast to `dtype` if provided, may potentially lose precision due to casting.
295
+ A 1-D Tensor, with the same type as the inputs.
304
296
 
305
297
  Raises:
306
- TypeError: If `start`, `end` or `step` are not of type int or float.
298
+ TypeError: If `start`, `end` or `step` is not an int or a float or a TensorScalar(Special Tensor with shape ())
299
+ in valid dtypes.
307
300
  ValueError: If `step` = 0.
308
301
  ValueError: If `start` >= `end` when `step` > 0.
309
302
  ValueError: If `start` <= `end` when `step` < 0.
@@ -313,31 +306,35 @@ def arange_ext(start=0, end=None, step=1, *, dtype=None):
313
306
 
314
307
  Examples:
315
308
  >>> import mindspore as ms
316
- >>> from mindspore import Tensor, ops
317
- >>> output = ops.arange_ext(1, 6)
309
+ >>> from mindspore import Tensor, mint
310
+ >>> output = mint.arange(1, 6)
318
311
  >>> print(output)
319
312
  [1 2 3 4 5]
320
313
  >>> print(output.dtype)
321
314
  Int64
322
- >>> output = ops.arange_ext(0, 3, 1.2)
315
+ >>> output = mint.arange(0, 3, 1.2)
323
316
  >>> print(output)
324
317
  [0. 1.2 2.4]
325
318
  >>> print(output.dtype)
326
319
  Float32
327
- >>> output = ops.arange_ext(7, 1, -2)
320
+ >>> output = mint.arange(7, 1, -2)
328
321
  >>> print(output)
329
322
  [7 5 3]
330
323
  >>> print(output.dtype)
331
324
  Int64
332
- >>> output = ops.arange_ext(12, 2, -1, dtype=ms.bfloat16))
325
+ >>> output = mint.arange(ms.Tensor(12.0, dtype=ms.float64), 2, ms.Tensor(-1.0, dtype=ms.float32))
333
326
  >>> print(output)
334
327
  [12. 11. 10. 9. 8. 7. 6. 5. 4. 3.]
335
328
  >>> print(output.dtype)
336
- BFloat16
329
+ Float32
337
330
  """
338
331
  if end is None:
339
332
  start, end = 0, start
340
- return arange_(start, end, step, dtype)
333
+
334
+ out = arange_(start, end, step)
335
+ if dtype is not None:
336
+ out = cast_(out, dtype)
337
+ return out
341
338
 
342
339
 
343
340
  def concat(tensors, axis=0):
@@ -1078,9 +1075,7 @@ def zeros_like(input, *, dtype=None):
1078
1075
 
1079
1076
  def ones_like_ext(input, *, dtype=None):
1080
1077
  """
1081
- Creates a tensor filled with 1, with the same shape as input, and its data type is determined by the given dtype.
1082
-
1083
- If `dtype = None`, the tensor will have the same dtype as input `input`.
1078
+ Returns a Tensor with a value of 1 and its shape is the same as the input.
1084
1079
 
1085
1080
  Args:
1086
1081
  input (Tensor): Tensor of any dimension.
@@ -1102,7 +1097,7 @@ def ones_like_ext(input, *, dtype=None):
1102
1097
  >>> import numpy as np
1103
1098
  >>> from mindspore import Tensor, ops
1104
1099
  >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
1105
- >>> output = ops.function.array_func.ones_like_ext(x)
1100
+ >>> output = ops.mint.ones_like(x)
1106
1101
  >>> print(output)
1107
1102
  [[1 1]
1108
1103
  [1 1]]
@@ -1112,7 +1107,7 @@ def ones_like_ext(input, *, dtype=None):
1112
1107
 
1113
1108
  def zeros_like_ext(input, *, dtype=None):
1114
1109
  r"""
1115
- Creates a tensor filled with 0, with the same size as input. Its data type is determined by the given dtype.
1110
+ Creates a tensor filled with 0, with the same size as input, and the given dtype.
1116
1111
 
1117
1112
  If `dtype = None`, the tensor will have the same dtype as input `input`.
1118
1113
 
@@ -1137,7 +1132,7 @@ def zeros_like_ext(input, *, dtype=None):
1137
1132
  >>> import numpy as np
1138
1133
  >>> from mindspore import Tensor, ops
1139
1134
  >>> x = Tensor(np.arange(4).reshape(2, 2))
1140
- >>> output = ops.function.array_func.zeros_like_ext(x, dtype=mindspore.float32)
1135
+ >>> output = ops.mint.zeros_like(x, dtype=mindspore.float32)
1141
1136
  >>> print(output)
1142
1137
  [[0. 0.]
1143
1138
  [0. 0.]]
@@ -1204,77 +1199,6 @@ def unique(input):
1204
1199
  return y, idx
1205
1200
 
1206
1201
 
1207
- def unique_ext(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
1208
- """
1209
- Returns the unique elements of input tensor.
1210
-
1211
- when `return_inverse=True`, also return a tensor containing the index of each value of input
1212
- tensor corresponding to the output unique tensor.
1213
- when `return_counts=True`, also return a tensor containing the number of occurrences for each
1214
- unique value or tensor
1215
-
1216
- Args:
1217
- input (Tensor): The input tensor.
1218
- sorted(bool): Whether to sort the unique elements in ascending order before returning as output.
1219
- Default: ``True`` .
1220
- return_inverse(bool): Whether to also return the indices for where elements in the original input ended up in
1221
- the returned unique list. Default: ``False`` .
1222
- return_counts(bool): Whether to also return the counts for each unique element. Default: ``False`` .
1223
- dim(int): the dimension to operate upon. If ``None``, the unique of the flattened input is returned.
1224
- Otherwise, each of the tensors indexed by the given dimension is treated as one of the elements to apply the
1225
- unique operation upon. Default: ``None`` .
1226
-
1227
-
1228
- Returns:
1229
- A tensor or a tuple of tensors containing some of tensor objects (`output`, `inverse_indices`, `counts`).
1230
-
1231
- - output(Tensor) - The output tensor including the unique elements of input tensor, it has same dtype as input.
1232
- - inverse_indices(Tensor) - Return when ``return_inverse`` is True. It represents the indices for where
1233
- elements in the original input map to in the output. When ``dim`` is ``None``, it has same shape as input,
1234
- otherwise, the shape is input.shape[dim].
1235
- - counts(Tensor) - Return when ``return_counts`` is True. It represents the number of occurrences for each
1236
- unique value or tensor. When ``dim`` is ``None``, it has same shape as output, otherwise, the shape is
1237
- output.shape(dim).
1238
-
1239
-
1240
- Raises:
1241
- TypeError: If `input` is not a Tensor.
1242
-
1243
- Supported Platforms:
1244
- ``Ascend``
1245
-
1246
- Examples:
1247
- >>> import mindspore
1248
- >>> import numpy as np
1249
- >>> from mindspore import Tensor, nn
1250
- >>> from mindspore import ops
1251
- >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
1252
- >>> output = ops.unique_ext(x, return_inverse=True)
1253
- >>> print(output)
1254
- (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int64, value= [0, 1, 2, 1]))
1255
- >>> y = output[0]
1256
- >>> print(y)
1257
- [1 2 5]
1258
- >>> idx = output[1]
1259
- >>> print(idx)
1260
- [0 1 2 1]
1261
- """
1262
- if not F.isconstant(return_inverse) or not F.isconstant(return_counts):
1263
- raise ValueError(f"For 'unique_ext', 'return_inverse' and 'return_counts' cannot be mutable")
1264
- if dim is None:
1265
- y, inverse, counts = unique2_(input, sorted, return_inverse, return_counts)
1266
- else:
1267
- validator.check_value_type("return_counts", return_counts, [bool], "unique_ext")
1268
- y, inverse, counts = unique_dim_(input, sorted, return_inverse, dim)
1269
- if return_inverse and return_counts:
1270
- return y, inverse, counts
1271
- if return_inverse:
1272
- return y, inverse
1273
- if return_counts:
1274
- return y, counts
1275
- return y
1276
-
1277
-
1278
1202
  def unique_with_pad(x, pad_num):
1279
1203
  """
1280
1204
  Returns unique elements and relative indexes in 1-D tensor, filled with padding num.
@@ -1380,7 +1304,7 @@ def unique_consecutive(input, return_idx=False, return_counts=False, axis=None):
1380
1304
  return output
1381
1305
 
1382
1306
 
1383
- def searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, sorter=None):
1307
+ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):
1384
1308
  """
1385
1309
  Return the position indices such that after inserting the values into the `sorted_sequence`, the order of innermost
1386
1310
  dimension of the `sorted_sequence` remains unchanged.
@@ -1395,12 +1319,6 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=
1395
1319
  if ``False`` , the output datatype will be int64. Default: ``False`` .
1396
1320
  right (bool, optional): Search Strategy. If ``True`` , return the last suitable index found;
1397
1321
  if ``False`` , return the first such index. Default: ``False`` .
1398
- side (str, optional): the same as right but preferred. ``"left"`` corresponds to ``False`` for `right`
1399
- and ``"right"`` corresponds to ``True`` for `right`. An error will be reported if this parameter is
1400
- set to ``"left"`` while `right` is ``True``. Default: ``None`` .
1401
- sorter(Tensor, optional): if provided, a tensor matching the shape of the unsorted sorted_sequence
1402
- containing a sequence of indices that sort it in the ascending order on the innermost
1403
- dimension and type must be int64. Default: ``None`` .
1404
1322
 
1405
1323
  Returns:
1406
1324
  Tensor containing the indices from the innermost dimension of `sorted_sequence` such that,
@@ -1411,8 +1329,6 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=
1411
1329
  Raises:
1412
1330
  ValueError: If the dimension of `sorted_sequence` isn't 1 and all dimensions except the last dimension of
1413
1331
  `sorted_sequence` and `values` are different.
1414
- ValueError: If `sorted_sequence` value is a scalar.
1415
- ValueError: If `values` is a scalar when `sorted_sequence` dimension is not 1.
1416
1332
 
1417
1333
  Supported Platforms:
1418
1334
  ``Ascend`` ``GPU`` ``CPU``
@@ -1429,16 +1345,10 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=
1429
1345
  [1 2 4]]
1430
1346
  """
1431
1347
 
1432
- validator.check_value_type("out_int32", out_int32, [bool], "search_sorted")
1433
- validator.check_value_type("right", right, [bool], "search_sorted")
1434
- dtype = mstype.int32 if bool(out_int32) else mstype.int64
1435
- if (side == "left" and right is True):
1436
- raise ValueError(f"For 'searchsorted', side and right can't be set to opposites,"
1437
- f"got side of left while right was True.")
1438
- if side == "right":
1439
- right = True
1348
+ _check_attr_dtype("out_int32", out_int32, [bool], "search_sorted")
1349
+ dtype = mstype.int64 if not out_int32 else mstype.int32
1440
1350
  search_sorted_ = SearchSorted(dtype, right)
1441
- return search_sorted_(sorted_sequence, values, sorter)
1351
+ return search_sorted_(sorted_sequence, values)
1442
1352
 
1443
1353
 
1444
1354
  def ger(input, vec2):
@@ -2868,59 +2778,6 @@ def sort(input_x, axis=-1, descending=False):
2868
2778
  return _sort(input_x)
2869
2779
 
2870
2780
 
2871
- def sort_ext(input, *, dim=-1, descending=False, stable=False):
2872
- r"""
2873
- Sorts the elements of the input tensor along the given dimension in the specified order.
2874
-
2875
- .. warning::
2876
- Currently, the data types of float16, uint8, int8, int16, int32, int64 are well supported.
2877
- If use float32, it may cause loss of accuracy.
2878
-
2879
- Args:
2880
- input(Tensor): The input tensor to sort.
2881
- The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
2882
-
2883
- Keyword Args:
2884
- dim (int, optional): The dimension to sort along. Default: ``-1``, means the last dimension.
2885
- descending (bool, optional): Controls the sort order. If `descending` is True, the elements
2886
- are sorted in descending order, or else sorted in ascending order. Default: ``False`` .
2887
- stable (bool, optional): Controls the sort order. If stable is True then the sorting routine
2888
- becomes stable, preserving the order of equivalent elements. Default: ``False`` .
2889
-
2890
- Returns:
2891
- - y1, a tensor whose values are the sorted values, with the same shape and data type as input.
2892
- - y2, a tensor that consists of the indices of the elements in the original input tensor.
2893
- Data type is int64.
2894
-
2895
- Raises:
2896
- TypeError: If `dim` is not an int.
2897
- TypeError: If `descending` is not a bool.
2898
- TypeError: If `input` not in float16, float32, uint8, int8, int16, int32, int64, bfloat16
2899
- TypeError: If `stable` is not a bool.
2900
- ValueError: If `dim` is not in range of [-len(input_x.shape), len(input_x.shape)).
2901
-
2902
- Supported Platforms:
2903
- ``Ascend``
2904
-
2905
- Examples:
2906
- >>> import mindspore
2907
- >>> import numpy as np
2908
- >>> from mindspore import Tensor, ops
2909
- >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
2910
- >>> output = ops.function.array_func.sort_ext(x)
2911
- >>> # The output below is based on the Ascend platform.
2912
- >>> print(output)
2913
- (Tensor(shape=[3, 3], dtype=Float16, value=
2914
- [[ 1.0000e+00, 2.0000e+00, 8.0000e+00],
2915
- [ 3.0000e+00, 5.0000e+00, 9.0000e+00],
2916
- [ 4.0000e+00, 6.0000e+00, 7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int64, value=
2917
- [[2, 1, 0],
2918
- [2, 0, 1],
2919
- [0, 1, 2]]))
2920
- """
2921
- return sort_ext_(input, dim, descending, stable)
2922
-
2923
-
2924
2781
  def argsort(input, axis=-1, descending=False):
2925
2782
  r"""
2926
2783
  Sorts the input tensor along the given dimension in specified order and return the sorted indices.
@@ -3386,42 +3243,29 @@ def scatter(input, axis, index, src):
3386
3243
 
3387
3244
  def scatter_add_ext(input, dim, index, src):
3388
3245
  """
3389
- Add all elements in `src` to the index specified by `index` to `input` along dimension specified by `dim`.
3390
- It takes three inputs `input`, `src` and `index` of the same rank r >= 1.
3391
-
3392
- For a 3-D tensor, the operation updates input as follows:
3393
-
3394
- .. code-block::
3395
-
3396
- input[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
3397
-
3398
- input[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
3399
-
3400
- input[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
3246
+ Update the value in `src` to `input` according to the specified index.
3401
3247
 
3402
3248
  Args:
3403
- input (Tensor): The target tensor. The rank must be at least 1.
3404
- dim (int): Which dim to scatter. Accepted range is [-r, r) where r = rank(`input`). Default: ``0``.
3405
- index (Tensor): The index of `input` to do scatter operation whose data type must be mindspore.int32 or
3406
- mindspore.int64. Same rank as `input`. Except for the dimension specified by `dim`,
3407
- the size of each dimension of `index` must be less than or equal to the size of
3408
- the corresponding dimension of `input`.
3409
- src (Tensor): The tensor doing the scatter operation with `input`, has the same type as `input` and
3410
- the size of each dimension must be greater than or equal to that of `index`.
3249
+ input (Tensor): The target tensor. The rank of `input` must be at least 1.
3250
+ dim (int): Which axis to scatter. Accepted range is [-r, r) where r = rank(input).
3251
+ index (Tensor): The index to do update operation whose data type must be mindspore.int32 or
3252
+ mindspore.int64. Same rank as `input` . And accepted range is [-s, s) where s is the size along axis.
3253
+ src (Tensor): The tensor doing the update operation with `input` , has the same type as `input` ,
3254
+ and the shape of `src` should be equal to the shape of `index` .
3411
3255
 
3412
3256
  Returns:
3413
- Tensor, has the same shape and type as `input`.
3257
+ Tensor, has the same shape and type as `input` .
3414
3258
 
3415
3259
  Raises:
3416
3260
  TypeError: If `index` is neither int32 nor int64.
3417
- ValueError: If anyone of the rank among `input`, `index` and `src` less than 1.
3418
- ValueError: If the rank of `input`, `index` and `src` is not the same.
3419
- ValueError: If, outside dimension `dim`, the size of any dimension of `index` is greater than the size of
3420
- the corresponding dimension of `input` .
3421
- ValueError: If the size of any dimension of `src` is less than that of `index`.
3261
+ ValueError: If anyone of the rank among `input` , `index` and `src` less than 1.
3262
+ ValueError: If the shape of `src` is not equal to the shape of `index` .
3263
+ ValueError: If the rank of `src` is not equal to the rank of `input` .
3264
+ RuntimeError: If the data type of `input` and `src` conversion of Parameter
3265
+ is required when data type conversion of Parameter is not supported.
3422
3266
 
3423
3267
  Supported Platforms:
3424
- ``Ascend``
3268
+ ``Ascend`` ``GPU`` ``CPU``
3425
3269
 
3426
3270
  Examples:
3427
3271
  >>> import numpy as np
@@ -3430,29 +3274,29 @@ def scatter_add_ext(input, dim, index, src):
3430
3274
  >>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
3431
3275
  >>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
3432
3276
  >>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
3433
- >>> out = ops.function.array_func.scatter_add_ext(input=input, dim=1, index=index, src=src)
3277
+ >>> out = ops.scatter_add_ext(input=input, dim=1, index=index, src=src)
3434
3278
  >>> print(out)
3435
- [[1. 2. 11. 4. 13.]]
3279
+ [[1. 2. 8. 4. 8.]]
3436
3280
  >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
3437
3281
  >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
3438
3282
  >>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64)
3439
- >>> out = ops.function.array_func.scatter_add_ext(input=input, dim=0, index=index, src=src)
3283
+ >>> out = ops.scatter_add_ext(input=input, dim=0, index=index, src=src)
3440
3284
  >>> print(out)
3441
3285
  [[1. 2. 3. 0. 0.]
3442
- [0. 0. 0. 0. 0.]
3443
- [4. 5. 6. 0. 0.]
3444
- [0. 0. 0. 0. 0.]
3445
- [7. 8. 9. 0. 0.]]
3286
+ [0. 0. 0. 0. 0.]
3287
+ [4. 5. 6. 0. 0.]
3288
+ [0. 0. 0. 0. 0.]
3289
+ [7. 8. 9. 0. 0.]]
3446
3290
  >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
3447
3291
  >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
3448
3292
  >>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64)
3449
- >>> out = ops.function.array_func.scatter_add_ext(input=input, dim=1, index=index, src=src)
3293
+ >>> out = ops.scatter_add_ext(input=input, dim=1, index=index, src=src)
3450
3294
  >>> print(out)
3451
3295
  [[1. 0. 2. 0. 3.]
3452
- [4. 0. 5. 0. 6.]
3453
- [7. 0. 8. 0. 9.]
3454
- [0. 0. 0. 0. 0.]
3455
- [0. 0. 0. 0. 0.]]
3296
+ [4. 0. 5. 0. 6.]
3297
+ [7. 0. 8. 0. 9.]
3298
+ [0. 0. 0. 0. 0.]
3299
+ [0. 0. 0. 0. 0.]]
3456
3300
  """
3457
3301
  return scatter_add_ext_op(input, dim, index, src)
3458
3302
 
@@ -3985,7 +3829,7 @@ def meshgrid(*inputs, indexing='xy'):
3985
3829
  Examples:
3986
3830
  >>> import numpy as np
3987
3831
  >>> from mindspore import Tensor
3988
- >>> from mindspore import ops
3832
+ >>> import mindspore.ops as ops
3989
3833
  >>> x = Tensor(np.array([1, 2, 3, 4]).astype(np.int32))
3990
3834
  >>> y = Tensor(np.array([5, 6, 7]).astype(np.int32))
3991
3835
  >>> z = Tensor(np.array([8, 9, 0, 1, 2]).astype(np.int32))
@@ -4068,7 +3912,7 @@ def affine_grid(theta, size, align_corners=False):
4068
3912
  Examples:
4069
3913
  >>> import mindspore
4070
3914
  >>> from mindspore import Tensor
4071
- >>> from mindspore import ops
3915
+ >>> import mindspore.ops as ops
4072
3916
  >>> theta = Tensor([[[0.8, 0.5, 0],[-0.5, 0.8, 0]]], mindspore.float32)
4073
3917
  >>> out_size = (1, 3, 2, 3)
4074
3918
  >>> output = ops.affine_grid(theta, out_size, False)
@@ -4261,7 +4105,7 @@ def index_fill(x, axis, index, value):
4261
4105
  Examples:
4262
4106
  >>> import mindspore
4263
4107
  >>> import numpy as np
4264
- >>> from mindspore import ops
4108
+ >>> import mindspore.ops as ops
4265
4109
  >>> from mindspore import Tensor
4266
4110
  >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32))
4267
4111
  >>> index = Tensor([0, 2], mindspore.int32)
@@ -4884,8 +4728,8 @@ def split_ext(tensor, split_size_or_sections, axis=0):
4884
4728
 
4885
4729
  Raises:
4886
4730
  TypeError: If argument `tensor` is not Tensor.
4887
- TypeError: If argument `axis` is not int.
4888
- ValueError: If argument `axis` is out of range of :[-tensor.ndim, tensor.ndim).
4731
+ TypeError: If argument `axis` is not Tensor.
4732
+ ValueError: If argument `axis` is out of range of :math:`[-tensor.ndim, tensor.ndim)` .
4889
4733
  TypeError: If each element in `split_size_or_sections` is not integer.
4890
4734
  TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
4891
4735
  ValueError: The sum of `split_size_or_sections` is not equal to x.shape[axis].
@@ -4897,7 +4741,7 @@ def split_ext(tensor, split_size_or_sections, axis=0):
4897
4741
  >>> import numpy as np
4898
4742
  >>> from mindspore import ops, Tensor
4899
4743
  >>> input_x = np.arange(9).astype("float32")
4900
- >>> output = ops.split_ext(Tensor(input_x), 3)
4744
+ >>> output = ops.split(Tensor(input_x), 3)
4901
4745
  >>> print(output)
4902
4746
  (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
4903
4747
  Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
@@ -4913,67 +4757,6 @@ def split_ext(tensor, split_size_or_sections, axis=0):
4913
4757
  return res
4914
4758
 
4915
4759
 
4916
- def tril(input, diagonal=0): # pylint: disable=redefined-outer-name
4917
- """
4918
- Returns the lower triangle part of 'input' (elements that contain the diagonal and below),
4919
- and set the other elements to zeros.
4920
-
4921
- Args:
4922
- input (Tensor): A Tensor with shape :math:`(x_1, x_2, ..., x_R)`. The rank must be at least 2.
4923
- Supporting all number types including bool.
4924
- diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: 0,
4925
- indicating the main diagonal.
4926
-
4927
- Returns:
4928
- Tensor, the same shape and data type as the input `x`.
4929
-
4930
- Raises:
4931
- TypeError: If `x` is not a Tensor.
4932
- TypeError: If `diagonal` is not an int.
4933
- TypeError: If the type of `x` is neither number nor bool.
4934
- ValueError: If the rank of `x` is less than 2.
4935
-
4936
- Supported Platforms:
4937
- ``Ascend`` ``GPU`` ``CPU``
4938
-
4939
- Examples:
4940
- >>> import numpy as np
4941
- >>> from mindspore import Tensor, ops
4942
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
4943
- ... [ 5, 6, 7, 8],
4944
- ... [10, 11, 12, 13],
4945
- ... [14, 15, 16, 17]]))
4946
- >>> result = ops.tril(x)
4947
- >>> print(result)
4948
- [[ 1 0 0 0]
4949
- [ 5 6 0 0]
4950
- [10 11 12 0]
4951
- [14 15 16 17]]
4952
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
4953
- ... [ 5, 6, 7, 8],
4954
- ... [10, 11, 12, 13],
4955
- ... [14, 15, 16, 17]]))
4956
- >>> result = ops.tril(x, diagonal=1)
4957
- >>> print(result)
4958
- [[ 1 2 0 0]
4959
- [ 5 6 7 0]
4960
- [10 11 12 13]
4961
- [14 15 16 17]]
4962
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
4963
- ... [ 5, 6, 7, 8],
4964
- ... [10, 11, 12, 13],
4965
- ... [14, 15, 16, 17]]))
4966
- >>> result = ops.tril(x, diagonal=-1)
4967
- >>> print(result)
4968
- [[ 0 0 0 0]
4969
- [ 5 0 0 0]
4970
- [10 11 0 0]
4971
- [14 15 16 0]]
4972
- """
4973
- tril_ = Tril(diagonal)
4974
- return tril_(input)
4975
-
4976
-
4977
4760
  @_primexpr
4978
4761
  def _canonicalize_axis(axis, ndim):
4979
4762
  """
@@ -5363,52 +5146,6 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylin
5363
5146
  return values, indices
5364
5147
 
5365
5148
 
5366
- def argmax(input, dim=None, keepdim=False):
5367
- """
5368
- Return the indices of the maximum values of a tensor across a dimension.
5369
-
5370
- Args:
5371
- input (Tensor): Input tensor.
5372
- dim (Union[int, None], optional): The dimension to reduce. If `dim` is ``None`` , the indices of the maximum
5373
- value within the flattened input will be returned. Default: ``None`` .
5374
- keepdim (bool, optional): Whether the output tensor retains the specified
5375
- dimension. Ignored if `dim` is None. Default: ``False`` .
5376
-
5377
- Returns:
5378
- Tensor, indices of the maximum values across a dimension.
5379
-
5380
- Raises:
5381
- TypeError: If `keepdim` is not bool.
5382
- ValueError: If `dim` is out of range.
5383
-
5384
- Supported Platforms:
5385
- ``Ascend`` ``GPU`` ``CPU``
5386
-
5387
- Examples:
5388
- >>> import numpy as np
5389
- >>> from mindspore import Tensor, ops
5390
- >>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
5391
- >>> output = ops.argmax(x, dim=-1)
5392
- >>> print(output)
5393
- [1 0 0]
5394
- """
5395
- _check_attr_dtype("keepdim", keepdim, [bool], "argmax")
5396
- if not input.shape:
5397
- return Tensor(0)
5398
- if input.dtype == mstype.bool_:
5399
- input = input.astype(mstype.int32)
5400
- is_dim_none = False
5401
- if dim is None:
5402
- input = reshape_(input, (-1,))
5403
- dim = 0
5404
- is_dim_none = True
5405
- out = _get_cache_prim(Argmax)(dim, mstype.int64)(input)
5406
- if keepdim and not is_dim_none:
5407
- out = expand_dims(out, dim)
5408
- return out
5409
-
5410
-
5411
-
5412
5149
  def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pylint: disable=redefined-outer-name
5413
5150
  """
5414
5151
  Calculates the minimum value along with the given axis for the input tensor. It returns the minimum values and
@@ -5593,47 +5330,6 @@ def narrow(input, axis, start, length):
5593
5330
  return tensor_slice(input, begins, sizes)
5594
5331
 
5595
5332
 
5596
- def narrow_ext(input, dim, start, length):
5597
- """
5598
- Returns a narrowed tensor from input tensor, and
5599
- the dimension axis is input from start to start + length.
5600
-
5601
- Args:
5602
- input (Tensor): the tensor to narrow.
5603
- dim (int): dimension along which to narrow.
5604
- start (int): the starting dimension.
5605
- length (int): the distance to the ending dimension.
5606
-
5607
- Returns:
5608
- Tensor.
5609
-
5610
- Raises:
5611
- ValueError: If dim is out of range [-input.ndim, input.ndim).
5612
- ValueError: If start is out of range [-input.shape[dim], input.shape[dim]].
5613
- ValueError: It length is out of range [0, input.shape[dim]-start].
5614
-
5615
- Supported Platforms:
5616
- ``Ascend``
5617
-
5618
- Examples:
5619
- >>> import mindspore
5620
- >>> from mindspore import ops
5621
- >>> from mindspore import Tensor
5622
- >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mindspore.int32)
5623
- >>> output = ops.narrow(x, 0, 0, 2)
5624
- >>> print(output)
5625
- [[ 1 2 3]
5626
- [ 4 5 6]]
5627
- >>> output = ops.narrow(x, 1, 1, 2)
5628
- >>> print(output)
5629
- [[ 2 3]
5630
- [ 5 6]
5631
- [ 8 9]]
5632
- """
5633
- validator.check_value_type("input", input, Tensor, "narrow")
5634
- return slice_ext_op(input, dim, start, start+length, 1)
5635
-
5636
-
5637
5333
  def topk(input, k, dim=None, largest=True, sorted=True):
5638
5334
  r"""
5639
5335
  Finds values and indices of the `k` largest or smallest entries along a given dimension.
@@ -5725,6 +5421,80 @@ def topk(input, k, dim=None, largest=True, sorted=True):
5725
5421
  return res
5726
5422
 
5727
5423
 
5424
+ def topk_ext(input, k, dim=-1, largest=True, sorted=True):
5425
+ r"""
5426
+ Finds values and indices of the `k` largest or smallest entries along a given dimension.
5427
+
5428
+ .. warning::
5429
+ - If sorted is set to False, it will use the aicpu operator, the performance may be reduced. In addition, due to
5430
+ different memory layout and traversal methods on different platforms, the display order of calculation results
5431
+ may be inconsistent when `sorted` is False.
5432
+
5433
+ If the `input` is a one-dimensional Tensor, finds the `k` largest or smallest entries in the Tensor,
5434
+ and outputs its value and index as a Tensor. values[`k`] is the `k` largest item in `input`,
5435
+ and its index is indices [`k`].
5436
+
5437
+ For a multi-dimensional matrix,
5438
+ calculates the first or last `k` entries in a given dimension, therefore:
5439
+
5440
+ .. math::
5441
+
5442
+ values.shape = indices.shape
5443
+
5444
+ If the two compared elements are the same, the one with the smaller index value is returned first.
5445
+
5446
+ Args:
5447
+ input (Tensor): Input to be computed, data type must be float16, float32 or int32.
5448
+ k (int): The number of top or bottom elements to be computed along the last dimension.
5449
+ dim (int, optional): The dimension to sort along. Default: ``-1`` .
5450
+ largest (bool, optional): If largest is ``False`` then the k smallest elements are returned.
5451
+ Default: ``True`` .
5452
+ sorted (bool, optional): If ``True`` , the obtained elements will be sorted by the values in descending order.
5453
+ If ``False`` , the obtained elements will not be sorted. Default: ``True`` .
5454
+
5455
+ Returns:
5456
+ A tuple consisting of `values` and `indexes`.
5457
+
5458
+ - values (Tensor): The `k` largest or smallest elements in each slice of the given dimension.
5459
+ - indices (Tensor): The indices of values within the last dimension of input.
5460
+
5461
+ Raises:
5462
+ TypeError: If `sorted` is not a bool.
5463
+ TypeError: If `input` is not a Tensor.
5464
+ TypeError: If `k` is not an int.
5465
+ TypeError: If dtype of `input` is not one of the following: float16, float32 or int32.
5466
+
5467
+ Supported Platforms:
5468
+ ``Ascend`` ``GPU`` ``CPU``
5469
+
5470
+ Examples:
5471
+ >>> import mindspore as ms
5472
+ >>> from mindspore import ops
5473
+ >>> x = ms.Tensor([[0.5368, 0.2447, 0.4302, 0.9673],
5474
+ ... [0.4388, 0.6525, 0.4685, 0.1868],
5475
+ ... [0.3563, 0.5152, 0.9675, 0.8230]], dtype=ms.float32)
5476
+ >>> output = ops.topk_ext(x, 2, dim=1)
5477
+ >>> print(output)
5478
+ (Tensor(shape=[3, 2], dtype=Float32, value=
5479
+ [[ 9.67299998e-01, 5.36800027e-01],
5480
+ [ 6.52499974e-01, 4.68499988e-01],
5481
+ [ 9.67499971e-01, 8.23000014e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
5482
+ [[3, 0],
5483
+ [1, 2],
5484
+ [2, 3]]))
5485
+ >>> output2 = ops.topk(x, 2, dim=1, largest=False)
5486
+ >>> print(output2)
5487
+ (Tensor(shape=[3, 2], dtype=Float32, value=
5488
+ [[ 2.44700000e-01, 4.30200011e-01],
5489
+ [ 1.86800003e-01, 4.38800007e-01],
5490
+ [ 3.56299996e-01, 5.15200019e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
5491
+ [[1, 2],
5492
+ [3, 0],
5493
+ [0, 1]]))
5494
+ """
5495
+ return _get_cache_prim(ops.auto_generate.TopkExt)()(input, k, dim, largest, sorted)
5496
+
5497
+
5728
5498
  def expand(input_x, size):
5729
5499
  r"""
5730
5500
  :func:`mindspore.ops.expand` will be deprecated in the future.
@@ -5788,8 +5558,8 @@ def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
5788
5558
  A Tensor, with same type as `input` . And its shape is as described above.
5789
5559
 
5790
5560
  Raises:
5791
- TypeError: If `output_size`, `kernel_size`, `stride`, `dilation`, `padding` data type is not int, tuple or list.
5792
- ValueError: If `output_size`, `kernel_size`, `dilation`, `stride` value is not
5561
+ TypeError: If `kernel_size`, `dilation`, `padding`, `stride` data type is not int, tuple or list.
5562
+ ValueError: If `kernel_size`, `dilation`, `stride` value is not
5793
5563
  greater than zero or elements number more than `2`.
5794
5564
  ValueError: If `padding` value is less than zero or elements number more than `2`.
5795
5565
  ValueError: If `input.shape[1] != kernel_size[0] * kernel_size[1]`
@@ -6122,64 +5892,6 @@ def mvlgamma(input, p):
6122
5892
  return mvlgamma_op(input)
6123
5893
 
6124
5894
 
6125
- def nonzero(input, as_tuple=False):
6126
- r"""
6127
- Return the positions of all non-zero values.
6128
-
6129
- Args:
6130
- input (Tensor): The input Tensor, its rank should be greater than or equal to 1.
6131
- as_tuple (bool, optional): Whether the output is tuple.
6132
- If ``False`` , return Tensor. Default: ``False`` .
6133
- If ``True`` , return Tuple of Tensor, only support ``Ascend`` .
6134
-
6135
-
6136
- Returns:
6137
- - If `as_tuple` is ``False``, return the Tensor, a 2-D Tensor whose data type is int64,
6138
- containing the positions of all non-zero values of the input.
6139
- - If `as_tuple` is ``True``, return the Tuple of Tensor and data type is int64.
6140
- The Tuple length is the dimension of the input tensor,
6141
- and each element is the 1D tensor of the subscript of all non-zero elements of
6142
- the input tensor in that dimension.
6143
-
6144
- Raises:
6145
- TypeError: If `input` is not Tensor.
6146
- TypeError: If `as_tuple` is not bool.
6147
- ValueError: If dim of `input` equals to 0.
6148
-
6149
- Supported Platforms:
6150
- ``Ascend`` ``GPU`` ``CPU``
6151
-
6152
- Examples:
6153
- >>> import mindspore
6154
- >>> import numpy as np
6155
- >>> from mindspore import Tensor, ops
6156
- >>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
6157
- >>> output = ops.nonzero(x)
6158
- >>> print(output)
6159
- [[0 0 0]
6160
- [0 1 0]]
6161
- >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
6162
- >>> output = ops.nonzero(x, False)
6163
- >>> print(output)
6164
- [[0]
6165
- [2]
6166
- [4]]
6167
- >>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
6168
- >>> output = ops.nonzero(x, True)
6169
- >>> print(output)
6170
- (Tensor(shape=[2], dtype=Int64, value=[0, 0]),
6171
- Tensor(shape=[2], dtype=Int64, value=[0, 1]),
6172
- Tensor(shape=[2], dtype=Int64, value=[0, 0]))
6173
- >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
6174
- >>> output = ops.nonzero(x, True)
6175
- >>> print(output)
6176
- (Tensor(shape=[3], dtype=Int64, value=[0, 2, 4]), )
6177
- """
6178
- if as_tuple:
6179
- return non_zero_ext_(input)
6180
- return non_zero_(input)
6181
-
6182
-
6183
5895
  def argwhere(input):
6184
5896
  """
6185
5897
  Return a Tensor of the positions of all non-zero values.
@@ -6447,7 +6159,7 @@ def swapaxes(input, axis0, axis1):
6447
6159
 
6448
6160
  Examples:
6449
6161
  >>> import numpy as np
6450
- >>> from mindspore import ops
6162
+ >>> import mindspore.ops as ops
6451
6163
  >>> from mindspore import Tensor
6452
6164
  >>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
6453
6165
  >>> output = ops.swapaxes(input, 0, 2)
@@ -6497,7 +6209,7 @@ def swapdims(input, dim0, dim1):
6497
6209
 
6498
6210
  Examples:
6499
6211
  >>> import numpy as np
6500
- >>> from mindspore import ops
6212
+ >>> import mindspore.ops as ops
6501
6213
  >>> from mindspore import Tensor
6502
6214
  >>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
6503
6215
  >>> output = ops.swapdims(input, 0, 2)
@@ -6579,23 +6291,18 @@ def repeat_interleave(input, repeats, axis=None):
6579
6291
  return output
6580
6292
 
6581
6293
 
6582
- def repeat_interleave_ext(input, repeats, dim=None, output_size=None):
6294
+ def repeat_interleave_ext(tensor, repeats, axis=None, output_size=None):
6583
6295
  r"""
6584
- Repeat elements of a tensor along an axis, like `numpy.repeat`.
6296
+ Repeat elements of a tensor.
6585
6297
 
6586
6298
  Args:
6587
- input (Tensor): The tensor to repeat values for. Must be of type: float16,
6588
- float32, int8, uint8, int16, int32, or int64.
6589
- repeats (Union[int, tuple, list, Tensor]): The number of times to repeat, must be positive.
6590
- dim (int, optional): The dim along which to repeat, Default: ``None``. if dims is None,
6591
- the input Tensor will be flattened and the output will alse be flattened.
6592
- output_size (int, optional): Total output size for the given axis (e.g. sum of repeats),
6593
- Default: ``None``.
6299
+ tensor (Tensor): the input tensor.
6300
+ repeats (Union[int, list, tuple, Tensor]) the number of repetitions for each element
6301
+ axis (int, optional) the axis along wich to repeat, if None, defaults to 0.
6302
+ output_size (int, optional): Calculated output size along specified axis.
6594
6303
 
6595
6304
  Returns:
6596
- One tensor with values repeated along the specified dim. If input has shape
6597
- :math:`(s1, s2, ..., sn)` and dim is i, the output will have shape :math:`(s1, s2, ...,
6598
- si * repeats, ..., sn)`. The output type will be the same as the type of `input`.
6305
+ Tensor, one-hot tensor.
6599
6306
 
6600
6307
  Supported Platforms:
6601
6308
  ``Ascend``
@@ -6603,18 +6310,32 @@ def repeat_interleave_ext(input, repeats, dim=None, output_size=None):
6603
6310
  Examples:
6604
6311
  >>> import mindspore
6605
6312
  >>> import numpy as np
6606
- >>> from mindspore import Tensor, ops
6607
- >>> input = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
6608
- >>> output = ops.function.array_func.repeat_interleave_ext(input, repeats=2, dim=0)
6313
+ >>> from mindspore import mint
6314
+ >>> from mindspore import Tensor
6315
+ >>> tensor = Tensor(np.array([0, 1, 2], [3, 4, 5]), mindspore.int32)
6316
+ >>> repeats = 2
6317
+ >>> axis = 0
6318
+ >>> output = mint.repeat_interleave(tensor, repeats, axis)
6609
6319
  >>> print(output)
6610
- [[0 1 2]
6611
- [0 1 2]
6612
- [3 4 5]
6613
- [3 4 5]]
6320
+ [[0. 1. 2.]
6321
+ [0. 1. 2.]
6322
+ [3. 4. 5.]
6323
+ [3. 4. 5.]]
6614
6324
  """
6615
- if isinstance(repeats, int):
6616
- return repeat_interleave_int_(input, repeats, dim, output_size)
6617
- return repeat_interleave_tensor_(input, repeats, dim, output_size)
6325
+ if axis is None:
6326
+ tensor = tensor.ravel()
6327
+ axis = 0
6328
+
6329
+ size = tensor.shape[axis]
6330
+ if output_size is None:
6331
+ if isinstance(repeats, int):
6332
+ output_size = size*repeats
6333
+ elif len(repeats) == 1:
6334
+ output_size = size*repeats[0]
6335
+ else:
6336
+ output_size = sum(repeats)
6337
+
6338
+ return repeat_interleave_(tensor, repeats, axis, output_size)
6618
6339
 
6619
6340
 
6620
6341
  def repeat_elements(x, rep, axis=0):
@@ -6761,8 +6482,10 @@ __all__ = [
6761
6482
  'ger',
6762
6483
  'ones',
6763
6484
  'ones_like',
6485
+ 'ones_like_ext',
6764
6486
  'zeros',
6765
6487
  'zeros_like',
6488
+ 'zeros_like_ext',
6766
6489
  'shape',
6767
6490
  'shape_',
6768
6491
  'reverse',
@@ -6770,6 +6493,7 @@ __all__ = [
6770
6493
  'hamming_window',
6771
6494
  'chunk',
6772
6495
  'full',
6496
+ 'full_ext',
6773
6497
  'full_like',
6774
6498
  'dyn_shape',
6775
6499
  'rank',
@@ -6828,6 +6552,7 @@ __all__ = [
6828
6552
  'narrow',
6829
6553
  'ravel',
6830
6554
  'scatter_add',
6555
+ 'scatter_add_ext',
6831
6556
  'scatter_mul',
6832
6557
  'scatter_max',
6833
6558
  'scatter_min',
@@ -6856,7 +6581,6 @@ __all__ = [
6856
6581
  'index_fill',
6857
6582
  'index_select',
6858
6583
  'max',
6859
- 'argmax',
6860
6584
  'min',
6861
6585
  'unsorted_segment_sum',
6862
6586
  'population_count',
@@ -6884,6 +6608,6 @@ __all__ = [
6884
6608
  'sort',
6885
6609
  'top_k',
6886
6610
  'deepcopy',
6887
- 'flip',
6611
+ 'flip'
6888
6612
  ]
6889
6613
  __all__.sort()