mindspore 2.1.0__cp37-cp37m-manylinux1_x86_64.whl → 2.2.11__cp37-cp37m-manylinux1_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (589)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +4 -1
  3. mindspore/_akg/akg/build_module.py +5 -6
  4. mindspore/_akg/akg/composite/build_module.py +139 -22
  5. mindspore/_akg/akg/composite/split_stitch.py +10 -11
  6. mindspore/_akg/akg/ms/info_version_adapt.py +67 -1
  7. mindspore/_akg/akg/tvm/api.py +4 -3
  8. mindspore/_akg/akg/tvm/autotvm/__init__.py +1 -2
  9. mindspore/_akg/akg/tvm/autotvm/graph_tuner/base_graph_tuner.py +1 -5
  10. mindspore/_akg/akg/tvm/autotvm/measure/__init__.py +1 -1
  11. mindspore/_akg/akg/tvm/autotvm/measure/measure.py +1 -10
  12. mindspore/_akg/akg/tvm/autotvm/measure/measure_methods.py +1 -372
  13. mindspore/_akg/akg/tvm/build_module.py +16 -1
  14. mindspore/_akg/akg/tvm/contrib/graph_runtime.py +0 -53
  15. mindspore/_akg/akg/tvm/hybrid/parser.py +7 -6
  16. mindspore/_akg/akg/tvm/ir_builder.py +1 -1
  17. mindspore/_akg/akg/tvm/module.py +1 -2
  18. mindspore/_akg/akg/tvm/stmt.py +2 -2
  19. mindspore/_akg/akg/utils/ascend_profilier/cann_file_parser.py +76 -0
  20. mindspore/_akg/akg/utils/ascend_profilier/file_manager.py +56 -0
  21. mindspore/_akg/akg/utils/ascend_profilier/op_summary_bean.py +23 -0
  22. mindspore/_akg/akg/utils/ascend_profilier/op_summary_headers.py +8 -0
  23. mindspore/_akg/akg/utils/ascend_profilier/op_summary_parser.py +42 -0
  24. mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +65 -0
  25. mindspore/_akg/akg/utils/composite_op_helper.py +16 -12
  26. mindspore/_akg/akg/utils/dump_ascend_meta.py +22 -3
  27. mindspore/_akg/akg/utils/kernel_exec.py +98 -274
  28. mindspore/_akg/akg/utils/result_analysis.py +4 -24
  29. mindspore/_akg/akg/utils/tbe_codegen_utils.py +219 -0
  30. mindspore/_akg/akg/utils/util.py +56 -1
  31. mindspore/_c_dataengine.cpython-37m-x86_64-linux-gnu.so +0 -0
  32. mindspore/_c_expression.cpython-37m-x86_64-linux-gnu.so +0 -0
  33. mindspore/_c_mindrecord.cpython-37m-x86_64-linux-gnu.so +0 -0
  34. mindspore/_check_jit_forbidden_api.py +3 -1
  35. mindspore/_checkparam.py +23 -29
  36. mindspore/_extends/graph_kernel/__init__.py +0 -1
  37. mindspore/_extends/graph_kernel/model/graph_split.py +84 -76
  38. mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
  39. mindspore/_extends/graph_kernel/splitter.py +4 -11
  40. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +122 -15
  41. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +84 -67
  42. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
  43. mindspore/_extends/parallel_compile/akg_compiler/util.py +10 -7
  44. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +2 -2
  45. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +6 -5
  46. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
  47. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
  48. mindspore/_extends/parse/__init__.py +13 -15
  49. mindspore/_extends/parse/namespace.py +7 -33
  50. mindspore/_extends/parse/parser.py +67 -72
  51. mindspore/_extends/parse/resources.py +1 -1
  52. mindspore/_extends/parse/standard_method.py +86 -106
  53. mindspore/_extends/parse/trope.py +1 -1
  54. mindspore/_extends/remote/kernel_build_server.py +25 -7
  55. mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
  56. mindspore/_install_custom.py +43 -0
  57. mindspore/_mindspore_offline_debug.cpython-37m-x86_64-linux-gnu.so +0 -0
  58. mindspore/amp.py +47 -11
  59. mindspore/bin/cache_admin +0 -0
  60. mindspore/bin/cache_server +0 -0
  61. mindspore/boost/boost.py +1 -8
  62. mindspore/boost/boost_cell_wrapper.py +3 -2
  63. mindspore/boost/grad_accumulation.py +1 -1
  64. mindspore/boost/group_loss_scale_manager.py +8 -7
  65. mindspore/common/__init__.py +5 -3
  66. mindspore/common/_jit_fallback_utils.py +6 -0
  67. mindspore/common/_register_for_adapter.py +2 -0
  68. mindspore/common/_register_for_tensor.py +2 -2
  69. mindspore/common/_stub_tensor.py +13 -0
  70. mindspore/common/_utils.py +29 -0
  71. mindspore/common/api.py +174 -259
  72. mindspore/common/auto_dynamic_shape.py +494 -0
  73. mindspore/common/dtype.py +18 -11
  74. mindspore/common/dump.py +6 -4
  75. mindspore/common/initializer.py +14 -14
  76. mindspore/common/jit_config.py +33 -15
  77. mindspore/common/lazy_inline.py +126 -7
  78. mindspore/common/mindir_util.py +101 -0
  79. mindspore/common/parameter.py +51 -41
  80. mindspore/common/seed.py +4 -4
  81. mindspore/common/sparse_tensor.py +13 -14
  82. mindspore/common/tensor.py +243 -165
  83. mindspore/communication/__init__.py +7 -4
  84. mindspore/communication/_comm_helper.py +83 -4
  85. mindspore/communication/management.py +152 -84
  86. mindspore/config/op_info.config +14 -3
  87. mindspore/config/super_bar_config.json +4 -2
  88. mindspore/context.py +152 -61
  89. mindspore/dataset/__init__.py +5 -5
  90. mindspore/dataset/audio/__init__.py +2 -2
  91. mindspore/dataset/audio/transforms.py +52 -52
  92. mindspore/dataset/callback/ds_callback.py +16 -2
  93. mindspore/dataset/core/config.py +68 -51
  94. mindspore/dataset/engine/cache_client.py +33 -7
  95. mindspore/dataset/engine/datasets.py +250 -112
  96. mindspore/dataset/engine/datasets_audio.py +43 -211
  97. mindspore/dataset/engine/datasets_standard_format.py +16 -35
  98. mindspore/dataset/engine/datasets_text.py +43 -67
  99. mindspore/dataset/engine/datasets_user_defined.py +86 -100
  100. mindspore/dataset/engine/datasets_vision.py +219 -1029
  101. mindspore/dataset/engine/iterators.py +11 -4
  102. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +4 -0
  103. mindspore/dataset/engine/obs/util.py +3 -0
  104. mindspore/dataset/engine/samplers.py +1 -1
  105. mindspore/dataset/engine/validators.py +19 -5
  106. mindspore/dataset/text/__init__.py +3 -3
  107. mindspore/dataset/text/transforms.py +101 -127
  108. mindspore/dataset/text/utils.py +205 -138
  109. mindspore/dataset/transforms/__init__.py +1 -1
  110. mindspore/dataset/transforms/py_transforms_util.py +40 -12
  111. mindspore/dataset/transforms/transforms.py +95 -40
  112. mindspore/dataset/utils/browse_dataset.py +8 -2
  113. mindspore/dataset/utils/line_reader.py +17 -19
  114. mindspore/dataset/vision/__init__.py +3 -3
  115. mindspore/dataset/vision/c_transforms.py +6 -3
  116. mindspore/dataset/vision/transforms.py +409 -287
  117. mindspore/dataset/vision/utils.py +13 -14
  118. mindspore/dataset/vision/validators.py +11 -1
  119. mindspore/experimental/map_parameter.py +14 -0
  120. mindspore/{nn/optim_ex → experimental/optim}/__init__.py +30 -29
  121. mindspore/{nn/optim_ex → experimental/optim}/adam.py +60 -67
  122. mindspore/{nn/optim_ex → experimental/optim}/adamw.py +181 -203
  123. mindspore/experimental/optim/lr_scheduler.py +1427 -0
  124. mindspore/{nn/optim_ex → experimental/optim}/optimizer.py +252 -259
  125. mindspore/{nn/optim_ex → experimental/optim}/sgd.py +147 -152
  126. mindspore/gen_ops.py +273 -0
  127. mindspore/include/OWNERS +0 -1
  128. mindspore/include/api/data_type.h +2 -1
  129. mindspore/include/api/graph.h +0 -15
  130. mindspore/include/api/kernel.h +2 -0
  131. mindspore/include/api/kernel_api.h +37 -12
  132. mindspore/include/api/model.h +17 -14
  133. mindspore/include/api/status.h +8 -3
  134. mindspore/include/api/types.h +37 -4
  135. mindspore/include/c_api/ms/abstract.h +67 -0
  136. mindspore/include/c_api/ms/attribute.h +197 -0
  137. mindspore/include/c_api/ms/base/handle_types.h +43 -0
  138. mindspore/include/c_api/ms/base/macros.h +32 -0
  139. mindspore/include/c_api/ms/base/status.h +33 -0
  140. mindspore/include/c_api/ms/base/types.h +282 -0
  141. mindspore/include/c_api/ms/context.h +102 -0
  142. mindspore/include/c_api/ms/graph.h +160 -0
  143. mindspore/include/c_api/ms/node.h +606 -0
  144. mindspore/include/c_api/ms/tensor.h +161 -0
  145. mindspore/include/c_api/ms/value.h +84 -0
  146. mindspore/include/dataset/constants.h +6 -5
  147. mindspore/include/dataset/execute.h +23 -13
  148. mindspore/include/dataset/text.h +26 -26
  149. mindspore/include/dataset/transforms.h +13 -13
  150. mindspore/include/dataset/vision.h +60 -60
  151. mindspore/include/dataset/vision_ascend.h +5 -6
  152. mindspore/include/dataset/vision_lite.h +17 -17
  153. mindspore/include/mindapi/base/type_id.h +1 -0
  154. mindspore/include/mindapi/base/types.h +1 -0
  155. mindspore/lib/libdnnl.so.2 +0 -0
  156. mindspore/lib/libjemalloc.so.2 +0 -0
  157. mindspore/lib/libmindspore.so +0 -0
  158. mindspore/lib/libmindspore_backend.so +0 -0
  159. mindspore/lib/libmindspore_common.so +0 -0
  160. mindspore/lib/libmindspore_core.so +0 -0
  161. mindspore/lib/libmindspore_glog.so.0 +0 -0
  162. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  163. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  164. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  165. mindspore/lib/libmindspore_shared_lib.so +0 -0
  166. mindspore/lib/libnnacl.so +0 -0
  167. mindspore/lib/libopencv_core.so.4.5 +0 -0
  168. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  169. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  170. mindspore/lib/libps_cache.so +0 -0
  171. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310/aic-ascend310-ops-info.json +123 -0
  172. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +123 -0
  173. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +158 -0
  174. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +37 -0
  175. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/add_dsl.py +46 -0
  176. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/add_tik.py +51 -0
  177. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +241 -0
  178. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/matmul_tik.py +212 -0
  179. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/add_dsl.py +46 -0
  180. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/add_tik.py +51 -0
  181. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +241 -0
  182. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/matmul_tik.py +212 -0
  183. mindspore/lib/plugin/ascend/custom_aicore_ops/op_proto/libop_proto.so +0 -0
  184. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
  185. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  186. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +8998 -0
  187. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  188. mindspore/lib/plugin/ascend/libakg.so +0 -0
  189. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  190. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  191. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  192. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  193. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  194. mindspore/lib/plugin/cpu/libakg.so +0 -0
  195. mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
  196. mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
  197. mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
  198. mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
  199. mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
  200. mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
  201. mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
  202. mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
  203. mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
  204. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  205. mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
  206. mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
  207. mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
  208. mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
  209. mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
  210. mindspore/nn/__init__.py +0 -2
  211. mindspore/nn/cell.py +313 -74
  212. mindspore/nn/dynamic_lr.py +21 -21
  213. mindspore/nn/layer/activation.py +22 -30
  214. mindspore/nn/layer/basic.py +15 -13
  215. mindspore/nn/layer/channel_shuffle.py +1 -1
  216. mindspore/nn/layer/container.py +271 -9
  217. mindspore/nn/layer/conv.py +323 -204
  218. mindspore/nn/layer/dense.py +8 -5
  219. mindspore/nn/layer/embedding.py +33 -27
  220. mindspore/nn/layer/flash_attention.py +61 -95
  221. mindspore/nn/layer/image.py +8 -6
  222. mindspore/nn/layer/math.py +16 -25
  223. mindspore/nn/layer/normalization.py +107 -66
  224. mindspore/nn/layer/padding.py +1 -1
  225. mindspore/nn/layer/pooling.py +131 -109
  226. mindspore/nn/layer/rnn_cells.py +27 -22
  227. mindspore/nn/layer/rnns.py +13 -16
  228. mindspore/nn/layer/thor_layer.py +1 -1
  229. mindspore/nn/layer/transformer.py +221 -154
  230. mindspore/nn/learning_rate_schedule.py +9 -1
  231. mindspore/nn/loss/loss.py +235 -174
  232. mindspore/nn/optim/ada_grad.py +2 -1
  233. mindspore/nn/optim/adadelta.py +1 -0
  234. mindspore/nn/optim/adafactor.py +2 -1
  235. mindspore/nn/optim/adam.py +7 -4
  236. mindspore/nn/optim/adamax.py +3 -2
  237. mindspore/nn/optim/adasum.py +2 -2
  238. mindspore/nn/optim/asgd.py +2 -3
  239. mindspore/nn/optim/ftrl.py +6 -5
  240. mindspore/nn/optim/lamb.py +7 -4
  241. mindspore/nn/optim/lars.py +1 -1
  242. mindspore/nn/optim/lazyadam.py +5 -3
  243. mindspore/nn/optim/momentum.py +2 -1
  244. mindspore/nn/optim/optimizer.py +53 -4
  245. mindspore/nn/optim/proximal_ada_grad.py +3 -4
  246. mindspore/nn/optim/rmsprop.py +4 -3
  247. mindspore/nn/optim/rprop.py +23 -12
  248. mindspore/nn/optim/sgd.py +26 -11
  249. mindspore/nn/optim/thor.py +9 -7
  250. mindspore/nn/probability/bijector/bijector.py +5 -5
  251. mindspore/nn/probability/bijector/power_transform.py +27 -27
  252. mindspore/nn/probability/bijector/softplus.py +3 -3
  253. mindspore/nn/probability/distribution/_utils/custom_ops.py +3 -3
  254. mindspore/nn/probability/distribution/bernoulli.py +5 -5
  255. mindspore/nn/probability/distribution/beta.py +3 -3
  256. mindspore/nn/probability/distribution/categorical.py +7 -7
  257. mindspore/nn/probability/distribution/cauchy.py +0 -1
  258. mindspore/nn/probability/distribution/distribution.py +3 -3
  259. mindspore/nn/probability/distribution/gamma.py +3 -3
  260. mindspore/nn/probability/distribution/geometric.py +4 -4
  261. mindspore/nn/probability/distribution/gumbel.py +4 -4
  262. mindspore/nn/probability/distribution/log_normal.py +2 -2
  263. mindspore/nn/probability/distribution/logistic.py +2 -2
  264. mindspore/nn/probability/distribution/poisson.py +4 -4
  265. mindspore/nn/probability/distribution/transformed_distribution.py +3 -3
  266. mindspore/nn/probability/distribution/uniform.py +6 -6
  267. mindspore/nn/wrap/__init__.py +4 -2
  268. mindspore/nn/wrap/cell_wrapper.py +87 -34
  269. mindspore/nn/wrap/grad_reducer.py +8 -5
  270. mindspore/nn/wrap/loss_scale.py +105 -42
  271. mindspore/numpy/array_creations.py +1 -2
  272. mindspore/numpy/array_ops.py +3 -2
  273. mindspore/numpy/utils_const.py +5 -5
  274. mindspore/offline_debug/convert_async.py +2 -2
  275. mindspore/ops/_grad_experimental/__init__.py +0 -5
  276. mindspore/ops/_grad_experimental/grad_array_ops.py +2 -3
  277. mindspore/ops/_grad_experimental/grad_comm_ops.py +15 -2
  278. mindspore/ops/_grad_experimental/grad_debug_ops.py +0 -37
  279. mindspore/ops/_grad_experimental/grad_implementations.py +11 -1
  280. mindspore/ops/_grad_experimental/grad_inner_ops.py +2 -216
  281. mindspore/ops/_grad_experimental/grad_math_ops.py +19 -199
  282. mindspore/ops/_grad_experimental/grad_sparse.py +15 -0
  283. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
  284. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
  285. mindspore/ops/_op_impl/aicpu/__init__.py +14 -2
  286. mindspore/ops/_op_impl/aicpu/add.py +3 -3
  287. mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
  288. mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
  289. mindspore/ops/_op_impl/{_custom_op/flash_attention/constants.py → aicpu/eps.py} +18 -27
  290. mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
  291. mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +21 -2
  292. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
  293. mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
  294. mindspore/ops/_op_impl/aicpu/multinomial.py +3 -3
  295. mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
  296. mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
  297. mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
  298. mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
  299. mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
  300. mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
  301. mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
  302. mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -5
  303. mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -5
  304. mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
  305. mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
  306. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
  307. mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
  308. mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
  309. mindspore/ops/_op_impl/tbe/__init__.py +4 -4
  310. mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
  311. mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
  312. mindspore/ops/_primitive_cache.py +1 -1
  313. mindspore/ops/_tracefunc.py +45 -13
  314. mindspore/ops/_utils/utils.py +6 -1
  315. mindspore/ops/_vmap/vmap_array_ops.py +3 -3
  316. mindspore/ops/_vmap/vmap_base.py +3 -3
  317. mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
  318. mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
  319. mindspore/ops/_vmap/vmap_math_ops.py +5 -2
  320. mindspore/ops/_vmap/vmap_nn_ops.py +61 -7
  321. mindspore/ops/arg_dtype_cast.py +54 -0
  322. mindspore/ops/composite/base.py +37 -10
  323. mindspore/ops/composite/math_ops.py +5 -4
  324. mindspore/ops/composite/multitype_ops/_compile_utils.py +275 -73
  325. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +16 -9
  326. mindspore/ops/composite/multitype_ops/add_impl.py +43 -4
  327. mindspore/ops/composite/multitype_ops/getitem_impl.py +42 -4
  328. mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
  329. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  330. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
  331. mindspore/ops/deprecated.py +304 -0
  332. mindspore/ops/function/__init__.py +4 -1
  333. mindspore/ops/function/array_func.py +174 -193
  334. mindspore/ops/function/clip_func.py +81 -13
  335. mindspore/ops/function/debug_func.py +1 -1
  336. mindspore/ops/function/grad/grad_func.py +18 -9
  337. mindspore/ops/function/image_func.py +10 -4
  338. mindspore/ops/function/linalg_func.py +5 -5
  339. mindspore/ops/function/math_func.py +575 -386
  340. mindspore/ops/function/nn_func.py +568 -260
  341. mindspore/ops/function/random_func.py +88 -57
  342. mindspore/ops/function/sparse_func.py +1 -1
  343. mindspore/ops/function/sparse_unary_func.py +14 -12
  344. mindspore/ops/function/vmap_func.py +6 -5
  345. mindspore/ops/functional.py +15 -10
  346. mindspore/ops/op_info_register.py +244 -25
  347. mindspore/ops/operations/__init__.py +31 -19
  348. mindspore/ops/operations/_grad_ops.py +71 -7
  349. mindspore/ops/operations/_inner_ops.py +350 -17
  350. mindspore/ops/operations/_quant_ops.py +4 -8
  351. mindspore/ops/operations/_sequence_ops.py +42 -0
  352. mindspore/ops/operations/array_ops.py +68 -282
  353. mindspore/ops/operations/comm_ops.py +107 -59
  354. mindspore/ops/operations/custom_ops.py +94 -70
  355. mindspore/ops/operations/debug_ops.py +8 -4
  356. mindspore/ops/operations/image_ops.py +18 -12
  357. mindspore/ops/operations/inner_ops.py +26 -3
  358. mindspore/ops/operations/math_ops.py +192 -144
  359. mindspore/ops/operations/nn_ops.py +857 -489
  360. mindspore/ops/operations/other_ops.py +0 -22
  361. mindspore/ops/operations/random_ops.py +53 -111
  362. mindspore/ops/operations/sparse_ops.py +3 -1
  363. mindspore/ops/primitive.py +24 -18
  364. mindspore/parallel/_auto_parallel_context.py +68 -8
  365. mindspore/parallel/_cost_model_context.py +2 -2
  366. mindspore/parallel/_offload_context.py +17 -3
  367. mindspore/parallel/_parallel_serialization.py +12 -5
  368. mindspore/parallel/_ps_context.py +12 -0
  369. mindspore/parallel/_tensor.py +18 -13
  370. mindspore/parallel/_transformer/layers.py +5 -3
  371. mindspore/parallel/_transformer/loss.py +1 -0
  372. mindspore/parallel/_transformer/moe.py +2 -2
  373. mindspore/parallel/_transformer/op_parallel_config.py +12 -1
  374. mindspore/parallel/_transformer/transformer.py +23 -3
  375. mindspore/parallel/_utils.py +11 -7
  376. mindspore/parallel/algo_parameter_config.py +85 -5
  377. mindspore/parallel/checkpoint_transform.py +19 -12
  378. mindspore/parallel/shard.py +21 -14
  379. mindspore/profiler/common/struct_type.py +3 -3
  380. mindspore/profiler/common/util.py +4 -2
  381. mindspore/profiler/envprofiling.py +1 -1
  382. mindspore/profiler/parser/aicpu_data_parser.py +5 -3
  383. mindspore/profiler/parser/ascend_flops_generator.py +2 -2
  384. mindspore/profiler/parser/ascend_fpbp_generator.py +1 -1
  385. mindspore/profiler/parser/ascend_hccl_generator.py +249 -12
  386. mindspore/profiler/parser/ascend_msprof_exporter.py +150 -255
  387. mindspore/profiler/parser/ascend_msprof_generator.py +204 -17
  388. mindspore/profiler/parser/ascend_op_generator.py +6 -6
  389. mindspore/profiler/parser/ascend_steptrace_generator.py +6 -4
  390. mindspore/profiler/parser/ascend_timeline_generator.py +14 -187
  391. mindspore/profiler/parser/base_timeline_generator.py +10 -8
  392. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +16 -12
  393. mindspore/profiler/parser/flops_parser.py +15 -11
  394. mindspore/profiler/parser/framework_parser.py +38 -22
  395. mindspore/profiler/parser/hccl_parser.py +16 -12
  396. mindspore/profiler/parser/integrator.py +22 -11
  397. mindspore/profiler/parser/memory_usage_parser.py +2 -2
  398. mindspore/profiler/parser/minddata_analyzer.py +12 -14
  399. mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
  400. mindspore/profiler/parser/msadvisor_parser.py +8 -4
  401. mindspore/profiler/parser/op_intermediate_parser.py +5 -2
  402. mindspore/profiler/parser/optime_parser.py +1 -1
  403. mindspore/profiler/parser/profiler_info.py +21 -2
  404. mindspore/profiler/parser/step_trace_parser.py +11 -14
  405. mindspore/profiler/profiling.py +179 -89
  406. mindspore/rewrite/api/node.py +102 -19
  407. mindspore/rewrite/api/node_type.py +5 -1
  408. mindspore/rewrite/api/pattern_engine.py +1 -1
  409. mindspore/rewrite/api/scoped_value.py +9 -17
  410. mindspore/rewrite/api/symbol_tree.py +131 -47
  411. mindspore/rewrite/ast_helpers/__init__.py +2 -1
  412. mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
  413. mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
  414. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +93 -46
  415. mindspore/rewrite/common/rewrite_elog.py +5 -1
  416. mindspore/rewrite/namer.py +33 -24
  417. mindspore/rewrite/namespace.py +14 -5
  418. mindspore/{_extends/graph_kernel/expanders/complex → rewrite/node}/__init__.py +9 -9
  419. mindspore/rewrite/node/call_function.py +79 -0
  420. mindspore/rewrite/node/cell_container.py +135 -0
  421. mindspore/rewrite/node/control_flow.py +88 -0
  422. mindspore/rewrite/{node.py → node/node.py} +273 -234
  423. mindspore/rewrite/node/node_manager.py +254 -0
  424. mindspore/rewrite/{topological_manager.py → node/node_topological_manager.py} +13 -46
  425. mindspore/rewrite/parsers/arguments_parser.py +22 -21
  426. mindspore/rewrite/parsers/assign_parser.py +216 -221
  427. mindspore/rewrite/parsers/attribute_parser.py +9 -7
  428. mindspore/rewrite/parsers/class_def_parser.py +174 -113
  429. mindspore/rewrite/parsers/constant_parser.py +9 -6
  430. mindspore/rewrite/parsers/container_parser.py +9 -7
  431. mindspore/rewrite/parsers/for_parser.py +42 -21
  432. mindspore/rewrite/parsers/function_def_parser.py +24 -16
  433. mindspore/rewrite/parsers/if_parser.py +28 -24
  434. mindspore/rewrite/parsers/module_parser.py +196 -25
  435. mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
  436. mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
  437. mindspore/rewrite/parsers/return_parser.py +6 -6
  438. mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
  439. mindspore/rewrite/sparsify/utils.py +1 -1
  440. mindspore/rewrite/symbol_tree.py +523 -578
  441. mindspore/rewrite/symbol_tree_builder.py +9 -193
  442. mindspore/rewrite/symbol_tree_dumper.py +2 -2
  443. mindspore/run_check/_check_version.py +6 -4
  444. mindspore/{ops/bprop_mindir → safeguard}/__init__.py +4 -3
  445. mindspore/safeguard/rewrite_obfuscation.py +541 -0
  446. mindspore/scipy/linalg.py +1 -1
  447. mindspore/scipy/ops.py +55 -5
  448. mindspore/scipy/optimize/__init__.py +3 -2
  449. mindspore/scipy/optimize/linear_sum_assignment.py +38 -33
  450. mindspore/scipy/optimize/minimize.py +7 -3
  451. mindspore/train/_utils.py +7 -3
  452. mindspore/train/amp.py +323 -123
  453. mindspore/train/anf_ir_pb2.py +14 -2
  454. mindspore/train/callback/_backup_and_restore.py +2 -12
  455. mindspore/train/callback/_callback.py +29 -4
  456. mindspore/train/callback/_checkpoint.py +23 -8
  457. mindspore/train/callback/_early_stop.py +2 -2
  458. mindspore/train/callback/_landscape.py +4 -4
  459. mindspore/train/callback/_loss_monitor.py +2 -2
  460. mindspore/train/callback/_on_request_exit.py +2 -2
  461. mindspore/train/callback/_reduce_lr_on_plateau.py +3 -4
  462. mindspore/train/callback/_summary_collector.py +15 -8
  463. mindspore/train/callback/_time_monitor.py +58 -5
  464. mindspore/train/data_sink.py +5 -11
  465. mindspore/train/dataset_helper.py +84 -57
  466. mindspore/train/loss_scale_manager.py +2 -2
  467. mindspore/train/metrics/__init__.py +3 -3
  468. mindspore/train/metrics/cosine_similarity.py +1 -1
  469. mindspore/train/metrics/hausdorff_distance.py +3 -2
  470. mindspore/train/metrics/mean_surface_distance.py +3 -2
  471. mindspore/train/metrics/metric.py +39 -19
  472. mindspore/train/metrics/roc.py +2 -2
  473. mindspore/train/metrics/root_mean_square_surface_distance.py +4 -3
  474. mindspore/train/mind_ir_pb2.py +85 -36
  475. mindspore/train/model.py +187 -47
  476. mindspore/train/serialization.py +487 -161
  477. mindspore/train/summary/_summary_adapter.py +1 -1
  478. mindspore/train/summary/_writer_pool.py +3 -2
  479. mindspore/train/summary/summary_record.py +37 -17
  480. mindspore/train/train_thor/convert_utils.py +3 -3
  481. mindspore/train/train_thor/dataset_helper.py +1 -1
  482. mindspore/version.py +1 -1
  483. {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/METADATA +8 -8
  484. {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/RECORD +488 -539
  485. {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/entry_points.txt +0 -1
  486. mindspore/_akg/akg/tvm/contrib/debugger/__init__.py +0 -16
  487. mindspore/_akg/akg/tvm/contrib/debugger/debug_result.py +0 -274
  488. mindspore/_akg/akg/tvm/contrib/debugger/debug_runtime.py +0 -259
  489. mindspore/_akg/akg/tvm/contrib/peak.py +0 -341
  490. mindspore/_akg/akg/tvm/contrib/rpc.py +0 -25
  491. mindspore/_akg/akg/tvm/contrib/xcode.py +0 -257
  492. mindspore/_akg/akg/tvm/exec/__init__.py +0 -17
  493. mindspore/_akg/akg/tvm/exec/autotvm_log_editor.py +0 -60
  494. mindspore/_akg/akg/tvm/exec/measure_peak.py +0 -48
  495. mindspore/_akg/akg/tvm/exec/query_rpc_tracker.py +0 -48
  496. mindspore/_akg/akg/tvm/exec/rpc_proxy.py +0 -98
  497. mindspore/_akg/akg/tvm/exec/rpc_server.py +0 -88
  498. mindspore/_akg/akg/tvm/exec/rpc_tracker.py +0 -62
  499. mindspore/_akg/akg/tvm/rpc/__init__.py +0 -29
  500. mindspore/_akg/akg/tvm/rpc/base.py +0 -182
  501. mindspore/_akg/akg/tvm/rpc/client.py +0 -436
  502. mindspore/_akg/akg/tvm/rpc/proxy.py +0 -595
  503. mindspore/_akg/akg/tvm/rpc/server.py +0 -413
  504. mindspore/_akg/akg/tvm/rpc/tornado_util.py +0 -121
  505. mindspore/_akg/akg/tvm/rpc/tracker.py +0 -431
  506. mindspore/_extends/graph_kernel/expander.py +0 -80
  507. mindspore/_extends/graph_kernel/expanders/__init__.py +0 -54
  508. mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
  509. mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
  510. mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
  511. mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
  512. mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
  513. mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
  514. mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
  515. mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
  516. mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
  517. mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
  518. mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
  519. mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
  520. mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
  521. mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
  522. mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
  523. mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
  524. mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
  525. mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
  526. mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
  527. mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
  528. mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
  529. mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
  530. mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
  531. mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
  532. mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
  533. mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
  534. mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
  535. mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
  536. mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
  537. mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
  538. mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
  539. mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
  540. mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
  541. mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
  542. mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
  543. mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
  544. mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
  545. mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
  546. mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
  547. mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
  548. mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
  549. mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
  550. mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
  551. mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
  552. mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
  553. mindspore/dataset/datapreprocess/__init__.py +0 -20
  554. mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
  555. mindspore/include/api/net.h +0 -142
  556. mindspore/nn/lr_scheduler.py +0 -262
  557. mindspore/ops/_grad_experimental/grad_image_ops.py +0 -248
  558. mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -181
  559. mindspore/ops/_grad_experimental/grad_other_ops.py +0 -72
  560. mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
  561. mindspore/ops/_grad_experimental/grad_sequence_ops.py +0 -351
  562. mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +0 -350
  563. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +0 -409
  564. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +0 -578
  565. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +0 -199
  566. mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +0 -446
  567. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
  568. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +0 -45
  569. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +0 -67
  570. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +0 -62
  571. mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -0
  572. mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -0
  573. mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -0
  574. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
  575. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  576. mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -0
  577. mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -0
  578. mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
  579. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  580. mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -0
  581. mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -0
  582. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -0
  583. mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -0
  584. mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -0
  585. mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
  586. mindspore/rewrite/node_visitor.py +0 -44
  587. /mindspore/{ops/_op_impl/_custom_op/flash_attention → _akg/akg/utils/ascend_profilier}/__init__.py +0 -0
  588. {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/WHEEL +0 -0
  589. {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/top_level.txt +0 -0
mindspore/ops/function/array_func.py

@@ -26,8 +26,7 @@ import mindspore.common.dtype as mstype
 from mindspore.ops import operations as P
 from mindspore.ops.primitive import constexpr
 from mindspore.ops.primitive import _primexpr
-import mindspore.ops.function as ops
-from mindspore.ops import functional as F
+import mindspore.ops as ops
 from mindspore.ops.operations._inner_ops import DynamicBroadcastTo
 from mindspore.ops.operations._sequence_ops import TupleToTensor
 from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
@@ -65,8 +64,6 @@ from mindspore.ops._utils.utils import ms_arrange
 tuple_to_tensor_ = TupleToTensor()
 eye_ = P.Eye()
 fills_ = Fills()
-fill_ = P.Fill()
-fillv2_ = P.FillV2()
 ones_ = P.Ones()
 ones_like_ = P.OnesLike()
 tile_ = P.Tile()
@@ -115,9 +112,9 @@ reduce_min = P.ReduceMin()

 @_primexpr
 def get_x_shape(x_shape):
-    if F.is_sequence_shape_unknown(x_shape):
+    if ops.is_sequence_shape_unknown(x_shape):
         return (-2,)
-    if F.is_sequence_value_unknown(x_shape):
+    if ops.is_sequence_value_unknown(x_shape):
         return (-1,)
     s = 1
     for i in x_shape:
@@ -151,7 +148,7 @@ def _get_type(x):
     """get the dtype of input"""
     if isinstance(x, Tensor):
         return x.dtype
-    return F.typeof(x)
+    return ops.typeof(x)


 def _get_max_type(start, end, step):
@@ -240,7 +237,8 @@ def arange(start=0, end=None, step=1, *, dtype=None):
     if start.shape != () or end.shape != () or step.shape != ():
         raise ValueError(f"For arange, the input args must be a TensorScalar,"
                          f" but got start shape:{start.shape}, end shape:{end.shape}, step shape:{step.shape}")
-    data = P.Range()(start, end, step)
+    range_op = _get_cache_prim(P.Range)()
+    data = range_op(start, end, step)
     if dtype is not None:
         data = cast_(data, dtype)
     return data
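
A recurring change throughout this file replaces ad-hoc primitive construction (`P.Range()`, `P.FillV2()`, ...) with `_get_cache_prim`, which reuses one primitive instance per call site instead of rebuilding it on every call. A minimal sketch of that caching idea, assuming a plain dict keyed by class and init args (a hypothetical simplification, not MindSpore's actual `_get_cache_prim` implementation):

    # Hypothetical sketch: cache one primitive instance per (class, init args)
    # key, so repeated functional-API calls reuse the same op object.
    _PRIM_CACHE = {}

    def _get_cache_prim_sketch(prim_class, *init_args):
        """Return a cached primitive instance, creating it on first use."""
        key = (prim_class, init_args)
        if key not in _PRIM_CACHE:
            _PRIM_CACHE[key] = prim_class(*init_args)
        return _PRIM_CACHE[key]
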
@@ -653,7 +651,7 @@ def _check_axis_type(axis, type_int=True, type_tuple=True, type_list=True, ops_n
         raise TypeError(f"For {ops_name}, the axis should be {type_str}, but got {type(axis)}.")


-def one_hot(indices, depth, on_value, off_value, axis=-1):
+def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
     r"""
     Computes a one-hot tensor.

@@ -662,27 +660,28 @@ def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):

     Note:
         If the input indices is rank `N`, the output will have rank `N+1`. The new axis is created at dimension `axis`.
+        On Ascend, if `on_value` is Int64 dtype, `indices` must be Int64 dtype.

     Args:
         indices(Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
-            Data type must be uint8, int32 or int64.
+            Data type must be int32 or int64.
         depth(int): A scalar defining the depth of the one-hot dimension.
-        on_value(Union[Tensor, int, float]): A value to fill in output when `indices[j] = i`.
-            Support uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64,
-            bool, complex64, complex128.
-        off_value(Union[Tensor, int, float]): A value to fill in output when `indices[j] != i`.
-            Has the same data type as `on_value`.
-        axis(int): Position to insert the value. e.g. If shape of `self` is :math:`(N, C)`, and `axis` is -1,
+        on_value(Union[Tensor, int, float], optional): A value to fill in output when `indices[j] = i`.
+            Data type must be int32, int64, float16 or float32. Default: ``1`` .
+        off_value(Union[Tensor, int, float], optional): A value to fill in output when `indices[j] != i`.
+            Has the same data type as `on_value`. Default: ``0`` .
+        axis(int, optional): Position to insert the value. e.g. If shape of `self` is :math:`(N, C)`, and `axis` is -1,
             the output shape will be :math:`(N, C, depth)`, If `axis` is 0,
             the output shape will be :math:`(depth, N, C)`.
             Default: ``-1`` .

     Returns:
-        Tensor, one-hot tensor. Tensor of shape :math:`(X_0, \ldots, X_{axis}, \text{depth} ,X_{axis+1}, \ldots, X_n)`.
+        Tensor, one-hot tensor. Tensor of shape :math:`(X_0, \ldots, X_{axis}, \text{depth} ,X_{axis+1}, \ldots, X_n)`,
+        and it has the same data type as `on_value`.

     Raises:
         TypeError: If `axis` or `depth` is not an int.
-        TypeError: If dtype of `indices` is not uint8, int32 or int64.
+        TypeError: If dtype of `indices` is not int32 or int64.
         TypeError: If `indices`, `on_value` or `off_value` is not a Tensor.
         ValueError: If `axis` is not in range [-1, ndim].
         ValueError: If `depth` is less than 0.
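
With `on_value` and `off_value` now defaulted, a minimal call needs only `indices` and `depth`. A usage sketch in the docstrings' own doctest style (the printed shape follows from the documented output shape):

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> indices = Tensor(np.array([0, 1, 2]), mindspore.int32)
    >>> output = ops.one_hot(indices, 3)  # on_value/off_value default to 1/0
    >>> print(output.shape)
    (3, 3)
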
@@ -716,8 +715,8 @@ def fill(type, shape, value): # pylint: disable=redefined-outer-name

     Args:
         type (mindspore.dtype): The specified type of output tensor. The data type only supports
-            `bool_ <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ and
-            `number <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ .
+            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ and
+            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
         shape (Union(Tensor, tuple[int])): The specified shape of output tensor.
         value (Union(Tensor, number.Number, bool)): Value to fill the returned tensor.

@@ -743,10 +742,11 @@ def fill(type, shape, value): # pylint: disable=redefined-outer-name
          [0. 0. 0.]
          [0. 0. 0.]]
     """
-    return fill_(type, shape, value)
+    value = cast_(value, type)
+    return _get_cache_prim(P.FillV2)()(shape, value)


-def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-name
+def full(size, fill_value, *, dtype=None):  # pylint: disable=redefined-outer-name
     """
     Create a Tensor of the specified shape and fill it with the specified value.

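`ops.fill` now casts `value` to the requested dtype and dispatches to `P.FillV2` instead of the removed `P.Fill`; the public behavior should be unchanged. For example (output assumed to match the docstring example above):

    >>> import mindspore
    >>> from mindspore import ops
    >>> out = ops.fill(mindspore.float32, (2, 3), 0.5)
    >>> print(out)
    [[0.5 0.5 0.5]
     [0.5 0.5 0.5]]
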
@@ -788,7 +788,7 @@ def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-nam
         raise TypeError(f"For 'ops.full', 'dtype' must be mindspore.type, but got {dtype}.")
     if isinstance(size, list):
         size = tuple(size)
-    return fill_(dtype, size, fill_value)
+    return ops.fill(dtype, size, fill_value)


 def full_like(input, fill_value, *, dtype=None):
@@ -839,7 +839,7 @@ def chunk(input, chunks, axis=0):
     Cut the input Tensor into `chunks` sub-tensors along the specified axis.

     Note:
-        This function may return less then the specified number of chunks!
+        This function may return less than the specified number of chunks!

     Args:
         input (Tensor): A Tensor to be cut.
@@ -911,12 +911,12 @@ def fills(x, value):
         value_ = float(value)
     elif isinstance(value, Tensor):
         if value.ndim != 0:
-            raise ValueError("For 'ops.fills', if the argument 'value' is a tensor, the number of its dimension"
-                             " should be 0, but got {}".format(value.ndim))
+            raise ValueError(f"For 'ops.fills', if the argument 'value' is a tensor, the number of its dimension"
+                             f" should be 0, but got {value.ndim}")
         value_ = value.astype(mstype.float32)
     else:
-        raise TypeError("For 'ops.fills', the type of argument 'value' should be int, float or Tensor,"
-                        " but got {}".format(type(value)))
+        raise TypeError(f"For 'ops.fills', the type of argument 'value' should be int, float or Tensor,"
+                        f" but got {type(value)}")
     return fills_(x, value_)

@@ -952,7 +952,7 @@ def ones(shape, dtype=None): # pylint: disable=redefined-outer-name
          [1. 1.]]
     """
     _dtype = mstype.float32 if dtype is None else dtype
-    ones_op = P.FillV2()
+    ones_op = _get_cache_prim(P.FillV2)()
     value = Tensor(1, _dtype)
     if isinstance(shape, int):
         shape = tuple([shape])
@@ -993,7 +993,7 @@ def ones_like(input, *, dtype=None):
         [[1 1]
          [1 1]]
     """
-    ones_like_op = P.OnesLike()
+    ones_like_op = _get_cache_prim(P.OnesLike)()
     output = ones_like_op(input)
     _dtype = input.dtype if dtype is None else dtype
     output = cast_(output, _dtype)
@@ -1028,7 +1028,7 @@ def zeros(size, dtype=None): # pylint: disable=redefined-outer-name
         [[0. 0.]
          [0. 0.]]
     """
-    zero_op = P.FillV2()
+    zero_op = _get_cache_prim(P.FillV2)()
     _dtype = mstype.float32 if dtype is None else dtype
     value = Tensor(0, _dtype)
     if isinstance(size, int):
@@ -1074,9 +1074,10 @@ def zeros_like(input, *, dtype=None):
          [0. 0.]]
     """
     _dtype = input.dtype if dtype is None else dtype
-    zeros_like_op = P.ZerosLike()
-    output = zeros_like_op(input)
-    output = cast_(output, _dtype)
+    _zeros_like = _get_cache_prim(P.ZerosLike)()
+    _cast = _get_cache_prim(P.Cast)()
+    output = _zeros_like(input)
+    output = _cast(output, _dtype)
     return output

@@ -1147,7 +1148,8 @@ def tile(input, multiples):
          [1. 2. 1. 2.]
          [3. 4. 3. 4.]]]
     """
-    return tile_(input, multiples)
+    tile_op = _get_cache_prim(P.Tile)()
+    return tile_op(input, multiples)


 def range(start, end, step):
@@ -1455,7 +1457,7 @@ def size(input_x):

     Args:
         input_x (Tensor): Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
-            `number <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_.
+            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.

     Returns:
         int. A scalar representing the elements' size of `input_x`, tensor is the number of elements
@@ -1607,7 +1609,7 @@ def reshape(input, shape):


 def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
-    """
+    r"""
     Reverses variable length slices.

     Args:
@@ -1621,7 +1623,12 @@ def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):

     Raises:
         TypeError: If `seq_dim` or `batch_dim` is not an int.
-        ValueError: If value of `batch_dim` is equal to or greater than length of shape of input.
+        ValueError: If :math:`len(seq\_lengths) != x.shape[batch\_dim]`.
+        ValueError: If :math:`batch\_dim == seq\_dim`.
+        ValueError: If :math:`seq\_dim < 0` or :math:`seq\_dim >= len(x.shape)`.
+        ValueError: If :math:`batch\_dim < 0` or :math:`batch\_dim >= len(x.shape)`.
+        RuntimeError: If any value of `seq_lengths` is less than 0.
+        RuntimeError: If any value of `seq_lengths` is larger than `x.shape[seq_dim]`.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
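
For context, `reverse_sequence` reverses the first `seq_lengths[i]` elements of batch entry `i` along `seq_dim`. A small sketch of that documented semantics (expected output written by hand):

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), mindspore.float32)
    >>> seq_lengths = Tensor(np.array([2, 3]))
    >>> print(ops.reverse_sequence(x, seq_lengths, seq_dim=1))
    [[2. 1. 3.]
     [6. 5. 4.]]
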
@@ -1724,12 +1731,16 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
     if not isinstance(input, Tensor):
         raise TypeError(f"For 'flatten', argument 'input' must be Tensor.")
     if not isinstance(start_dim, int) or not isinstance(end_dim, int) or \
-        isinstance(start_dim, bool) or isinstance(end_dim, bool):
+            isinstance(start_dim, bool) or isinstance(end_dim, bool):
         raise TypeError(f"For 'flatten', both 'start_dim' and 'end_dim' must be int.")
     check_flatten_order_const(order)
     if order == 'F':
-        perm = F.make_range(0, F.rank(input))
-        new_order = F.tuple_reversed(perm)
+        x_rank = rank_(input)
+        # If input is a 0-dimensional Tensor, a 1-dimensional Tensor will be returned.
+        if x_rank in (0, 1):
+            return reshape_(input, (-1,))
+        perm = ops.make_range(0, x_rank)
+        new_order = ops.tuple_reversed(perm)
         input = _get_cache_prim(P.Transpose)()(input, new_order)

     # Handle the default case.
1911
1922
  input_y = cast_(input_y, mstype.float32)
1912
1923
 
1913
1924
  if is_x_tensor and is_y_tensor and is_cond_tensor:
1914
- x_shape = F.shape(x)
1915
- y_shape = F.shape(y)
1916
- cond_shape = F.shape(cond)
1917
- all_constant = F.isconstant(cond_shape) and F.isconstant(x_shape) and F.isconstant(y_shape)
1925
+ x_shape = ops.shape(x)
1926
+ y_shape = ops.shape(y)
1927
+ cond_shape = ops.shape(cond)
1928
+ all_constant = ops.isconstant(cond_shape) and ops.isconstant(x_shape) and ops.isconstant(y_shape)
1918
1929
  if all_constant and not _check_select_shape_same(cond_shape, x_shape, y_shape):
1919
1930
  broadcast_shape = _calc_broadcast_shape(cond_shape, x_shape, y_shape)
1920
- new_cond = F.broadcast_to(cond, broadcast_shape)
1921
- new_x = F.broadcast_to(x, broadcast_shape)
1922
- new_y = F.broadcast_to(y, broadcast_shape)
1931
+ new_cond = ops.broadcast_to(cond, broadcast_shape)
1932
+ new_x = ops.broadcast_to(x, broadcast_shape)
1933
+ new_y = ops.broadcast_to(y, broadcast_shape)
1923
1934
  return tensor_select_(new_cond, new_x, new_y)
1924
1935
 
1925
1936
  return tensor_select_(cond, input_x, input_y)
@@ -2010,9 +2021,7 @@ def strided_slice(input_x,
     Args:
         input_x (Tensor): The input Tensor to be extracted from.
         begin (tuple[int]): A tuple which represents the location where to start.
-            Only non-negative int is allowed.
         end (tuple[int]): A tuple or which represents the maximum location where to end.
-            Only non-negative int is allowed.
         strides (tuple[int]): A tuple which represents the strides is continuously added
             before reaching the maximum location. Only int is allowed, it can be negative
             which results in reversed slicing.
@@ -2156,13 +2165,11 @@ def concat(tensors, axis=0):
     Alias for :func:`mindspore.ops.cat()`.

     Tutorial Examples:
-        - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/r2.1/beginner/tensor.html#tensor-operation>`_
-        - `FGSM Network Adversarial Attack - Implementing FGSM
-          <https://mindspore.cn/tutorials/application/en/r2.1/cv/fgsm.html#implementing-fgsm>`_
+        - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/r2.2/beginner/tensor.html#tensor-operation>`_
         - `Vision Transformer Image Classification - Building ViT as a whole
-          <https://mindspore.cn/tutorials/application/en/r2.1/cv/vit.html#building-vit-as-a-whole>`_
+          <https://mindspore.cn/tutorials/application/en/r2.2/cv/vit.html#building-vit-as-a-whole>`_
         - `Sentiment Classification Implemented by RNN - Dense
-          <https://mindspore.cn/tutorials/application/en/r2.1/nlp/sentiment_analysis.html#dense>`_
+          <https://mindspore.cn/tutorials/application/en/r2.2/nlp/sentiment_analysis.html#dense>`_
     """
     return cat(tensors, axis)

@@ -2279,7 +2286,8 @@ def unbind(input, dim=0):

 def expand_dims(input_x, axis):
     """
-    Adds an additional dimension to `input_x` at the given axis.
+    Adds an additional dimension to `input_x` at the given axis, the dimension
+    of `input_x` should be greater than or equal to 1.

     Note:
         If the specified axis is a negative number, the index is counted
@@ -2357,18 +2365,19 @@ def squeeze(input, axis=None):
     If `axis` is specified, it will remove the dimensions of size 1 in the given `axis`.
     For example, if the dimension is not specified :math:`axis=None`, input shape is (A, 1, B, C, 1, D),
     then the shape of the output Tensor is (A, B, C, D). If the dimension is specified, the squeeze operation
-    is only performed in the specified dimension. If input shape is (A, 1, B), input Tensor will not be
-    changed when :math:`axis=0` , but when :math:`axis=1` , the shape of the input Tensor will be changed to (A, B).
+    is only performed in the specified dimension. If input shape is (A, 1, B), input Tensor will be changed
+    to (A, B) when :math:`axis=1`, but when :math:`axis=0` or :math:`axis=2`, an error will occur.

     Note:
+        - Squeezing a dimension that is not 1 will raise an error.
         - Please note that in dynamic graph mode, the output Tensor will share data with the input Tensor,
           and there is no Tensor data copy process.
         - The dimension index starts at 0 and must be in the range `[-input.ndim, input.ndim]`.

     Args:
         input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-        axis (Union[int, tuple(int)]): Specifies the dimension indexes of shape to be removed, which will remove
-            all the dimensions of size 1 in the given axis parameter. If specified, it must be int32 or int64.
+        axis (Union[int, tuple(int), list(int)]): Specifies the dimension indexes of shape to be removed, which will
+            remove all the dimensions of size 1 in the given axis parameter. If specified, it must be int32 or int64.
             Default: ``None`` , an empty tuple will be used.

     Returns:
@@ -2376,8 +2385,8 @@ def squeeze(input, axis=None):

     Raises:
         TypeError: If `input` is not a tensor.
-        TypeError: If `axis` is neither an int nor tuple.
-        TypeError: If `axis` is a tuple whose elements are not all int.
+        TypeError: If `axis` is not an int, tuple or list.
+        TypeError: If `axis` is a tuple or list whose elements are not all int.
         ValueError: If the corresponding dimension of the specified axis isn't equal to 1.

     Supported Platforms:
@@ -2396,6 +2405,8 @@ def squeeze(input, axis=None):
     """
     if axis is None:
         axis = ()
+    if isinstance(axis, list):
+        axis = tuple(axis)
     squeeze_ = _get_cache_prim(P.Squeeze)(axis)
     return squeeze_(input)

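`squeeze` now also accepts a list of axes, normalized to a tuple before the cached `P.Squeeze` primitive is created. For instance (shape follows from the documented semantics):

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> x = Tensor(np.ones((1, 2, 1, 3)), mindspore.float32)
    >>> print(ops.squeeze(x, axis=[0, 2]).shape)
    (2, 3)
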
@@ -2478,7 +2489,6 @@ def scatter_mul(input_x, indices, updates):
         Tensor, the updated `input_x`, has the same shape and type as `input_x`.

     Raises:
-        TypeError: If `use_locking` is not a bool.
         TypeError: If `indices` is not an int32 or int64.
         ValueError: If the shape of `updates` is not equal to `indices.shape + input_x.shape[1:]`.
         RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
@@ -3486,11 +3496,12 @@ def gather(input_params, input_indices, axis, batch_dims=0):
     where params represents the input `input_params`, and indices represents the index to be sliced `input_indices`.

     .. note::
-        1. The value of input_indices must be in the range of `[0, input_param.shape[axis])`, the result is undefined
-           out of range.
+        1. The value of input_indices must be in the range of `[0, input_param.shape[axis])`.
+           On CPU and GPU, an error is raised if an out of bound indice is found. On Ascend, the results may be
+           undefined.

         2. The data type of input_params cannot be
-           `bool_ <https://www.mindspore.cn/docs/en/r2.1/api_python/mindspore.html#mindspore.dtype>`_ on Ascend
+           `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ on Ascend
            platform currently.

     Args:
@@ -3512,6 +3523,7 @@ def gather(input_params, input_indices, axis, batch_dims=0):
         ValueError: If `axis` is a Tensor and its size is not 1.
         TypeError: If `input_params` is not a tensor.
         TypeError: If `input_indices` is not a tensor of type int.
+        RuntimeError: If `input_indices` is out of range `[0, input_param.shape[axis])` on CPU or GPU.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
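
In-range gathers are unaffected; only out-of-bound indices change from undefined results to a documented `RuntimeError` on CPU and GPU. A sketch of the unchanged happy path (output written from the documented semantics):

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor, ops
    >>> params = Tensor(np.array([10, 20, 30, 40]), mindspore.float32)
    >>> idx = Tensor(np.array([0, 3]), mindspore.int32)
    >>> print(ops.gather(params, idx, axis=0))
    [10. 40.]
    >>> # An index of 4 here would now raise RuntimeError on CPU/GPU instead
    >>> # of silently producing undefined values.
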
@@ -3976,7 +3988,7 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
     >>> reduction = "none"
     >>> output = ops.tensor_scatter_elements(input_x, indices, updates, axis, reduction)
     >>> print(output)
-    [[ 1, 2, 8, 4, 8]]
+    [[1 2 8 4 8]]
     >>> input_x = Parameter(Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.int32), name="x")
     >>> indices = Tensor(np.array([[1, -1, 2], [0, 2, 1]]), mindspore.int32)
     >>> updates = Tensor(np.array([[1, 2, 2], [4, 5, 8]]), mindspore.int32)
@@ -3984,7 +3996,9 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
     >>> reduction = "add"
     >>> output = ops.tensor_scatter_elements(input_x, indices, updates, axis, reduction)
     >>> print(output)
-    [[5, 2, 3], [5, 5, 14], [7, 15, 11]]
+    [[ 5  2  3]
+     [ 5  5 14]
+     [ 7 15 11]]
     """
     _tensor_scatter_elements = _get_cache_prim(TensorScatterElements)(axis, reduction)
     return _tensor_scatter_elements(input_x, indices, updates)
@@ -4048,7 +4062,7 @@ def scatter(input, axis, index, src):
          [0. 0. 0. 0. 0.]
          [0. 0. 0. 0. 0.]]
     """
-    return F.tensor_scatter_elements(input_x=input, indices=index, updates=src, axis=axis)
+    return ops.tensor_scatter_elements(input_x=input, indices=index, updates=src, axis=axis)


 def _get_slice_scatter_const(x_shape, axis, start, end, step):
@@ -4087,6 +4101,7 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
         Tensor after embedding, has the same shape and type as `input` .
 
     Raises:
+        ValueError: If the shape of `src` is not the same as the shape of the `input` slice.
         TypeError: If `input` is not a Tensor.
         TypeError: If `src` is not a Tensor.
         TypeError: If `axis` or `step` is not an integer.
@@ -4115,23 +4130,13 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
     for _ in builtins.range(axis):
         index_tensor = index_tensor.expand_dims(0)
 
-    if index_shape == src_shape:
-        for _ in builtins.range(input_rank - axis - 1):
-            index_tensor = index_tensor.expand_dims(-1)
-        index_tensor = index_tensor.broadcast_to(src.shape)
-        return tensor_scatter_elements(input, axis=axis, indices=index_tensor, updates=src)
-
-    for _ in builtins.range(axis):
-        src = src.expand_dims(0)
-    if axis == input_rank - 1:
-        src = src.broadcast_to(input.shape[0:axis] + src_shape)
-    else:
-        for _ in builtins.range(len(src_shape)):
-            index_tensor = index_tensor.expand_dims(-1)
-        src = src.broadcast_to(input.shape[0:axis] + (len(index),) + src_shape)
+    if index_shape != src_shape:
+        raise ValueError(f"For slice_scatter, src shape should be equal to the slice size, "
+                         f"but got src shape {src_shape} and slice shape {index_shape}")
+    for _ in builtins.range(input_rank - axis - 1):
+        index_tensor = index_tensor.expand_dims(-1)
     index_tensor = index_tensor.broadcast_to(src.shape)
-    output = tensor_scatter_elements(input, axis=axis, indices=index_tensor, updates=src)
-    return output
+    return tensor_scatter_elements(input, axis=axis, indices=index_tensor, updates=src)
 
 
 def select_scatter(input, src, axis, index):
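Before the select_scatter hunks: the rewritten slice_scatter now rejects a `src` whose shape differs from the targeted slice instead of broadcasting it. A sketch of the surviving path (assuming MindSpore 2.2.x):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.zeros((4, 5)), mindspore.float32)
src = Tensor(np.ones((4, 2)), mindspore.float32)
# src.shape must match the slice x[:, 1:3]; other shapes now raise ValueError.
print(ops.slice_scatter(x, src, axis=1, start=1, end=3))
```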
@@ -4148,6 +4153,7 @@ def select_scatter(input, src, axis, index):
         Tensor after embedding, has the same shape and type as `input` .
 
     Raises:
+        ValueError: If the shape of `src` is not the same as the shape scattered over `input` .
         TypeError: If `input` is not a Tensor.
         TypeError: If `src` is not a Tensor.
         TypeError: If `axis` or `index` is not an integer.
@@ -4169,6 +4175,9 @@ def select_scatter(input, src, axis, index):
          [0. 0. 0.]]]
     """
     src = src.expand_dims(axis=axis)
+    x_rank = input.ndim
+    axis = axis if axis >= 0 else axis + x_rank
+    index = index if index >= 0 else index + x_rank
     return slice_scatter(input, src, axis, start=index, end=index + 1)
 
 
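The three added lines normalize negative `axis` and `index` before delegating to slice_scatter. Note the patch normalizes `index` by the rank, so the sketch below (assuming MindSpore 2.2.x) uses a case where the rank and the axis length agree:

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.zeros((2, 3)), mindspore.float32)
src = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
# index=-1 is normalized to 1 before slicing.
print(ops.select_scatter(x, src, axis=0, index=-1))
# [[0. 0. 0.]
#  [1. 2. 3.]]
```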
@@ -4437,7 +4446,7 @@ def matrix_diag(x, k=0, num_rows=-1, num_cols=-1, padding_value=0, align="RIGHT_
     return matrix_diag_v3(x, k, num_rows, num_cols, padding_value)
 
 
-def matrix_diag_part(x, k=0, padding_value=0, align="RIGHT_LEFT"):
+def matrix_diag_part(x, k, padding_value, align="RIGHT_LEFT"):
     r"""
     Returns the diagonal part of input tensor.
     Returns a tensor with the k[0]-th to k[1]-th diagonals of `x`. Some diagonals are shorter than
@@ -4445,13 +4454,13 @@ def matrix_diag_part(x, k=0, padding_value=0, align="RIGHT_LEFT"):
 
     Args:
         x (Tensor): The input Tensor with rank r, where r >= 2.
-        k (Union[int, Tensor], optional): A Tensor of type int32. Diagonal offset(s). Positive value means
+        k (Tensor): A Tensor of type int32. Diagonal offset(s). Positive value means
             superdiagonal, 0 refers to the main diagonal, and negative value means subdiagonals. k can be
             a single integer (for a single diagonal) or a pair of integers specifying the low and high ends
             of a matrix band. k[0] must not be larger than k[1]. The value of k has restrictions, meaning the
-            value of k must be in (-x.shape[-2], x.shape[-1]). Default: ``0``.
-        padding_value (Union[int, float, Tensor], optional): A Tensor with only one value. Have the same dtype as x.
-            The number to fill the area outside the specified diagonal band. Default: ``0`` .
+            value of k must be in (-x.shape[-2], x.shape[-1]).
+        padding_value (Tensor): A Tensor with only one value, having the same dtype as x.
+            The number to fill the area outside the specified diagonal band.
         align (str, optional): An optional string from: ``"RIGHT_LEFT"`` , ``"LEFT_RIGHT"`` ,
             ``"LEFT_LEFT"`` , ``"RIGHT_RIGHT"`` . Align is a string specifying how superdiagonals and subdiagonals
             should be aligned, respectively. ``"RIGHT_LEFT"`` aligns superdiagonals to the right (left-pads the row)
@@ -4501,7 +4510,7 @@ def matrix_diag_part(x, k=0, padding_value=0, align="RIGHT_LEFT"):
     return matrix_diag_part_v3(x, k, padding_value)
 
 
-def matrix_set_diag(x, diagonal, k=0, align="RIGHT_LEFT"): # pylint: disable=redefined-outer-name
+def matrix_set_diag(x, diagonal, k=0, align="RIGHT_LEFT"):  # pylint: disable=redefined-outer-name
     r"""
     Returns a batched matrix tensor with new batched diagonal values.
     Given x and diagonal, this operation returns a tensor with the same shape and values as x, except for the specified
@@ -4713,7 +4722,7 @@ def affine_grid(theta, size, align_corners=False):
     return affine_grid_op(theta, size)
 
 
-def broadcast_to(input, shape): # pylint: disable=redefined-outer-name
+def broadcast_to(input, shape):  # pylint: disable=redefined-outer-name
     """
     Broadcasts input tensor to a given shape. The dim of input shape must be smaller
     than or equal to that of target shape. Suppose input shape is :math:`(x_1, x_2, ..., x_m)`,
@@ -4787,7 +4796,7 @@ def broadcast_to(input, shape): # pylint: disable=redefined-outer-name
         [[1. 1.]
          [2. 2.]]
     """
-    if isinstance(shape, Tensor) or F.is_sequence_value_unknown(shape):
+    if isinstance(shape, Tensor) or ops.is_sequence_value_unknown(shape):
         _dyn_broadcast_to = _get_cache_prim(DynamicBroadcastTo)()
         return _dyn_broadcast_to(input, shape)
     _broadcast_to = _get_cache_prim(P.BroadcastTo)(shape)
@@ -5580,10 +5589,10 @@ def _split_int(x, split_size_or_sections, axis):
     arr_shape = x.shape
     length_along_dim = arr_shape[axis]
     if split_size_or_sections > length_along_dim:
-        res = P.Split(axis, 1)(x)
+        res = _get_cache_prim(P.Split)(axis, 1)(x)
     elif length_along_dim % split_size_or_sections == 0:
         sections = length_along_dim // split_size_or_sections
-        res = P.Split(axis, sections)(x)
+        res = _get_cache_prim(P.Split)(axis, sections)(x)
     else:
         num_sections = length_along_dim // split_size_or_sections
         length1 = num_sections * split_size_or_sections
@@ -5592,8 +5601,8 @@ def _split_int(x, split_size_or_sections, axis):
         size1 = _tuple_setitem(arr_shape, axis, length1)
         start2 = _tuple_setitem(start1, axis, length1)
         size2 = _tuple_setitem(arr_shape, axis, length2)
-        res = P.Split(axis, num_sections)(tensor_slice(x, start1, size1)) + \
-              P.Split(axis, 1)(tensor_slice(x, start2, size2))
+        res = _get_cache_prim(P.Split)(axis, num_sections)(tensor_slice(x, start1, size1)) + \
+              _get_cache_prim(P.Split)(axis, 1)(tensor_slice(x, start2, size2))
     return res
 
 
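The `_get_cache_prim` changes only avoid re-instantiating `P.Split` on every call; behavior is unchanged. The uneven-tail path it implements looks like this from the public API (a sketch, assuming MindSpore 2.2.x):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.arange(7), mindspore.int64)
chunks = ops.split(x, 3)  # 7 = 3 + 3 + 1, so the last chunk is shorter
print([c.asnumpy().tolist() for c in chunks])  # [[0, 1, 2], [3, 4, 5], [6]]
```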
@@ -5687,7 +5696,7 @@ def split(tensor, split_size_or_sections, axis=0):
     return tuple(res)
 
 
-def tril(input, diagonal=0): # pylint: disable=redefined-outer-name
+def tril(input, diagonal=0):  # pylint: disable=redefined-outer-name
     """
     Returns the lower triangle part of 'input' (elements that contain the diagonal and below),
     and sets the other elements to zero.
@@ -6102,7 +6111,7 @@ def dsplit(input, indices_or_sections):
     return tensor_split(input, indices_or_sections, 2)
 
 
-def _init_and_select_elem(input, initial, where, cmp_fn): # pylint: disable=redefined-outer-name
+def _init_and_select_elem(input, initial, where, cmp_fn):  # pylint: disable=redefined-outer-name
     """Initialize the input according to `initial`, and select the element according to `where`."""
     if initial is not None:
         initial = ops.fill(input.dtype, input.shape, initial)
@@ -6120,7 +6129,7 @@ def _init_and_select_elem(input, initial, where, cmp_fn): # pylint: disable=r
     return input
 
 
-def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylint: disable=redefined-outer-name
+def max(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylint: disable=redefined-outer-name
     """
     Calculates the maximum value along with the given axis for the input tensor. It returns the maximum values and
     indices.
@@ -6138,7 +6147,8 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pyl
 
     Args:
         input (Tensor): The input tensor, can be any dimension. Complex tensor is not supported for now.
-        axis (int): The dimension to reduce. Default: ``None`` .
+        axis (int): The dimension to reduce. When `axis` is ``None``, the maximum value of all elements
+            in `input` is computed. Default: ``None`` .
         keepdims (bool): Whether to reduce dimension, if true, the output will keep same dimension with the input,
             the output will reduce dimension if false. Default: ``False`` .
 
@@ -6174,14 +6184,20 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pyl
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
         >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
-        >>> output, index, = ops.max(x, keepdims=True)
+        >>> output, index = ops.max(x)
         >>> print(output, index)
         0.7 0
+        >>> y = Tensor(np.array([[0.0, 0.3, 0.4, 0.5, 0.1],
+        ...                      [3.2, 0.4, 0.1, 2.9, 4.0]]), mindspore.float32)
+        >>> output, index = ops.max(y, axis=0, keepdims=True)
+        >>> print(output, index)
+        [[3.2 0.4 0.4 2.9 4. ]] [[1 1 0 1 1]]
     """
     if not input.shape:
         return (input, Tensor(0, dtype=mstype.int32))
     if axis is None:
-        return (reduce_max(input), Tensor(0, dtype=mstype.int32))
+        reduce_max_op = _get_cache_prim(P.ReduceMax)()
+        return (reduce_max_op(input), Tensor(0, dtype=mstype.int32))
     if initial is not None and not isinstance(initial, numbers.Number):
         raise TypeError(f"For 'max', 'initial' must be a scalar, but got {type(initial)}")
     if axis is not None and not isinstance(axis, int):
@@ -6237,7 +6253,7 @@ def argmax(input, dim=None, keepdim=False):
     return out
 
 
-def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pylint: disable=redefined-outer-name
+def min(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylint: disable=redefined-outer-name
     """
     Calculates the minimum value along with the given axis for the input tensor. It returns the minimum values and
     indices.
@@ -6366,6 +6382,9 @@ def aminmax(input, *, axis=0, keepdims=False):
     argmax_with_value_op = P.ArgMaxWithValue(axis, keepdims)
     _, output0 = argmin_with_value_op(input)
     _, output1 = argmax_with_value_op(input)
+    if keepdims is True and input.ndim == 0:
+        output0 = ops.reshape(output0, [1])
+        output1 = ops.reshape(output1, [1])
     return output0, output1
 
 
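The added branch fixes `aminmax` for 0-d inputs with `keepdims=True`, which previously came back 0-d. Ordinary usage for reference (a sketch, assuming MindSpore 2.2.x):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([2.0, -1.0, 5.0]), mindspore.float32)
lo, hi = ops.aminmax(x)  # axis defaults to 0
print(lo, hi)  # -1.0 5.0
# With the patch, a 0-d input plus keepdims=True now yields shape-(1,) outputs.
```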
@@ -6477,7 +6496,6 @@ def unsorted_segment_sum(input_x, segment_ids, num_segments):
     return unsorted_segment_sum_(input_x, segment_ids, num_segments)
 
 
-
 def topk(input, k, dim=None, largest=True, sorted=True):
     r"""
     Finds values and indices of the `k` largest or smallest entries along a given dimension.
@@ -6500,12 +6518,8 @@ def topk(input, k, dim=None, largest=True, sorted=True):
 
     If the two compared elements are the same, the one with the smaller index value is returned first.
 
-    Note:
-        Currently, Ascend/CPU supported all common data types except bool and complex type,
-        but GPU only supports float16, float32 currently.
-
     Args:
-        input (Tensor): Input to be computed.
+        input (Tensor): Input to be computed, data type must be float16, float32 or int32.
         k (int): The number of top or bottom elements to be computed along the last dimension, constant input is needed.
         dim (int, optional): The dimension to sort along. Default: ``None`` .
         largest (bool, optional): If largest is ``False`` then the k smallest elements are returned.
@@ -6523,6 +6537,7 @@ def topk(input, k, dim=None, largest=True, sorted=True):
         TypeError: If `sorted` is not a bool.
         TypeError: If `input` is not a Tensor.
         TypeError: If `k` is not an int.
+        TypeError: If dtype of `input` is not one of the following: float16, float32 or int32.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
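The removed note is replaced by a hard dtype contract: float16, float32 or int32. Basic usage (a sketch, assuming MindSpore 2.2.x):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([0.1, 0.5, 0.3, 0.9]), mindspore.float32)
values, indices = ops.topk(x, 2)
print(values)   # [0.9 0.5]
print(indices)  # [3 1]
```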
@@ -6574,50 +6589,8 @@
 
 def expand(input_x, size):
     r"""
-    Returns a new tensor where the dimension of size is expanded to a larger size.
-
-    Note:
-        - If the `size` for a dimension is -1, it means no change for the size of that dimension.
-        - When a Tensor is expanded to a larger number of dimensions, the new ones will be appended at
-          the front, and for the new dimensions, the `size` can not be -1.
-
-    Args:
-        input_x (Tensor): A Tensor to be expanded.
-        size (Tensor): The expanded shape of `input_x`.
-
-    Returns:
-        y (Tensor) - Tensor after expansion whose shape is `size`.
-
-    Raises:
-        TypeError: If `input_x` or `size` is not Tensor.
-        TypeError: If the type of `size` is not one of the following dtype: int16, int32, int64.
-        ValueError: If the size of `size` is less than the size of `input_x.shape`.
-        ValueError: If `size` is not a 1-D tensor.
-        ValueError: If the expanded `size` is not equal to the existing shape of `input_x` at a dimension
-            that is not 1.
-        ValueError: If the expanded `size` < 0 and it is in a leading position, corresponding to
-            a non-existing dimension in `input_x`.
-        ValueError: If the number of elements of output is more than 1000000.
-
-    Supported Platforms:
-        ``Ascend`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([[2], [3], [4]]), mindspore.float32)
-        >>> size = Tensor(np.array([3,4]), mindspore.int32)
-        >>> y = ops.expand(input_x, size)
-        >>> print(y)
-        [[2. 2. 2. 2.]
-         [3. 3. 3. 3.]
-         [4. 4. 4. 4.]]
-        >>> input_x = Tensor(2, mindspore.int16)
-        >>> size = Tensor(np.array([1, 1]), mindspore.int32)
-        >>> y = ops.expand(input_x, size)
-        >>> print(y)
-        [[2]]
+    :func:`mindspore.ops.expand` will be deprecated in the future.
+    Please use :func:`mindspore.ops.broadcast_to` instead.
     """
     expand_op = _get_cache_prim(Expand)()
     return expand_op(input_x, size)
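Since the docstring now only points at the replacement, the migration is one line: `broadcast_to` takes a plain shape tuple where `expand` took a Tensor (a sketch, assuming MindSpore 2.2.x):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([[2], [3], [4]]), mindspore.float32)
y = ops.broadcast_to(x, (3, 4))  # replaces ops.expand(x, Tensor([3, 4]))
print(y)
# [[2. 2. 2. 2.]
#  [3. 3. 3. 3.]
#  [4. 4. 4. 4.]]
```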
@@ -6636,20 +6609,30 @@ def _check_fold_param(param, param_name):
     return param
 
 
-@_primexpr
-def _check_fold_input(input):
-    """Check the rank of fold's input."""
-    if not isinstance(input, (Tensor, Tensor_)) or F.rank(input) != 3:
-        raise ValueError(
-            f"For array function 'fold', 'input' must be a 3-D tensor.")
-
-
 def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
     r"""
     Combines an array of sliding local blocks into a large containing tensor.
 
+    Consider a batched input tensor of shape :math:`(N, C \times \prod(\text{kernel_size}), L)` ,
+    where :math:`N` is the batch dimension, :math:`C \times \prod(\text{kernel_size})` is the
+    total number of values within each block (a block has :math:`\prod(\text{kernel_size})` spatial
+    locations each containing a `C`-channeled vector), and :math:`L` is the total number of such blocks:
+
+    .. math::
+        L = \prod_d \left\lfloor\frac{\text{output_size}[d] + 2 \times \text{padding}[d] %
+            - \text{dilations}[d] \times (\text{kernel_size}[d] - 1) - 1}{\text{strides}[d]} + 1\right\rfloor,
+
+    where :math:`d` is over all spatial dimensions.
+
+    Therefore, `output_size` is the spatial shape of the large containing tensor of the sliding local blocks.
+
+    The `dilation`, `padding` and `stride` arguments specify how the sliding blocks are retrieved.
+
     .. warning::
-        - The input must be a 3-dimensional Tensor with shape :math:`(N, C \times H, W)` .
+        - The input must be a 3-dimensional Tensor with shape
+          :math:`(N, C \times \prod(\text{kernel_size}), L)` .
+        - The output must be a 4-dimensional Tensor with shape
+          :math:`(N, C, output\_size[0], output\_size[1], ...)` .
 
     Args:
         input (Tensor): 3-D Tensor, supported dtypes: float16, float32, float64, complex64 and complex128.
@@ -6664,7 +6647,7 @@ def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
             for height and width. If type is int, it means that height equals width. Default: ``1`` .
 
     Returns:
-        A Tensor, with same type as `input`.
+        A Tensor, with the same type as `input` . Its shape is as described above.
 
     Raises:
         TypeError: If `kernel_size`, `dilation`, `padding`, `stride` data type is not int, tuple or list.
@@ -6687,16 +6670,15 @@ def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
         >>> print(output.shape)
         (16, 16, 8, 8)
     """
-    _check_fold_input(input)
     kernel_size = _check_fold_param(kernel_size, "kernel_size")
     dilation = _check_fold_param(dilation, "dilation")
     padding = _check_fold_param(padding, "padding")
     stride = _check_fold_param(stride, "stride")
     fold_op = _get_cache_prim(Col2Im)(kernel_size, dilation, padding, stride)
-    input_shape = F.shape(input)
+    input_shape = ops.shape(input)
     k = kernel_size[0] * kernel_size[-1]
     r_shape = input_shape[:1] + (-1, k) + input_shape[-1:]
-    input = F.reshape(input, r_shape)
+    input = ops.reshape(input, r_shape)
     return fold_op(input, output_size)
 
 
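The docstring's shape (16, 16, 8, 8) follows from the new formula: with kernel 2, stride 2 and no padding, each 8-pixel output dimension gives floor((8 - 1·(2-1) - 1)/2 + 1) = 4 blocks per axis, so L = 16 blocks of 16 × 2 × 2 = 64 values each. A sketch (assuming MindSpore 2.2.x, where `output_size` is passed as a 1-D int Tensor per the docstring):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.random.rand(16, 16 * 2 * 2, 16).astype(np.float32))  # (N, C*prod(k), L)
out = ops.fold(x, Tensor([8, 8], mindspore.int32), kernel_size=(2, 2), stride=2)
print(out.shape)  # (16, 16, 8, 8)
```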
@@ -6767,7 +6749,7 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
         A Tensor, with the same type as `input` . Its shape is as described above.
 
     Raises:
-        TypeError: If any data type of `kernel_size`, `stride`, `dilation`, `kernel_size` is not int, tuple or list.
+        TypeError: If any data type of `kernel_size`, `stride`, `dilation`, `padding` is not int, tuple or list.
         ValueError: If `kernel_size`, `dilation`, `stride` value is not
             greater than zero or elements number more than `2`.
         ValueError: If `padding` value is less than zero.
@@ -6793,9 +6775,9 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
                                           dilations=dilation,
                                           pads=padding)
     tmp = unfold_op(input)
-    tmp_shape = F.shape(tmp)
+    tmp_shape = ops.shape(tmp)
     out_shape = tmp_shape[:1] + (-1,) + tmp_shape[-1:]
-    out = F.reshape(tmp, out_shape)
+    out = ops.reshape(tmp, out_shape)
     return out
 
 
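`unfold` is the inverse extraction, and its output shape follows the same L formula as fold. A sketch (assuming MindSpore 2.2.x):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.random.rand(4, 3, 10, 10).astype(np.float32))
out = ops.unfold(x, kernel_size=3)  # L = (10 - 3 + 1) ** 2 = 64 blocks
print(out.shape)  # (4, 27, 64), i.e. (N, C * 3 * 3, L)
```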
@@ -6849,7 +6831,7 @@ def diagonal(input, offset=0, dim1=0, dim2=1):
     """
     x_ndim = input.ndim
     if x_ndim < 2:
-        raise ValueError(f"ops.diagonal requires an array of at least two dimensions")
+        raise ValueError(f"For 'ops.diagonal', the original tensor requires at least two dimensions, but got {x_ndim}")
     _check_attr_dtype("dim1", dim1, [int], "diagonal")
     _check_attr_dtype("dim2", dim2, [int], "diagonal")
     dtype = input.dtype
@@ -6865,36 +6847,36 @@ def diagonal(input, offset=0, dim1=0, dim2=1):
     x_shape = input.shape
     n, m = x_shape[-2:]
 
-    fill_op = _get_cache_prim(P.Fill)()
-    e = _get_cache_prim(P.Eye)()(n, m, dtype)
+    e = ops.eye(n, m, dtype)
     if offset >= m or offset <= -n:
-        e = fill_op(dtype, (n, m), 0)
-    elif offset != 0:
+        zero_shape = x_shape[:-2] + (0,)
+        return ops.zeros(zero_shape, dtype)
+    if offset != 0:
         e = e.astype(mstype.float32)
         if offset > 0:
-            e_left = fill_op(mstype.float32, (n, offset), 0)
+            e_left = ops.fill(mstype.float32, (n, offset), 0)
             e_right = e[..., 0:m - offset:1]
-            e = _get_cache_prim(P.Concat)(1)((e_left, e_right)).astype(dtype)
+            e = ops.cat((e_left, e_right), 1).astype(dtype)
         elif offset < 0:
-            e_upper = fill_op(mstype.float32, (-offset, m), 0)
+            e_upper = ops.fill(mstype.float32, (-offset, m), 0)
             e_lower = e[0:n + offset:1, ...]
-            e = _get_cache_prim(P.Concat)(0)((e_upper, e_lower)).astype(dtype)
-    e = F.broadcast_to(e, x_shape)
+            e = ops.cat((e_upper, e_lower), 0).astype(dtype)
+    e = ops.broadcast_to(e, x_shape)
 
-    prod_val = _get_cache_prim(P.Mul)()(input, e)
-    res = _get_cache_prim(P.ReduceSum)()(prod_val.astype(mstype.float32), -1)
+    prod_val = ops.mul(input, e)
+    res = ops.ReduceSum()(prod_val.astype(mstype.float32), -1)
 
     begin = ()
     for _ in ms_arrange(x_ndim - 2):
         begin += (0,)
-    last_dim_begin = np.max((0, -offset)).astype(np.int64)
+    last_dim_begin = builtins.max(0, -offset)
     begin += (last_dim_begin,)
     res_size = res.shape[:-1]
-    last_dim_end = np.min((x_shape[-2], np.max((0, (x_shape[-1] - offset))))) - last_dim_begin
+    last_dim_end = builtins.min(x_shape[-2], builtins.max(0, x_shape[-1] - offset)) - last_dim_begin
     if last_dim_end <= 0:
         return Tensor([])
     res_size += (last_dim_end,)
-    res = _get_cache_prim(P.Slice)()(res, begin, res_size)
+    res = ops.slice(res, begin, res_size)
     return res.astype(dtype)
 
 
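One visible behavior change in this rewrite: an `offset` outside `(-n, m)` now returns an empty tensor of shape `(..., 0)` up front instead of reducing a zero mask. In-band offsets are unchanged (a sketch, assuming MindSpore 2.2.x):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.arange(9).reshape(3, 3), mindspore.float32)
print(ops.diagonal(x))                  # [0. 4. 8.]
print(ops.diagonal(x, offset=1))        # [1. 5.]
print(ops.diagonal(x, offset=5).shape)  # (0,), out-of-band offset
```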
@@ -7205,7 +7187,7 @@ def _check_axis_valid(axis, ndim):
     to the built-in operator (non-negative, int or tuple).
     """
     if axis is None:
-        axis = F.make_range(ndim)
+        axis = ops.make_range(ndim)
         return axis
     if isinstance(axis, (tuple, list)):
         axis = tuple(map(lambda x: _check_check_axis_in_range(x, ndim), axis))
@@ -7279,7 +7261,7 @@ def movedim(x, source, destination):
     >>> print(output.shape)
     (4, 3, 5)
     """
-    ndim = F.rank(x)
+    ndim = ops.rank(x)
     source = _check_axis_valid(source, ndim)
     destination = _check_axis_valid(destination, ndim)
     if len(source) != len(destination):
@@ -7354,7 +7336,7 @@ def swapaxes(input, axis0, axis1):
     if axis0 > axis1:
         axis0, axis1 = axis1, axis0
 
-    perm = F.make_range(0, input.ndim)
+    perm = ops.make_range(0, input.ndim)
     if axis1 + 1 < input.ndim:
         new_perm = perm[0:axis0] + perm[axis1:axis1 + 1] + \
                    perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1] + perm[axis1 + 1:]
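Both functions only swapped the internal `F.` helpers for `ops.`; semantics are unchanged (a sketch, assuming MindSpore 2.2.x):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.zeros((3, 4, 5)), mindspore.float32)
print(ops.movedim(x, 0, -1).shape)  # (4, 5, 3)
print(ops.swapaxes(x, 0, 2).shape)  # (5, 4, 3)
```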
@@ -7395,7 +7377,7 @@ def swapdims(input, dim0, dim1):
     >>> print(output.shape)
     (4, 3, 2)
     '''
-    return F.swapaxes(input, dim0, dim1)
+    return ops.swapaxes(input, dim0, dim1)
 
 
 @constexpr
@@ -7406,7 +7388,7 @@ def _check_is_int(arg_value, arg_name, op_name):
 
 @_primexpr
 def _check_positive_int(arg_value, arg_name, op_name):
-    arg_value = validator.check_positive_int(arg_value, arg_name, op_name)
+    arg_value = validator.check_int_range(arg_value, 0, 2147483647, validator.INC_RIGHT, arg_name, op_name)
     return arg_value
 
 
@@ -7507,7 +7489,7 @@ def repeat_elements(x, rep, axis=0):
         [[0 0 1 1 2 2]
          [3 3 4 4 5 5]]
     """
-    const_utils.check_type_valid(F.dtype(x), mstype.number_type, 'input x')
+    const_utils.check_type_valid(ops.dtype(x), mstype.number_type, 'input x')
     rep = _check_positive_int(rep, "rep", "repeat_elements")
     axis = _check_is_int(axis, "axis", "repeat_elements")
     shape_op = P.Shape()
@@ -7599,7 +7581,7 @@ def sequence_mask(lengths, maxlen=None):
     to_tensor_op = P.ScalarToTensor()
     shape_op = P.Shape()
 
-    const_utils.check_type_valid(F.dtype(lengths), [mstype.int64, mstype.int32], 'lengths')
+    const_utils.check_type_valid(ops.dtype(lengths), [mstype.int64, mstype.int32], 'lengths')
     _check_sequence_mask_input_len(shape_op(lengths), "sequence_mask")
 
     if maxlen is None:
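Same `F.` to `ops.` substitution here; for reference, the masking semantics (a sketch, assuming MindSpore 2.2.x):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

lengths = Tensor(np.array([1, 3, 2]), mindspore.int32)
print(ops.sequence_mask(lengths, maxlen=4))
# [[ True False False False]
#  [ True  True  True False]
#  [ True  True False False]]
```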
@@ -7662,7 +7644,6 @@ __all__ = [
     'matrix_band_part',
     'padding',
     'fill',
-    'fill_',
     'fills',
     'tile',
     'size',