mindspore-2.0.0rc1-cp38-cp38-manylinux1_x86_64.whl → mindspore-2.2.0-cp38-cp38-manylinux1_x86_64.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (884)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Third_Party_Open_Source_Software_Notice +2 -2
  3. mindspore/__init__.py +5 -2
  4. mindspore/_akg/akg/build_module.py +5 -6
  5. mindspore/_akg/akg/composite/build_module.py +49 -16
  6. mindspore/_akg/akg/composite/split_stitch.py +10 -11
  7. mindspore/_akg/akg/config/repository.json +195 -0
  8. mindspore/_akg/akg/global_configs.py +5 -1
  9. mindspore/_akg/akg/ms/info_version_adapt.py +67 -1
  10. mindspore/_akg/akg/tvm/api.py +4 -3
  11. mindspore/_akg/akg/tvm/autotvm/__init__.py +1 -2
  12. mindspore/_akg/akg/tvm/autotvm/graph_tuner/base_graph_tuner.py +1 -5
  13. mindspore/_akg/akg/tvm/autotvm/measure/__init__.py +1 -1
  14. mindspore/_akg/akg/tvm/autotvm/measure/measure.py +1 -10
  15. mindspore/_akg/akg/tvm/autotvm/measure/measure_methods.py +1 -372
  16. mindspore/_akg/akg/tvm/build_module.py +16 -1
  17. mindspore/_akg/akg/tvm/contrib/graph_runtime.py +0 -53
  18. mindspore/_akg/akg/tvm/hybrid/parser.py +7 -6
  19. mindspore/_akg/akg/tvm/ir_builder.py +1 -1
  20. mindspore/_akg/akg/tvm/module.py +1 -2
  21. mindspore/_akg/akg/tvm/stmt.py +2 -2
  22. mindspore/_akg/akg/utils/composite_op_helper.py +9 -10
  23. mindspore/_akg/akg/utils/kernel_exec.py +58 -260
  24. mindspore/_akg/akg/utils/op_dsl.py +17 -1
  25. mindspore/_akg/akg/utils/result_analysis.py +4 -24
  26. mindspore/_akg/akg/utils/tbe_codegen_utils.py +198 -0
  27. mindspore/_c_dataengine.cpython-38-x86_64-linux-gnu.so +0 -0
  28. mindspore/_c_expression.cpython-38-x86_64-linux-gnu.so +0 -0
  29. mindspore/_c_mindrecord.cpython-38-x86_64-linux-gnu.so +0 -0
  30. mindspore/_check_jit_forbidden_api.py +5 -1
  31. mindspore/_checkparam.py +79 -62
  32. mindspore/_extends/graph_kernel/__init__.py +0 -1
  33. mindspore/_extends/graph_kernel/model/graph_split.py +2 -0
  34. mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
  35. mindspore/_extends/graph_kernel/splitter.py +1 -9
  36. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +128 -21
  37. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +2 -2
  38. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
  39. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +18 -13
  40. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +13 -9
  41. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
  42. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
  43. mindspore/_extends/parse/__init__.py +19 -17
  44. mindspore/_extends/parse/namespace.py +7 -36
  45. mindspore/_extends/parse/parser.py +375 -189
  46. mindspore/_extends/parse/resources.py +36 -41
  47. mindspore/_extends/parse/standard_method.py +350 -245
  48. mindspore/_extends/parse/trope.py +2 -12
  49. mindspore/_extends/remote/kernel_build_server.py +24 -7
  50. mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
  51. mindspore/_install_custom.py +43 -0
  52. mindspore/_mindspore_offline_debug.cpython-38-x86_64-linux-gnu.so +0 -0
  53. mindspore/amp.py +85 -19
  54. mindspore/bin/cache_admin +0 -0
  55. mindspore/bin/cache_server +0 -0
  56. mindspore/boost/base.py +2 -2
  57. mindspore/boost/boost.py +27 -32
  58. mindspore/boost/boost_cell_wrapper.py +37 -13
  59. mindspore/boost/grad_accumulation.py +1 -1
  60. mindspore/boost/grad_freeze.py +34 -6
  61. mindspore/boost/group_loss_scale_manager.py +15 -14
  62. mindspore/boost/less_batch_normalization.py +28 -3
  63. mindspore/common/__init__.py +15 -11
  64. mindspore/common/_auto_dynamic.py +68 -0
  65. mindspore/common/_jit_fallback_utils.py +111 -0
  66. mindspore/common/_register_for_adapter.py +17 -5
  67. mindspore/common/_register_for_tensor.py +2 -2
  68. mindspore/common/_stub_tensor.py +18 -15
  69. mindspore/common/_utils.py +31 -7
  70. mindspore/common/api.py +269 -101
  71. mindspore/common/auto_dynamic_shape.py +498 -0
  72. mindspore/common/dtype.py +61 -21
  73. mindspore/common/dump.py +9 -7
  74. mindspore/common/initializer.py +106 -76
  75. mindspore/common/jit_config.py +35 -14
  76. mindspore/common/lazy_inline.py +187 -0
  77. mindspore/common/mindir_util.py +101 -0
  78. mindspore/common/mutable.py +10 -13
  79. mindspore/common/parameter.py +246 -55
  80. mindspore/common/seed.py +13 -7
  81. mindspore/common/sparse_tensor.py +29 -33
  82. mindspore/common/tensor.py +907 -251
  83. mindspore/communication/__init__.py +7 -4
  84. mindspore/communication/_comm_helper.py +84 -4
  85. mindspore/communication/management.py +160 -88
  86. mindspore/config/op_info.config +99 -75
  87. mindspore/config/super_bar_config.json +36 -4
  88. mindspore/context.py +526 -219
  89. mindspore/dataset/__init__.py +9 -46
  90. mindspore/dataset/audio/__init__.py +4 -19
  91. mindspore/dataset/audio/transforms.py +545 -233
  92. mindspore/dataset/audio/utils.py +21 -18
  93. mindspore/dataset/callback/ds_callback.py +42 -13
  94. mindspore/dataset/core/config.py +158 -100
  95. mindspore/dataset/core/validator_helpers.py +1 -63
  96. mindspore/dataset/debug/debug_hook.py +45 -13
  97. mindspore/dataset/debug/pre_defined_hook.py +5 -5
  98. mindspore/dataset/engine/__init__.py +0 -5
  99. mindspore/dataset/engine/cache_client.py +38 -15
  100. mindspore/dataset/engine/datasets.py +615 -278
  101. mindspore/dataset/engine/datasets_audio.py +154 -283
  102. mindspore/dataset/engine/datasets_standard_format.py +104 -116
  103. mindspore/dataset/engine/datasets_text.py +443 -326
  104. mindspore/dataset/engine/datasets_user_defined.py +251 -164
  105. mindspore/dataset/engine/datasets_vision.py +839 -1443
  106. mindspore/dataset/engine/iterators.py +11 -4
  107. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +7 -3
  108. mindspore/dataset/engine/obs/util.py +3 -0
  109. mindspore/dataset/engine/offload.py +6 -6
  110. mindspore/dataset/engine/queue.py +15 -14
  111. mindspore/dataset/engine/samplers.py +39 -23
  112. mindspore/dataset/engine/serializer_deserializer.py +22 -6
  113. mindspore/dataset/engine/validators.py +21 -331
  114. mindspore/dataset/text/__init__.py +5 -33
  115. mindspore/dataset/text/transforms.py +334 -165
  116. mindspore/dataset/text/utils.py +215 -145
  117. mindspore/dataset/transforms/__init__.py +1 -1
  118. mindspore/dataset/transforms/c_transforms.py +3 -2
  119. mindspore/dataset/transforms/py_transforms_util.py +40 -12
  120. mindspore/dataset/transforms/transforms.py +174 -71
  121. mindspore/dataset/utils/browse_dataset.py +25 -17
  122. mindspore/dataset/utils/line_reader.py +24 -21
  123. mindspore/dataset/vision/__init__.py +5 -26
  124. mindspore/dataset/vision/c_transforms.py +177 -165
  125. mindspore/dataset/vision/py_transforms.py +114 -119
  126. mindspore/dataset/vision/py_transforms_util.py +54 -51
  127. mindspore/dataset/vision/transforms.py +1127 -381
  128. mindspore/dataset/vision/utils.py +54 -38
  129. mindspore/dataset/vision/validators.py +12 -2
  130. mindspore/experimental/map_parameter.py +38 -4
  131. mindspore/{dataset/datapreprocess → experimental/optim}/__init__.py +14 -4
  132. mindspore/experimental/optim/adam.py +192 -0
  133. mindspore/experimental/optim/adamw.py +181 -0
  134. mindspore/experimental/optim/lr_scheduler.py +1427 -0
  135. mindspore/experimental/optim/optimizer.py +252 -0
  136. mindspore/experimental/optim/sgd.py +147 -0
  137. mindspore/gen_ops.py +273 -0
  138. mindspore/include/OWNERS +1 -2
  139. mindspore/include/api/context.h +21 -1
  140. mindspore/include/api/data_type.h +2 -1
  141. mindspore/include/api/graph.h +0 -15
  142. mindspore/include/api/kernel.h +2 -0
  143. mindspore/include/api/kernel_api.h +37 -12
  144. mindspore/include/api/model.h +29 -42
  145. mindspore/include/api/model_group.h +14 -3
  146. mindspore/include/api/model_parallel_runner.h +18 -2
  147. mindspore/include/api/serialization.h +26 -0
  148. mindspore/include/api/status.h +1 -0
  149. mindspore/include/api/types.h +38 -4
  150. mindspore/include/c_api/ms/abstract.h +67 -0
  151. mindspore/include/c_api/ms/attribute.h +197 -0
  152. mindspore/include/c_api/ms/base/handle_types.h +43 -0
  153. mindspore/include/c_api/ms/base/macros.h +32 -0
  154. mindspore/include/c_api/ms/base/status.h +33 -0
  155. mindspore/include/c_api/ms/base/types.h +282 -0
  156. mindspore/include/c_api/ms/context.h +102 -0
  157. mindspore/include/c_api/ms/graph.h +160 -0
  158. mindspore/include/c_api/ms/node.h +606 -0
  159. mindspore/include/c_api/ms/tensor.h +161 -0
  160. mindspore/include/c_api/ms/value.h +84 -0
  161. mindspore/include/c_api/status_c.h +3 -0
  162. mindspore/include/dataset/constants.h +6 -12
  163. mindspore/include/dataset/execute.h +23 -13
  164. mindspore/include/dataset/text.h +26 -26
  165. mindspore/include/dataset/transforms.h +25 -31
  166. mindspore/include/dataset/vision.h +60 -60
  167. mindspore/include/dataset/vision_ascend.h +5 -6
  168. mindspore/include/dataset/vision_lite.h +17 -17
  169. mindspore/include/mindapi/base/format.h +0 -1
  170. mindspore/include/mindapi/base/type_id.h +2 -1
  171. mindspore/include/mindapi/base/types.h +5 -1
  172. mindspore/lib/libdnnl.so.2 +0 -0
  173. mindspore/lib/libjemalloc.so.2 +0 -0
  174. mindspore/lib/libmindspore.so +0 -0
  175. mindspore/lib/libmindspore_backend.so +0 -0
  176. mindspore/lib/libmindspore_common.so +0 -0
  177. mindspore/lib/libmindspore_core.so +0 -0
  178. mindspore/lib/libmindspore_glog.so.0 +0 -0
  179. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  180. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  181. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  182. mindspore/lib/libmindspore_shared_lib.so +0 -0
  183. mindspore/lib/libmpi_adapter.so +0 -0
  184. mindspore/lib/libnnacl.so +0 -0
  185. mindspore/lib/libopencv_core.so.4.5 +0 -0
  186. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  187. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  188. mindspore/lib/libps_cache.so +0 -0
  189. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
  190. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  191. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +9000 -0
  192. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  193. mindspore/lib/plugin/ascend/libakg.so +0 -0
  194. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  195. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  196. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  197. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  198. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  199. mindspore/lib/plugin/cpu/libakg.so +0 -0
  200. mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
  201. mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
  202. mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
  203. mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
  204. mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
  205. mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
  206. mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
  207. mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
  208. mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
  209. mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
  210. mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
  211. mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
  212. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  213. mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
  214. mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
  215. mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
  216. mindspore/log.py +9 -6
  217. mindspore/mindrecord/filereader.py +33 -4
  218. mindspore/mindrecord/filewriter.py +70 -35
  219. mindspore/mindrecord/mindpage.py +40 -34
  220. mindspore/mindrecord/shardreader.py +1 -1
  221. mindspore/mindrecord/shardsegment.py +1 -1
  222. mindspore/mindrecord/tools/cifar100_to_mr.py +25 -18
  223. mindspore/mindrecord/tools/cifar10_to_mr.py +25 -18
  224. mindspore/mindrecord/tools/csv_to_mr.py +29 -13
  225. mindspore/mindrecord/tools/imagenet_to_mr.py +24 -10
  226. mindspore/mindrecord/tools/mnist_to_mr.py +24 -11
  227. mindspore/mindrecord/tools/tfrecord_to_mr.py +31 -26
  228. mindspore/nn/cell.py +463 -169
  229. mindspore/nn/dynamic_lr.py +47 -43
  230. mindspore/nn/layer/activation.py +225 -82
  231. mindspore/nn/layer/basic.py +121 -79
  232. mindspore/nn/layer/channel_shuffle.py +21 -21
  233. mindspore/nn/layer/combined.py +33 -26
  234. mindspore/nn/layer/container.py +277 -22
  235. mindspore/nn/layer/conv.py +441 -304
  236. mindspore/nn/layer/dense.py +19 -13
  237. mindspore/nn/layer/embedding.py +62 -49
  238. mindspore/nn/layer/flash_attention.py +264 -0
  239. mindspore/nn/layer/image.py +50 -39
  240. mindspore/nn/layer/math.py +62 -51
  241. mindspore/nn/layer/normalization.py +219 -167
  242. mindspore/nn/layer/padding.py +58 -70
  243. mindspore/nn/layer/pooling.py +334 -287
  244. mindspore/nn/layer/rnn_cells.py +53 -38
  245. mindspore/nn/layer/rnns.py +59 -56
  246. mindspore/nn/layer/thor_layer.py +52 -44
  247. mindspore/nn/layer/timedistributed.py +6 -4
  248. mindspore/nn/layer/transformer.py +284 -164
  249. mindspore/nn/learning_rate_schedule.py +34 -25
  250. mindspore/nn/loss/__init__.py +3 -2
  251. mindspore/nn/loss/loss.py +554 -311
  252. mindspore/nn/optim/ada_grad.py +12 -9
  253. mindspore/nn/optim/adadelta.py +14 -11
  254. mindspore/nn/optim/adafactor.py +19 -16
  255. mindspore/nn/optim/adam.py +62 -47
  256. mindspore/nn/optim/adamax.py +13 -10
  257. mindspore/nn/optim/adasum.py +12 -8
  258. mindspore/nn/optim/asgd.py +10 -9
  259. mindspore/nn/optim/ftrl.py +20 -17
  260. mindspore/nn/optim/lamb.py +16 -12
  261. mindspore/nn/optim/lars.py +8 -6
  262. mindspore/nn/optim/lazyadam.py +25 -20
  263. mindspore/nn/optim/momentum.py +10 -7
  264. mindspore/nn/optim/optimizer.py +61 -9
  265. mindspore/nn/optim/proximal_ada_grad.py +14 -13
  266. mindspore/nn/optim/rmsprop.py +17 -13
  267. mindspore/nn/optim/rprop.py +30 -17
  268. mindspore/nn/optim/sgd.py +40 -23
  269. mindspore/nn/optim/thor.py +24 -26
  270. mindspore/nn/probability/bijector/bijector.py +11 -11
  271. mindspore/nn/probability/bijector/exp.py +1 -1
  272. mindspore/nn/probability/bijector/gumbel_cdf.py +3 -3
  273. mindspore/nn/probability/bijector/invert.py +1 -1
  274. mindspore/nn/probability/bijector/power_transform.py +29 -29
  275. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  276. mindspore/nn/probability/bijector/softplus.py +5 -5
  277. mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +4 -2
  278. mindspore/nn/probability/bnn_layers/conv_variational.py +13 -13
  279. mindspore/nn/probability/bnn_layers/dense_variational.py +12 -12
  280. mindspore/nn/probability/bnn_layers/layer_distribution.py +9 -8
  281. mindspore/nn/probability/distribution/_utils/custom_ops.py +19 -3
  282. mindspore/nn/probability/distribution/_utils/utils.py +1 -1
  283. mindspore/nn/probability/distribution/bernoulli.py +9 -9
  284. mindspore/nn/probability/distribution/beta.py +8 -8
  285. mindspore/nn/probability/distribution/categorical.py +23 -15
  286. mindspore/nn/probability/distribution/cauchy.py +5 -6
  287. mindspore/nn/probability/distribution/distribution.py +3 -3
  288. mindspore/nn/probability/distribution/exponential.py +4 -4
  289. mindspore/nn/probability/distribution/gamma.py +10 -10
  290. mindspore/nn/probability/distribution/geometric.py +8 -8
  291. mindspore/nn/probability/distribution/gumbel.py +8 -9
  292. mindspore/nn/probability/distribution/half_normal.py +5 -5
  293. mindspore/nn/probability/distribution/laplace.py +5 -5
  294. mindspore/nn/probability/distribution/log_normal.py +12 -11
  295. mindspore/nn/probability/distribution/logistic.py +8 -8
  296. mindspore/nn/probability/distribution/normal.py +6 -5
  297. mindspore/nn/probability/distribution/poisson.py +10 -11
  298. mindspore/nn/probability/distribution/student_t.py +8 -9
  299. mindspore/nn/probability/distribution/transformed_distribution.py +5 -5
  300. mindspore/nn/probability/distribution/uniform.py +11 -11
  301. mindspore/nn/reinforcement/tensor_array.py +2 -2
  302. mindspore/nn/sparse/sparse.py +9 -9
  303. mindspore/nn/wrap/cell_wrapper.py +188 -63
  304. mindspore/nn/wrap/grad_reducer.py +21 -12
  305. mindspore/nn/wrap/loss_scale.py +136 -49
  306. mindspore/numpy/__init__.py +4 -4
  307. mindspore/numpy/array_creations.py +55 -56
  308. mindspore/numpy/array_ops.py +134 -35
  309. mindspore/numpy/logic_ops.py +66 -20
  310. mindspore/numpy/math_ops.py +142 -139
  311. mindspore/numpy/utils_const.py +2 -2
  312. mindspore/offline_debug/convert_async.py +2 -2
  313. mindspore/ops/_grad_experimental/__init__.py +7 -5
  314. mindspore/ops/_grad_experimental/grad_array_ops.py +231 -348
  315. mindspore/ops/{_grad → _grad_experimental}/grad_base.py +1 -33
  316. mindspore/ops/{_grad → _grad_experimental}/grad_comm_ops.py +25 -13
  317. mindspore/ops/{_grad/__init__.py → _grad_experimental/grad_debug_ops.py} +15 -7
  318. mindspore/ops/{_grad → _grad_experimental}/grad_implementations.py +17 -11
  319. mindspore/ops/_grad_experimental/grad_inner_ops.py +33 -52
  320. mindspore/ops/_grad_experimental/grad_math_ops.py +151 -1224
  321. mindspore/ops/_grad_experimental/grad_nn_ops.py +141 -414
  322. mindspore/ops/{_grad → _grad_experimental}/grad_quant_ops.py +10 -6
  323. mindspore/ops/_grad_experimental/grad_sparse.py +317 -2
  324. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -13
  325. mindspore/ops/{_grad → _grad_experimental}/taylor_rule.py +1 -1
  326. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
  327. mindspore/ops/_op_impl/_custom_op/flash_attention/__init__.py +0 -0
  328. mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +406 -0
  329. mindspore/{_extends/graph_kernel/expanders/complex/__init__.py → ops/_op_impl/_custom_op/flash_attention/constants.py} +27 -8
  330. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +467 -0
  331. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +563 -0
  332. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +193 -0
  333. mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +435 -0
  334. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
  335. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +45 -0
  336. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +67 -0
  337. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +62 -0
  338. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
  339. mindspore/ops/_op_impl/aicpu/__init__.py +41 -1
  340. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
  341. mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
  342. mindspore/ops/_op_impl/aicpu/cast.py +52 -0
  343. mindspore/ops/_op_impl/aicpu/coalesce.py +2 -0
  344. mindspore/ops/_op_impl/aicpu/col2im.py +3 -1
  345. mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
  346. mindspore/ops/_op_impl/aicpu/dropout_genmask.py +6 -0
  347. mindspore/ops/_op_impl/aicpu/eps.py +32 -0
  348. mindspore/ops/_op_impl/aicpu/eye.py +4 -4
  349. mindspore/ops/_op_impl/aicpu/fft_with_size.py +6 -0
  350. mindspore/ops/_op_impl/aicpu/fill_diagonal.py +5 -0
  351. mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
  352. mindspore/ops/_op_impl/aicpu/im2col.py +3 -5
  353. mindspore/ops/_op_impl/aicpu/lgamma.py +1 -0
  354. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
  355. mindspore/ops/_op_impl/aicpu/lu.py +39 -0
  356. mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
  357. mindspore/ops/_op_impl/aicpu/masked_scatter.py +1 -0
  358. mindspore/ops/_op_impl/aicpu/masked_select_grad.py +3 -0
  359. mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
  360. mindspore/ops/_op_impl/aicpu/matrix_power.py +6 -1
  361. mindspore/ops/_op_impl/aicpu/median.py +1 -0
  362. mindspore/ops/_op_impl/aicpu/multinomial.py +9 -9
  363. mindspore/ops/_op_impl/aicpu/not_equal.py +0 -5
  364. mindspore/ops/_op_impl/aicpu/pad_v3.py +3 -1
  365. mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +2 -0
  366. mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
  367. mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
  368. mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
  369. mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
  370. mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
  371. mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +0 -1
  372. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +0 -6
  373. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +0 -7
  374. mindspore/ops/_op_impl/aicpu/scatter_nd.py +2 -0
  375. mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
  376. mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
  377. mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
  378. mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
  379. mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -4
  380. mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -4
  381. mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
  382. mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
  383. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
  384. mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
  385. mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
  386. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +14 -6
  387. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +22 -8
  388. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +11 -6
  389. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +21 -10
  390. mindspore/ops/_op_impl/tbe/__init__.py +6 -4
  391. mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
  392. mindspore/ops/_op_impl/tbe/avg_pool.py +2 -2
  393. mindspore/ops/_op_impl/tbe/avg_pool_3d.py +3 -3
  394. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +4 -4
  395. mindspore/ops/_op_impl/tbe/avg_pool_ds.py +2 -2
  396. mindspore/ops/_op_impl/tbe/avg_pool_grad.py +3 -3
  397. mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +3 -3
  398. mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
  399. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +2 -2
  400. mindspore/ops/_op_impl/tbe/bn_infer.py +2 -2
  401. mindspore/ops/_op_impl/tbe/bn_infer_ds.py +3 -2
  402. mindspore/ops/_op_impl/tbe/broadcast_to.py +1 -1
  403. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +3 -3
  404. mindspore/ops/_op_impl/tbe/expand_dims.py +1 -1
  405. mindspore/ops/_op_impl/tbe/gather_v2.py +56 -0
  406. mindspore/ops/_op_impl/tbe/im2col.py +4 -4
  407. mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
  408. mindspore/ops/_op_impl/tbe/mem_set.py +38 -0
  409. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +3 -0
  410. mindspore/ops/_op_impl/tbe/scatter_nd_d.py +1 -1
  411. mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
  412. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +2 -2
  413. mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
  414. mindspore/ops/_primitive_cache.py +1 -1
  415. mindspore/ops/_tracefunc.py +241 -0
  416. mindspore/ops/_utils/utils.py +10 -2
  417. mindspore/ops/_vmap/vmap_array_ops.py +5 -3
  418. mindspore/ops/_vmap/vmap_base.py +5 -4
  419. mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
  420. mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
  421. mindspore/ops/_vmap/vmap_grad_nn_ops.py +11 -6
  422. mindspore/ops/_vmap/vmap_math_ops.py +5 -2
  423. mindspore/ops/_vmap/vmap_nn_ops.py +135 -11
  424. mindspore/ops/arg_dtype_cast.py +54 -0
  425. mindspore/ops/composite/__init__.py +7 -5
  426. mindspore/ops/composite/base.py +78 -34
  427. mindspore/ops/composite/math_ops.py +5 -695
  428. mindspore/ops/composite/multitype_ops/_compile_utils.py +403 -97
  429. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +28 -22
  430. mindspore/ops/composite/multitype_ops/add_impl.py +69 -7
  431. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  432. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  433. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -0
  434. mindspore/ops/composite/multitype_ops/div_impl.py +1 -0
  435. mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -0
  436. mindspore/ops/composite/multitype_ops/getitem_impl.py +48 -10
  437. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +2 -0
  438. mindspore/ops/composite/multitype_ops/greater_impl.py +2 -0
  439. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -0
  440. mindspore/ops/composite/multitype_ops/less_equal_impl.py +2 -0
  441. mindspore/ops/composite/multitype_ops/less_impl.py +2 -0
  442. mindspore/ops/composite/multitype_ops/logic_not_impl.py +2 -2
  443. mindspore/ops/composite/multitype_ops/mod_impl.py +1 -0
  444. mindspore/ops/composite/multitype_ops/mul_impl.py +1 -0
  445. mindspore/ops/composite/multitype_ops/negative_impl.py +1 -0
  446. mindspore/ops/composite/multitype_ops/not_in_impl.py +1 -0
  447. mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
  448. mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
  449. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -0
  450. mindspore/ops/composite/multitype_ops/setitem_impl.py +10 -7
  451. mindspore/ops/composite/multitype_ops/sub_impl.py +1 -0
  452. mindspore/ops/composite/multitype_ops/uadd_impl.py +2 -0
  453. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
  454. mindspore/ops/deprecated.py +304 -0
  455. mindspore/ops/function/__init__.py +41 -4
  456. mindspore/ops/function/array_func.py +1108 -467
  457. mindspore/ops/function/clip_func.py +94 -27
  458. mindspore/ops/function/debug_func.py +3 -1
  459. mindspore/ops/function/grad/grad_func.py +82 -73
  460. mindspore/ops/function/image_func.py +28 -12
  461. mindspore/ops/function/linalg_func.py +135 -39
  462. mindspore/ops/function/math_func.py +3779 -894
  463. mindspore/ops/function/nn_func.py +1584 -657
  464. mindspore/ops/function/parameter_func.py +13 -3
  465. mindspore/ops/function/random_func.py +247 -153
  466. mindspore/ops/function/sparse_func.py +14 -11
  467. mindspore/ops/function/sparse_unary_func.py +173 -47
  468. mindspore/ops/function/spectral_func.py +8 -4
  469. mindspore/ops/function/vmap_func.py +8 -7
  470. mindspore/ops/functional.py +47 -16
  471. mindspore/ops/op_info_register.py +346 -86
  472. mindspore/ops/operations/__init__.py +38 -22
  473. mindspore/ops/operations/_grad_ops.py +145 -149
  474. mindspore/ops/operations/_inner_ops.py +298 -56
  475. mindspore/ops/operations/_ms_kernel.py +3 -3
  476. mindspore/ops/operations/_quant_ops.py +24 -28
  477. mindspore/ops/operations/_rl_inner_ops.py +9 -7
  478. mindspore/ops/operations/_scalar_ops.py +115 -0
  479. mindspore/ops/operations/_sequence_ops.py +148 -10
  480. mindspore/ops/operations/_tensor_array.py +1 -1
  481. mindspore/ops/operations/_thor_ops.py +2 -2
  482. mindspore/ops/operations/array_ops.py +1239 -561
  483. mindspore/ops/operations/comm_ops.py +166 -90
  484. mindspore/ops/operations/control_ops.py +3 -3
  485. mindspore/ops/operations/custom_ops.py +124 -102
  486. mindspore/ops/operations/debug_ops.py +24 -11
  487. mindspore/ops/operations/image_ops.py +86 -71
  488. mindspore/ops/operations/inner_ops.py +18 -13
  489. mindspore/ops/operations/linalg_ops.py +30 -11
  490. mindspore/ops/operations/math_ops.py +1730 -435
  491. mindspore/ops/operations/nn_ops.py +1953 -943
  492. mindspore/ops/operations/other_ops.py +65 -43
  493. mindspore/ops/operations/random_ops.py +258 -98
  494. mindspore/ops/operations/rl_ops.py +4 -36
  495. mindspore/ops/operations/sparse_ops.py +38 -33
  496. mindspore/ops/operations/spectral_ops.py +8 -4
  497. mindspore/ops/primitive.py +66 -44
  498. mindspore/ops/signature.py +5 -5
  499. mindspore/parallel/_auto_parallel_context.py +80 -19
  500. mindspore/parallel/_cost_model_context.py +42 -0
  501. mindspore/parallel/_offload_context.py +162 -72
  502. mindspore/parallel/_parallel_serialization.py +2 -2
  503. mindspore/parallel/_ps_context.py +16 -4
  504. mindspore/parallel/_recovery_context.py +2 -1
  505. mindspore/parallel/_tensor.py +15 -13
  506. mindspore/parallel/_transformer/layers.py +8 -6
  507. mindspore/parallel/_transformer/loss.py +1 -0
  508. mindspore/parallel/_transformer/moe.py +7 -7
  509. mindspore/parallel/_transformer/op_parallel_config.py +12 -1
  510. mindspore/parallel/_transformer/transformer.py +34 -14
  511. mindspore/parallel/_utils.py +36 -14
  512. mindspore/parallel/algo_parameter_config.py +114 -20
  513. mindspore/parallel/checkpoint_transform.py +16 -18
  514. mindspore/parallel/shard.py +16 -13
  515. mindspore/profiler/__init__.py +1 -1
  516. mindspore/profiler/common/struct_type.py +3 -3
  517. mindspore/profiler/common/util.py +3 -2
  518. mindspore/profiler/envprofiling.py +11 -4
  519. mindspore/profiler/parser/aicpu_data_parser.py +5 -3
  520. mindspore/profiler/parser/ascend_flops_generator.py +94 -0
  521. mindspore/profiler/parser/ascend_fpbp_generator.py +76 -0
  522. mindspore/profiler/parser/ascend_hccl_generator.py +288 -0
  523. mindspore/profiler/parser/ascend_msprof_exporter.py +213 -0
  524. mindspore/profiler/parser/ascend_msprof_generator.py +199 -0
  525. mindspore/profiler/parser/ascend_op_generator.py +276 -0
  526. mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
  527. mindspore/profiler/parser/ascend_timeline_generator.py +110 -54
  528. mindspore/profiler/parser/base_timeline_generator.py +11 -7
  529. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +45 -46
  530. mindspore/profiler/parser/flops_parser.py +15 -11
  531. mindspore/profiler/parser/framework_parser.py +92 -73
  532. mindspore/profiler/parser/hccl_parser.py +16 -12
  533. mindspore/profiler/parser/integrator.py +22 -11
  534. mindspore/profiler/parser/memory_usage_parser.py +36 -11
  535. mindspore/profiler/parser/minddata_analyzer.py +12 -14
  536. mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
  537. mindspore/profiler/parser/msadvisor_parser.py +8 -4
  538. mindspore/profiler/parser/op_intermediate_parser.py +5 -2
  539. mindspore/profiler/parser/optime_parser.py +1 -1
  540. mindspore/profiler/parser/profiler_info.py +4 -5
  541. mindspore/profiler/parser/step_trace_parser.py +11 -14
  542. mindspore/profiler/profiling.py +678 -377
  543. mindspore/rewrite/api/node.py +211 -54
  544. mindspore/rewrite/api/node_type.py +5 -0
  545. mindspore/rewrite/api/pattern_engine.py +22 -23
  546. mindspore/rewrite/api/scoped_value.py +20 -17
  547. mindspore/rewrite/api/symbol_tree.py +252 -106
  548. mindspore/rewrite/api/tree_node_helper.py +3 -0
  549. mindspore/rewrite/ast_helpers/__init__.py +2 -1
  550. mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
  551. mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
  552. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +97 -46
  553. mindspore/rewrite/common/rewrite_elog.py +5 -1
  554. mindspore/rewrite/namer.py +51 -51
  555. mindspore/rewrite/namespace.py +14 -5
  556. mindspore/{ops/bprop_mindir → rewrite/node}/__init__.py +9 -4
  557. mindspore/rewrite/node/call_function.py +79 -0
  558. mindspore/rewrite/node/cell_container.py +135 -0
  559. mindspore/rewrite/node/control_flow.py +88 -0
  560. mindspore/rewrite/{node.py → node/node.py} +313 -247
  561. mindspore/rewrite/node/node_manager.py +254 -0
  562. mindspore/rewrite/node/node_topological_manager.py +243 -0
  563. mindspore/rewrite/parsers/arguments_parser.py +22 -21
  564. mindspore/rewrite/parsers/assign_parser.py +225 -239
  565. mindspore/rewrite/parsers/attribute_parser.py +9 -7
  566. mindspore/rewrite/parsers/class_def_parser.py +179 -218
  567. mindspore/rewrite/parsers/constant_parser.py +9 -6
  568. mindspore/rewrite/parsers/container_parser.py +9 -7
  569. mindspore/rewrite/parsers/for_parser.py +36 -15
  570. mindspore/rewrite/parsers/function_def_parser.py +23 -20
  571. mindspore/rewrite/parsers/if_parser.py +28 -24
  572. mindspore/rewrite/parsers/module_parser.py +202 -25
  573. mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
  574. mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
  575. mindspore/rewrite/parsers/return_parser.py +6 -6
  576. mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
  577. mindspore/rewrite/sparsify/sparsify.py +4 -1
  578. mindspore/rewrite/sparsify/utils.py +11 -5
  579. mindspore/rewrite/symbol_tree.py +577 -732
  580. mindspore/rewrite/symbol_tree_builder.py +9 -175
  581. mindspore/rewrite/symbol_tree_dumper.py +2 -2
  582. mindspore/run_check/_check_version.py +46 -39
  583. mindspore/run_check/run_check.py +3 -2
  584. mindspore/{scipy/sparse → safeguard}/__init__.py +4 -5
  585. mindspore/safeguard/rewrite_obfuscation.py +517 -0
  586. mindspore/scipy/__init__.py +1 -1
  587. mindspore/scipy/linalg.py +67 -61
  588. mindspore/scipy/ops.py +5 -41
  589. mindspore/scipy/ops_grad.py +3 -2
  590. mindspore/scipy/ops_wrapper.py +5 -5
  591. mindspore/scipy/optimize/line_search.py +8 -8
  592. mindspore/scipy/optimize/linear_sum_assignment.py +4 -4
  593. mindspore/scipy/optimize/minimize.py +16 -12
  594. mindspore/scipy/utils.py +1 -52
  595. mindspore/scipy/utils_const.py +4 -4
  596. mindspore/train/__init__.py +4 -4
  597. mindspore/train/_utils.py +13 -5
  598. mindspore/train/amp.py +410 -148
  599. mindspore/train/anf_ir_pb2.py +16 -4
  600. mindspore/train/callback/_backup_and_restore.py +8 -11
  601. mindspore/train/callback/_callback.py +80 -3
  602. mindspore/train/callback/_checkpoint.py +82 -51
  603. mindspore/train/callback/_early_stop.py +12 -15
  604. mindspore/train/callback/_history.py +1 -1
  605. mindspore/train/callback/_lambda_callback.py +13 -13
  606. mindspore/train/callback/_landscape.py +21 -17
  607. mindspore/train/callback/_loss_monitor.py +9 -10
  608. mindspore/train/callback/_on_request_exit.py +16 -33
  609. mindspore/train/callback/_reduce_lr_on_plateau.py +21 -24
  610. mindspore/train/callback/_summary_collector.py +44 -30
  611. mindspore/train/callback/_time_monitor.py +62 -12
  612. mindspore/train/data_sink.py +10 -16
  613. mindspore/train/dataset_helper.py +154 -86
  614. mindspore/train/loss_scale_manager.py +14 -9
  615. mindspore/train/metrics/__init__.py +10 -2
  616. mindspore/train/metrics/accuracy.py +1 -1
  617. mindspore/train/metrics/auc.py +1 -1
  618. mindspore/train/metrics/bleu_score.py +2 -2
  619. mindspore/train/metrics/confusion_matrix.py +14 -14
  620. mindspore/train/metrics/cosine_similarity.py +3 -3
  621. mindspore/train/metrics/dice.py +1 -1
  622. mindspore/train/metrics/fbeta.py +1 -1
  623. mindspore/train/metrics/hausdorff_distance.py +8 -6
  624. mindspore/train/metrics/mean_surface_distance.py +5 -4
  625. mindspore/train/metrics/metric.py +49 -17
  626. mindspore/train/metrics/occlusion_sensitivity.py +4 -4
  627. mindspore/train/metrics/perplexity.py +1 -1
  628. mindspore/train/metrics/precision.py +2 -2
  629. mindspore/train/metrics/recall.py +2 -3
  630. mindspore/train/metrics/roc.py +7 -7
  631. mindspore/train/metrics/root_mean_square_surface_distance.py +5 -4
  632. mindspore/train/metrics/topk.py +7 -4
  633. mindspore/train/mind_ir_pb2.py +193 -48
  634. mindspore/train/model.py +377 -133
  635. mindspore/train/serialization.py +697 -245
  636. mindspore/train/summary/_summary_adapter.py +5 -2
  637. mindspore/train/summary/_writer_pool.py +4 -3
  638. mindspore/train/summary/summary_record.py +25 -23
  639. mindspore/train/train_thor/convert_utils.py +39 -23
  640. mindspore/train/train_thor/dataset_helper.py +4 -3
  641. mindspore/train/train_thor/model_thor.py +8 -8
  642. mindspore/version.py +1 -1
  643. {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/METADATA +7 -8
  644. {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/RECORD +647 -818
  645. {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/entry_points.txt +0 -1
  646. mindspore/_akg/akg/tvm/contrib/debugger/__init__.py +0 -16
  647. mindspore/_akg/akg/tvm/contrib/debugger/debug_result.py +0 -274
  648. mindspore/_akg/akg/tvm/contrib/debugger/debug_runtime.py +0 -259
  649. mindspore/_akg/akg/tvm/contrib/peak.py +0 -341
  650. mindspore/_akg/akg/tvm/contrib/rpc.py +0 -25
  651. mindspore/_akg/akg/tvm/contrib/xcode.py +0 -257
  652. mindspore/_akg/akg/tvm/exec/__init__.py +0 -17
  653. mindspore/_akg/akg/tvm/exec/autotvm_log_editor.py +0 -60
  654. mindspore/_akg/akg/tvm/exec/measure_peak.py +0 -48
  655. mindspore/_akg/akg/tvm/exec/query_rpc_tracker.py +0 -48
  656. mindspore/_akg/akg/tvm/exec/rpc_proxy.py +0 -98
  657. mindspore/_akg/akg/tvm/exec/rpc_server.py +0 -88
  658. mindspore/_akg/akg/tvm/exec/rpc_tracker.py +0 -62
  659. mindspore/_akg/akg/tvm/rpc/__init__.py +0 -29
  660. mindspore/_akg/akg/tvm/rpc/base.py +0 -182
  661. mindspore/_akg/akg/tvm/rpc/client.py +0 -436
  662. mindspore/_akg/akg/tvm/rpc/proxy.py +0 -595
  663. mindspore/_akg/akg/tvm/rpc/server.py +0 -413
  664. mindspore/_akg/akg/tvm/rpc/tornado_util.py +0 -121
  665. mindspore/_akg/akg/tvm/rpc/tracker.py +0 -431
  666. mindspore/_extends/graph_kernel/expander.py +0 -80
  667. mindspore/_extends/graph_kernel/expanders/__init__.py +0 -57
  668. mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
  669. mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
  670. mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
  671. mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
  672. mindspore/_extends/graph_kernel/expanders/bias_add_grad.py +0 -49
  673. mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
  674. mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
  675. mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
  676. mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
  677. mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
  678. mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
  679. mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
  680. mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
  681. mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
  682. mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
  683. mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
  684. mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
  685. mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
  686. mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
  687. mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
  688. mindspore/_extends/graph_kernel/expanders/gather.py +0 -43
  689. mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
  690. mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
  691. mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
  692. mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
  693. mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
  694. mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
  695. mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
  696. mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
  697. mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
  698. mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
  699. mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
  700. mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
  701. mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
  702. mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
  703. mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
  704. mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
  705. mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
  706. mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
  707. mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
  708. mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
  709. mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
  710. mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
  711. mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
  712. mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
  713. mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
  714. mindspore/_extends/graph_kernel/expanders/tile.py +0 -54
  715. mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
  716. mindspore/_extends/parse/jit_fallback_modules.py +0 -51
  717. mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
  718. mindspore/dataset/engine/graphdata.py +0 -1586
  719. mindspore/include/api/net.h +0 -142
  720. mindspore/ops/_grad/grad_array_ops.py +0 -1347
  721. mindspore/ops/_grad/grad_clip_ops.py +0 -84
  722. mindspore/ops/_grad/grad_debug_ops.py +0 -68
  723. mindspore/ops/_grad/grad_inner_ops.py +0 -235
  724. mindspore/ops/_grad/grad_math_ops.py +0 -1684
  725. mindspore/ops/_grad/grad_nn_ops.py +0 -1529
  726. mindspore/ops/_grad/grad_other_ops.py +0 -89
  727. mindspore/ops/_grad/grad_sequence_ops.py +0 -296
  728. mindspore/ops/_grad/grad_sparse.py +0 -323
  729. mindspore/ops/_grad_experimental/grad_image_ops.py +0 -249
  730. mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -195
  731. mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
  732. mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
  733. mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
  734. mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +0 -19
  735. mindspore/ops/bprop_mindir/Argmax_bprop.mindir +0 -15
  736. mindspore/ops/bprop_mindir/Argmin_bprop.mindir +0 -15
  737. mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +0 -19
  738. mindspore/ops/bprop_mindir/Assign_bprop.mindir +0 -17
  739. mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +0 -150
  740. mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +0 -66
  741. mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
  742. mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -15
  743. mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
  744. mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +0 -28
  745. mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
  746. mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +0 -33
  747. mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +0 -306
  748. mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -13
  749. mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
  750. mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
  751. mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +0 -240
  752. mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +0 -247
  753. mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +0 -247
  754. mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +0 -315
  755. mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +0 -278
  756. mindspore/ops/bprop_mindir/DType_bprop.mindir +0 -14
  757. mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +0 -58
  758. mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -13
  759. mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +0 -23
  760. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
  761. mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +0 -15
  762. mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
  763. mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
  764. mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +0 -25
  765. mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +0 -18
  766. mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +0 -27
  767. mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
  768. mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
  769. mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
  770. mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +0 -14
  771. mindspore/ops/bprop_mindir/Elu_bprop.mindir +0 -16
  772. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  773. mindspore/ops/bprop_mindir/Equal_bprop.mindir +0 -19
  774. mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +0 -58
  775. mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +0 -16
  776. mindspore/ops/bprop_mindir/Flatten_bprop.mindir +0 -54
  777. mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +0 -19
  778. mindspore/ops/bprop_mindir/GatherD_bprop.mindir +0 -26
  779. mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +0 -57
  780. mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
  781. mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +0 -19
  782. mindspore/ops/bprop_mindir/Greater_bprop.mindir +0 -19
  783. mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +0 -16
  784. mindspore/ops/bprop_mindir/HSwish_bprop.mindir +0 -16
  785. mindspore/ops/bprop_mindir/IOU_bprop.mindir +0 -19
  786. mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
  787. mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +0 -15
  788. mindspore/ops/bprop_mindir/IsInf_bprop.mindir +0 -15
  789. mindspore/ops/bprop_mindir/IsNan_bprop.mindir +0 -15
  790. mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +0 -126
  791. mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +0 -15
  792. mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +0 -30
  793. mindspore/ops/bprop_mindir/LRN_bprop.mindir +0 -43
  794. mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
  795. mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +0 -19
  796. mindspore/ops/bprop_mindir/Less_bprop.mindir +0 -19
  797. mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +0 -23
  798. mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -13
  799. mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +0 -23
  800. mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +0 -19
  801. mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +0 -15
  802. mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +0 -21
  803. mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +0 -74
  804. mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +0 -74
  805. mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +0 -75
  806. mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +0 -65
  807. mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
  808. mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
  809. mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
  810. mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +0 -27
  811. mindspore/ops/bprop_mindir/Mish_bprop.mindir +0 -35
  812. mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
  813. mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
  814. mindspore/ops/bprop_mindir/NonZero_bprop.mindir +0 -14
  815. mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +0 -19
  816. mindspore/ops/bprop_mindir/OneHot_bprop.mindir +0 -26
  817. mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +0 -14
  818. mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
  819. mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
  820. mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
  821. mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +0 -29
  822. mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +0 -82
  823. mindspore/ops/bprop_mindir/Range_bprop.mindir +0 -22
  824. mindspore/ops/bprop_mindir/Rank_bprop.mindir +0 -14
  825. mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +0 -16
  826. mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
  827. mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +0 -19
  828. mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +0 -19
  829. mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +0 -20
  830. mindspore/ops/bprop_mindir/Reshape_bprop.mindir +0 -60
  831. mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +0 -29
  832. mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +0 -89
  833. mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +0 -52
  834. mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +0 -22
  835. mindspore/ops/bprop_mindir/Round_bprop.mindir +0 -15
  836. mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
  837. mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
  838. mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +0 -22
  839. mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +0 -24
  840. mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -22
  841. mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
  842. mindspore/ops/bprop_mindir/SeLU_bprop.mindir +0 -21
  843. mindspore/ops/bprop_mindir/Select_bprop.mindir +0 -31
  844. mindspore/ops/bprop_mindir/Shape_bprop.mindir +0 -14
  845. mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +0 -21
  846. mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
  847. mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +0 -16
  848. mindspore/ops/bprop_mindir/Sign_bprop.mindir +0 -15
  849. mindspore/ops/bprop_mindir/Slice_bprop.mindir +0 -26
  850. mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +0 -36
  851. mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  852. mindspore/ops/bprop_mindir/Softplus_bprop.mindir +0 -16
  853. mindspore/ops/bprop_mindir/Softsign_bprop.mindir +0 -33
  854. mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
  855. mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +0 -28
  856. mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +0 -23
  857. mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
  858. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  859. mindspore/ops/bprop_mindir/Split_bprop.mindir +0 -22
  860. mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +0 -54
  861. mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +0 -95
  862. mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +0 -98
  863. mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -29
  864. mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
  865. mindspore/ops/bprop_mindir/Tanh_bprop.mindir +0 -66
  866. mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +0 -22
  867. mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +0 -29
  868. mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +0 -14
  869. mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
  870. mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
  871. mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -23
  872. mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +0 -19
  873. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -20
  874. mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -16
  875. mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -22
  876. mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +0 -32
  877. mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +0 -38
  878. mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +0 -15
  879. mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
  880. mindspore/rewrite/node_visitor.py +0 -44
  881. mindspore/rewrite/topological_manager.py +0 -203
  882. mindspore/scipy/sparse/linalg.py +0 -192
  883. {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/WHEEL +0 -0
  884. {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/top_level.txt +0 -0
mindspore/ops/function/array_func.py

@@ -26,11 +26,11 @@ import mindspore.common.dtype as mstype
 from mindspore.ops import operations as P
 from mindspore.ops.primitive import constexpr
 from mindspore.ops.primitive import _primexpr
-import mindspore.ops.function as ops
-from mindspore.ops import functional as F
+import mindspore.ops as ops
 from mindspore.ops.operations._inner_ops import DynamicBroadcastTo
 from mindspore.ops.operations._sequence_ops import TupleToTensor
 from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
+from mindspore.ops.operations._sequence_ops import TensorToList
 
 from mindspore.ops.operations.array_ops import (
     UniqueConsecutive,
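
The hunk above replaces the two internal aliases (`mindspore.ops.function` and `mindspore.ops.functional as F`) with the single public `mindspore.ops` namespace, which re-exports the same functional interface. A minimal sketch of calls through the public alias (illustrative only; assumes a MindSpore 2.x install):

    import numpy as np
    import mindspore.ops as ops          # public namespace, replaces `functional as F`
    from mindspore import Tensor

    x = Tensor(np.ones((2, 3), dtype=np.float32))
    print(ops.shape(x))   # (2, 3), previously reachable as F.shape
    print(ops.typeof(x))  # type descriptor, previously F.typeof
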
@@ -59,11 +59,11 @@ from mindspore.common import Tensor
 from mindspore.ops._primitive_cache import _get_cache_prim
 from mindspore import _checkparam as validator
 from mindspore._c_expression import Tensor as Tensor_
+from mindspore.ops._utils.utils import ms_arrange
 
 tuple_to_tensor_ = TupleToTensor()
 eye_ = P.Eye()
 fills_ = Fills()
-fill_ = P.Fill()
 ones_ = P.Ones()
 ones_like_ = P.OnesLike()
 tile_ = P.Tile()
@@ -112,9 +112,9 @@ reduce_min = P.ReduceMin()
 
 @_primexpr
 def get_x_shape(x_shape):
-    if F.is_sequence_shape_unknown(x_shape):
+    if ops.is_sequence_shape_unknown(x_shape):
         return (-2,)
-    if F.is_sequence_value_unknown(x_shape):
+    if ops.is_sequence_value_unknown(x_shape):
         return (-1,)
     s = 1
     for i in x_shape:
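
Judging by the predicate names, this follows MindSpore's dynamic-shape convention: `(-2,)` signals that the rank itself is unknown, `(-1,)` a known rank with dynamic dimensions, and a fully static shape is flattened to its element count. A plain-Python sketch of that logic (the hunk is cut off at the loop, so the closing lines here are an assumption):

    def get_x_shape_sketch(x_shape):
        # The two checks stand in for ops.is_sequence_shape_unknown /
        # ops.is_sequence_value_unknown from the diff above.
        if x_shape is None:                 # rank itself unknown
            return (-2,)
        if any(d < 0 for d in x_shape):     # rank known, some dims dynamic
            return (-1,)
        s = 1
        for i in x_shape:                   # fully static: flatten to element count
            s *= i
        return (s,)

    print(get_x_shape_sketch((2, 3, 4)))    # (24,)
    print(get_x_shape_sketch((2, -1, 4)))   # (-1,)
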
@@ -148,7 +148,7 @@ def _get_type(x):
     """get the dtype of input"""
     if isinstance(x, Tensor):
         return x.dtype
-    return F.typeof(x)
+    return ops.typeof(x)
 
 
 def _get_max_type(start, end, step):
@@ -178,16 +178,16 @@ def arange(start=0, end=None, step=1, *, dtype=None):
 
     Args:
         start (Union[float, int, Tensor], optional): The start of the interval.
-            If Tensor, the shape must be (). Default: 0.
+            If Tensor, the shape must be :math:`()` . Default: ``0`` .
         end (Union[float, int, Tensor], optional): The end of the interval, exclusive.
-            If Tensor, the shape must be ().
-            Default: None. If None, it defaults to the value of `start`, and 0 is used as the starting value.
+            If Tensor, the shape must be :math:`()`.
+            Default: ``None`` . If ``None`` , it defaults to the value of `start`, and 0 is used as the starting value.
         step (Union[float, int, Tensor], optional): Number that increments `start`.
-            If Tensor, the shape must be (). Default: 1.
+            If Tensor, the shape must be :math:`()`. Default: ``1`` .
 
     Keyword Args:
-        dtype (mindspore.dtype, optional): The required data type of returned Tensor. Default: None.
-            If the value is not specified or is None, the type with the highest precision in the
+        dtype (mindspore.dtype, optional): The required data type of returned Tensor. Default: ``None`` .
+            If the value is not specified or is ``None`` , the type with the highest precision in the
             `start`, `end`, and `step` parameters is inferred.
 
     Returns:
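
Since this hunk only touches the docstring, here is a short usage sketch of the documented defaults (illustrative; output dtypes follow the highest-precision-input rule described above):

    import mindspore
    from mindspore import ops

    print(ops.arange(4))        # end=None: start becomes the end, 0 the start -> [0 1 2 3]
    print(ops.arange(1, 6, 2))  # [1 3 5]
    print(ops.arange(0, 2, 0.5, dtype=mindspore.float32))  # explicit dtype overrides inference
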
@@ -237,7 +237,8 @@ def arange(start=0, end=None, step=1, *, dtype=None):
     if start.shape != () or end.shape != () or step.shape != ():
         raise ValueError(f"For arange, the input args must be a TensorScalar,"
                          f" but got start shape:{start.shape}, end shape:{end.shape}, step shape:{step.shape}")
-    data = P.Range()(start, end, step)
+    range_op = _get_cache_prim(P.Range)()
+    data = range_op(start, end, step)
     if dtype is not None:
         data = cast_(data, dtype)
     return data
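
Replacing `P.Range()(start, end, step)` with `_get_cache_prim(P.Range)()` swaps a fresh primitive instantiation on every call for a cached instance, avoiding repeated op construction in hot functional paths. A sketch of the pattern as it appears in this file (`range_cached` is a hypothetical wrapper for illustration):

    from mindspore.ops._primitive_cache import _get_cache_prim
    from mindspore.ops import operations as P

    def range_cached(start, end, step):
        # _get_cache_prim reuses one Range instance per unique set of
        # constructor arguments (here: none) instead of rebuilding the op.
        range_op = _get_cache_prim(P.Range)()
        return range_op(start, end, step)
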
@@ -263,7 +264,7 @@ def cat(tensors, axis=0):
             all other dimensions should be equal, that is,
             :math:`t1.shape[1] = t2.shape[1], t1.shape[2] = t2.shape[2], ..., t1.shape[R-1] = t2.shape[R-1]`,
             where :math:`R` represents the rank of tensor.
-        axis (int): The specified axis, whose value is in range :math:`[-R, R)`. Default: 0.
+        axis (int): The specified axis, whose value is in range :math:`[-R, R)`. Default: ``0`` .
 
     Returns:
         Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
@@ -279,6 +280,9 @@ def cat(tensors, axis=0):
279
280
  ``Ascend`` ``GPU`` ``CPU``
280
281
 
281
282
  Examples:
283
+ >>> import mindspore
284
+ >>> import numpy as np
285
+ >>> from mindspore import Tensor, ops
282
286
  >>> input_x1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
283
287
  >>> input_x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
284
288
  >>> output = ops.cat((input_x1, input_x2))
@@ -307,10 +311,10 @@ def eye(n, m=None, dtype=None):
307
311
  Args:
308
312
  n (int): The number of rows of returned tensor. Constant value only.
309
313
  m (int): The number of columns of returned tensor. Constant value only.
310
- Default: if None, the number of columns is as the same as n.
314
+ Default: ``None`` , if ``None`` , the number of columns is the same as n.
311
315
  dtype (mindspore.dtype): MindSpore's dtype, the data type of the returned tensor.
312
316
  The data type can be bool or Number.
313
- Default: None, the data type of the returned tensor is mindspore.float32.
317
+ Default: ``None`` , the data type of the returned tensor is mindspore.float32.
314
318
 
315
319
  Returns:
316
320
  Tensor, a tensor with ones on the diagonal and the rest of elements are zero. The shape of `output` depends on
@@ -318,12 +322,14 @@ def eye(n, m=None, dtype=None):
318
322
 
319
323
  Raises:
320
324
  TypeError: If `m` or `n` is not an int.
321
- ValueError: If `m` or `n` is less than 1.
325
+ ValueError: If `m` or `n` is less than 0.
322
326
 
323
327
  Supported Platforms:
324
328
  ``Ascend`` ``GPU`` ``CPU``
325
329
 
326
330
  Examples:
331
+ >>> import mindspore
332
+ >>> from mindspore import ops
327
333
  >>> output = ops.eye(2, 2, mindspore.int32)
328
334
  >>> print(output)
329
335
  [[1 0]
@@ -368,11 +374,12 @@ def hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype
368
374
  Args:
369
375
  window_length (int): The size of the returned window. Must be a non-negative integer.
370
376
  periodic (bool, optional): If True, return a periodic window. If False, return a symmetric window.
371
- alpha (float, optional): The coefficient α.
372
- beta (float, optional): The coefficient β.
377
+ Default: ``True`` .
378
+ alpha (float, optional): The coefficient α. Default: ``0.54`` .
379
+ beta (float, optional): The coefficient β. Default: ``0.46`` .
373
380
 
374
381
  Keyword Args:
375
- dtype (mindspore.dtype, optional): The output window data type. Default: None.
382
+ dtype (mindspore.dtype, optional): The output window data type. Default: ``None`` .
376
383
 
377
384
  Returns:
378
385
  Tensor, a 1-D tensor of size (window_length) containing the window.
@@ -385,12 +392,13 @@ def hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype
385
392
  ``Ascend`` ``GPU`` ``CPU``
386
393
 
387
394
  Examples:
395
+ >>> from mindspore import ops
388
396
  >>> print(ops.hamming_window(6, False))
389
397
  [0.08 0.39785218 0.91214782 0.91214782 0.39785218 0.08]
390
398
  """
391
399
  if not isinstance(window_length, int):
392
400
  raise TypeError(f"For array function 'hamming_window', 'window_length' must be int, but got" \
393
- f" {type(window_length)}.")
401
+ f" {type(window_length)}.")
394
402
  if window_length < 0:
395
403
  raise ValueError(f"For array function 'hamming_window', 'window_length' must be non negative number.")
396
404
  if not isinstance(periodic, bool):
@@ -404,14 +412,11 @@ def hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype
404
412
  if dtype is not None and dtype not in mstype.float_type:
405
413
  raise TypeError(f"For array function 'hamming_window', 'dtype' must be floating point dtypes, but got {dtype}.")
406
414
 
407
- if periodic:
408
- window_length += 1
409
- n = arange(0, window_length)
410
- w = alpha - beta * ops.cos((2 * np.pi / (window_length - 1)) * n)
411
-
412
- if dtype is not None:
413
- w = P.Cast()(w, dtype)
414
- return w[:-1] if periodic else w
415
+ dtype = mstype.float32 if dtype is None else dtype
416
+ op = _get_cache_prim(P.HammingWindow)(periodic, alpha, beta, dtype)
417
+ length = Tensor(np.array([window_length]).astype(np.int32))
418
+ out = op(length)
419
+ return out
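The deleted pure-Python path is still handy as a reference check against the new `P.HammingWindow` primitive. A NumPy sketch mirroring that removed code (`hamming_ref` is a hypothetical helper name, not part of the package):

    import numpy as np

    def hamming_ref(window_length, periodic=True, alpha=0.54, beta=0.46):
        # w[n] = alpha - beta * cos(2*pi*n / (N - 1)); a periodic window computes
        # one extra point and drops it, exactly as the removed code did.
        n_fft = window_length + 1 if periodic else window_length
        if n_fft <= 1:
            return np.full(window_length, 1.0)
        n = np.arange(n_fft)
        w = alpha - beta * np.cos(2 * np.pi * n / (n_fft - 1))
        return w[:-1] if periodic else w

    print(hamming_ref(6, periodic=False))  # matches the docstring example above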
415
420
 
416
421
 
417
422
  def where(condition, x, y):
@@ -438,6 +443,9 @@ def where(condition, x, y):
438
443
  ``Ascend`` ``GPU`` ``CPU``
439
444
 
440
445
  Examples:
446
+ >>> import numpy as np
447
+ >>> from mindspore import Tensor, ops
448
+ >>> from mindspore import dtype as mstype
441
449
  >>> a = Tensor(np.arange(4).reshape((2, 2)), mstype.float32)
442
450
  >>> b = Tensor(np.ones((2, 2)), mstype.float32)
443
451
  >>> condition = a < 3
@@ -450,13 +458,15 @@ def where(condition, x, y):
450
458
  raise TypeError(f"For 'where', 'condition' must be a Tensor, but got {type(condition)}.")
451
459
  if isinstance(x, (int, float)):
452
460
  if not isinstance(y, Tensor):
453
- raise TypeError(f"For 'where', at least one of 'x' and 'y' should be Tensor, \
454
- but got x:{type(x)}, y:{type(y)}.")
461
+ raise TypeError(
462
+ f"For 'where', at least one of 'x' and 'y' should be Tensor, but got x:{type(x)}, y:{type(y)}."
463
+ )
455
464
  x = cast_(x, y.dtype)
456
465
  elif isinstance(y, (int, float)):
457
466
  if not isinstance(x, Tensor):
458
- raise TypeError(f"For 'where', at least one of 'x' and 'y' should be Tensor, \
459
- but got x:{type(x)}, y:{type(y)}.")
467
+ raise TypeError(
468
+ f"For 'where', at least one of 'x' and 'y' should be Tensor, but got x:{type(x)}, y:{type(y)}."
469
+ )
460
470
  y = cast_(y, x.dtype)
461
471
  output_shape = _calc_broadcast_shape(x.shape, y.shape, condition.shape)
462
472
  condition = broadcast_to(condition, output_shape)
@@ -474,7 +484,7 @@ def reverse(x, axis):
474
484
  The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "input_x".
475
485
 
476
486
  Args:
477
- x (Tensor): The target tensor. The data type is Number except float64.
487
+ x (Tensor): The target tensor.
478
488
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
479
489
  axis (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.
480
490
 
@@ -489,6 +499,9 @@ def reverse(x, axis):
489
499
  ``Ascend`` ``GPU`` ``CPU``
490
500
 
491
501
  Examples:
502
+ >>> import mindspore
503
+ >>> import numpy as np
504
+ >>> from mindspore import Tensor, ops
492
505
  >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
493
506
  >>> output = ops.reverse(input_x, axis=[1])
494
507
  >>> print(output)
@@ -520,6 +533,8 @@ def ravel(input):
520
533
  ``Ascend`` ``GPU`` ``CPU``
521
534
 
522
535
  Examples:
536
+ >>> import numpy as np
537
+ >>> from mindspore import Tensor, ops
523
538
  >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
524
539
  >>> output = ops.ravel(x)
525
540
  >>> print(output)
@@ -534,9 +549,11 @@ def matrix_band_part(x, lower, upper):
534
549
  r"""
535
550
  Copy a tensor, setting everything outside a central band in each innermost matrix to zero.
536
551
 
552
+ .. warning::
553
+ This is an experimental API that is subject to change or deletion.
554
+
537
555
  Args:
538
556
  x (Tensor): Input tensor. :math:`(*, m, n)` where :math:`*` means, any number of additional dimensions.
539
- The data type must be float16, float32, float64, int32 or int64.
540
557
  lower (Union[int, Tensor]): Number of subdiagonals to keep. The data type must be int32 or int64.
541
558
  If negative, keep entire lower triangle.
542
559
  upper (Union[int, Tensor]): Number of superdiagonals to keep. The data type must be int32 or int64.
@@ -547,7 +564,7 @@ def matrix_band_part(x, lower, upper):
547
564
 
548
565
  Raises:
549
566
  TypeError: If `x` is not a Tensor.
550
- TypeError: If dtype of `x` is not one of float16, float32, float64, int32 or int64.
567
+ TypeError: If dtype of `x` is not valid.
551
568
  TypeError: If `lower` is neither a number nor a Tensor.
552
569
  TypeError: If `upper` is neither a number nor a Tensor.
553
570
  TypeError: If dtype of `lower` is neither int32 nor int64.
@@ -557,9 +574,11 @@ def matrix_band_part(x, lower, upper):
557
574
  ValueError: If the shape of `upper` is not equal to 0D.
558
575
 
559
576
  Supported Platforms:
560
-
577
+ ``Ascend`` ``GPU`` ``CPU``
561
578
 
562
579
  Examples:
580
+ >>> import numpy as np
581
+ >>> from mindspore import Tensor, ops
563
582
  >>> x = Tensor(np.ones([2, 4, 4]).astype(np.float32))
564
583
  >>> output = ops.matrix_band_part(x, 2, 1)
565
584
  >>> print(output)
@@ -582,7 +601,8 @@ def padding(x, pad_dim_size=8):
582
601
  Args:
583
602
  x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The rank of `x` must be at least 2.
584
603
  The last dimension of `x` must be 1. The data type is Number.
585
- pad_dim_size (int): The value of the last dimension of `x` to be extended, which must be positive. Default: 8.
604
+ pad_dim_size (int): The value of the last dimension of `x` to be extended, which must be positive.
605
+ Default: ``8`` .
586
606
 
587
607
  Returns:
588
608
  Tensor, has the same data type as `x`, with the last dimension extended to `pad_dim_size`.
@@ -596,6 +616,9 @@ def padding(x, pad_dim_size=8):
596
616
  ``Ascend`` ``GPU`` ``CPU``
597
617
 
598
618
  Examples:
619
+ >>> import mindspore
620
+ >>> import numpy as np
621
+ >>> from mindspore import Tensor, ops
599
622
  >>> x = Tensor(np.array([[8], [10]]), mindspore.float32)
600
623
  >>> pad_dim_size = 4
601
624
  >>> output = ops.padding(x, pad_dim_size)
@@ -628,7 +651,7 @@ def _check_axis_type(axis, type_int=True, type_tuple=True, type_list=True, ops_n
628
651
  raise TypeError(f"For {ops_name}, the axis should be {type_str}, but got {type(axis)}.")
629
652
 
630
653
 
631
- def one_hot(indices, depth, on_value, off_value, axis=-1):
654
+ def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
632
655
  r"""
633
656
  Computes a one-hot tensor.
634
657
 
@@ -640,24 +663,24 @@ def one_hot(indices, depth, on_value, off_value, axis=-1):
640
663
 
641
664
  Args:
642
665
  indices(Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
643
- Data type must be uint8, int32 or int64.
666
+ Data type must be int32 or int64.
644
667
  depth(int): A scalar defining the depth of the one-hot dimension.
645
- on_value(Union[Tensor, int, float]): A value to fill in output when `indices[j] = i`.
668
+ on_value(Union[Tensor, int, float], optional): A value to fill in output when `indices[j] = i`.
646
669
  Support uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64,
647
- bool, complex64, complex128.
648
- off_value(Union[Tensor, int, float]): A value to fill in output when `indices[j] != i`.
649
- Has the same data type as `on_value`.
650
- axis(int): Position to insert the value. e.g. If shape of `self` is :math:`(N, C)`, and `axis` is -1,
670
+ bool, complex64, complex128. Default: ``1`` .
671
+ off_value(Union[Tensor, int, float], optional): A value to fill in output when `indices[j] != i`.
672
+ Has the same data type as `on_value`. Default: ``0`` .
673
+ axis(int, optional): Position to insert the value. e.g. If shape of `indices` is :math:`(N, C)`, and `axis` is -1,
651
674
  the output shape will be :math:`(N, C, depth)`, If `axis` is 0,
652
675
  the output shape will be :math:`(depth, N, C)`.
653
- Default: -1.
676
+ Default: ``-1`` .
654
677
 
655
678
  Returns:
656
679
  Tensor, one-hot tensor. Tensor of shape :math:`(X_0, \ldots, X_{axis}, \text{depth} ,X_{axis+1}, \ldots, X_n)`.
657
680
 
658
681
  Raises:
659
682
  TypeError: If `axis` or `depth` is not an int.
660
- TypeError: If dtype of `indices` is not uint8, int32 or int64.
683
+ TypeError: If dtype of `indices` is not int32 or int64.
661
684
  TypeError: If `indices`, `on_value` or `off_value` is not a Tensor.
662
685
  ValueError: If `axis` is not in range [-1, ndim].
663
686
  ValueError: If `depth` is less than 0.
@@ -666,6 +689,9 @@ def one_hot(indices, depth, on_value, off_value, axis=-1):
666
689
  ``Ascend`` ``GPU`` ``CPU``
667
690
 
668
691
  Examples:
692
+ >>> import mindspore
693
+ >>> import numpy as np
694
+ >>> from mindspore import Tensor, ops
669
695
  >>> indices = Tensor(np.array([0, 1, 2]), mindspore.int32)
670
696
  >>> depth, on_value, off_value = 3, Tensor(1.0, mindspore.float32), Tensor(0.0, mindspore.float32)
671
697
  >>> output = ops.one_hot(indices, depth, on_value, off_value, axis=-1)
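Since `on_value` and `off_value` now default to 1 and 0, the common case needs only indices and depth. A minimal sketch (how the scalar defaults are promoted to an output dtype is an assumption):

    import mindspore
    from mindspore import Tensor, ops

    indices = Tensor([0, 1, 2], mindspore.int32)
    print(ops.one_hot(indices, 3))   # expected: a 3x3 one-hot (identity-patterned) matrix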
@@ -688,8 +714,8 @@ def fill(type, shape, value): # pylint: disable=redefined-outer-name
688
714
 
689
715
  Args:
690
716
  type (mindspore.dtype): The specified type of output tensor. The data type only supports
691
- `bool_ <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_ and
692
- `number <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_ .
717
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ and
718
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
693
719
  shape (Union(Tensor, tuple[int])): The specified shape of output tensor.
694
720
  value (Union(Tensor, number.Number, bool)): Value to fill the returned tensor.
695
721
 
@@ -703,6 +729,8 @@ def fill(type, shape, value): # pylint: disable=redefined-outer-name
703
729
  ``Ascend`` ``GPU`` ``CPU``
704
730
 
705
731
  Examples:
732
+ >>> import mindspore
733
+ >>> from mindspore import ops
706
734
  >>> output = ops.fill(mindspore.float32, (2, 2), 1)
707
735
  >>> print(output)
708
736
  [[1. 1.]
@@ -713,10 +741,11 @@ def fill(type, shape, value): # pylint: disable=redefined-outer-name
713
741
  [0. 0. 0.]
714
742
  [0. 0. 0.]]
715
743
  """
716
- return fill_(type, shape, value)
744
+ value = cast_(value, type)
745
+ return _get_cache_prim(P.FillV2)()(shape, value)
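`fill` is now a thin wrapper: cast the scalar to `type`, then let `FillV2` expand it over `shape`. A short usage sketch of the documented behaviour:

    import mindspore as ms
    from mindspore import ops

    out = ops.fill(ms.float32, (2, 3), 7)  # scalar 7 is cast to float32, then broadcast
    print(out.shape)                       # (2, 3)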
717
746
 
718
747
 
719
- def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-name
748
+ def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-name
720
749
  """
721
750
  Create a Tensor of the specified shape and fill it with the specified value.
722
751
 
@@ -726,7 +755,7 @@ def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-nam
726
755
 
727
756
  Keyword Args:
728
757
  dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for details,
729
- please refer to :class:`mindspore.dtype` . Default: None.
758
+ please refer to :class:`mindspore.dtype` . Default: ``None`` .
730
759
 
731
760
  Returns:
732
761
  Tensor.
@@ -739,6 +768,7 @@ def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-nam
739
768
  ``Ascend`` ``GPU`` ``CPU``
740
769
 
741
770
  Examples:
771
+ >>> from mindspore import ops
742
772
  >>> output = ops.full((2, 2), 1)
743
773
  >>> print(output)
744
774
  [[1. 1.]
@@ -757,7 +787,7 @@ def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-nam
757
787
  raise TypeError(f"For 'ops.full', 'dtype' must be mindspore.type, but got {dtype}.")
758
788
  if isinstance(size, list):
759
789
  size = tuple(size)
760
- return fill_(dtype, size, fill_value)
790
+ return ops.fill(dtype, size, fill_value)
761
791
 
762
792
 
763
793
  def full_like(input, fill_value, *, dtype=None):
@@ -770,7 +800,7 @@ def full_like(input, fill_value, *, dtype=None):
770
800
 
771
801
  Keyword Args:
772
802
  dtype (mindspore.dtype, optional): The specified type of output tensor. `bool_` and `number` are supported,
773
- for details, please refer to :class:`mindspore.dtype` . Default: None.
803
+ for details, please refer to :class:`mindspore.dtype` . Default: ``None`` .
774
804
 
775
805
  Returns:
776
806
  Tensor.
@@ -782,6 +812,8 @@ def full_like(input, fill_value, *, dtype=None):
782
812
  ``Ascend`` ``GPU`` ``CPU``
783
813
 
784
814
  Examples:
815
+ >>> import mindspore
816
+ >>> from mindspore import Tensor, ops
785
817
  >>> input = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
786
818
  >>> output = ops.full_like(input, 1)
787
819
  >>> print(output)
@@ -806,12 +838,12 @@ def chunk(input, chunks, axis=0):
806
838
  Cut the input Tensor into `chunks` sub-tensors along the specified axis.
807
839
 
808
840
  Note:
809
- This function may return less then the specified number of chunks!
841
+ This function may return less than the specified number of chunks!
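A sketch of why fewer chunks can come back, assuming ceil-division chunk sizing (the sizes below are an assumption consistent with that rule):

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.arange(7).astype(np.float32))
    out = ops.chunk(x, 6)   # per-chunk length becomes ceil(7 / 6) = 2
    print(len(out))         # 4 sub-tensors (sizes 2, 2, 2, 1), not 6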
810
842
 
811
843
  Args:
812
844
  input (Tensor): A Tensor to be cut.
813
845
  chunks (int): Number of sub-tensors to cut.
814
- axis (int, optional): Specify the dimensions that you want to split. Default: 0.
846
+ axis (int, optional): Specify the dimensions that you want to split. Default: ``0`` .
815
847
 
816
848
  Returns:
817
849
  A tuple of sub-tensors.
@@ -827,6 +859,8 @@ def chunk(input, chunks, axis=0):
827
859
  ``Ascend`` ``GPU`` ``CPU``
828
860
 
829
861
  Examples:
862
+ >>> import numpy as np
863
+ >>> from mindspore import ops, Tensor
830
864
  >>> input_x = np.arange(9).astype("float32")
831
865
  >>> output = ops.chunk(Tensor(input_x), 3)
832
866
  >>> print(output)
@@ -876,12 +910,12 @@ def fills(x, value):
876
910
  value_ = float(value)
877
911
  elif isinstance(value, Tensor):
878
912
  if value.ndim != 0:
879
- raise ValueError("For 'ops.fills', if the argument 'value' is a tensor, the number of its dimension"
880
- " should be 0, but got {}".format(value.ndim))
913
+ raise ValueError(f"For 'ops.fills', if the argument 'value' is a tensor, the number of its dimension"
914
+ f" should be 0, but got {value.ndim}")
881
915
  value_ = value.astype(mstype.float32)
882
916
  else:
883
- raise TypeError("For 'ops.fills', the type of argument 'value' should be int, float or Tensor,"
884
- " but got {}".format(type(value)))
917
+ raise TypeError(f"For 'ops.fills', the type of argument 'value' should be int, float or Tensor,"
918
+ f" but got {type(value)}")
885
919
  return fills_(x, value_)
886
920
 
887
921
 
@@ -893,38 +927,39 @@ def ones(shape, dtype=None): # pylint: disable=redefined-outer-name
893
927
  argument.
894
928
 
895
929
  Args:
896
- shape (Union[tuple[int], int]): The specified shape of output tensor. Only constant positive int is allowed.
897
- dtype (:class:`mindspore.dtype`): The specified type of output tensor. If `dtype` is None,
898
- `mindspore.float32` will be used. Default: None.
930
+ shape (Union[tuple[int], int, Tensor]): The specified shape of output tensor. Only a positive integer, or
931
+ a tuple or Tensor containing positive integers, is allowed. If it is a Tensor,
932
+ it must be a 0-D or 1-D Tensor with int32 or int64 dtypes.
933
+ dtype (:class:`mindspore.dtype`): The specified type of output tensor. If `dtype` is ``None`` ,
934
+ `mindspore.float32` will be used. Default: ``None`` .
899
935
 
900
936
  Returns:
901
937
  Tensor, with the specified shape and dtype, filled with ones.
902
938
 
903
939
  Raises:
904
- TypeError: If `shape` is neither tuple nor int.
940
+ TypeError: If `shape` is not tuple, int or Tensor.
905
941
 
906
942
  Supported Platforms:
907
943
  ``Ascend`` ``GPU`` ``CPU``
908
944
 
909
945
  Examples:
946
+ >>> import mindspore
947
+ >>> from mindspore import ops
910
948
  >>> output = ops.ones((2, 2), mindspore.float32)
911
949
  >>> print(output)
912
950
  [[1. 1.]
913
951
  [1. 1.]]
914
952
  """
915
953
  _dtype = mstype.float32 if dtype is None else dtype
916
- ones_op = P.FillV2()
954
+ ones_op = _get_cache_prim(P.FillV2)()
917
955
  value = Tensor(1, _dtype)
918
956
  if isinstance(shape, int):
919
957
  shape = tuple([shape])
920
- shape_tensor = shape
921
- if isinstance(shape, (list, tuple)) and not shape:
922
- shape_tensor = Tensor(shape, dtype=mstype.int64)
923
- elif not isinstance(shape, Tensor):
924
- shape_tensor = Tensor(shape)
925
- if shape_tensor.ndim == 0 and shape_tensor.size == 1:
926
- shape_tensor = shape_tensor.reshape(1)
927
- output = ones_op(shape_tensor, value)
958
+ elif isinstance(shape, list):
959
+ shape = Tensor(shape, dtype=mstype.int64)
960
+ elif isinstance(shape, Tensor) and shape.ndim == 0 and shape.size == 1:
961
+ shape = shape.reshape(1)
962
+ output = ones_op(shape, value)
928
963
  return output
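The normalization above (and the matching one in `zeros` below) means several shape spellings are accepted; a short sketch:

    import mindspore as ms
    from mindspore import Tensor, ops

    print(ops.ones(3, ms.float32).shape)                         # int -> (3,)
    print(ops.ones((2, 2), ms.float32).shape)                    # tuple passes through
    print(ops.ones(Tensor([2, 3], ms.int64), ms.float32).shape)  # 1-D int Tensor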
929
964
 
930
965
 
@@ -936,8 +971,8 @@ def ones_like(input, *, dtype=None):
936
971
  input (Tensor): Tensor of any dimension.
937
972
 
938
973
  Keyword Args:
939
- dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is None,
940
- the dtype of the input tensor will be used. Default: None.
974
+ dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is ``None`` ,
975
+ the dtype of the input tensor will be used. Default: ``None`` .
941
976
 
942
977
  Returns:
943
978
  Tensor, has the same shape as `input` but filled with ones.
@@ -949,13 +984,15 @@ def ones_like(input, *, dtype=None):
949
984
  ``Ascend`` ``GPU`` ``CPU``
950
985
 
951
986
  Examples:
987
+ >>> import numpy as np
988
+ >>> from mindspore import Tensor, ops
952
989
  >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
953
990
  >>> output = ops.ones_like(x)
954
991
  >>> print(output)
955
992
  [[1 1]
956
993
  [1 1]]
957
994
  """
958
- ones_like_op = P.OnesLike()
995
+ ones_like_op = _get_cache_prim(P.OnesLike)()
959
996
  output = ones_like_op(input)
960
997
  _dtype = input.dtype if dtype is None else dtype
961
998
  output = cast_(output, _dtype)
@@ -967,38 +1004,39 @@ def zeros(size, dtype=None): # pylint: disable=redefined-outer-name
967
1004
  Creates a tensor filled with 0 with shape described by `shape` and fills it with value 0 in type of `dtype`.
968
1005
 
969
1006
  Args:
970
- size (Union[tuple[int], int]): The specified shape of output tensor. Only constant positive int is allowed.
971
- dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor. If `dtype` is None,
972
- mindspore.float32 will be used. Default: None.
1007
+ size (Union[tuple[int], int, Tensor]): The specified shape of output tensor. Only a positive integer, or
1008
+ a tuple or Tensor containing positive integers, is allowed. If it is a Tensor,
1009
+ it must be a 0-D or 1-D Tensor with int32 or int64 dtypes.
1010
+ dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor. If `dtype` is ``None`` ,
1011
+ mindspore.float32 will be used. Default: ``None`` .
973
1012
 
974
1013
  Returns:
975
1014
  Tensor, with the specified shape and dtype, filled with zeros.
976
1015
 
977
1016
  Raises:
978
- TypeError: If `size` is neither a tuple of int nor an int.
1017
+ TypeError: If `size` is not tuple, int or Tensor.
979
1018
 
980
1019
  Supported Platforms:
981
1020
  ``Ascend`` ``GPU`` ``CPU``
982
1021
 
983
1022
  Examples:
1023
+ >>> import mindspore
1024
+ >>> from mindspore import ops
984
1025
  >>> output = ops.zeros((2, 2), mindspore.float32)
985
1026
  >>> print(output)
986
1027
  [[0. 0.]
987
1028
  [0. 0.]]
988
1029
  """
989
- zero_op = P.FillV2()
1030
+ zero_op = _get_cache_prim(P.FillV2)()
990
1031
  _dtype = mstype.float32 if dtype is None else dtype
991
1032
  value = Tensor(0, _dtype)
992
1033
  if isinstance(size, int):
993
1034
  size = tuple([size])
994
- shape_tensor = size
995
- if isinstance(size, (list, tuple)) and not size:
996
- shape_tensor = Tensor(size, dtype=mstype.int64)
997
- elif not isinstance(size, Tensor):
998
- shape_tensor = Tensor(size, dtype=mstype.int64)
999
- if shape_tensor.ndim == 0 and shape_tensor.size == 1:
1000
- shape_tensor = shape_tensor.reshape(1)
1001
- output = zero_op(shape_tensor, value)
1035
+ elif isinstance(size, list):
1036
+ size = Tensor(size, dtype=mstype.int64)
1037
+ elif isinstance(size, Tensor) and size.ndim == 0 and size.size == 1:
1038
+ size = size.reshape(1)
1039
+ output = zero_op(size, value)
1002
1040
  return output
1003
1041
 
1004
1042
 
@@ -1012,8 +1050,8 @@ def zeros_like(input, *, dtype=None):
1012
1050
  input (Tensor): Tensor of any dimension.
1013
1051
 
1014
1052
  Keyword Args:
1015
- dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is None,
1016
- the dtype of the input tensor will be used. Default: None.
1053
+ dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is ``None`` ,
1054
+ the dtype of the input tensor will be used. Default: ``None`` .
1017
1055
 
1018
1056
  Returns:
1019
1057
  Tensor, filled with 0.
@@ -1025,6 +1063,9 @@ def zeros_like(input, *, dtype=None):
1025
1063
  ``Ascend`` ``GPU`` ``CPU``
1026
1064
 
1027
1065
  Examples:
1066
+ >>> import mindspore
1067
+ >>> import numpy as np
1068
+ >>> from mindspore import Tensor, ops
1028
1069
  >>> x = Tensor(np.arange(4).reshape(2, 2))
1029
1070
  >>> output = ops.zeros_like(x, dtype=mindspore.float32)
1030
1071
  >>> print(output)
@@ -1032,9 +1073,10 @@ def zeros_like(input, *, dtype=None):
1032
1073
  [0. 0.]]
1033
1074
  """
1034
1075
  _dtype = input.dtype if dtype is None else dtype
1035
- zeros_like_op = P.ZerosLike()
1036
- output = zeros_like_op(input)
1037
- output = cast_(output, _dtype)
1076
+ _zeros_like = _get_cache_prim(P.ZerosLike)()
1077
+ _cast = _get_cache_prim(P.Cast)()
1078
+ output = _zeros_like(input)
1079
+ output = _cast(output, _dtype)
1038
1080
  return output
1039
1081
 
1040
1082
 
@@ -1052,6 +1094,7 @@ def tile(input, multiples):
1052
1094
  Args:
1053
1095
  input (Tensor): 1-D or higher dimensional Tensor. Set the shape of input tensor as
1054
1096
  :math:`(x_1, x_2, ..., x_S)` .
1097
+
1055
1098
  multiples (tuple[int]): The parameter that specifies the number of replications,
1056
1099
  the parameter type is tuple, and the data type is int, i.e., :math:`(y_1, y_2, ..., y_S)`.
1057
1100
  The length of `multiples` cannot be smaller than the length of the shape of `input`.
@@ -1062,7 +1105,7 @@ def tile(input, multiples):
1062
1105
  the dimension of `input` is `input.dim`, and the shape of `input` is :math:`(x_1, x_2, ..., x_S)`.
1063
1106
 
1064
1107
  - If `input.dim = d`, then the shape of their corresponding positions can be multiplied, and
1065
- the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_R)`.
1108
+ the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_S)`.
1066
1109
  - If `input.dim < d`, fill in multiple 1 in the length of the shape of `input` until their
1067
1110
  lengths are consistent. Such as set the shape of `input` as :math:`(1, ..., x_1, x_2, ..., x_S)`,
1068
1111
  then the shape of their corresponding positions can be multiplied, and the shape of Outputs is
@@ -1071,22 +1114,25 @@ def tile(input, multiples):
1071
1114
  Raises:
1072
1115
  TypeError: If `multiples` is not a tuple or its elements are not all int.
1073
1116
  ValueError: If the elements of `multiples` are not all greater than 0.
1074
- ValueError: If the length of `multiples` are smaller than the length of dimension in `input_x`.
1117
+ ValueError: If the length of `multiples` is smaller than the number of dimensions of `input`.
1075
1118
 
1076
1119
  Supported Platforms:
1077
1120
  ``Ascend`` ``GPU`` ``CPU``
1078
1121
 
1079
1122
  Examples:
1080
- >>> input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
1123
+ >>> import mindspore
1124
+ >>> import numpy as np
1125
+ >>> from mindspore import Tensor, ops
1126
+ >>> input = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
1081
1127
  >>> multiples = (2, 3)
1082
- >>> output = ops.tile(input_x, multiples)
1128
+ >>> output = ops.tile(input, multiples)
1083
1129
  >>> print(output)
1084
1130
  [[1. 2. 1. 2. 1. 2.]
1085
1131
  [3. 4. 3. 4. 3. 4.]
1086
1132
  [1. 2. 1. 2. 1. 2.]
1087
1133
  [3. 4. 3. 4. 3. 4.]]
1088
1134
  >>> multiples = (2, 3, 2)
1089
- >>> output = ops.tile(input_x, multiples)
1135
+ >>> output = ops.tile(input, multiples)
1090
1136
  >>> print(output)
1091
1137
  [[[1. 2. 1. 2.]
1092
1138
  [3. 4. 3. 4.]
@@ -1101,7 +1147,8 @@ def tile(input, multiples):
1101
1147
  [1. 2. 1. 2.]
1102
1148
  [3. 4. 3. 4.]]]
1103
1149
  """
1104
- return tile_(input, multiples)
1150
+ tile_op = _get_cache_prim(P.Tile)()
1151
+ return tile_op(input, multiples)
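A sketch of the rank-extension rule described in the docstring: when `multiples` is longer than `input.ndim`, the input shape is left-padded with 1s before the per-axis multiplication:

    import mindspore as ms
    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([1, 2]), ms.float32)  # shape (2,)
    y = ops.tile(x, (2, 2))                   # treated as (1, 2) -> output (2, 4)
    print(y)
    # [[1. 2. 1. 2.]
    #  [1. 2. 1. 2.]]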
1105
1152
 
1106
1153
 
1107
1154
  def range(start, end, step):
@@ -1135,6 +1182,8 @@ def range(start, end, step):
1135
1182
  ``GPU`` ``CPU``
1136
1183
 
1137
1184
  Examples:
1185
+ >>> from mindspore import Tensor, ops
1186
+ >>> from mindspore import dtype as mstype
1138
1187
  >>> start = Tensor(0, mstype.int32)
1139
1188
  >>> end = Tensor(10, mstype.int32)
1140
1189
  >>> step = Tensor(4, mstype.int32)
@@ -1159,7 +1208,7 @@ def unique(input):
1159
1208
  The shape of Tensor `y` and Tensor `idx` is different in most cases, because Tensor `y` will be deduplicated,
1160
1209
  and the shape of Tensor `idx` is consistent with the input.
1161
1210
 
1162
- To get the same shape between `idx` and `y`, please ref to :class:'mindspore.ops.UniqueWithPad' operator.
1211
+ To get the same shape between `idx` and `y`, please refer to the :class:`mindspore.ops.UniqueWithPad` operator.
1163
1212
 
1164
1213
  Args:
1165
1214
  input (Tensor): The input tensor.
@@ -1262,10 +1311,10 @@ def unique_consecutive(input, return_idx=False, return_counts=False, axis=None):
1262
1311
  Args:
1263
1312
  input (Tensor): The input tensor.
1264
1313
  return_idx (bool, optional): Whether to return the index of where the element in the original input
1265
- maps to the position in the output. Default: False.
1266
- return_counts (bool, optional): Whether to return the counts of each unique element. Default: False.
1267
- axis (int, optional): The dimension to apply unique. If None, the unique of the flattened input is
1268
- returned. If specified, it must be int32 or int64. Default: None.
1314
+ maps to the position in the output. Default: ``False`` .
1315
+ return_counts (bool, optional): Whether to return the counts of each unique element. Default: ``False`` .
1316
+ axis (int, optional): The dimension to apply unique. If ``None`` , the unique of the flattened input is
1317
+ returned. If specified, it must be int32 or int64. Default: ``None`` .
1269
1318
 
1270
1319
  Returns:
1271
1320
  A tensor or a tuple of tensors containing tensor objects (`output`, `idx`, `counts`). `output` has the
@@ -1287,6 +1336,9 @@ def unique_consecutive(input, return_idx=False, return_counts=False, axis=None):
1287
1336
  ``Ascend`` ``GPU`` ``CPU``
1288
1337
 
1289
1338
  Examples:
1339
+ >>> import numpy as np
1340
+ >>> from mindspore import Tensor, ops
1341
+ >>> from mindspore import dtype as mstype
1290
1342
  >>> x = Tensor(np.array([1, 1, 2, 2, 3, 1, 1, 2]), mstype.int32)
1291
1343
  >>> output, idx, counts = ops.unique_consecutive(x, True, True, None)
1292
1344
  >>> print(output)
@@ -1321,15 +1373,16 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):
1321
1373
  values (Tensor): The value that should be inserted.
1322
1374
 
1323
1375
  Keyword Args:
1324
- out_int32 (bool, optional): Output datatype. If True, the output datatype will be int32;
1325
- if False, the output datatype will be int64. Default: False.
1326
- right (bool, optional): Search Strategy. If True, return the last suitable index found;
1327
- if False, return the first such index. Default: False.
1376
+ out_int32 (bool, optional): Output datatype. If ``True`` , the output datatype will be int32;
1377
+ if ``False`` , the output datatype will be int64. Default: ``False`` .
1378
+ right (bool, optional): Search Strategy. If ``True`` , return the last suitable index found;
1379
+ if ``False`` , return the first such index. Default: ``False`` .
1328
1380
 
1329
1381
  Returns:
1330
1382
  Tensor containing the indices from the innermost dimension of `sorted_sequence` such that,
1331
1383
  if inserting the corresponding value in the `values` tensor, the order of `sorted_sequence` would be preserved,
1332
- whose datatype is int32 if out_int32 is True, otherwise int64, and shape is the same as the shape of `values`.
1384
+ whose datatype is int32 if out_int32 is ``True`` , otherwise int64, and shape is the same as the shape of
1385
+ `values`.
1333
1386
 
1334
1387
  Raises:
1335
1388
  ValueError: If the dimension of `sorted_sequence` isn't 1 and all dimensions except the last dimension of
@@ -1339,6 +1392,9 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):
1339
1392
  ``Ascend`` ``GPU`` ``CPU``
1340
1393
 
1341
1394
  Examples:
1395
+ >>> import mindspore
1396
+ >>> import numpy as np
1397
+ >>> from mindspore import Tensor, ops
1342
1398
  >>> sorted_sequence = Tensor(np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]]), mindspore.float32)
1343
1399
  >>> values = Tensor(np.array([[3, 6, 9], [3, 6, 9]]), mindspore.float32)
1344
1400
  >>> output = ops.searchsorted(sorted_sequence, values)
@@ -1379,6 +1435,8 @@ def ger(input, vec2):
1379
1435
  ``Ascend`` ``GPU`` ``CPU``
1380
1436
 
1381
1437
  Examples:
1438
+ >>> import mindspore
1439
+ >>> from mindspore import Tensor, ops
1382
1440
  >>> input = Tensor([1., 2., 3., 4.], mindspore.float32)
1383
1441
  >>> vec2 = Tensor([1., 2., 3.], mindspore.float32)
1384
1442
  >>> output = ops.ger(input, vec2)
@@ -1398,7 +1456,7 @@ def size(input_x):
1398
1456
 
1399
1457
  Args:
1400
1458
  input_x (Tensor): Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
1401
- `number <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_.
1459
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
1402
1460
 
1403
1461
  Returns:
1404
1462
  int. A scalar representing the elements' size of `input_x`, tensor is the number of elements
@@ -1411,6 +1469,9 @@ def size(input_x):
1411
1469
  ``Ascend`` ``GPU`` ``CPU``
1412
1470
 
1413
1471
  Examples:
1472
+ >>> import mindspore
1473
+ >>> import numpy as np
1474
+ >>> from mindspore import Tensor, ops
1414
1475
  >>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
1415
1476
  >>> output = ops.size(input_x)
1416
1477
  >>> print(output)
@@ -1437,6 +1498,9 @@ def shape(input_x):
1437
1498
  ``Ascend`` ``GPU`` ``CPU``
1438
1499
 
1439
1500
  Examples:
1501
+ >>> import mindspore
1502
+ >>> import numpy as np
1503
+ >>> from mindspore import Tensor, ops
1440
1504
  >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
1441
1505
  >>> output = ops.shape(input_x)
1442
1506
  >>> print(output)
@@ -1462,6 +1526,9 @@ def dyn_shape(input_x):
1462
1526
  ``Ascend`` ``GPU`` ``CPU``
1463
1527
 
1464
1528
  Examples:
1529
+ >>> import mindspore
1530
+ >>> import numpy as np
1531
+ >>> from mindspore import Tensor, ops
1465
1532
  >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
1466
1533
  >>> output = ops.dyn_shape(input_x)
1467
1534
  >>> print(output)
@@ -1490,6 +1557,9 @@ def rank(input_x):
1490
1557
  ``Ascend`` ``GPU`` ``CPU``
1491
1558
 
1492
1559
  Examples:
1560
+ >>> import mindspore
1561
+ >>> import numpy as np
1562
+ >>> from mindspore import Tensor, ops
1493
1563
  >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
1494
1564
  >>> output = ops.rank(input_tensor)
1495
1565
  >>> print(output)
@@ -1504,7 +1574,7 @@ def reshape(input, shape):
1504
1574
  """
1505
1575
  Rearranges the input Tensor based on the given shape.
1506
1576
 
1507
- The 'shape' can only have one -1 at most, in which case its inferred from the remaining dimensions and
1577
+ The 'shape' can only have one -1 at most, in which case it's inferred from the remaining dimensions and
1508
1578
  the number of elements in the input.
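For instance, a single -1 lets the missing dimension be derived from the element count; a minimal sketch:

    import mindspore as ms
    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.arange(6), ms.float32)   # 6 elements
    print(ops.reshape(x, (3, -1)).shape)   # (3, 2): the -1 is inferred as 6 / 3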
1509
1579
 
1510
1580
  Args:
@@ -1524,6 +1594,9 @@ def reshape(input, shape):
1524
1594
  ``Ascend`` ``GPU`` ``CPU``
1525
1595
 
1526
1596
  Examples:
1597
+ >>> import mindspore
1598
+ >>> import numpy as np
1599
+ >>> from mindspore import Tensor, ops
1527
1600
  >>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
1528
1601
  >>> output = ops.reshape(input, (3, 2))
1529
1602
  >>> print(output)
@@ -1535,26 +1608,34 @@ def reshape(input, shape):
1535
1608
 
1536
1609
 
1537
1610
  def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
1538
- """
1611
+ r"""
1539
1612
  Reverses variable length slices.
1540
1613
 
1541
1614
  Args:
1542
1615
  x (Tensor): The input to reverse, supporting all number types including bool.
1543
1616
  seq_lengths (Tensor): Specified reversing length, must be a 1-D vector with int32 or int64 types.
1544
1617
  seq_dim (int): The dimension where reversal is performed. Required.
1545
- batch_dim (int): The input is sliced in this dimension. Default: 0.
1618
+ batch_dim (int): The input is sliced in this dimension. Default: ``0`` .
1546
1619
 
1547
1620
  Returns:
1548
1621
  Tensor, with the same shape and data type as `x`.
1549
1622
 
1550
1623
  Raises:
1551
1624
  TypeError: If `seq_dim` or `batch_dim` is not an int.
1552
- ValueError: If value of `batch_dim` is equal to or greater than length of shape of input.
1625
+ ValueError: If :math:`len(seq\_lengths) != x.shape[batch\_dim]`.
1626
+ ValueError: If :math:`batch\_dim == seq\_dim`.
1627
+ ValueError: If :math:`seq\_dim < 0` or :math:`seq\_dim >= len(x.shape)`.
1628
+ ValueError: If :math:`batch\_dim < 0` or :math:`batch\_dim >= len(x.shape)`.
1629
+ RuntimeError: If any value of `seq_lengths` is less than 0.
1630
+ RuntimeError: If any value of `seq_lengths` is larger than `x.shape[seq_dim]`.
1553
1631
 
1554
1632
  Supported Platforms:
1555
1633
  ``Ascend`` ``GPU`` ``CPU``
1556
1634
 
1557
1635
  Examples:
1636
+ >>> import mindspore
1637
+ >>> import numpy as np
1638
+ >>> from mindspore import Tensor, ops
1558
1639
  >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
1559
1640
  >>> seq_lengths = Tensor(np.array([1, 2, 3]))
1560
1641
  >>> output = ops.reverse_sequence(x, seq_lengths, seq_dim=1)
@@ -1599,12 +1680,13 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
1599
1680
 
1600
1681
  Args:
1601
1682
  input (Tensor): The input Tensor.
1602
- order (str, optional): Only 'C' and 'F' are supported. 'C' means to flatten in row-major (C-style) order.
1603
- 'F' means to flatten in column-major (Fortran-style) order. Default: 'C'.
1683
+ order (str, optional): Only ``'C'`` and ``'F'`` are supported.
1684
+ ``'C'`` means to flatten in row-major (C-style) order.
1685
+ ``'F'`` means to flatten in column-major (Fortran-style) order. Default: ``'C'`` .
1604
1686
 
1605
1687
  Keyword Args:
1606
- start_dim (int, optional): The first dimension to flatten. Default: 1.
1607
- end_dim (int, optional): The last dimension to flatten. Default: -1.
1688
+ start_dim (int, optional): The first dimension to flatten. Default: ``1`` .
1689
+ end_dim (int, optional): The last dimension to flatten. Default: ``-1`` .
1608
1690
 
1609
1691
  Returns:
1610
1692
  Tensor. If no dimensions are flattened, returns the original `input`, otherwise return the flattened Tensor.
@@ -1622,26 +1704,38 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
1622
1704
  ``Ascend`` ``GPU`` ``CPU``
1623
1705
 
1624
1706
  Examples:
1707
+ >>> import mindspore
1708
+ >>> import numpy as np
1709
+ >>> from mindspore import Tensor, ops
1625
1710
  >>> input_x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
1626
1711
  >>> output = ops.flatten(input_x)
1627
1712
  >>> print(output.shape)
1628
1713
  (1, 24)
1629
1714
  """
1715
+
1716
+ def check_axis_valid(axis, ndim):
1717
+ if axis < -ndim or axis >= ndim:
1718
+ raise ValueError("'start_dim' or 'end_dim' out of range.")
1719
+
1720
+ def check_dim_valid(start_dim, end_dim):
1721
+ if start_dim > end_dim:
1722
+ raise ValueError("For 'flatten', 'start_dim' cannot come after 'end_dim'.")
1723
+
1630
1724
  def canonicalize_axis(axis, x_rank):
1631
1725
  ndim = x_rank if x_rank != 0 else 1
1632
- if axis < -ndim or axis >= ndim:
1633
- const_utils.raise_value_error("'start_dim' or 'end_dim' out of range.")
1726
+ check_axis_valid(axis, ndim)
1634
1727
  return axis if axis >= 0 else axis + ndim
1635
1728
 
1636
1729
  # Check the types of arguments.
1637
1730
  if not isinstance(input, Tensor):
1638
1731
  raise TypeError(f"For 'flatten', argument 'input' must be Tensor.")
1639
- if not isinstance(start_dim, int) or not isinstance(end_dim, int):
1732
+ if not isinstance(start_dim, int) or not isinstance(end_dim, int) or \
1733
+ isinstance(start_dim, bool) or isinstance(end_dim, bool):
1640
1734
  raise TypeError(f"For 'flatten', both 'start_dim' and 'end_dim' must be int.")
1641
1735
  check_flatten_order_const(order)
1642
1736
  if order == 'F':
1643
- perm = F.make_range(0, F.rank(input))
1644
- new_order = F.tuple_reversed(perm)
1737
+ perm = ops.make_range(0, ops.rank(input))
1738
+ new_order = ops.tuple_reversed(perm)
1645
1739
  input = _get_cache_prim(P.Transpose)()(input, new_order)
1646
1740
 
1647
1741
  # Handle the default case.
@@ -1655,8 +1749,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
1655
1749
  # Check axis.
1656
1750
  start_dim = canonicalize_axis(start_dim, x_rank)
1657
1751
  end_dim = canonicalize_axis(end_dim, x_rank)
1658
- if start_dim > end_dim:
1659
- const_utils.raise_value_error("For 'flatten', 'start_dim' cannot come after 'end_dim'.")
1752
+ check_dim_valid(start_dim, end_dim)
1660
1753
  # If input is a 0-dimensional Tensor, a 1-dimensional Tensor will be returned.
1661
1754
  if x_rank in (0, 1):
1662
1755
  return reshape_(input, (-1,))
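A usage sketch of the axis handling above: negative dims are canonicalized into [0, rank), then the span from `start_dim` to `end_dim` is collapsed into one axis:

    import mindspore as ms
    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.ones((2, 3, 4, 5)), ms.float32)
    print(ops.flatten(x, start_dim=1, end_dim=2).shape)   # (2, 12, 5)
    print(ops.flatten(x, start_dim=0, end_dim=-1).shape)  # (120,)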
@@ -1779,6 +1872,8 @@ def select(cond, x, y):
1779
1872
  ``Ascend`` ``GPU`` ``CPU``
1780
1873
 
1781
1874
  Examples:
1875
+ >>> import mindspore
1876
+ >>> from mindspore import Tensor, ops
1782
1877
  >>> # 1) Both inputs are Tensor
1783
1878
  >>>
1784
1879
  >>> cond = Tensor([True, False])
@@ -1822,15 +1917,15 @@ def select(cond, x, y):
1822
1917
  input_y = cast_(input_y, mstype.float32)
1823
1918
 
1824
1919
  if is_x_tensor and is_y_tensor and is_cond_tensor:
1825
- x_shape = F.shape(x)
1826
- y_shape = F.shape(y)
1827
- cond_shape = F.shape(cond)
1828
- all_constant = F.isconstant(cond_shape) and F.isconstant(x_shape) and F.isconstant(y_shape)
1920
+ x_shape = ops.shape(x)
1921
+ y_shape = ops.shape(y)
1922
+ cond_shape = ops.shape(cond)
1923
+ all_constant = ops.isconstant(cond_shape) and ops.isconstant(x_shape) and ops.isconstant(y_shape)
1829
1924
  if all_constant and not _check_select_shape_same(cond_shape, x_shape, y_shape):
1830
1925
  broadcast_shape = _calc_broadcast_shape(cond_shape, x_shape, y_shape)
1831
- new_cond = F.broadcast_to(cond, broadcast_shape)
1832
- new_x = F.broadcast_to(x, broadcast_shape)
1833
- new_y = F.broadcast_to(y, broadcast_shape)
1926
+ new_cond = ops.broadcast_to(cond, broadcast_shape)
1927
+ new_x = ops.broadcast_to(x, broadcast_shape)
1928
+ new_y = ops.broadcast_to(y, broadcast_shape)
1834
1929
  return tensor_select_(new_cond, new_x, new_y)
1835
1930
 
1836
1931
  return tensor_select_(cond, input_x, input_y)
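The constant-shape path above broadcasts `cond`, `x`, and `y` to a common shape before `Select` runs; a hedged sketch of that behaviour:

    import mindspore as ms
    from mindspore import Tensor, ops

    cond = Tensor([True, False])                      # shape (2,)
    x = Tensor([[1.0, 2.0], [3.0, 4.0]], ms.float32)  # shape (2, 2)
    y = Tensor(0.5, ms.float32)                       # shape ()
    print(ops.select(cond, x, y))                     # all three broadcast to (2, 2)
    # [[1.  0.5]
    #  [3.  0.5]]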
@@ -1852,7 +1947,7 @@ def strided_slice(input_x,
1852
1947
  Starting from the beginning position, the fragment continues adding strides to the index until
1853
1948
  all dimensions are not less than the ending position.
1854
1949
 
1855
- .. warning::
1950
+ Note:
1856
1951
  - `begin` , `end` and `strides` must have the same shape.
1857
1952
  - `begin` , `end` and `strides` are all 1-D Tensor, and their shape size
1858
1953
  must not be greater than the dim of `input_x`.
@@ -1921,17 +2016,15 @@ def strided_slice(input_x,
1921
2016
  Args:
1922
2017
  input_x (Tensor): The input Tensor to be extracted from.
1923
2018
  begin (tuple[int]): A tuple which represents the location where to start.
1924
- Only non-negative int is allowed.
1925
2019
  end (tuple[int]): A tuple which represents the maximum location where to end.
1926
- Only non-negative int is allowed.
1927
2020
  strides (tuple[int]): A tuple which represents the stride that is continuously added
1928
2021
  before reaching the maximum location. Only int is allowed; it can be negative,
1929
2022
  which results in reversed slicing.
1930
- begin_mask (int, optional): Starting index of the slice. Default: 0.
1931
- end_mask (int, optional): Ending index of the slice. Default: 0.
1932
- ellipsis_mask (int, optional): An int mask, ignore slicing operation when set to 1. Default: 0.
1933
- new_axis_mask (int, optional): An int mask for adding new dims. Default: 0.
1934
- shrink_axis_mask (int, optional): An int mask for shrinking dims. Default: 0.
2023
+ begin_mask (int, optional): Starting index of the slice. Default: ``0`` .
2024
+ end_mask (int, optional): Ending index of the slice. Default: ``0`` .
2025
+ ellipsis_mask (int, optional): An int mask, ignore slicing operation when set to 1. Default: ``0`` .
2026
+ new_axis_mask (int, optional): An int mask for adding new dims. Default: ``0`` .
2027
+ shrink_axis_mask (int, optional): An int mask for shrinking dims. Default: ``0`` .
1935
2028
 
1936
2029
  Returns:
1937
2030
  Tensor, the extracted strided slice of the Tensor, based on the `begin`/`end` indices and `strides`.
@@ -1948,6 +2041,8 @@ def strided_slice(input_x,
1948
2041
  ``Ascend`` ``GPU`` ``CPU``
1949
2042
 
1950
2043
  Examples:
2044
+ >>> import mindspore
2045
+ >>> from mindspore import Tensor, ops
1951
2046
  >>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
1952
2047
  ... [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
1953
2048
  >>> output = ops.strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1))
@@ -2061,7 +2156,18 @@ def slice(input_x, begin, size):
2061
2156
 
2062
2157
 
2063
2158
  def concat(tensors, axis=0):
2064
- """Alias for :func:`mindspore.ops.cat()`"""
2159
+ """
2160
+ Alias for :func:`mindspore.ops.cat()`.
2161
+
2162
+ Tutorial Examples:
2163
+ - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/r2.2/beginner/tensor.html#tensor-operation>`_
2164
+ - `FGSM Network Adversarial Attack - Implementing FGSM
2165
+ <https://mindspore.cn/tutorials/application/en/r2.2/cv/fgsm.html#implementing-fgsm>`_
2166
+ - `Vision Transformer Image Classification - Building ViT as a whole
2167
+ <https://mindspore.cn/tutorials/application/en/r2.2/cv/vit.html#building-vit-as-a-whole>`_
2168
+ - `Sentiment Classification Implemented by RNN - Dense
2169
+ <https://mindspore.cn/tutorials/application/en/r2.2/nlp/sentiment_analysis.html#dense>`_
2170
+ """
2065
2171
  return cat(tensors, axis)
2066
2172
 
2067
2173
 
@@ -2077,15 +2183,14 @@ def stack(tensors, axis=0):
2077
2183
 
2078
2184
  Args:
2079
2185
  tensors (Union[tuple, list]): A Tuple or list of Tensor objects with the same shape and type.
2080
- axis (int): Dimension to stack. Default: 0.
2081
- Negative values wrap around. The range is [-(R+1), R+1).
2186
+ axis (int): Dimension to stack. The range is [-(R+1), R+1). Default: ``0`` .
2082
2187
 
2083
2188
  Returns:
2084
2189
  Tensor. A stacked Tensor with the same type as `tensors`.
2085
2190
 
2086
2191
  Raises:
2087
2192
  TypeError: If the data types of elements in `tensors` are not the same.
2088
- ValueError: If the length of `tensors` is not greater than 0;
2193
+ ValueError: If the length of `tensors` is not greater than zero;
2089
2194
  or if axis is out of the range [-(R+1), R+1);
2090
2195
  or if the shapes of elements in tensors are not the same.
2091
2196
 
@@ -2093,6 +2198,8 @@ def stack(tensors, axis=0):
2093
2198
  ``Ascend`` ``GPU`` ``CPU``
2094
2199
 
2095
2200
  Examples:
2201
+ >>> import numpy as np
2202
+ >>> from mindspore import Tensor, ops
2096
2203
  >>> input_x1 = Tensor(np.array([0, 1]).astype(np.float32))
2097
2204
  >>> input_x2 = Tensor(np.array([2, 3]).astype(np.float32))
2098
2205
  >>> output = ops.stack((input_x1, input_x2), 0)
@@ -2106,23 +2213,19 @@ def stack(tensors, axis=0):
2106
2213
 
2107
2214
  def unstack(input_x, axis=0):
2108
2215
  r"""
2109
- Unstacks tensor in specified axis.
2110
-
2111
- Unstacks a tensor of rank `R` along axis dimension, output tensors will have rank `(R-1)`.
2112
-
2113
- Given a tensor of shape :math:`(x_1, x_2, ..., x_R)`. If :math:`0 \le axis`,
2114
- the shape of tensor in output is :math:`(x_1, x_2, ..., x_{axis}, x_{axis+2}, ..., x_R)`.
2115
-
2116
- This is the opposite of pack.
2216
+ Unstacks tensor in specified axis, this is the opposite of :func:`mindspore.ops.stack`.
2217
+ Assuming input is a tensor of rank `R`, output tensors will have rank `(R-1)`.
2117
2218
 
2118
2219
  Args:
2119
2220
  input_x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`.
2120
2221
  A tensor to be unstacked and the rank of the tensor must be greater than 0.
2121
- axis (int): Dimension along which to unpack. Default: 0.
2222
+ axis (int): Dimension along which to unpack. Default: ``0`` .
2122
2223
  Negative values wrap around. The range is [-R, R).
2123
2224
 
2124
2225
  Returns:
2125
2226
  A tuple of tensors, the shape of each objects is the same.
2227
+ Given a tensor of shape :math:`(x_1, x_2, ..., x_R)`. If :math:`0 \le axis`,
2228
+ the shape of tensor in output is :math:`(x_1, x_2, ..., x_{axis}, x_{axis+2}, ..., x_R)`.
2126
2229
 
2127
2230
  Raises:
2128
2231
  ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)).
@@ -2131,6 +2234,8 @@ def unstack(input_x, axis=0):
2131
2234
  ``Ascend`` ``GPU`` ``CPU``
2132
2235
 
2133
2236
  Examples:
2237
+ >>> import numpy as np
2238
+ >>> from mindspore import Tensor, ops
2134
2239
  >>> input_x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))
2135
2240
  >>> output = ops.unstack(input_x, 0)
2136
2241
  >>> print(output)
@@ -2152,7 +2257,7 @@ def unbind(input, dim=0):
2152
2257
  Args:
2153
2258
  input (Tensor): The shape is :math:`(n_1, n_2, ..., n_R)`.
2154
2259
  A tensor to be unstacked and the rank of the tensor must be greater than 0.
2155
- dim (int): Dimension along which to unpack. Negative values wrap around. The range is [-R, R). Default: 0.
2260
+ dim (int): Dimension along which to unpack. Negative values wrap around. The range is [-R, R). Default: ``0`` .
2156
2261
 
2157
2262
  Returns:
2158
2263
  A tuple of tensors, each with the same shape.
@@ -2164,6 +2269,8 @@ def unbind(input, dim=0):
2164
2269
  ``Ascend`` ``GPU`` ``CPU``
2165
2270
 
2166
2271
  Examples:
2272
+ >>> import numpy as np
2273
+ >>> from mindspore import Tensor, ops
2167
2274
  >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
2168
2275
  >>> output = ops.unbind(x, dim=0)
2169
2276
  >>> print(output)
@@ -2176,7 +2283,8 @@ def unbind(input, dim=0):
2176
2283
 
2177
2284
  def expand_dims(input_x, axis):
2178
2285
  """
2179
- Adds an additional dimension to `input_x` at the given axis.
2286
+ Adds an additional dimension to `input_x` at the given axis; the dimension
2287
+ of `input_x` should be greater than or equal to 1.
2180
2288
 
2181
2289
  Note:
2182
2290
  If the specified axis is a negative number, the index is counted
@@ -2200,6 +2308,9 @@ def expand_dims(input_x, axis):
2200
2308
  ``Ascend`` ``GPU`` ``CPU``
2201
2309
 
2202
2310
  Examples:
2311
+ >>> import mindspore
2312
+ >>> import numpy as np
2313
+ >>> from mindspore import Tensor, ops
2203
2314
  >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
2204
2315
  >>> output = ops.expand_dims(input_tensor, 0)
2205
2316
  >>> print(output)
@@ -2231,6 +2342,9 @@ def unsqueeze(input, dim):
2231
2342
  ``Ascend`` ``GPU`` ``CPU``
2232
2343
 
2233
2344
  Examples:
2345
+ >>> import mindspore
2346
+ >>> import numpy as np
2347
+ >>> from mindspore import Tensor, ops
2234
2348
  >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
2235
2349
  >>> output = ops.unsqueeze(input_tensor, dim=0)
2236
2350
  >>> print(output)
@@ -2248,33 +2362,37 @@ def squeeze(input, axis=None):
2248
2362
  If `axis` is specified, it will remove the dimensions of size 1 in the given `axis`.
2249
2363
  For example, if the dimension is not specified :math:`axis=None`, input shape is (A, 1, B, C, 1, D),
2250
2364
  then the shape of the output Tensor is (A, B, C, D). If the dimension is specified, the squeeze operation
2251
- is only performed in the specified dimension. If input shape is (A, 1, B), input Tensor will not be
2252
- changed when :math:`axis=0` , but when :math:`axis=1` , the shape of the input Tensor will be changed to (A, B).
2365
+ is only performed in the specified dimension. If input shape is (A, 1, B), input Tensor will be changed
2366
+ to (A, B) when :math:`axis=1`, but when :math:`axis=0` or :math:`axis=2`, an error will occur.
2253
2367
 
2254
2368
  Note:
2369
+ - Squeezing a dimension that is not 1 will raise an error.
2255
2370
  - Please note that in dynamic graph mode, the output Tensor will share data with the input Tensor,
2256
2371
  and there is no Tensor data copy process.
2257
2372
  - The dimension index starts at 0 and must be in the range `[-input.ndim, input.ndim]`.
2258
2373
 
2259
2374
  Args:
2260
2375
  input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
2261
- axis (Union[int, tuple(int)]): Specifies the dimension indexes of shape to be removed, which will remove
2262
- all the dimensions of size 1 in the given axis parameter. If specified, it must be int32 or int64.
2263
- Default: None, an empty tuple will be used.
2376
+ axis (Union[int, tuple(int), list(int)]): Specifies the dimension indexes of shape to be removed, which will
2377
+ remove all the dimensions of size 1 in the given axis parameter. If specified, it must be int32 or int64.
2378
+ Default: ``None`` , an empty tuple will be used.
2264
2379
 
2265
2380
  Returns:
2266
2381
  Tensor, the shape of tensor is :math:`(x_1, x_2, ..., x_S)`.
2267
2382
 
2268
2383
  Raises:
2269
2384
  TypeError: If `input` is not a tensor.
2270
- TypeError: If `axis` is neither an int nor tuple.
2271
- TypeError: If `axis` is a tuple whose elements are not all int.
2385
+ TypeError: If `axis` is not an int, tuple or list.
2386
+ TypeError: If `axis` is a tuple or list whose elements are not all int.
2272
2387
  ValueError: If the corresponding dimension of the specified axis isn't equal to 1.
2273
2388
 
2274
2389
  Supported Platforms:
2275
2390
  ``Ascend`` ``GPU`` ``CPU``
2276
2391
 
2277
2392
  Examples:
2393
+ >>> import mindspore
2394
+ >>> import numpy as np
2395
+ >>> from mindspore import Tensor, ops
2278
2396
  >>> input = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
2279
2397
  >>> output = ops.squeeze(input)
2280
2398
  >>> print(output)
@@ -2284,6 +2402,8 @@ def squeeze(input, axis=None):
2284
2402
  """
2285
2403
  if axis is None:
2286
2404
  axis = ()
2405
+ if isinstance(axis, list):
2406
+ axis = tuple(axis)
2287
2407
  squeeze_ = _get_cache_prim(P.Squeeze)(axis)
2288
2408
  return squeeze_(input)
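The list-to-tuple shim above means `axis` may now be given as a list; a short sketch:

    import mindspore as ms
    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.ones((3, 1, 2, 1)), ms.float32)
    print(ops.squeeze(x, axis=[1, 3]).shape)   # (3, 2): both size-1 dims removed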
2289
2409
 
@@ -2322,6 +2442,9 @@ def transpose(input, input_perm):
2322
2442
  ``Ascend`` ``GPU`` ``CPU``
2323
2443
 
2324
2444
  Examples:
2445
+ >>> import mindspore
2446
+ >>> import numpy as np
2447
+ >>> from mindspore import Tensor, ops
2325
2448
  >>> input = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
2326
2449
  >>> input_perm = (0, 2, 1)
2327
2450
  >>> output = ops.transpose(input, input_perm)
@@ -2354,7 +2477,7 @@ def scatter_mul(input_x, indices, updates):
2354
2477
 
2355
2478
  Args:
2356
2479
  input_x (Parameter): The target tensor to be updated, with data type of Parameter.
2357
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
2480
+ The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.
2358
2481
  indices (Tensor): The index to do mul operation whose data type must be int32 or int64.
2359
2482
  updates (Tensor): The tensor doing the mul operation with `input_x`,
2360
2483
  the data type is same as `input_x`, the shape is `indices.shape + input_x.shape[1:]`.
@@ -2363,7 +2486,6 @@ def scatter_mul(input_x, indices, updates):
2363
2486
  Tensor, the updated `input_x`, has the same shape and type as `input_x`.
2364
2487
 
2365
2488
  Raises:
2366
- TypeError: If `use_locking` is not a bool.
2367
2489
  TypeError: If `indices` is not an int32 or int64.
2368
2490
  ValueError: If the shape of `updates` is not equal to `indices.shape + input_x.shape[1:]`.
2369
2491
  RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
@@ -2373,6 +2495,9 @@ def scatter_mul(input_x, indices, updates):
2373
2495
  ``Ascend`` ``GPU`` ``CPU``
2374
2496
 
2375
2497
  Examples:
2498
+ >>> import mindspore
2499
+ >>> import numpy as np
2500
+ >>> from mindspore import Tensor, ops, Parameter
2376
2501
  >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name="x")
2377
2502
  >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
2378
2503
  >>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mindspore.float32)
@@ -2442,7 +2567,7 @@ def scatter_max(input_x, indices, updates):
2442
2567
  .. math::
2443
2568
 
2444
2569
  \text{input_x}[\text{indices}[i, ..., j], :]
2445
- = max(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])
2570
+ = \max(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])
2446
2571
 
2447
2572
  Inputs of `input_x` and `updates` follow the implicit type conversion rules to keep the data types consistent.
2448
2573
  If they have different data types, the lower priority data type will be converted to the relatively highest
@@ -2451,6 +2576,7 @@ def scatter_max(input_x, indices, updates):
2451
2576
 
2452
2577
  Args:
2453
2578
  input_x (Parameter): The target tensor, with data type of Parameter.
2579
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
2454
2580
  indices (Tensor): The index to do max operation whose data type must be mindspore.int32.
2455
2581
  updates (Tensor): The tensor doing the max operation with `input_x`,
2456
2582
  the data type is same as `input_x`, the shape is `indices.shape + x.shape[1:]`.
@@ -2470,6 +2596,9 @@ def scatter_max(input_x, indices, updates):
2470
2596
  ``Ascend`` ``GPU`` ``CPU``
2471
2597
 
2472
2598
  Examples:
2599
+ >>> import mindspore
2600
+ >>> import numpy as np
2601
+ >>> from mindspore import Tensor, ops, Parameter
2473
2602
  >>> input_x = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32), name="input_x")
2474
2603
  >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
2475
2604
  >>> updates = Tensor(np.ones([2, 2, 3]) * 88, mindspore.float32)
@@ -2488,7 +2617,6 @@ def scatter_add(input_x, indices, updates):
2488
2617
 
2489
2618
  Args:
2490
2619
  input_x (Parameter): The target tensor, with data type of Parameter.
2491
- The shape is :math:`(N, *)` where :math:`*` means,any number of additional dimensions.
2492
2620
  indices (Tensor): The index to do add operation whose data type must be int32 or int64.
2493
2621
  updates (Tensor): The tensor doing the add operation with `input_x`,
2494
2622
  the data type is same as `input_x`, the shape is `indices.shape + x.shape[1:]`.
@@ -2532,7 +2660,7 @@ def scatter_min(input_x, indices, updates):
2532
2660
  .. math::
2533
2661
 
2534
2662
  \text{input_x}[\text{indices}[i, ..., j], :]
2535
- = min(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])
2663
+ = \min(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])
2536
2664
 
2537
2665
  Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
2538
2666
  If they have different data types, the lower priority data type will be converted to
@@ -2599,7 +2727,7 @@ def scatter_div(input_x, indices, updates):
2599
2727
  the shape is `indices.shape + input_x.shape[1:]`.
2600
2728
 
2601
2729
  Returns:
2602
- Tensor, the updated `input_x`, has the same type and shape as `input_x`.
2730
+ Tensor, the updated `input_x`, has the same shape and type as `input_x`.
2603
2731
 
2604
2732
  Raises:
2605
2733
  TypeError: If the type of `indices` is not one of the following dtype: int32, int64.
@@ -2613,6 +2741,9 @@ def scatter_div(input_x, indices, updates):
2613
2741
  ``Ascend`` ``GPU`` ``CPU``
2614
2742
 
2615
2743
  Examples:
2744
+ >>> import mindspore
2745
+ >>> import numpy as np
2746
+ >>> from mindspore import Tensor, ops, Parameter
2616
2747
  >>> input_x = Parameter(Tensor(np.array([[6.0, 6.0, 6.0], [2.0, 2.0, 2.0]]), mindspore.float32), name="x")
2617
2748
  >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
2618
2749
  >>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mindspore.float32)
@@ -2701,6 +2832,9 @@ def scatter_nd(indices, updates, shape):
2701
2832
  ``Ascend`` ``GPU`` ``CPU``
2702
2833
 
2703
2834
  Examples:
2835
+ >>> import mindspore
2836
+ >>> import numpy as np
2837
+ >>> from mindspore import Tensor, ops
2704
2838
  >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
2705
2839
  >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2],
2706
2840
  ... [3, 3, 3, 3], [4, 4, 4, 4]],
@@ -2796,6 +2930,9 @@ def scatter_update(input_x, indices, updates):
2796
2930
  ``Ascend`` ``GPU`` ``CPU``
2797
2931
 
2798
2932
  Examples:
2933
+ >>> import mindspore
2934
+ >>> import numpy as np
2935
+ >>> from mindspore import Tensor, ops
2799
2936
  >>> np_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
2800
2937
  >>> input_x = mindspore.Parameter(Tensor(np_x, mindspore.float32), name="x")
2801
2938
  >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
@@ -2832,7 +2969,7 @@ def scatter_nd_add(input_x, indices, updates, use_locking=False):
2832
2969
  The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
2833
2970
  updates (Tensor): The tensor doing the addition operation with `input_x`,
2834
2971
  the data type is same as `input_x`, the shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
2835
- use_locking (bool): Whether to protect the assignment by a lock. Default: False.
2972
+ use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .
2836
2973
 
2837
2974
  Returns:
2838
2975
  Tensor, the updated `input_x`, has the same shape and type as `input_x`.
@@ -2849,6 +2986,9 @@ def scatter_nd_add(input_x, indices, updates, use_locking=False):
2849
2986
  ``Ascend`` ``GPU`` ``CPU``
2850
2987
 
2851
2988
  Examples:
2989
+ >>> import mindspore
2990
+ >>> import numpy as np
2991
+ >>> from mindspore import Tensor, ops, Parameter
2852
2992
  >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
2853
2993
  >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
2854
2994
  >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
@@ -2904,7 +3044,7 @@ def scatter_nd_sub(input_x, indices, updates, use_locking=False):
2904
3044
  The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
2905
3045
  updates (Tensor): The tensor doing the subtraction operation with `input_x`, has the same type as input.
2906
3046
  The shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
2907
- use_locking (bool): Whether to protect the assignment by a lock. Default: False.
3047
+ use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .
2908
3048
 
2909
3049
  Returns:
2910
3050
  Tensor, has the same shape and type as `input_x`.
@@ -2921,6 +3061,9 @@ def scatter_nd_sub(input_x, indices, updates, use_locking=False):
2921
3061
  ``Ascend`` ``GPU`` ``CPU``
2922
3062
 
2923
3063
  Examples:
3064
+ >>> import mindspore
3065
+ >>> import numpy as np
3066
+ >>> from mindspore import Tensor, ops, Parameter
2924
3067
  >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
2925
3068
  >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
2926
3069
  >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
@@ -2976,7 +3119,7 @@ def scatter_nd_mul(input_x, indices, updates, use_locking=False):
2976
3119
  mindspore.int64. The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
2977
3120
  updates (Tensor): The tensor to do the multiplication operation with `input_x`.
2978
3121
  The data type is same as `input_x`, and the shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
2979
- use_locking (bool): Whether to protect the assignment by a lock. Default: False.
3122
+ use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .
2980
3123
 
2981
3124
  Returns:
2982
3125
  Tensor, the updated `input_x`, has the same shape and type as `input_x`.
@@ -2993,6 +3136,9 @@ def scatter_nd_mul(input_x, indices, updates, use_locking=False):
2993
3136
  ``GPU`` ``CPU``
2994
3137
 
2995
3138
  Examples:
3139
+ >>> import mindspore
3140
+ >>> import numpy as np
3141
+ >>> from mindspore import Tensor, ops, Parameter
2996
3142
  >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
2997
3143
  >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
2998
3144
  >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
@@ -3048,7 +3194,7 @@ def scatter_nd_div(input_x, indices, updates, use_locking=False):
3048
3194
  The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
3049
3195
  updates (Tensor): The tensor to do the div operation with `input_x`.
3050
3196
  The data type is same as `input_x`, and the shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
3051
- use_locking (bool): Whether to protect the assignment by a lock. Default: False.
3197
+ use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .
3052
3198
 
3053
3199
  Returns:
3054
3200
  Tensor, the updated `input_x`, has the same shape and type as `input_x`.
@@ -3065,6 +3211,9 @@ def scatter_nd_div(input_x, indices, updates, use_locking=False):
3065
3211
  ``GPU`` ``CPU``
3066
3212
 
3067
3213
  Examples:
3214
+ >>> import mindspore
3215
+ >>> import numpy as np
3216
+ >>> from mindspore import Tensor, ops, Parameter
3068
3217
  >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
3069
3218
  >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
3070
3219
  >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
@@ -3121,7 +3270,7 @@ def scatter_nd_max(input_x, indices, updates, use_locking=False):
3121
3270
  The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
3122
3271
  updates (Tensor): The tensor to do the max operation with `input_x`.
3123
3272
  The data type is same as `input_x`, and the shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
3124
- use_locking (bool): Whether to protect the assignment by a lock. Default: False.
3273
+ use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .
3125
3274
 
3126
3275
  Returns:
3127
3276
  Tensor, the updated `input_x`, has the same shape and type as `input_x`.
@@ -3138,6 +3287,9 @@ def scatter_nd_max(input_x, indices, updates, use_locking=False):
3138
3287
  ``Ascend`` ``GPU`` ``CPU``
3139
3288
 
3140
3289
  Examples:
3290
+ >>> import mindspore
3291
+ >>> import numpy as np
3292
+ >>> from mindspore import Tensor, ops, Parameter
3141
3293
  >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
3142
3294
  >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
3143
3295
  >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
@@ -3189,12 +3341,11 @@ def scatter_nd_min(input_x, indices, updates, use_locking=False):
3189
3341
 
3190
3342
  Args:
3191
3343
  input_x (Parameter): The target tensor, with data type of Parameter.
3192
- The shape is :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
3193
3344
  indices (Tensor): The index to do min operation whose data type must be mindspore.int32 or mindspore.int64.
3194
3345
  The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
3195
3346
  updates (Tensor): The tensor to do the min operation with `input_x`.
3196
3347
  The data type is same as `input_x`, and the shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
3197
- use_locking (bool): Whether to protect the assignment by a lock. Default: False.
3348
+ use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .
3198
3349
 
3199
3350
  Returns:
3200
3351
  Tensor, the updated `input_x`, has the same shape and type as `input_x`.
@@ -3211,6 +3362,9 @@ def scatter_nd_min(input_x, indices, updates, use_locking=False):
3211
3362
  ``Ascend`` ``GPU`` ``CPU``
3212
3363
 
3213
3364
  Examples:
3365
+ >>> import mindspore
3366
+ >>> import numpy as np
3367
+ >>> from mindspore import Tensor, ops, Parameter
3214
3368
  >>> input_x = Parameter(Tensor(np.ones(8) * 10, mindspore.float32), name="x")
3215
3369
  >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
3216
3370
  >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
@@ -3251,9 +3405,10 @@ def sort(input_x, axis=-1, descending=False):
3251
3405
  Args:
3252
3406
  input_x(Tensor): The input tensor to sort.
3253
3407
  The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.
3254
- axis (int, optional): The dimension to sort along. Default: -1.
3408
+ axis (int, optional): The dimension to sort along. Default: ``-1`` , which means the last dimension.
3409
+ The Ascend backend only supports sorting the last dimension.
3255
3410
  descending (bool, optional): Controls the sort order. If `descending` is True, the elements
3256
- are sorted in descending order, or else sorted in ascending order. Default: False.
3411
+ are sorted in descending order, or else sorted in ascending order. Default: ``False`` .
3257
3412
 
3258
3413
  .. warning::
3259
3414
  Currently, the data types of Float16, UInt8, Int8, Int16, Int32, Int64 are well supported.
@@ -3275,6 +3430,9 @@ def sort(input_x, axis=-1, descending=False):
3275
3430
  ``Ascend`` ``GPU`` ``CPU``
3276
3431
 
3277
3432
  Examples:
3433
+ >>> import mindspore
3434
+ >>> import numpy as np
3435
+ >>> from mindspore import Tensor, ops
3278
3436
  >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
3279
3437
  >>> output = ops.sort(x)
3280
3438
  >>> # The output below is based on the Ascend platform.
@@ -3297,9 +3455,10 @@ def argsort(input, axis=-1, descending=False):
3297
3455
 
3298
3456
  Args:
3299
3457
  input(Tensor): The input tensor to sort.
3300
- axis (int): The axis to sort along. Default: -1, means the last axis
3458
+ axis (int): The axis to sort along. Default: ``-1`` , which means the last dimension.
3459
+ The Ascend backend only supports sorting the last dimension.
3301
3460
  descending (bool): The sort order. If `descending` is True then the elements
3302
- are sorted in descending order by value. Otherwise sort in ascending order. Default: False.
3461
+ are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
3303
3462
 
3304
3463
  Returns:
3305
3464
  Tensor, the indices of sorted input tensor. Data type is int32.
@@ -3308,6 +3467,9 @@ def argsort(input, axis=-1, descending=False):
3308
3467
  ``Ascend`` ``GPU`` ``CPU``
3309
3468
 
3310
3469
  Examples:
3470
+ >>> import mindspore
3471
+ >>> import numpy as np
3472
+ >>> from mindspore import Tensor, ops
3311
3473
  >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
3312
3474
  >>> sort = ops.argsort(x)
3313
3475
  >>> print(sort)
@@ -3331,34 +3493,42 @@ def gather(input_params, input_indices, axis, batch_dims=0):
3331
3493
  where params represents the input `input_params`, and indices represents the index to be sliced `input_indices`.
3332
3494
 
3333
3495
  .. note::
3334
- 1. The value of input_indices must be in the range of `[0, input_param.shape[axis])`, the result is undefined
3335
- out of range.
3496
+ 1. The value of input_indices must be in the range of `[0, input_params.shape[axis])`.
2379
+ On CPU and GPU, an error is raised if an out-of-bound index is found. On Ascend, the results may be
2380
+ undefined.
3336
3499
 
3337
3500
  2. The data type of input_params cannot be
3338
- `bool_ <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_ on Ascend
3501
+ `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ on Ascend
3339
3502
  platform currently.
3340
3503
 
3341
3504
  Args:
3342
3505
  input_params (Tensor): The original Tensor. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
3343
3506
  input_indices (Tensor): Index tensor to be sliced, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
3344
3507
  Specifies the indices of elements of the original Tensor. The data type can be int32 or int64.
3345
- axis (int): Specifies the dimension index to gather indices. It must be greater than or equal to `batch_dims`.
3508
+ axis (Union[int, Tensor[int]]): Specifies the dimension index to gather indices.
3509
+ It must be greater than or equal to `batch_dims`.
3510
+ When `axis` is a Tensor, the size must be 1.
3346
3511
  batch_dims (int): Specifies the number of batch dimensions. It must be less than or equal to the rank
3347
- of `input_indices`. Default: 0.
3512
+ of `input_indices`. Default: ``0`` .
3348
3513
 
3349
3514
  Returns:
3350
3515
  Tensor, the shape of tensor is
3351
3516
  :math:`input\_params.shape[:axis] + input\_indices.shape[batch\_dims:] + input\_params.shape[axis + 1:]`.
3352
3517
 
3353
3518
  Raises:
3354
- TypeError: If `axis` is not an int.
3355
- TypeError: If `input_params` is not a tensor.
3356
- TypeError: If `input_indices` is not a tensor of type int.
3519
+ TypeError: If `axis` is not an int or Tensor.
3520
+ ValueError: If `axis` is a Tensor and its size is not 1.
3521
+ TypeError: If `input_params` is not a tensor.
3522
+ TypeError: If `input_indices` is not a tensor of type int.
3523
+ RuntimeError: If `input_indices` is out of range `[0, input_params.shape[axis])` on CPU or GPU.
3357
3524
 
3358
3525
  Supported Platforms:
3359
3526
  ``Ascend`` ``GPU`` ``CPU``
3360
3527
 
3361
3528
  Examples:
3529
+ >>> import mindspore
3530
+ >>> import numpy as np
3531
+ >>> from mindspore import Tensor, ops
3362
3532
  >>> # case1: input_indices is a Tensor with shape (5, ).
3363
3533
  >>> input_params = Tensor(np.array([1, 2, 3, 4, 5, 6, 7]), mindspore.float32)
3364
3534
  >>> input_indices = Tensor(np.array([0, 2, 4, 2, 6]), mindspore.int32)
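The hunk above also relaxes `axis` to accept a size-1 int Tensor. A hedged sketch of that form, assuming it follows the same semantics as a plain int axis:

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

params = Tensor(np.array([1, 2, 3, 4, 5, 6, 7]), mindspore.float32)
indices = Tensor(np.array([0, 2, 4]), mindspore.int32)
# axis may now be a Tensor, but its size must be 1.
axis = Tensor([0], mindspore.int32)
print(ops.gather(params, indices, axis))  # expected: [1. 3. 5.]
```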
@@ -3407,6 +3577,9 @@ def gather_d(x, dim, index):
3407
3577
  ``Ascend`` ``GPU`` ``CPU``
3408
3578
 
3409
3579
  Examples:
3580
+ >>> import mindspore
3581
+ >>> import numpy as np
3582
+ >>> from mindspore import Tensor, ops
3410
3583
  >>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
3411
3584
  >>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
3412
3585
  >>> dim = 1
@@ -3502,6 +3675,9 @@ def gather_nd(input_x, indices):
3502
3675
  ``Ascend`` ``GPU`` ``CPU``
3503
3676
 
3504
3677
  Examples:
3678
+ >>> import mindspore
3679
+ >>> import numpy as np
3680
+ >>> from mindspore import Tensor, ops
3505
3681
  >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
3506
3682
  >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
3507
3683
  >>> output = ops.gather_nd(input_x, indices)
@@ -3512,7 +3688,7 @@ def gather_nd(input_x, indices):
3512
3688
 
3513
3689
 
3514
3690
  def tensor_scatter_add(input_x, indices, updates):
3515
- """
3691
+ r"""
3516
3692
  Creates a new tensor by adding the values from the positions in `input_x` indicated by
3517
3693
  `indices`, with values from `updates`. When multiple values are given for the same
3518
3694
  index, the updated result will be the sum of all values. This operation is almost
@@ -3523,18 +3699,23 @@ def tensor_scatter_add(input_x, indices, updates):
3523
3699
  there must be a corresponding value in `updates`. The shape of `updates` should be
3524
3700
  equal to the shape of `input_x[indices]`. For more details, see use cases.
3525
3701
 
3702
+ .. math::
3703
+ output\left [indices \right ] = input\_x + update
3704
+
3526
3705
  Note:
3527
- On GPU, if some values of the `indices` are out of bound, instead of raising an index error,
3528
- the corresponding `updates` will not be updated to self tensor. On CPU, if some values of
3529
- the `indices` are out of bound, raising an index error. On Ascend, out of bound checking is
3530
- not supported, if some values of the `indices` are out of bound, unknown errors may be caused.
3706
+ - On GPU, if some values of the `indices` are out of bound, instead of raising an index error,
3707
+ the corresponding `updates` will not be updated to self tensor.
3708
+ - On CPU, if some values of the `indices` are out of bound, an index error is raised.
3709
+ - On Ascend, out of bound checking is not supported, if some values of the `indices` are out of bound,
3710
+ unknown errors may be caused.
3531
3711
 
3532
3712
  Args:
3533
- input_x (Tensor): The target tensor. The dimension of input_x must be no less than indices.shape[-1].
3713
+ input_x (Tensor): The input tensor. The dimension of input_x must be no less than indices.shape[-1].
3534
3714
  indices (Tensor): The index of input tensor whose data type is int32 or int64.
3535
3715
  The rank must be at least 2.
3536
3716
  updates (Tensor): The tensor to update the input tensor, has the same type as input,
3537
- and updates. Shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
3717
+ and updates. And the shape should be
3718
+ equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
3538
3719
 
3539
3720
  Returns:
3540
3721
  Tensor, has the same shape and type as `input_x`.
@@ -3542,7 +3723,7 @@ def tensor_scatter_add(input_x, indices, updates):
3542
3723
  Raises:
3543
3724
  TypeError: If dtype of `indices` is neither int32 nor int64.
3544
3725
  ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
3545
- RuntimeError: If a value of `indices` is not in `input_x`.
3726
+ RuntimeError: If a value of `indices` is not in `input_x` on CPU backend.
3546
3727
 
3547
3728
  Supported Platforms:
3548
3729
  ``Ascend`` ``GPU`` ``CPU``
@@ -3565,7 +3746,7 @@ def tensor_scatter_add(input_x, indices, updates):
3565
3746
 
3566
3747
 
3567
3748
  def tensor_scatter_sub(input_x, indices, updates):
3568
- """
3749
+ r"""
3569
3750
  Creates a new tensor by subtracting the values from the positions in `input_x` indicated by
3570
3751
  `indices`, with values from `updates`. When multiple values are provided for the same
3571
3752
  index, the result of the update will be to subtract these values respectively. This operation is almost
@@ -3576,6 +3757,9 @@ def tensor_scatter_sub(input_x, indices, updates):
3576
3757
  there must be a corresponding value in `updates`. The shape of `updates` should be
3577
3758
  equal to the shape of `input_x[indices]`. For more details, see use cases.
3578
3759
 
3760
+ .. math::
3761
+ output[indices] = input\_x - update
3762
+
3579
3763
  Note:
3580
3764
  On GPU, if some values of the `indices` are out of bound, instead of raising an index error,
3581
3765
  the corresponding `updates` will not be updated to self tensor. On CPU, if some values of
@@ -3583,11 +3767,11 @@ def tensor_scatter_sub(input_x, indices, updates):
3583
3767
  not supported, if some values of the `indices` are out of bound, unknown errors may be caused.
3584
3768
 
3585
3769
  Args:
3586
- input_x (Tensor): The target tensor. The dimension of input_x must be no less than indices.shape[-1].
3770
+ input_x (Tensor): The input tensor. The dimension of input_x must be no less than indices.shape[-1].
3587
3771
  indices (Tensor): The index of input tensor whose data type is int32 or int64.
3588
3772
  The rank must be at least 2.
3589
- updates (Tensor): The tensor to update the input tensor, has the same type as input,
3590
- and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
3773
+ updates (Tensor): The tensor to update the input tensor, has the same type as `input_x`,
3774
+ and the shape of `updates` should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
3591
3775
 
3592
3776
  Returns:
3593
3777
  Tensor, has the same shape and type as `input_x`.
@@ -3618,7 +3802,7 @@ def tensor_scatter_sub(input_x, indices, updates):
3618
3802
 
3619
3803
 
3620
3804
  def tensor_scatter_max(input_x, indices, updates):
3621
- """
3805
+ r"""
3622
3806
  By comparing the value at the position indicated by `indices` in `input_x` with the value in the `updates`,
3623
3807
  the value at the index will eventually be equal to the largest one to create a new tensor.
3624
3808
 
@@ -3626,16 +3810,22 @@ def tensor_scatter_max(input_x, indices, updates):
3626
3810
  there must be a corresponding value in `updates`. The shape of `updates` should be
3627
3811
  equal to the shape of input_x[indices].
3628
3812
 
3813
+ .. math::
3814
+ output\left [indices \right ] = \max(input\_x, update)
3815
+
3629
3816
  Note:
3630
- If some values of the `indices` are out of bound, instead of raising an index error,
3631
- the corresponding `updates` will not be updated to `input_x`.
3817
+ - On GPU, if some values of the `indices` are out of bound, instead of raising an index error,
3818
+ the corresponding `updates` will not be updated to self tensor.
3819
+ - On CPU, if some values of the `indices` are out of bound, an index error is raised.
3820
+ - On Ascend, out of bound checking is not supported, if some values of the `indices` are out of bound,
3821
+ unknown errors may be caused.
3632
3822
 
3633
3823
  Args:
3634
- input_x (Tensor): The target tensor. The dimension of input_x must be no less than indices.shape[-1].
3635
- indices (Tensor): The index of input tensor whose data type is int32 or int64.
3824
+ input_x (Tensor): The input tensor. The dimension of `input_x` must be no less than indices.shape[-1].
3825
+ indices (Tensor): The index of input tensor whose data type must be int32 or int64.
3636
3826
  The rank must be at least 2.
3637
- updates (Tensor): The tensor to update the input tensor, has the same type as input,
3638
- and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
3827
+ updates (Tensor): The tensor to update the `input_x` tensor, has the same type as input,
3828
+ and updates.shape should be equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
3639
3829
 
3640
3830
  Returns:
3641
3831
  Tensor, has the same shape and type as `input_x`.
@@ -3643,12 +3833,15 @@ def tensor_scatter_max(input_x, indices, updates):
3643
3833
  Raises:
3644
3834
  TypeError: If dtype of `indices` is neither int32 nor int64.
3645
3835
  ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
3646
- RuntimeError: If a value of `indices` is not in `input_x`.
3836
+ RuntimeError: If a value of `indices` is not in `input_x` on CPU backend.
3647
3837
 
3648
3838
  Supported Platforms:
3649
3839
  ``GPU`` ``CPU``
3650
3840
 
3651
3841
  Examples:
3842
+ >>> import mindspore
3843
+ >>> import numpy as np
3844
+ >>> from mindspore import Tensor, ops
3652
3845
  >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
3653
3846
  >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
3654
3847
  >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
@@ -3670,7 +3863,7 @@ def tensor_scatter_max(input_x, indices, updates):
3670
3863
 
3671
3864
 
3672
3865
  def tensor_scatter_min(input_x, indices, updates):
3673
- """
3866
+ r"""
3674
3867
  By comparing the value at the position indicated by `indices` in `input_x` with the value in the `updates`,
3675
3868
  the value at the index will eventually be equal to the smallest one to create a new tensor.
3676
3869
 
@@ -3678,16 +3871,23 @@ def tensor_scatter_min(input_x, indices, updates):
3678
3871
  there must be a corresponding value in `updates`. The shape of `updates` should be
3679
3872
  equal to the shape of `input_x[indices]`. For more details, see case below.
3680
3873
 
3874
+ .. math::
3875
+ output\left [indices \right ] = \min(input\_x, update)
3876
+
3681
3877
  Note:
3682
- If some values of the `indices` are out of range, instead of raising an index error,
3683
- the corresponding `updates` will not be hw to `input_x`.
3878
+ - On GPU, if some values of the `indices` are out of bound, instead of raising an index error,
3879
+ the corresponding `updates` will not be updated to self tensor.
3880
+ - On CPU, if some values of the `indices` are out of bound, an index error is raised.
3881
+ - On Ascend, out of bound checking is not supported, if some values of the `indices` are out of bound,
3882
+ unknown errors may be caused.
3684
3883
 
3685
3884
  Args:
3686
- input_x (Tensor): The input tensor. The dimension of input_x must be no less than indices.shape[-1].
3885
+ input_x (Tensor): The input tensor. The dimension of `input_x` must be no less than indices.shape[-1].
3687
3886
  indices (Tensor): The index of input tensor whose data type is int32 or int64.
3688
3887
  The rank must be at least 2.
3689
- updates (Tensor): The tensor to update the input tensor, has the same type as input,
3690
- and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
3888
+ updates (Tensor): The tensor to update the input tensor, has the same type as `input_x`,
3889
+ and the shape of `updates` should be
3890
+ equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
3691
3891
 
3692
3892
  Returns:
3693
3893
  Tensor, has the same shape and type as `input_x`.
@@ -3695,7 +3895,7 @@ def tensor_scatter_min(input_x, indices, updates):
3695
3895
  Raises:
3696
3896
  TypeError: If dtype of `indices` is neither int32 nor int64.
3697
3897
  ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
3698
- RuntimeError: If a value of `indices` is not in `input_x`.
3898
+ RuntimeError: If a value of `indices` is not in `input_x` on CPU backend.
3699
3899
 
3700
3900
  Supported Platforms:
3701
3901
  ``Ascend`` ``GPU`` ``CPU``
@@ -3718,13 +3918,11 @@ def tensor_scatter_min(input_x, indices, updates):
3718
3918
 
3719
3919
  def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none"):
3720
3920
  """
3721
- Updates the value of the input tensor through the reduction operation.
3921
+ Write all elements in `updates` to the index specified by `indices` in `input_x` according to the reduction
3922
+ operation specified by `reduction`.
3923
+ `axis` controls the direction of the scatter operation.
3722
3924
 
3723
- tensor_scatter_elements takes three inputs data, updates, and indices of the same rank r >= 1,
3724
- an optional attribute axis that identifies an axis of data (default is 0), and another optional attribute reduction
3725
- that identifies reduction operation. When reduction is set to "none", the update value will be assigned to the
3726
- output value according to the indices. When reduction is set to "add", the update value will be added to the output
3727
- value according to the indices.
3925
+ `tensor_scatter_elements` takes three inputs `input_x`, `updates` and `indices` of the same rank r >= 1.
3728
3926
 
3729
3927
  For a 3-D tensor, the output is:
3730
3928
 
@@ -3743,18 +3941,23 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
3743
3941
  - On Ascend, the reduction only support set to "none" for now.
3744
3942
  - On Ascend, the data type of `input_x` must be float16 or float32.
3745
3943
 
3746
- .. note::
3747
- If some values of the `indices` are out of bound, instead of raising an index error,
3748
- the corresponding `updates` will not be updated to `input_x`.
3944
+ Note:
3945
+ If some values of the `indices` exceed the upper or lower bounds of the index of `input_x`, instead of raising
3946
+ an index error, the corresponding `updates` will not be updated to `input_x`.
3947
+
3948
+ .. warning::
3949
+ This is an experimental API that is subject to change or deletion.
3749
3950
 
3750
3951
  Args:
3751
- input_x (Tensor): The target tensor. The rank of `input` must be at least 1.
3752
- indices (Tensor): The index to do add operation whose data type must be mindspore.int32 or
3753
- mindspore.int64. Same rank as input_x. And accepted range is [-s, s) where s is the size along axis.
3754
- updates (Tensor): The tensor doing the add operation with `input_x`, has the same type as input_x,
3755
- and update.shape should be equal to indices.shape.
3756
- axis (int): Which axis to scatter, default is 0. Accepted range is [-r, r) where r = rank(input_x).
3757
- reduction (str): Which reduction operation to scatter, default is "none". Other option: "add".
3952
+ input_x (Tensor): The target tensor. The rank must be at least 1.
3953
+ indices (Tensor): The index of `input_x` to do scatter operation whose data type must be mindspore.int32 or
3954
+ mindspore.int64. Same rank as `input_x`. And accepted range is [-s, s) where s is the size along axis.
3955
+ updates (Tensor): The tensor doing the scatter operation with `input_x`, has the same type as `input_x` and
3956
+ the same shape as `indices`.
3957
+ axis (int): Which axis to scatter. Accepted range is [-r, r) where r = rank(input_x). Default: ``0``.
3958
+ reduction (str): Which reduction operation to scatter, supports ``"none"`` , ``"add"`` . Default: ``"none"``.
3959
+ When `reduction` is set to ``"none"``, `updates` will be assigned to `input_x` according to `indices`.
3960
+ When `reduction` is set to ``"add"``, `updates` will be added to `input_x` according to `indices`.
3758
3961
 
3759
3962
  Returns:
3760
3963
  Tensor, has the same shape and type as `input_x`.
@@ -3771,14 +3974,28 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
3771
3974
  ``Ascend`` ``GPU`` ``CPU``
3772
3975
 
3773
3976
  Examples:
3774
- >>> input_x = Parameter(Tensor(np.array([[1, 2, 3, 4, 5]]), mindspore.float32), name="x")
3977
+ >>> import mindspore
3978
+ >>> from mindspore import Tensor, ops
3979
+ >>> from mindspore import Parameter
3980
+ >>> import numpy as np
3981
+ >>> input_x = Parameter(Tensor(np.array([[1, 2, 3, 4, 5]]), mindspore.int32), name="x")
3775
3982
  >>> indices = Tensor(np.array([[2, 4]]), mindspore.int32)
3776
- >>> updates = Tensor(np.array([[8, 8]]), mindspore.float32)
3983
+ >>> updates = Tensor(np.array([[8, 8]]), mindspore.int32)
3777
3984
  >>> axis = 1
3778
3985
  >>> reduction = "none"
3779
3986
  >>> output = ops.tensor_scatter_elements(input_x, indices, updates, axis, reduction)
3780
3987
  >>> print(output)
3781
- [[ 1 2 8 4 8]]
3988
+ [[1 2 8 4 8]]
3989
+ >>> input_x = Parameter(Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.int32), name="x")
3990
+ >>> indices = Tensor(np.array([[1, -1, 2], [0, 2, 1]]), mindspore.int32)
3991
+ >>> updates = Tensor(np.array([[1, 2, 2], [4, 5, 8]]), mindspore.int32)
3992
+ >>> axis = 0
3993
+ >>> reduction = "add"
3994
+ >>> output = ops.tensor_scatter_elements(input_x, indices, updates, axis, reduction)
3995
+ >>> print(output)
3996
+ [[ 5 2 3]
3997
+ [ 5 5 14]
3998
+ [ 7 15 11]]
3782
3999
  """
3783
4000
  _tensor_scatter_elements = _get_cache_prim(TensorScatterElements)(axis, reduction)
3784
4001
  return _tensor_scatter_elements(input_x, indices, updates)
@@ -3812,6 +4029,9 @@ def scatter(input, axis, index, src):
3812
4029
  ``Ascend`` ``GPU`` ``CPU``
3813
4030
 
3814
4031
  Examples:
4032
+ >>> import numpy as np
4033
+ >>> import mindspore as ms
4034
+ >>> from mindspore import Tensor, ops
3815
4035
  >>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
3816
4036
  >>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
3817
4037
  >>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
@@ -3839,7 +4059,123 @@ def scatter(input, axis, index, src):
3839
4059
  [0. 0. 0. 0. 0.]
3840
4060
  [0. 0. 0. 0. 0.]]
3841
4061
  """
3842
- return F.tensor_scatter_elements(input_x=input, indices=index, updates=src, axis=axis)
4062
+ return ops.tensor_scatter_elements(input_x=input, indices=index, updates=src, axis=axis)
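As the one-line body shows, `ops.scatter` is a thin wrapper over `tensor_scatter_elements` with the default ``"none"`` reduction. A quick equivalence check (a sketch, not part of the diff):

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor(np.array([[1, 2, 3, 4, 5]]), ms.float32)
src = Tensor(np.array([[8, 8]]), ms.float32)
idx = Tensor(np.array([[2, 4]]), ms.int64)
# Both calls assign src into x at idx along axis 1.
a = ops.scatter(x, axis=1, index=idx, src=src)
b = ops.tensor_scatter_elements(x, indices=idx, updates=src, axis=1)
print(np.array_equal(a.asnumpy(), b.asnumpy()))  # expected: True
```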
4063
+
4064
+
4065
+ def _get_slice_scatter_const(x_shape, axis, start, end, step):
4066
+ r"""
4067
+ Calculate the rank of the input, the index list of the slice, and the normalized axis.
4068
+ """
4069
+ x_rank = len(x_shape)
4070
+ axis = axis if axis >= 0 else axis + x_rank
4071
+ start = start if start is not None else 0
4072
+ start = start if start >= 0 else start + x_shape[axis]  # a negative start is relative to the dimension size
4073
+ end = end if end is not None else x_shape[axis]
4074
+ end = end if end >= 0 else end + x_shape[axis]  # a negative end is relative to the dimension size
4075
+ end = end if end < x_shape[axis] else x_shape[axis]
4076
+ index = list(builtins.range(start, end, step))
4077
+ return x_rank, index, axis
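To make the helper's contract concrete, here is a hedged replay of `_get_slice_scatter_const` for the shapes used in the `slice_scatter` docstring example below:

```python
# Replay for x_shape=(4, 6), axis=1, start=0, end=5, step=2:
#   axis  -> 1 (already non-negative)
#   start -> 0; end -> 5 (5 < x_shape[1] == 6, so no clamping)
#   index -> list(range(0, 5, 2)) == [0, 2, 4]
# The helper therefore returns the input rank, the column indices
# to overwrite, and the normalized axis:
x_rank, index, axis = 2, [0, 2, 4], 1
```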
4078
+
4079
+
4080
+ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
4081
+ r"""
4082
+ Slice the input Tensor in the specified dimension and overlay the slice results with the source Tensor.
4083
+ The `input` is sliced along the specified dimension. The start position of the slice is `start` ,
4084
+ the end position is `end` , and the step size is `step` .
4085
+ Then the slicing result is overwritten with `src` to get the output Tensor.
4086
+
4087
+ Args:
4088
+ input (Tensor): The target Tensor.
4089
+ src (Tensor): The source Tensor.
4090
+ axis (int, optional): The dimension of `input` to be sliced. Default: ``0`` .
4091
+ start (int, optional): The start index to slice in the specified dimension.
4092
+ Default: ``None``, `start` is ``0`` .
4093
+ end (int, optional): The end index to slice in the specified dimension.
4094
+ Default: ``None``, `end` is the length of `input` in the specified dimension.
4095
+ step (int, optional): Step size. Default: ``1`` , the distance between adjacent slice elements is ``1`` .
4096
+
4097
+ Returns:
4098
+ Tensor after embedding, has the same shape and type as `input` .
4099
+
4100
+ Raises:
4101
+ ValueError: The shape of `src` is not the same as the shape of `input` slice.
4102
+ TypeError: If `input` is not a Tensor.
4103
+ TypeError: If `src` is not a Tensor.
4104
+ TypeError: If `axis` or `step` is not an integer.
4105
+ TypeError: If `start` or `end` is not ``None`` or an integer.
4106
+
4107
+ Supported Platforms:
4108
+ ``Ascend`` ``GPU`` ``CPU``
4109
+
4110
+ Examples:
4111
+ >>> import mindspore as ms
4112
+ >>> a = ms.ops.zeros((4, 6))
4113
+ >>> b = ms.ops.ones((4, 3))
4114
+ >>> output = ms.ops.slice_scatter(a, b, axis=1, start=0, end=5, step=2)
4115
+ >>> print(output)
4116
+ [[1. 0. 1. 0. 1. 0.]
4117
+ [1. 0. 1. 0. 1. 0.]
4118
+ [1. 0. 1. 0. 1. 0.]
4119
+ [1. 0. 1. 0. 1. 0.]]
4120
+ """
4121
+ input_shape = input.shape
4122
+ input_rank, index, axis = _get_slice_scatter_const(input_shape, axis, start, end, step)
4123
+
4124
+ src_shape = src.shape
4125
+ index_shape = input_shape[:axis] + (len(index),) + input_shape[axis + 1:]
4126
+ index_tensor = ms.Tensor(index)
4127
+ for _ in builtins.range(axis):
4128
+ index_tensor = index_tensor.expand_dims(0)
4129
+
4130
+ if index_shape != src_shape:
4131
+ raise ValueError(f"For slice_scatter, src shape should be equal to the slice size, "
4132
+ f"but got src shape {src_shape} and slice shape {index_shape}")
4133
+ for _ in builtins.range(input_rank - axis - 1):
4134
+ index_tensor = index_tensor.expand_dims(-1)
4135
+ index_tensor = index_tensor.broadcast_to(src.shape)
4136
+ return tensor_scatter_elements(input, axis=axis, indices=index_tensor, updates=src)
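With negative `start`/`end` normalized by the dimension size (the fix noted in the helper above), negative bounds select the same columns as their non-negative counterparts. A sketch, assuming that fix:

```python
import mindspore as ms

a = ms.ops.zeros((4, 6))
b = ms.ops.ones((4, 3))
# start=-6 and end=-1 normalize to start=0 and end=5 along axis 1, so
# this reproduces the docstring example with start=0, end=5, step=2.
out = ms.ops.slice_scatter(a, b, axis=1, start=-6, end=-1, step=2)
print(out)
```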
4137
+
4138
+
4139
+ def select_scatter(input, src, axis, index):
4140
+ r"""
4141
+ On the specified dimension `axis` of `input` , `src` is scattered into `input` at the specified `index` .
4142
+
4143
+ Args:
4144
+ input (Tensor): The target Tensor.
4145
+ src (Tensor): The source Tensor.
4146
+ axis (int): The dimension of `input` to be embedded.
4147
+ index (int): The location of scattering on the specified dimension.
4148
+
4149
+ Returns:
4150
+ Tensor after embedding, has the same shape and type as `input` .
4151
+
4152
+ Raises:
4153
+ ValueError: The shape of `src` is not the same as the shape scattered over `input` .
4154
+ TypeError: If `input` is not a Tensor.
4155
+ TypeError: If `src` is not a Tensor.
4156
+ TypeError: If `axis` or `index` is not an integer.
4157
+
4158
+ Supported Platforms:
4159
+ ``Ascend`` ``GPU`` ``CPU``
4160
+
4161
+ Examples:
4162
+ >>> import mindspore as ms
4163
+ >>> a = ms.ops.zeros((2, 3, 3))
4164
+ >>> b = ms.ops.ones((2, 3))
4165
+ >>> output = ms.ops.select_scatter(a, b, axis=1, index=1)
4166
+ >>> print(output)
4167
+ [[[0. 0. 0.]
4168
+ [1. 1. 1.]
4169
+ [0. 0. 0.]]
4170
+ [[0. 0. 0.]
4171
+ [1. 1. 1.]
4172
+ [0. 0. 0.]]]
4173
+ """
4174
+ src = src.expand_dims(axis=axis)
4175
+ x_rank = input.ndim
4176
+ axis = axis if axis >= 0 else axis + x_rank
4177
+ index = index if index >= 0 else index + input.shape[axis]  # a negative index is relative to the dimension size, not the rank
4178
+ return slice_scatter(input, src, axis, start=index, end=index + 1)
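The body makes the relationship explicit: `select_scatter` expands `src` on `axis` and delegates to `slice_scatter` over the single-element slice ``[index, index + 1)``. A hedged check with a negative index, normalized by the dimension size per the fix above:

```python
import mindspore as ms

a = ms.ops.zeros((2, 3, 3))
b = ms.ops.ones((2, 3))
# index=-2 normalizes to 1 along axis 1, so row 1 of each matrix is set.
out = ms.ops.select_scatter(a, b, axis=1, index=-2)
print(out[:, 1])  # expected: [[1. 1. 1.] [1. 1. 1.]]
```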
3843
4179
 
3844
4180
 
3845
4181
  def space_to_batch_nd(input_x, block_size, paddings):
@@ -3888,6 +4224,8 @@ def space_to_batch_nd(input_x, block_size, paddings):
3888
4224
  ``Ascend`` ``GPU`` ``CPU``
3889
4225
 
3890
4226
  Examples:
4227
+ >>> import numpy as np
4228
+ >>> from mindspore import Tensor, ops
3891
4229
  >>> block_size = [2, 2]
3892
4230
  >>> paddings = [[0, 0], [0, 0]]
3893
4231
  >>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32)
@@ -3915,10 +4253,10 @@ def batch_to_space_nd(input_x, block_shape, crops):
3915
4253
  :math:`(n', c_1, ... c_k, w'_1, ..., w'_M)`, where
3916
4254
 
3917
4255
  .. math::
3918
- \begin{array}{ll} \\
3919
- n' = n//(block\_shape[0]*...*block\_shape[M-1]) \\
3920
- w'_i = w_i*block\_shape[i-1]-crops[i-1][0]-crops[i-1][1]
3921
- \end{array}
4256
+ \begin{array}{ll} \\
4257
+ n' = n//(block\_shape[0]*...*block\_shape[M-1]) \\
4258
+ w'_i = w_i*block\_shape[i-1]-crops[i-1][0]-crops[i-1][1]
4259
+ \end{array}
3922
4260
 
3923
4261
  Args:
3924
4262
  input_x (Tensor): The input tensor. It must be greater or equal to 2-D tensor(equal to 4-D tensor on Ascend),
@@ -3949,6 +4287,9 @@ def batch_to_space_nd(input_x, block_shape, crops):
3949
4287
  ``Ascend`` ``GPU`` ``CPU``
3950
4288
 
3951
4289
  Examples:
4290
+ >>> import mindspore
4291
+ >>> import numpy as np
4292
+ >>> from mindspore import Tensor, ops
3952
4293
  >>> block_shape = [2, 2]
3953
4294
  >>> crops = [[0, 0], [0, 0]]
3954
4295
  >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
@@ -3969,7 +4310,7 @@ def nonzero(input):
3969
4310
  Return a Tensor of the positions of all non-zero values.
3970
4311
 
3971
4312
  Args:
3972
- input (Tensor): The shape of Tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is int, float or bool.
4313
+ input (Tensor): The input Tensor whose rank should be greater than or equal to 1.
3973
4314
 
3974
4315
  Returns:
3975
4316
  Tensor, a 2-D Tensor whose data type is int64, containing the positions of all non-zero values of the input.
@@ -4017,23 +4358,23 @@ def matrix_diag(x, k=0, num_rows=-1, num_cols=-1, padding_value=0, align="RIGHT_
4017
4358
  0 refers to the main diagonal, and negative value means subdiagonals. `k` can be a single integer
4018
4359
  (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band.
4019
4360
  k[0] must not be larger than k[1]. The value must be in the range of given or derived `num_rows`
4020
- and `num_cols`, meaning value of k must be in (-num_rows, num_cols). Default: 0.
4361
+ and `num_cols`, meaning value of k must be in (-num_rows, num_cols). Default: ``0`` .
4021
4362
  num_rows (Union[int, Tensor], optional): The number of rows of the output Tensor. A Tensor of type int32 with
4022
4363
  only one value. If `num_rows` is -1, indicating that the innermost matrix of the output Tensor is a square
4023
4364
  matrix, and the real number of rows will be derived by other inputs. That is
4024
4365
  :math:`num\_rows = x.shape[-1] - min(k[1], 0)`. Otherwise, the value must be equal or greater than
4025
- :math:`x.shape[-1] - min(k[1], 0)`. Default: -1.
4366
+ :math:`x.shape[-1] - min(k[1], 0)`. Default: ``-1`` .
4026
4367
  num_cols (Union[int, Tensor], optional): The number of columns of
4027
4368
  the output Tensor. A Tensor of type int32 with only one value.
4028
4369
  If `num_cols` is -1, indicating that the innermost matrix of the output
4029
4370
  Tensor is a square matrix, and the real number of columns will be derived by other inputs.
4030
4371
  That is :math:`num\_cols = x.shape[-1] + max(k[0], 0)`. Otherwise, the value must be equal or
4031
- greater than :math:`x.shape[-1] - min(k[1], 0)`. Default: -1.
4372
+ greater than :math:`x.shape[-1] - min(k[1], 0)`. Default: ``-1`` .
4032
4373
  padding_value (Union[int, float, Tensor], optional): The number to fill the area outside the specified
4033
- diagonal band. A Tensor with only one value. Have the same dtype as x. Default: 0.
4374
+ diagonal band. A Tensor with only one value. Have the same dtype as x. Default: ``0`` .
4034
4375
  align (str, optional): specifies how superdiagonals and subdiagonals should be aligned.
4035
- Supported values:"RIGHT_LEFT", "LEFT_RIGHT", "LEFT_LEFT", "RIGHT_RIGHT".
4036
- Default: "RIGHT_LEFT".
4376
+ Supported values: ``"RIGHT_LEFT"`` , ``"LEFT_RIGHT"`` , ``"LEFT_LEFT"`` , ``"RIGHT_RIGHT"`` .
4377
+ Default: ``"RIGHT_LEFT"`` .
4037
4378
 
4038
4379
  - When set to "RIGHT_LEFT", the alignment of superdiagonals will be towards the right side
4039
4380
  (padding the row on the left), while subdiagonals will be towards the left side
@@ -4049,8 +4390,8 @@ def matrix_diag(x, k=0, num_rows=-1, num_cols=-1, padding_value=0, align="RIGHT_
4049
4390
  Returns:
4050
4391
  A Tensor. Has the same type as `x`.
4051
4392
  Suppose `x` has r dimensions with shape :math:`(I, J, ..., M, N)` . The output Tensor has rank r + 1 with shape
4052
- :math:`(I, J, ..., M, num_rows, num_cols)` when only one diagonal is given (k is an integer or k[0] == k[1]).
4053
- Otherwise, it has rank r with shape :math:`(I, J, ..., num_rows, num_cols)` .
4393
+ :math:`(I, J, ..., M, num\_rows, num\_cols)` when only one diagonal is given (k is an integer or k[0] == k[1]).
4394
+ Otherwise, it has rank r with shape :math:`(I, J, ..., num\_rows, num\_cols)` .
4054
4395
 
4055
4396
  Raises:
4056
4397
  TypeError: If `x` is not Tensor.
@@ -4102,7 +4443,7 @@ def matrix_diag(x, k=0, num_rows=-1, num_cols=-1, padding_value=0, align="RIGHT_
4102
4443
  return matrix_diag_v3(x, k, num_rows, num_cols, padding_value)
4103
4444
 
4104
4445
 
4105
- def matrix_diag_part(x, k=0, padding_value=0, align="RIGHT_LEFT"):
4446
+ def matrix_diag_part(x, k, padding_value, align="RIGHT_LEFT"):
4106
4447
  r"""
4107
4448
  Returns the diagonal part of input tensor.
4108
4449
  Returns a tensor with the k[0]-th to k[1]-th diagonals of `x`. Some diagonals are shorter than
@@ -4110,21 +4451,21 @@ def matrix_diag_part(x, k=0, padding_value=0, align="RIGHT_LEFT"):
4110
4451
 
4111
4452
  Args:
4112
4453
  x (Tensor): The input Tensor with rank r, where r >= 2.
4113
- k (Union[int, Tensor], optional): A Tensor of type int32. Diagonal offset(s). Positive value means
4454
+ k (Tensor): A Tensor of type int32. Diagonal offset(s). Positive value means
4114
4455
  superdiagonal, 0 refers to the main diagonal, and negative value means subdiagonals. k can be
4115
4456
  a single integer (for a single diagonal) or a pair of integers specifying the low and high ends
4116
4457
  of a matrix band. k[0] must not be larger than k[1]. The value of k has restrictions, meaning
4117
4458
  value of k must be in (-x.shape[-2], x.shape[-1]).
4118
- padding_value (Union[int, float, Tensor], optional): A Tensor with only one value. Have the same dtype as x.
4119
- The number to fill the area outside the specified diagonal band. Default: 0.
4120
- align (str, optional): An optional string from: "RIGHT_LEFT"(default), "LEFT_RIGHT", "LEFT_LEFT",
4121
- "RIGHT_RIGHT". Align is a string specifying how superdiagonals and subdiagonals should be aligned,
4122
- respectively. "RIGHT_LEFT" aligns superdiagonals to the right (left-pads the row) and subdiagonals
4123
- to the left (right-pads the row).
4459
+ padding_value (Tensor): A Tensor with only one value. Have the same dtype as x.
4460
+ The number to fill the area outside the specified diagonal band.
4461
+ align (str, optional): An optional string from: ``"RIGHT_LEFT"`` , ``"LEFT_RIGHT"`` ,
4462
+ ``"LEFT_LEFT"`` , ``"RIGHT_RIGHT"`` . Align is a string specifying how superdiagonals and subdiagonals
4463
+ should be aligned, respectively. ``"RIGHT_LEFT"`` aligns superdiagonals to the right (left-pads the row)
4464
+ and subdiagonals to the left (right-pads the row). Default: ``"RIGHT_LEFT"`` .
4124
4465
 
4125
4466
  Returns:
4126
4467
  A Tensor. Has the same type as `x`.
4127
- Assume `x` has r dimensions :math:`(I, J, ..., L, M, N)` . Let `max_diag_len` be the maximum length among all
4468
+ Assume `x` has r dimensions :math:`(I, J, ..., M, N)` . Let `max_diag_len` be the maximum length among all
4128
4469
  diagonals to be extracted, :math:`max\_diag\_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
4129
4470
  Let `num_diags` be the number of diagonals to extract, :math:`num\_diags = k[1] - k[0] + 1`.
4130
4471
  If :math:`num\_diags == 1`, the output tensor is of rank r - 1 with shape :math:`(I, J, ..., L, max\_diag\_len)`
@@ -4146,6 +4487,9 @@ def matrix_diag_part(x, k=0, padding_value=0, align="RIGHT_LEFT"):
4146
4487
  ``Ascend`` ``GPU`` ``CPU``
4147
4488
 
4148
4489
  Examples:
4490
+ >>> import mindspore
4491
+ >>> import numpy as np
4492
+ >>> from mindspore import Tensor, ops
4149
4493
  >>> x = Tensor(np.array([[1, 2, 3, 4],
4150
4494
  ... [5, 6, 7, 8],
4151
4495
  ... [9, 8, 7, 6]]), mindspore.float32)
@@ -4163,7 +4507,7 @@ def matrix_diag_part(x, k=0, padding_value=0, align="RIGHT_LEFT"):
4163
4507
  return matrix_diag_part_v3(x, k, padding_value)
4164
4508
 
4165
4509
 
4166
- def matrix_set_diag(x, diagonal, k=0, align="RIGHT_LEFT"): # pylint: disable=redefined-outer-name
4510
+ def matrix_set_diag(x, diagonal, k=0, align="RIGHT_LEFT"): # pylint: disable=redefined-outer-name
4167
4511
  r"""
4168
4512
  Returns a batched matrix tensor with new batched diagonal values.
4169
4513
  Given x and diagonal, this operation returns a tensor with the same shape and values as x, except for the specified
@@ -4187,10 +4531,10 @@ def matrix_set_diag(x, diagonal, k=0, align="RIGHT_LEFT"): # pylint: disable=red
4187
4531
  single integer (for a single diagonal) or a pair of integers specifying the low and high ends of
4188
4532
  a matrix band. k[0] must not be larger than k[1].
4189
4533
  The value of k has restrictions, meaning the value of k must be in :math:`(-x.shape[-2], x.shape[-1])`.
4190
- Input k must be const Tensor when taking Graph mode.
4191
- align (str, optional): An optional string from: "RIGHT_LEFT"(default), "LEFT_RIGHT", "LEFT_LEFT",
4192
- "RIGHT_RIGHT". Align is a string specifying how superdiagonals and subdiagonals should be aligned,
4193
- respectively. "RIGHT_LEFT" aligns superdiagonals to the right (left-pads the row) and subdiagonals
4534
+ Input k must be const Tensor when taking Graph mode. Default: ``0`` .
4535
+ align (str, optional): An optional string from: ``"RIGHT_LEFT"`` (default), ``"LEFT_RIGHT"`` , ``"LEFT_LEFT"`` ,
4536
+ ``"RIGHT_RIGHT"`` . Align is a string specifying how superdiagonals and subdiagonals should be aligned,
4537
+ respectively. ``"RIGHT_LEFT"`` aligns superdiagonals to the right (left-pads the row) and subdiagonals
4194
4538
  to the left (right-pads the row).
4195
4539
 
4196
4540
  Returns:
@@ -4218,6 +4562,9 @@ def matrix_set_diag(x, diagonal, k=0, align="RIGHT_LEFT"): # pylint: disable=red
4218
4562
  ``Ascend`` ``GPU`` ``CPU``
4219
4563
 
4220
4564
  Examples:
4565
+ >>> import mindspore
4566
+ >>> import numpy as np
4567
+ >>> from mindspore import Tensor, ops
4221
4568
  >>> x = Tensor(np.array([[7, 7, 7, 7],
4222
4569
  ... [7, 7, 7, 7],
4223
4570
  ... [7, 7, 7, 7]]), mindspore.float32)
@@ -4258,7 +4605,7 @@ def meshgrid(*inputs, indexing='xy'):
4258
4605
  inputs of length `M` and `N`, the outputs are of shape :math:`(N, M)`
4259
4606
  for 'xy' indexing and :math:`(M, N)` for 'ij' indexing. In the 3-D
4260
4607
  case with inputs of length `M`, `N` and `P`, outputs are of shape
4261
- :math:`(N, M, P)` for 'xy' indexing and :math:`(M, N, P)` for 'ij' indexing. Default: 'xy'.
4608
+ :math:`(N, M, P)` for 'xy' indexing and :math:`(M, N, P)` for 'ij' indexing. Default: ``'xy'`` .
4262
4609
 
4263
4610
  Returns:
4264
4611
  Tensors, a Tuple of N N-D Tensor objects. The data type is the same with the Inputs.
@@ -4334,9 +4681,10 @@ def affine_grid(theta, size, align_corners=False):
4334
4681
  The value of target output with format :math:`(N, C, H, W)` for 2D grid or :math:`(N, C, D, H, W)` for 3D
4335
4682
  grid.
4336
4683
  align_corners (bool, optional): Geometrically, each pixel of input is viewed as a square instead of a dot.
4337
- If True, consider extremum -1 and 1 referring to the centers of the pixels rather than pixel corners.
4338
- The default value is False, extremum -1 and 1 refer to the corners of the pixels, so that sampling is
4339
- irrelevant to resolution of the image. Default: False.
4684
+ If ``True`` , consider extremum -1 and 1 referring to the centers of the pixels rather than pixel corners.
4685
+ The default value is ``False`` , extremum -1 and 1 refer to the corners of the pixels, so that sampling is
4686
+ irrelevant to resolution of the image. Default: ``False`` .
4687
+
4340
4688
  Returns:
4341
4689
  Tensor, a tensor whose data type is same as 'theta', and the shape is :math:`(N, H, W, 2)` for 2D grid
4342
4690
  or :math:`(N, D, H, W, 3)` for 3D grid.
@@ -4371,7 +4719,7 @@ def affine_grid(theta, size, align_corners=False):
4371
4719
  return affine_grid_op(theta, size)
4372
4720
 
4373
4721
 
4374
- def broadcast_to(input, shape): # pylint: disable=redefined-outer-name
4722
+ def broadcast_to(input, shape): # pylint: disable=redefined-outer-name
4375
4723
  """
4376
4724
  Broadcasts input tensor to a given shape. The dim of input shape must be smaller
4377
4725
  than or equal to that of target shape. Suppose input shape is :math:`(x_1, x_2, ..., x_m)`,
@@ -4414,7 +4762,7 @@ def broadcast_to(input, shape): # pylint: disable=redefined-outer-name
4414
4762
  input shape :math:`(1, 5, 9)`, instead of operating the dim-filling process first, it raises errors directly.
4415
4763
 
4416
4764
  Args:
4417
- input (Tensor): The input Tensor. Supported types are: float16, float32, int32, int8, uint8, bool.
4765
+ input (Tensor): The input Tensor.
4418
4766
  shape (tuple): The target shape to broadcast. Can be fully specified, or have -1 in one position
4419
4767
  where it will be substituted by the input tensor's shape in that position, see example.
4420
4768
 
@@ -4430,6 +4778,8 @@ def broadcast_to(input, shape): # pylint: disable=redefined-outer-name
4430
4778
  ``Ascend`` ``GPU`` ``CPU``
4431
4779
 
4432
4780
  Examples:
4781
+ >>> import numpy as np
4782
+ >>> from mindspore import Tensor, ops
4433
4783
  >>> shape = (2, 3)
4434
4784
  >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
4435
4785
  >>> output = ops.broadcast_to(x, shape)
@@ -4443,7 +4793,7 @@ def broadcast_to(input, shape): # pylint: disable=redefined-outer-name
4443
4793
  [[1. 1.]
4444
4794
  [2. 2.]]
4445
4795
  """
4446
- if isinstance(shape, Tensor) or F.is_sequence_value_unknown(shape):
4796
+ if isinstance(shape, Tensor) or ops.is_sequence_value_unknown(shape):
4447
4797
  _dyn_broadcast_to = _get_cache_prim(DynamicBroadcastTo)()
4448
4798
  return _dyn_broadcast_to(input, shape)
4449
4799
  _broadcast_to = _get_cache_prim(P.BroadcastTo)(shape)
@@ -4471,8 +4821,8 @@ def unsorted_segment_min(x, segment_ids, num_segments):
4471
4821
 
4472
4822
  Args:
4473
4823
  x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
4474
- segment_ids (Tensor): A `1-D` tensor whose shape is :math:`(x_1)`,
4475
- the value must be non-negative tensor. The data type must be int32.
4824
+ segment_ids (Tensor): The label indicates the segment to which each element belongs.
4825
+ Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
4476
4826
  num_segments (int): The value specifies the number of distinct `segment_ids`.
4477
4827
 
4478
4828
  Returns:
@@ -4522,8 +4872,8 @@ def unsorted_segment_max(x, segment_ids, num_segments):
4522
4872
 
4523
4873
  Args:
4524
4874
  x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
4525
- segment_ids (Tensor): A `1-D` tensor whose shape is :math:`(x_1)`,
4526
- the value must be non-negative tensor. The data type must be int32.
4875
+ segment_ids (Tensor): The label indicates the segment to which each element belongs.
4876
+ Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
4527
4877
  num_segments (int): The value specifies the number of distinct `segment_ids`.
4528
4878
 
4529
4879
  Returns:
@@ -4730,6 +5080,8 @@ def population_count(input_x):
4730
5080
  ``Ascend`` ``GPU`` ``CPU``
4731
5081
 
4732
5082
  Examples:
5083
+ >>> import mindspore
5084
+ >>> from mindspore import Tensor, ops
4733
5085
  >>> input_x = Tensor([0, 1, 3], mindspore.int16)
4734
5086
  >>> output = ops.population_count(input_x)
4735
5087
  >>> print(output)
@@ -4765,6 +5117,43 @@ def is_tensor(obj):
4765
5117
  return isinstance(obj, Tensor)
4766
5118
 
4767
5119
 
5120
+ def is_nonzero(input):
5121
+ """
5122
+ Determines whether the input Tensor is non-zero (not 0 or False). The input must contain exactly one element.
5123
+
5124
+ Args:
5125
+ input (Tensor): The input tensor.
5126
+
5127
+ Returns:
5128
+ Bool, returns False if the single element of the input Tensor is 0 or False,
5129
+ otherwise returns True.
5130
+
5131
+ Raises:
5132
+ TypeError: If `input` is not Tensor.
5133
+ ValueError: If the element number of `input` is not equal to 1.
5134
+
5135
+ Supported Platforms:
5136
+ ``Ascend`` ``GPU`` ``CPU``
5137
+
5138
+ Examples:
5139
+ >>> from mindspore import Tensor, ops
5140
+ >>> x1 = Tensor([[[False]]])
5141
+ >>> x2 = Tensor([[3.5]])
5142
+ >>> out1 = ops.is_nonzero(x1)
5143
+ >>> print(out1)
5144
+ False
5145
+ >>> out2 = ops.is_nonzero(x2)
5146
+ >>> print(out2)
5147
+ True
5148
+ """
5149
+ if not isinstance(input, Tensor):
5150
+ raise TypeError(f'For is_nonzero, the input must be a Tensor, but got {type(input)}.')
5151
+ if input.numel() != 1:
5152
+ raise ValueError(f"For is_nonzero, the numel of input must be 1, but got {input.numel()}.")
5153
+ out = ops.squeeze(input)
5154
+ return bool(out)
5155
+
5156
+
4768
5157
  def scalar_cast(input_x, input_y):
4769
5158
  """
4770
5159
  Casts the input scalar to another type.
@@ -4783,6 +5172,8 @@ def scalar_cast(input_x, input_y):
4783
5172
  ``Ascend`` ``GPU`` ``CPU``
4784
5173
 
4785
5174
  Examples:
5175
+ >>> import mindspore
5176
+ >>> from mindspore import ops
4786
5177
  >>> output = ops.scalar_cast(255.0, mindspore.int32)
4787
5178
  >>> print(output)
4788
5179
  255
@@ -4791,7 +5182,7 @@ def scalar_cast(input_x, input_y):
4791
5182
 
4792
5183
 
4793
5184
  def tensor_scatter_mul(input_x, indices, updates):
4794
- """
5185
+ r"""
4795
5186
  Creates a new tensor by multiplying the values from the positions in `input_x` indicated by
4796
5187
`indices`, with values from `updates`. When multiple values are provided for the same
4797
5188
index, the result of the update will be the product of these values. Except that
@@ -4801,15 +5192,19 @@ def tensor_scatter_mul(input_x, indices, updates):
4801
5192
  there must be a corresponding value in `updates`. The shape of `updates` should be
4802
5193
  equal to the shape of `input_x[indices]`. For more details, see use cases.
4803
5194
 
5195
+ .. math::
5196
+ output[indices] = input\_x \times update
5197
+
4804
5198
  Note:
4805
5199
  - If some values of the `indices` are out of bound, instead of raising an index error,
4806
5200
  the corresponding `updates` will not be updated to `input_x`.
4807
5201
 
4808
5202
  Args:
4809
- input_x (Tensor): The target tensor. The dimension of input_x must be no less than indices.shape[-1].
5203
+ input_x (Tensor): The input tensor. The dimension of `input_x` must be no less than indices.shape[-1].
4810
5204
  indices (Tensor): The index of input tensor whose data type is int32 or int64. The rank must be at least 2.
4811
- updates (Tensor): The tensor to update the input tensor, has the same type as input,
4812
- and updates shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
5205
+ updates (Tensor): The tensor to update the input tensor, has the same type as `input_x`,
5206
+ and the shape of `updates` should be equal to
5207
+ :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
4813
5208
 
4814
5209
  Returns:
4815
5210
  Tensor, has the same shape and type as `input_x`.
@@ -4817,12 +5212,15 @@ def tensor_scatter_mul(input_x, indices, updates):
4817
5212
  Raises:
4818
5213
  TypeError: If dtype of `indices` is neither int32 nor int64.
4819
5214
  ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
4820
- RuntimeError: If a value of `indices` is not in `input_x`.
5215
+ RuntimeError: If a value of `indices` is out of the bounds of `input_x`, on the CPU backend.
4821
5216
 
4822
5217
  Supported Platforms:
4823
5218
  ``GPU`` ``CPU``
4824
5219
 
4825
5220
  Examples:
5221
+ >>> import mindspore
5222
+ >>> import numpy as np
5223
+ >>> from mindspore import Tensor, ops
4826
5224
  >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
4827
5225
  >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
4828
5226
  >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
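The update rule is easiest to verify against a plain NumPy reference. The helper below is a hedged sketch of the semantics described above (`tensor_scatter_div` follows the same pattern with the operator swapped):

import numpy as np

def tensor_scatter_mul_ref(input_x, indices, updates):
    # out[idx] *= update for every row of `indices`; duplicates multiply in turn.
    out = input_x.copy()
    for idx, upd in zip(indices, updates):
        out[tuple(idx)] *= upd
    return out

x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]], dtype=np.float32)
idx = np.array([[0, 0], [0, 0]])
upd = np.array([1.0, 2.2], dtype=np.float32)
print(tensor_scatter_mul_ref(x, idx, upd))  # out[0, 0] == -0.1 * 1.0 * 2.2 == -0.22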
@@ -4844,7 +5242,7 @@ def tensor_scatter_mul(input_x, indices, updates):
4844
5242
 
4845
5243
 
4846
5244
  def tensor_scatter_div(input_x, indices, updates):
4847
- """
5245
+ r"""
4848
5246
  Creates a new tensor by dividing the values from the positions in `input_x` indicated by
4849
5247
`indices`, with values from `updates`. When multiple values are provided for the same
4850
5248
index, the result of the update will be to divide by these values in turn. Except that
@@ -4854,18 +5252,25 @@ def tensor_scatter_div(input_x, indices, updates):
4854
5252
  there must be a corresponding value in `updates`. The shape of `updates` should be
4855
5253
  equal to the shape of `input_x[indices]`. For more details, see use cases.
4856
5254
 
5255
+ .. math::
5256
+ output\left [indices \right ] = input\_x \div update
5257
+
4857
5258
  Note:
4858
- - If some values of the `indices` are out of bound, instead of raising an index error,
4859
- the corresponding `updates` will not be updated to `input_x`.
5259
+ - On GPU, if some values of the `indices` are out of bound, instead of raising an index error,
5260
+ the corresponding `updates` will not be applied to `input_x`.
5261
+ - On CPU, if some values of the `indices` are out of bound, an index error will be raised.
5262
+ - On Ascend, out-of-bound checking is not supported; if some values of the `indices` are out of bound,
5263
+ unknown errors may occur.
4860
5264
  - The operator can't handle division by 0 exceptions, so the user needs to make sure
4861
5265
  there is no 0 value in `updates`.
4862
5266
 
4863
5267
  Args:
4864
- input_x (Tensor): The target tensor. The dimension of input_x must be no less than indices.shape[-1].
5268
+ input_x (Tensor): The input tensor. The dimension of input_x must be no less than indices.shape[-1].
4865
5269
  indices (Tensor): The index of input tensor whose data type is int32 or int64.
4866
5270
  The rank must be at least 2.
4867
- updates (Tensor): The tensor to update the input tensor, has the same type as input,
4868
- and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
5271
+ updates (Tensor): The tensor to update the `input_x` tensor, has the same type as `input_x`.
5272
+ And the shape of `updates` should be
5273
+ equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
4869
5274
 
4870
5275
  Returns:
4871
5276
  Tensor, has the same shape and type as `input_x`.
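Because the operator cannot trap division by zero, the zero check stays with the caller. A hedged NumPy sketch of the division semantics shows why a zero in `updates` silently poisons the result:

import numpy as np

def tensor_scatter_div_ref(input_x, indices, updates):
    # out[idx] /= update for every row of `indices`, applied in order.
    out = input_x.copy()
    for idx, upd in zip(indices, updates):
        out[tuple(idx)] /= upd
    return out

x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
print(tensor_scatter_div_ref(x, np.array([[0, 0]]), np.array([0.0])))  # inf at [0, 0]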
@@ -4906,8 +5311,7 @@ def scalar_to_tensor(input_x, dtype=mstype.float32):
4906
5311
 
4907
5312
  Args:
4908
5313
  input_x (Union[bool, int, float]): The input is a scalar. Only constant value is allowed.
4909
- dtype (mindspore.dtype): The target data type. Default: mindspore.float32. Only
4910
- constant value is allowed.
5314
+ dtype (mindspore.dtype): The target data type. Only constant value is allowed. Default: ``mstype.float32``.
4911
5315
 
4912
5316
  Returns:
4913
5317
  Tensor. 0-D Tensor and the content is the input.
@@ -4919,6 +5323,8 @@ def scalar_to_tensor(input_x, dtype=mstype.float32):
4919
5323
  ``Ascend`` ``GPU`` ``CPU``
4920
5324
 
4921
5325
  Examples:
5326
+ >>> import mindspore
5327
+ >>> from mindspore import ops
4922
5328
  >>> data = 1
4923
5329
  >>> output = ops.scalar_to_tensor(data, mindspore.float32)
4924
5330
  >>> print(output)
@@ -4936,7 +5342,7 @@ def tuple_to_array(input_x):
4936
5342
 
4937
5343
  Args:
4938
5344
  input_x (tuple): A tuple of numbers. These numbers have the same type. Only constant value is allowed.
4939
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
5345
+ The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.
4940
5346
 
4941
5347
  Returns:
4942
5348
  Tensor, if the input tuple contains `N` numbers, then the shape of the output tensor is (N,).
@@ -5024,6 +5430,9 @@ def masked_fill(input_x, mask, value):
5024
5430
  ``Ascend`` ``GPU`` ``CPU``
5025
5431
 
5026
5432
  Examples:
5433
+ >>> import mindspore
5434
+ >>> import numpy as np
5435
+ >>> from mindspore import Tensor, ops
5027
5436
  >>> input_x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
5028
5437
  >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
5029
5438
  >>> output = ops.masked_fill(input_x, mask, 0.5)
@@ -5077,7 +5486,7 @@ def diagflat(input, offset=0):
5077
5486
 
5078
5487
  Args:
5079
5488
  input (Tensor): Input Tensor, which is flattened and set as the diagonal of the output.
5080
- offset (int, optional): `offset` controls which diagonal to choose. Default: 0.
5489
+ offset (int, optional): `offset` controls which diagonal to choose. Default: ``0`` .
5081
5490
 
5082
5491
  - When `offset` is zero, the diagonal chosen is the main diagonal.
5083
5492
  - When `offset` is a positive integer, the diagonal chosen is up the main diagonal.
@@ -5094,6 +5503,8 @@ def diagflat(input, offset=0):
5094
5503
  ``Ascend`` ``GPU`` ``CPU``
5095
5504
 
5096
5505
  Examples:
5506
+ >>> import mindspore
5507
+ >>> from mindspore import Tensor, ops
5097
5508
  >>> x = Tensor([1, 2], mindspore.float32)
5098
5509
  >>> output = ops.diagflat(x, 1)
5099
5510
  >>> print(output)
@@ -5132,11 +5543,11 @@ def col2im(input_x, output_size, kernel_size, dilation, padding_value, stride):
5132
5543
  kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two int
5133
5544
  for height and width. If type is int, it means that height equal with width. Must be specified.
5134
5545
  dilation (Union[int, tuple[int], list[int]]): The size of the dilation, should be two int
5135
- for height and width. If type is int, it means that height equal with width. Default: 1.
5546
+ for height and width. If type is int, it means that height equal with width.
5136
5547
  padding_value (Union[int, tuple[int], list[int]]): The size of the padding, should be two int
5137
- for height and width. If type is int, it means that height equal with width. Default: 1.
5548
+ for height and width. If type is int, it means that height equal with width.
5138
5549
  stride (Union[int, tuple[int], list[int]]): The size of the stride, should be two int
5139
- for height and width. If type is int, it means that height equal with width. Default: 0.
5550
+ for height and width. If type is int, it means that height equal with width.
5140
5551
 
5141
5552
  Returns:
5142
5553
  A 4D Tensor, with same type as 'input_x'.
@@ -5154,6 +5565,9 @@ def col2im(input_x, output_size, kernel_size, dilation, padding_value, stride):
5154
5565
  ``Ascend`` ``GPU`` ``CPU``
5155
5566
 
5156
5567
  Examples:
5568
+ >>> import numpy as np
5569
+ >>> from mindspore import Tensor, ops
5570
+ >>> from mindspore import dtype as mstype
5157
5571
  >>> x = Tensor(input_data=np.random.rand(16, 16, 4, 25), dtype=mstype.float32)
5158
5572
  >>> output_size = Tensor(input_data=[8, 8], dtype=mstype.int32)
5159
5573
  >>> output = ops.col2im(x, output_size, [2, 2], [2, 2], [2, 2], [2, 2])
@@ -5172,10 +5586,10 @@ def _split_int(x, split_size_or_sections, axis):
5172
5586
  arr_shape = x.shape
5173
5587
  length_along_dim = arr_shape[axis]
5174
5588
  if split_size_or_sections > length_along_dim:
5175
- res = P.Split(axis, 1)(x)
5589
+ res = _get_cache_prim(P.Split)(axis, 1)(x)
5176
5590
  elif length_along_dim % split_size_or_sections == 0:
5177
5591
  sections = length_along_dim // split_size_or_sections
5178
- res = P.Split(axis, sections)(x)
5592
+ res = _get_cache_prim(P.Split)(axis, sections)(x)
5179
5593
  else:
5180
5594
  num_sections = length_along_dim // split_size_or_sections
5181
5595
  length1 = num_sections * split_size_or_sections
@@ -5184,8 +5598,8 @@ def _split_int(x, split_size_or_sections, axis):
5184
5598
  size1 = _tuple_setitem(arr_shape, axis, length1)
5185
5599
  start2 = _tuple_setitem(start1, axis, length1)
5186
5600
  size2 = _tuple_setitem(arr_shape, axis, length2)
5187
- res = P.Split(axis, num_sections)(tensor_slice(x, start1, size1)) + \
5188
- P.Split(axis, 1)(tensor_slice(x, start2, size2))
5601
+ res = _get_cache_prim(P.Split)(axis, num_sections)(tensor_slice(x, start1, size1)) + \
5602
+ _get_cache_prim(P.Split)(axis, 1)(tensor_slice(x, start2, size2))
5189
5603
  return res
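The branch arithmetic above is compact; here is a pure-Python sketch of the section sizes it produces for an uneven split (local helper, not MindSpore API):

def split_sizes(length, split_size):
    num_sections = length // split_size
    sizes = [split_size] * num_sections  # full-size chunks
    if length % split_size:
        sizes.append(length - num_sections * split_size)  # trailing remainder
    return sizes

print(split_sizes(9, 4))  # [4, 4, 1]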
5190
5604
 
5191
5605
 
@@ -5202,7 +5616,8 @@ def _split_sub_tensors(x, split_size_or_sections, axis):
5202
5616
  strides = _list_comprehensions(x.ndim, 1, True)
5203
5617
  begin = _list_comprehensions(x.ndim, 0)
5204
5618
  end = _list_comprehensions(x.shape)
5205
- for i, idx in enumerate(new_indices):
5619
+ for i in ms_arrange(len(new_indices)):
5620
+ idx = new_indices[i]
5206
5621
  begin[axis] = 0 if i == 0 else new_indices[i - 1]
5207
5622
  end[axis] = idx
5208
5623
  sliced_tensor = strided_slice(x, tuple(begin), tuple(end), strides)
@@ -5222,7 +5637,7 @@ def split(tensor, split_size_or_sections, axis=0):
5222
5637
  if `tensor.shape[axis]` is not divisible by `split_size_or_sections`.
5223
5638
  If `split_size_or_sections` is a list type, then `tensor` will be split into len(split_size_or_sections)
5224
5639
  chunks with sizes `split_size_or_sections` along the given `axis`.
5225
- axis (int): The axis along which to split. Default: 0.
5640
+ axis (int): The axis along which to split. Default: ``0`` .
5226
5641
 
5227
5642
  Returns:
5228
5643
  A tuple of sub-tensors.
@@ -5239,6 +5654,8 @@ def split(tensor, split_size_or_sections, axis=0):
5239
5654
  ``Ascend`` ``GPU`` ``CPU``
5240
5655
 
5241
5656
  Examples:
5657
+ >>> import numpy as np
5658
+ >>> from mindspore import ops, Tensor
5242
5659
  >>> input_x = np.arange(9).astype("float32")
5243
5660
  >>> output = ops.split(Tensor(input_x), 3)
5244
5661
  >>> print(output)
@@ -5276,7 +5693,7 @@ def split(tensor, split_size_or_sections, axis=0):
5276
5693
  return tuple(res)
5277
5694
 
5278
5695
 
5279
- def tril(input, diagonal=0): # pylint: disable=redefined-outer-name
5696
+ def tril(input, diagonal=0): # pylint: disable=redefined-outer-name
5280
5697
  """
5281
5698
  Returns the lower triangle part of 'input' (elements that contain the diagonal and below),
5282
5699
and sets the other elements to zeros.
@@ -5300,6 +5717,8 @@ def tril(input, diagonal=0): # pylint: disable=redefined-outer-name
5300
5717
  ``Ascend`` ``GPU`` ``CPU``
5301
5718
 
5302
5719
  Examples:
5720
+ >>> import numpy as np
5721
+ >>> from mindspore import Tensor, ops
5303
5722
  >>> x = Tensor(np.array([[ 1, 2, 3, 4],
5304
5723
  ... [ 5, 6, 7, 8],
5305
5724
  ... [10, 11, 12, 13],
@@ -5335,13 +5754,16 @@ def tril(input, diagonal=0): # pylint: disable=redefined-outer-name
5335
5754
  return tril_(input)
5336
5755
 
5337
5756
 
5338
- def triu(input, diagonal=0): # pylint: disable=redefined-outer-name
5757
+ def triu(input, diagonal=0): # pylint: disable=redefined-outer-name
5339
5758
  r"""
5340
5759
Returns the upper triangle part of 'input' (elements that contain the diagonal and above),
5341
5760
and sets the other elements to zeros.
5342
5761
 
5762
+ .. warning::
5763
+ This is an experimental API that is subject to change or deletion.
5764
+
5343
5765
  Args:
5344
- input (Tensor): The input tensor with shape :math:`(N,∗)` where means any number of additional dimensions.
5766
+ input (Tensor): The input tensor with shape :math:`(M, N, *)` where * means any number of additional dimensions.
5345
5767
diagonal (int, optional): An optional attribute that indicates the diagonal to consider, default: 0,
5346
5768
  indicating the main diagonal.
5347
5769
 
@@ -5351,12 +5773,14 @@ def triu(input, diagonal=0): # pylint: disable=redefined-outer-name
5351
5773
  Raises:
5352
5774
  TypeError: If `diagonal` is not an int.
5353
5775
  TypeError: If `input` is not a Tensor.
5354
- ValueError: If length of shape of `input` is less than 1.
5776
+ ValueError: If the dimension of `input` is less than 2.
5355
5777
 
5356
5778
  Supported Platforms:
5357
- ``GPU`` ``CPU``
5779
+ ``Ascend`` ``GPU`` ``CPU``
5358
5780
 
5359
5781
  Examples:
5782
+ >>> import numpy as np
5783
+ >>> from mindspore import Tensor, ops
5360
5784
  >>> x = Tensor(np.array([[ 1, 2, 3, 4],
5361
5785
  ... [ 5, 6, 7, 8],
5362
5786
  ... [10, 11, 12, 13],
@@ -5391,7 +5815,7 @@ def triu(input, diagonal=0): # pylint: disable=redefined-outer-name
5391
5815
  return _get_cache_prim(P.Triu)(diagonal)(input)
5392
5816
 
5393
5817
 
5394
- @constexpr
5818
+ @_primexpr
5395
5819
  def _canonicalize_axis(axis, ndim):
5396
5820
  """
5397
5821
  Check axes are within the number of dimensions of tensor x and normalize the negative axes.
@@ -5420,7 +5844,7 @@ def _canonicalize_axis(axis, ndim):
5420
5844
  raise ValueError(f"duplicate axis in {axis}.")
5421
5845
 
5422
5846
 
5423
- @constexpr
5847
+ @_primexpr
5424
5848
  def _list_comprehensions(obj, item=None, return_tuple=False):
5425
5849
  """
5426
5850
  Generates a new list or tuple by list comprehension.
@@ -5428,17 +5852,19 @@ def _list_comprehensions(obj, item=None, return_tuple=False):
5428
5852
  Args:
5429
5853
  obj (Union[int, list, tuple]):
5430
5854
  If integer, it will be the length of the returned tuple/list.
5431
- item: The value to be filled. Default: None.
5432
- If None, the values in the new list/tuple are the same as obj
5855
+ item: The value to be filled. Default: ``None`` .
5856
+ If ``None`` , the values in the new list/tuple are the same as obj
5433
5857
  or range(obj) when obj is integer.
5434
- return_tuple(bool): If true, returns tuple, else returns list.
5858
+ return_tuple(bool): If ``true`` , returns tuple, else returns list.
5435
5859
 
5436
5860
  Returns:
5437
5861
  List or tuple.
5438
5862
  """
5439
5863
  lst = obj
5440
5864
  if isinstance(obj, int):
5441
- lst = np.arange(obj)
5865
+ lst = []
5866
+ for i in ms_arrange(obj):
5867
+ lst.append(i)
5442
5868
  if item is None:
5443
5869
  res = list(lst)
5444
5870
  else:
@@ -5448,7 +5874,7 @@ def _list_comprehensions(obj, item=None, return_tuple=False):
5448
5874
  return res
5449
5875
 
5450
5876
 
5451
- @constexpr
5877
+ @_primexpr
5452
5878
  def _tuple_setitem(tup, idx, value):
5453
5879
  """
5454
5880
  Returns a tuple with specified `idx` set to `value`.
@@ -5471,7 +5897,8 @@ def _tensor_split_sub_tensors(x, indices_or_sections, axis):
5471
5897
  strides = _list_comprehensions(x.ndim, 1, True)
5472
5898
  begin = _list_comprehensions(x.ndim, 0)
5473
5899
  end = _list_comprehensions(x.shape)
5474
- for i, idx in enumerate(indices_or_sections):
5900
+ for i in ms_arrange(len(indices_or_sections)):
5901
+ idx = indices_or_sections[i]
5475
5902
  begin[axis] = 0 if i == 0 else indices_or_sections[i - 1]
5476
5903
  end[axis] = idx
5477
5904
  sliced_tensor = strided_slice(x, tuple(begin), tuple(end), strides)
@@ -5518,18 +5945,17 @@ def tensor_split(input, indices_or_sections, axis=0):
5518
5945
 
5519
5946
  - If `indices_or_sections` is an integer n, input tensor will be split into n sections.
5520
5947
 
5521
- - If :math:`input.size(axis)` can be divisible by n, sub-sections will have equal size
5522
- :math:`input.size(axis) / n` .
5523
- - If :math:`input.size(axis)` is not divisible by n, the first :math:`input.size(axis) % n` sections
5524
- will have size :math:`x.size(axis) // n + 1` , and the rest will have
5525
- size :math:`input.size(axis) // n` .
5526
-
5948
+ - If :math:`input.shape[axis]` is divisible by n, sub-sections will have equal size
5949
+ :math:`input.shape[axis] / n` .
5950
+ - If :math:`input.shape[axis]` is not divisible by n, the first :math:`input.shape[axis] % n` sections
5951
+ will have size :math:`input.shape[axis] // n + 1` , and the rest will have
5952
+ size :math:`input.shape[axis] // n` . See the worked example below.
5527
5953
  - If `indices_or_sections` is of type tuple(int) or list(int), the input tensor will be split at the
5528
5954
  indices in the list or tuple. For example, given parameters :math:`indices\_or\_sections=[1, 4]`
5529
5955
  and :math:`axis=0` , the input tensor will be split into sections :math:`input[:1]` ,
5530
5956
  :math:`input[1:4]` , and :math:`input[4:]` .
5531
5957
 
5532
- axis (int): The axis along which to split. Default: 0.
5958
+ axis (int): The axis along which to split. Default: ``0`` .
5533
5959
 
5534
5960
  Returns:
5535
5961
  A tuple of sub-tensors.
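Worked example of the integer rule referenced above:

import numpy as np
from mindspore import Tensor, ops

# 9 elements into n=4 sections: the first 9 % 4 = 1 section gets
# 9 // 4 + 1 = 3 elements, the remaining sections get 9 // 4 = 2 each.
x = Tensor(np.arange(9).astype("float32"))
parts = ops.tensor_split(x, 4)
print([p.shape for p in parts])  # [(3,), (2,), (2,), (2,)]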
@@ -5545,6 +5971,8 @@ def tensor_split(input, indices_or_sections, axis=0):
5545
5971
  ``Ascend`` ``GPU`` ``CPU``
5546
5972
 
5547
5973
  Examples:
5974
+ >>> import numpy as np
5975
+ >>> from mindspore import Tensor, ops
5548
5976
  >>> input_x = np.arange(9).astype("float32")
5549
5977
  >>> output = ops.tensor_split(Tensor(input_x), 3)
5550
5978
  >>> print(output)
@@ -5594,6 +6022,8 @@ def vsplit(input, indices_or_sections):
5594
6022
  ``Ascend`` ``GPU`` ``CPU``
5595
6023
 
5596
6024
  Examples:
6025
+ >>> import numpy as np
6026
+ >>> from mindspore import Tensor, ops
5597
6027
  >>> input_x = np.arange(9).reshape((3, 3)).astype('float32')
5598
6028
  >>> output = ops.vsplit(Tensor(input_x), 3)
5599
6029
  >>> print(output)
@@ -5628,6 +6058,8 @@ def hsplit(input, indices_or_sections):
5628
6058
  ``Ascend`` ``GPU`` ``CPU``
5629
6059
 
5630
6060
  Examples:
6061
+ >>> import numpy as np
6062
+ >>> from mindspore import Tensor, ops
5631
6063
  >>> input_x = np.arange(6).reshape((2, 3)).astype('float32')
5632
6064
  >>> output = ops.hsplit(Tensor(input_x), 3)
5633
6065
  >>> print(output)
@@ -5659,6 +6091,8 @@ def dsplit(input, indices_or_sections):
5659
6091
  ``Ascend`` ``GPU`` ``CPU``
5660
6092
 
5661
6093
  Examples:
6094
+ >>> import numpy as np
6095
+ >>> from mindspore import Tensor, ops
5662
6096
  >>> input_x = np.arange(6).reshape((1, 2, 3)).astype('float32')
5663
6097
  >>> output = ops.dsplit(Tensor(input_x), 3)
5664
6098
  >>> print(output)
@@ -5674,13 +6108,16 @@ def dsplit(input, indices_or_sections):
5674
6108
  return tensor_split(input, indices_or_sections, 2)
5675
6109
 
5676
6110
 
5677
- def _init_and_select_elem(input, initial, where, cmp_fn): # pylint: disable=redefined-outer-name
6111
+ def _init_and_select_elem(input, initial, where, cmp_fn): # pylint: disable=redefined-outer-name
5678
6112
  """Initialize the input according to Initial, and select the element according to where."""
5679
6113
  if initial is not None:
5680
6114
  initial = ops.fill(input.dtype, input.shape, initial)
5681
6115
  input = cmp_fn(input, initial)
5682
6116
 
5683
- if isinstance(where, Tensor):
6117
+ if where is not None and not isinstance(where, Tensor):
6118
+ where = Tensor(where, dtype=mstype.bool_)
6119
+
6120
+ if where is not None and (where.shape or not where):
5684
6121
  if initial is None:
5685
6122
  raise ValueError('initial value must be provided for where masks')
5686
6123
  where = where.broadcast_to(input.shape)
@@ -5689,13 +6126,15 @@ def _init_and_select_elem(input, initial, where, cmp_fn): # pylint: disable=r
5689
6126
  return input
5690
6127
 
5691
6128
 
5692
- def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylint: disable=redefined-outer-name
6129
+ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylint: disable=redefined-outer-name
5693
6130
  """
5694
6131
  Calculates the maximum value along with the given axis for the input tensor. It returns the maximum values and
5695
6132
  indices.
5696
6133
 
5697
6134
  Note:
5698
- In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
6135
+ - In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
6136
+ - When `axis` is ``None``, `keepdims` and the subsequent parameters have no
6137
+ effect, and the returned index is fixed to 0.
5699
6138
 
5700
6139
  .. warning::
5701
6140
  - If there are multiple maximum values, the index of the first maximum value is used.
@@ -5705,16 +6144,18 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pyl
5705
6144
 
5706
6145
  Args:
5707
6146
  input (Tensor): The input tensor, can be any dimension. Complex tensor is not supported for now.
5708
- axis (int): The dimension to reduce. Default: 0.
6147
+ axis (int): The dimension to reduce. When `axis` is ``None``, the maximum value of all elements
6148
+ in `input` is computed. Default: ``None`` .
5709
6149
  keepdims (bool): Whether to reduce dimension, if true, the output will keep same dimension with the input,
5710
- the output will reduce dimension if false. Default: False.
6150
+ the output will reduce dimension if false. Default: ``False`` .
5711
6151
 
5712
6152
  Keyword Args:
5713
6153
  initial (scalar, optional): The minimum value of an output element. Must be present to allow computation
5714
- on empty slice. Default: None.
5715
- where (Tensor[bool], optional): A Tensor indicating whether you need to replace the primitive value in 'input'
5716
- with the initial value. If True, do not replace, if False, replace. The 'where' position is False and the
5717
- corresponding 'initial' value must be provided. Default value: None, which indicates True by default.
6154
+ on empty slice. Default: ``None`` .
6155
+ where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input`
6156
+ with the value in `initial`. If ``True`` , do not replace, otherwise replace. For the index of ``True``
6157
+ in `where`, the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates
6158
+ ``True`` by default.
5718
6159
 
5719
6160
  Returns:
5720
6161
  tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the maximum value of the input
@@ -5722,8 +6163,9 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pyl
5722
6163
 
5723
6164
- values (Tensor) - The maximum value of the input tensor, with the same shape as index, and same dtype as `input`.
5724
6165
  - index (Tensor) - The index for the maximum value of the input tensor, with dtype int32. If `keepdims`
5725
- is true, the shape of output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`.
5726
- Otherwise, the shape is :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
6166
+ is true, the shape of output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1, input_{axis+1},
6167
+ ..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1},
6168
+ ..., input_N)` .
5727
6169
 
5728
6170
  Raises:
5729
6171
  TypeError: If `input` is not Tensor.
@@ -5735,15 +6177,24 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pyl
5735
6177
  ``Ascend`` ``GPU`` ``CPU``
5736
6178
 
5737
6179
  Examples:
6180
+ >>> import mindspore
6181
+ >>> import numpy as np
6182
+ >>> from mindspore import Tensor, ops
5738
6183
  >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
5739
- >>> output, index, = ops.max(x, keepdims=True)
6184
+ >>> output, index = ops.max(x)
5740
6185
  >>> print(output, index)
5741
6186
  0.7 0
6187
+ >>> y = Tensor(np.array([[0.0, 0.3, 0.4, 0.5, 0.1],
6188
+ ... [3.2, 0.4, 0.1, 2.9, 4.0]]), mindspore.float32)
6189
+ >>> output, index = ops.max(y, axis=0, keepdims=True)
6190
+ >>> print(output, index)
6191
+ [[3.2 0.4 0.4 2.9 4. ]] [[1 1 0 1 1]]
5742
6192
  """
5743
6193
  if not input.shape:
5744
6194
  return (input, Tensor(0, dtype=mstype.int32))
5745
6195
  if axis is None:
5746
- return (reduce_max(input), Tensor(0, dtype=mstype.int32))
6196
+ reduce_max_op = _get_cache_prim(P.ReduceMax)()
6197
+ return (reduce_max_op(input), Tensor(0, dtype=mstype.int32))
5747
6198
  if initial is not None and not isinstance(initial, numbers.Number):
5748
6199
  raise TypeError(f"For 'max', 'initial' must be a scalar, but got {type(initial)}")
5749
6200
  if axis is not None and not isinstance(axis, int):
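A sketch of the `initial`/`where` keywords described in the docstring, assuming the masking behavior implemented by `_init_and_select_elem` (entries where `where` is False are treated as `initial` before the reduction):

import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
mask = Tensor(np.array([True, True, True, False, True]))
# The masked-out 0.7 is replaced by initial=-1.0, so 0.6 at index 2 wins.
values, index = ops.max(x, axis=0, initial=-1.0, where=mask)
print(values, index)  # 0.6 2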
@@ -5760,28 +6211,34 @@ def argmax(input, dim=None, keepdim=False):
5760
6211
 
5761
6212
  Args:
5762
6213
  input (Tensor): Input tensor.
5763
- dim (Union[int, None], optional): The dimension to reduce. If `dim` is None, the indices of the maximum
5764
- value within the flattened input will be returned. Default: None.
6214
+ dim (Union[int, None], optional): The dimension to reduce. If `dim` is ``None`` , the indices of the maximum
6215
+ value within the flattened input will be returned. Default: ``None`` .
5765
6216
  keepdim (bool, optional): Whether the output tensor retains the specified
5766
- dimension. Ignored if `dim` is None. Default: False.
6217
+ dimension. Ignored if `dim` is None. Default: ``False`` .
5767
6218
 
5768
6219
  Returns:
5769
6220
  Tensor, indices of the maximum values across a dimension.
5770
6221
 
5771
6222
  Raises:
6223
+ TypeError: If `keepdim` is not bool.
5772
6224
  ValueError: If `dim` is out of range.
5773
6225
 
5774
6226
  Supported Platforms:
5775
6227
  ``Ascend`` ``GPU`` ``CPU``
5776
6228
 
5777
6229
  Examples:
6230
+ >>> import numpy as np
6231
+ >>> from mindspore import Tensor, ops
5778
6232
  >>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
5779
6233
  >>> output = ops.argmax(x, dim=-1)
5780
6234
  >>> print(output)
5781
6235
  [1 0 0]
5782
6236
  """
6237
+ _check_attr_dtype("keepdim", keepdim, [bool], "argmax")
5783
6238
  if not input.shape:
5784
6239
  return Tensor(0)
6240
+ if input.dtype == mstype.bool_:
6241
+ input = input.astype(mstype.int32)
5785
6242
  is_dim_none = False
5786
6243
  if dim is None:
5787
6244
  input = reshape_(input, (-1,))
@@ -5793,13 +6250,15 @@ def argmax(input, dim=None, keepdim=False):
5793
6250
  return out
5794
6251
 
5795
6252
 
5796
- def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pylint: disable=redefined-outer-name
6253
+ def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pylint: disable=redefined-outer-name
5797
6254
  """
5798
6255
  Calculates the minimum value along with the given axis for the input tensor. It returns the minimum values and
5799
6256
  indices.
5800
6257
 
5801
6258
  Note:
5802
- In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
6259
+ - In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
6260
+ - When `axis` is ``None``, `keepdims` and the subsequent parameters have no
6261
+ effect, and the returned index is fixed to 0.
5803
6262
 
5804
6263
  .. warning::
5805
6264
  - If there are multiple minimum values, the index of the first minimum value is used.
@@ -5807,16 +6266,17 @@ def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pyl
5807
6266
 
5808
6267
  Args:
5809
6268
  input (Tensor): The input tensor, can be any dimension. Complex tensor is not supported for now.
5810
- axis (int): The dimension to reduce. Default: None.
5811
- keepdims (bool): Whether to reduce dimension, if true the output will keep the same dimension as the input,
5812
- the output will reduce dimension if false. Default: False.
6269
+ axis (int): The dimension to reduce. Default: ``None`` .
6270
+ keepdims (bool): Whether to reduce dimension, if ``True`` the output will keep the same dimension as the input,
6271
+ the output will reduce dimension if ``False`` . Default: ``False`` .
5813
6272
 
5814
6273
  Keyword Args:
5815
6274
  initial (scalar, optional): The maximum value of an output element. Must be present to allow computation
5816
- on empty slice. Default: None.
6275
+ on empty slice. Default: ``None`` .
5817
6276
  where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input`
5818
- with the value in `initial`. If True, do not replace, otherwise replace. For the index of True in `where`,
5819
- the corresponding value in `initial` must be assigned. Default: None, which indicates True by default.
6277
+ with the value in `initial`. If ``True`` , do not replace, otherwise replace. For the index of ``True``
6278
+ in `where`, the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates
6279
+ ``True`` by default.
5820
6280
 
5821
6281
  Returns:
5822
6282
  tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the minimum value of the input
@@ -5825,8 +6285,9 @@ def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pyl
5825
6285
  - **values** (Tensor) - The minimum value of input tensor, with the same
5826
6286
shape as `index`, and same dtype as `input`.
5827
6287
  - **index** (Tensor) - The index for the minimum value of the input tensor, with dtype int32. If `keepdims`
5828
- is true, the shape of output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`.
5829
- Otherwise, the shape is :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
6288
+ is true, the shape of output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1, input_{axis+1},
6289
+ ..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1},
6290
+ ..., input_N)` .
5830
6291
 
5831
6292
  Raises:
5832
6293
TypeError: If `input` is not Tensor.
@@ -5838,6 +6299,9 @@ def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pyl
5838
6299
  ``Ascend`` ``GPU`` ``CPU``
5839
6300
 
5840
6301
  Examples:
6302
+ >>> import mindspore
6303
+ >>> import numpy as np
6304
+ >>> from mindspore import Tensor, ops
5841
6305
  >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
5842
6306
  >>> output, index = ops.min(x, keepdims=True)
5843
6307
  >>> print(output, index)
@@ -5867,9 +6331,10 @@ def aminmax(input, *, axis=0, keepdims=False):
5867
6331
 
5868
6332
  Keyword Args:
5869
6333
  axis (int, optional): The dimension to reduce. The value range of `axis` is [-rank, rank),
5870
- where "rank" is the dimension of `input`. Default: 0.
6334
+ where "rank" is the dimension of `input`. If `axis` is None, computes the minimum and maximum value
6335
+ along the entire input tensor. Default: ``0`` .
5871
6336
  keepdims (bool, optional): Whether to maintain dimension. When set to True, the output will keep the same
5872
- dimension as the input, or the dimension specified by `axis` is reduced. Default: False.
6337
+ dimension as the input; otherwise, the dimension specified by `axis` is reduced. Default: ``False`` .
5873
6338
 
5874
6339
  Returns:
5875
6340
  tuple (Tensor), containing the minimum value and maximum value of the input tensor.
@@ -5881,13 +6346,16 @@ def aminmax(input, *, axis=0, keepdims=False):
5881
6346
 
5882
6347
  Raises:
5883
6348
  TypeError: If `keepdims` is not a bool.
5884
- TypeError: If `axis` is not an int.
6349
+ TypeError: If `axis` is neither an int nor ``None`` .
5885
6350
  ValueError: If `axis` is not in range [-rank, rank).
5886
6351
 
5887
6352
  Supported Platforms:
5888
6353
  ``Ascend`` ``GPU`` ``CPU``
5889
6354
 
5890
6355
  Examples:
6356
+ >>> import mindspore
6357
+ >>> import numpy as np
6358
+ >>> from mindspore import Tensor, ops
5891
6359
  >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
5892
6360
  >>> output0, output1 = ops.aminmax(x)
5893
6361
  >>> print(output0, output1)
@@ -5895,11 +6363,25 @@ def aminmax(input, *, axis=0, keepdims=False):
5895
6363
  >>> output2, output3 = ops.aminmax(x, axis=-1, keepdims=True)
5896
6364
  >>> print(output2, output3)
5897
6365
  [0.] [0.7]
6366
+ >>> x = Tensor(np.array([[0.0, 0.4, 0.6, 0.7, 0.1], [0.78, 0.97, 0.5, 0.82, 0.99]]), mindspore.float32)
6367
+ >>> output4, output5 = ops.aminmax(x, axis=None, keepdims=True)
6368
+ >>> print(output4, output5)
6369
+ [[0.]] [[0.99]]
5898
6370
  """
6371
+ if axis is None:
6372
+ output0, _ = ops.min(input, axis, keepdims)
6373
+ output1, _ = ops.max(input, axis, keepdims)
6374
+ if keepdims is True:
6375
+ output0 = ops.reshape(output0, [1] * input.ndim)
6376
+ output1 = ops.reshape(output1, [1] * input.ndim)
6377
+ return output0, output1
5899
6378
  argmin_with_value_op = P.ArgMinWithValue(axis, keepdims)
5900
6379
  argmax_with_value_op = P.ArgMaxWithValue(axis, keepdims)
5901
6380
  _, output0 = argmin_with_value_op(input)
5902
6381
  _, output1 = argmax_with_value_op(input)
6382
+ if keepdims is True and input.ndim == 0:
6383
+ output0 = ops.reshape(output0, [1])
6384
+ output1 = ops.reshape(output1, [1])
5903
6385
  return output0, output1
5904
6386
 
5905
6387
 
@@ -5940,6 +6422,7 @@ def narrow(input, axis, start, length):
5940
6422
  [ 5 6]
5941
6423
  [ 8 9]]
5942
6424
  """
6425
+ validator.check_value_type("input", input, Tensor, "narrow")
5943
6426
  validator.check_axis_in_range(axis, input.ndim)
5944
6427
  validator.check_int_range(start, 0, input.shape[axis], validator.INC_LEFT)
5945
6428
  validator.check_int_range(length, 1, input.shape[axis] - start, validator.INC_BOTH)
@@ -5974,9 +6457,11 @@ def unsorted_segment_sum(input_x, segment_ids, num_segments):
5974
6457
  is negative, the value will be ignored. 'num_segments' must be equal to the number of different segment_ids.
5975
6458
 
5976
6459
  Args:
5977
- input_x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`.
5978
- segment_ids (Tensor): Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
5979
- num_segments (Union[int, Tensor], optional): Set :math:`z` as num_segments.
6460
+ input_x (Tensor): The input Tensor containing the data to be summed.
6461
+ The shape is :math:`(x_1, x_2, ..., x_R)`.
6462
+ segment_ids (Tensor): The label that indicates the segment to which each element belongs.
6463
+ Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
6464
+ num_segments (Union[int, Tensor], optional): Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
5980
6465
 
5981
6466
  Returns:
5982
6467
  Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
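A hedged NumPy reference for the 1-D case, showing the out-of-range rule stated above (ids outside [0, num_segments) are simply skipped):

import numpy as np

def unsorted_segment_sum_ref(x, segment_ids, num_segments):
    out = np.zeros((num_segments,) + x.shape[1:], x.dtype)
    for i, sid in enumerate(segment_ids):
        if 0 <= sid < num_segments:  # negative or too-large ids are ignored
            out[sid] += x[i]
    return out

x = np.array([1., 2., 3., 4.], dtype=np.float32)
print(unsorted_segment_sum_ref(x, np.array([0, 0, 1, -1]), 2))  # [3. 3. 0.]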
@@ -6033,10 +6518,11 @@ def topk(input, k, dim=None, largest=True, sorted=True):
6033
6518
  Args:
6034
6519
  input (Tensor): Input to be computed, data type must be float16, float32 or int32.
6035
6520
  k (int): The number of top or bottom elements to be computed along the last dimension, constant input is needed.
6036
- dim (int, optional): The dimension to sort along. Default: None.
6037
- largest (bool, optional): If largest is False then the k smallest elements are returned. Default: True.
6038
- sorted (bool, optional): If True, the obtained elements will be sorted by the values in descending order.
6039
- If False, the obtained elements will not be sorted. Default: True.
6521
+ dim (int, optional): The dimension to sort along. Default: ``None`` .
6522
+ largest (bool, optional): If largest is ``False`` then the k smallest elements are returned.
6523
+ Default: ``True`` .
6524
+ sorted (bool, optional): If ``True`` , the obtained elements will be sorted by the values in descending order.
6525
+ If ``False`` , the obtained elements will not be sorted. Default: ``True`` .
6040
6526
 
6041
6527
  Returns:
6042
6528
  A tuple consisting of `values` and `indexes`.
@@ -6100,53 +6586,19 @@ def topk(input, k, dim=None, largest=True, sorted=True):
6100
6586
 
6101
6587
  def expand(input_x, size):
6102
6588
  r"""
6103
- Returns a new tensor where the dimension of size is expanded to a larger size.
6104
-
6105
- Note:
6106
- - If the `size` for a dimension is -1, it means no change for the size of that dimension.
6107
- - When a Tensor is expanded to a larger number of dimensions, the new ones will be appended at
6108
- the front, and for the new dimensions, the `size` can not be -1.
6109
-
6110
- Args:
6111
- input_x (Tensor): A Tensor to be expanded. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
6112
- size (Tensor): The expanded shape of `input_x`.
6113
-
6114
- Returns:
6115
- y (Tensor) - Tensor after expansion whose shape is `size`.
6116
-
6117
- Raises:
6118
- TypeError: If `input_x` or `size` is not Tensor.
6119
- TypeError: If the type of `size` is not one of the following dtype: int16, int32, int64.
6120
- ValueError: If the size of `size` is less than the size of `input_x.shape`.
6121
- ValueError: If `size` is not a 1-D tensor.
6122
- ValueError: If the expanded `size` is not equal to the existing shape of `input_x` at a dimension
6123
- that is not 1.
6124
- ValueError: If the expanded `size` < 0 and it is in a leading position, corresponding to
6125
- a non-existing dimension in `input_x`.
6126
- ValueError: If the number of elements of output is more than 1000000.
6127
-
6128
- Supported Platforms:
6129
- ``Ascend`` ``CPU``
6130
-
6131
- Examples:
6132
- >>> input_x = Tensor(np.array([[2], [3], [4]]), mindspore.float32)
6133
- >>> size = Tensor(np.array([3,4]), mindspore.int32)
6134
- >>> y = ops.expand(input_x, size)
6135
- >>> print(y)
6136
- [[2. 2. 2. 2.]
6137
- [3. 3. 3. 3.]
6138
- [4. 4. 4. 4.]]
6589
+ :func:`mindspore.ops.expand` will be deprecated in the future.
6590
+ Please use :func:`mindspore.ops.broadcast_to` instead.
6139
6591
  """
6140
6592
  expand_op = _get_cache_prim(Expand)()
6141
6593
  return expand_op(input_x, size)
6142
6594
 
6143
6595
 
6144
- @constexpr
6596
+ @_primexpr
6145
6597
  def _check_fold_param(param, param_name):
6146
6598
  """Check the parameters of fold op."""
6147
6599
  validator.check_value_type(param_name, param, [int, list, tuple], 'fold')
6148
6600
  param = (param, param) if isinstance(param, int) else param
6149
- validator.check(param_name + " size", len(param), "", 2, validator.EQ, 'fold')
6601
+ validator.check_int(len(param), 2, validator.EQ, param_name, 'fold')
6150
6602
  if param_name == "padding":
6151
6603
  validator.check_non_negative_int_sequence(param, param_name, 'fold')
6152
6604
  else:
@@ -6155,40 +6607,61 @@ def _check_fold_param(param, param_name):
6155
6607
 
6156
6608
 
6157
6609
  def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
6158
- """
6610
+ r"""
6159
6611
  Combines an array of sliding local blocks into a large containing tensor.
6160
6612
 
6613
+ Consider a batched input tensor of shape :math:`(N, C \times \prod(\text{kernel_size}), L)` ,
6614
+ where :math:`N` is the batch dimension, :math:`C \times \prod(\text{kernel_size})` is the
6615
+ total number of values within each block (a block has :math:`\prod(\text{kernel_size})` spatial
6616
+ locations each containing a `C`-channeled vector), and :math:`L` is the total number of such blocks:
6617
+
6618
+ .. math::
6619
+ L = \prod_d \left\lfloor\frac{\text{output_size}[d] + 2 \times \text{padding}[d] %
6620
+ - \text{dilations}[d] \times (\text{kernel_size}[d] - 1) - 1}{\text{strides}[d]} + 1\right\rfloor,
6621
+
6622
+ where :math:`d` is over all spatial dimensions.
6623
+
6624
+ Therefore, `output_size` is the spatial shape of the large containing tensor of the sliding local blocks.
6625
+
6626
+ The `dilation`, `padding` and `stride` arguments specify how the sliding blocks are retrieved.
6627
+
6161
6628
  .. warning::
6162
- - Currently, only 4-D output tensors (batched image-like tensors) are supported.
6629
+ - The input must be a 3-dimensional Tensor with shape
6630
+ :math:`(N, C \times \prod(\text{kernel_size}), L)` .
6631
+ - The output must be a 4-dimensional Tensor with shape
6632
+ :math:`(N, C, output\_size[0], output\_size[1], ...)` .
6163
6633
 
6164
6634
  Args:
6165
- input (Tensor): 4-D Tensor with data type float16 or float32.
6635
+ input (Tensor): 3-D Tensor, supported dtypes: float16, float32, float64, complex64 and complex128.
6166
6636
  output_size (Tensor): 1D tensor with `2` elements of data type int.
6167
6637
  kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two int
6168
6638
  for height and width. If type is int, it means that height equal with width. Must be specified.
6169
6639
  dilation (Union[int, tuple[int], list[int]], optional): The size of the dilation, should be two int
6170
- for height and width. If type is int, it means that height equal with width. Default: 1.
6640
+ for height and width. If type is int, it means that height equal with width. Default: ``1`` .
6171
6641
  padding (Union[int, tuple[int], list[int]], optional): The size of the padding, should be two int
6172
- for height and width. If type is int, it means that height equal with width. Default: 0.
6642
+ for height and width. If type is int, it means that height equal with width. Default: ``0`` .
6173
6643
  stride (Union[int, tuple[int], list[int]], optional): The size of the stride, should be two int
6174
- for height and width. If type is int, it means that height equal with width. Default: 1.
6644
+ for height and width. If type is int, it means that height equal with width. Default: ``1`` .
6175
6645
 
6176
6646
  Returns:
6177
- A Tensor, with same type as `input` , format of the Tensor is (N, C, H, W).
6647
+ A Tensor, with same type as `input` , whose shape is as described above.
6178
6648
 
6179
6649
  Raises:
6180
6650
  TypeError: If `kernel_size`, `dilation`, `padding`, `stride` data type is not int, tuple or list.
6181
6651
  ValueError: If `kernel_size`, `dilation`, `stride` value is not
6182
6652
  greater than zero or elements number more than `2`.
6183
6653
  ValueError: If `padding` value is less than zero or elements number more than `2`.
6184
- ValueError: If `input.shape[2] != kernel_size[0] * kernel_size[1]`.
6185
- ValueError: If `input.shape[3]` does not match the calculated number of sliding blocks.
6654
+ ValueError: If `input.shape[1] != kernel_size[0] * kernel_size[1]` .
6655
+ ValueError: If `input.shape[2]` does not match the calculated number of sliding blocks.
6186
6656
 
6187
6657
  Supported Platforms:
6188
6658
  ``Ascend`` ``GPU`` ``CPU``
6189
6659
 
6190
6660
  Examples:
6191
- >>> x = Tensor(input_data=np.random.rand(16, 16, 4, 25), dtype=mstype.float32)
6661
+ >>> import numpy as np
6662
+ >>> from mindspore import Tensor, ops
6663
+ >>> from mindspore import dtype as mstype
6664
+ >>> x = Tensor(input_data=np.random.rand(16, 64, 25), dtype=mstype.float32)
6192
6665
  >>> output_size = Tensor(input_data=[8, 8], dtype=mstype.int32)
6193
6666
  >>> output = ops.fold(x, output_size, [2, 2], [2, 2], [2, 2], [2, 2])
6194
6667
  >>> print(output.shape)
@@ -6199,10 +6672,14 @@ def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
6199
6672
  padding = _check_fold_param(padding, "padding")
6200
6673
  stride = _check_fold_param(stride, "stride")
6201
6674
  fold_op = _get_cache_prim(Col2Im)(kernel_size, dilation, padding, stride)
6675
+ input_shape = ops.shape(input)
6676
+ k = kernel_size[0] * kernel_size[-1]
6677
+ r_shape = input_shape[:1] + (-1, k) + input_shape[-1:]
6678
+ input = ops.reshape(input, r_shape)
6202
6679
  return fold_op(input, output_size)
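The block count L defined in the docstring can be checked with plain Python; `num_blocks` below is a local helper for illustration, not part of the MindSpore API:

def num_blocks(output_size, kernel_size, dilation, padding, stride):
    L = 1
    for d in range(len(output_size)):
        L *= (output_size[d] + 2 * padding[d]
              - dilation[d] * (kernel_size[d] - 1) - 1) // stride[d] + 1
    return L

# Docstring example: x has shape (16, 64, 25), i.e. C * prod(kernel) = 16 * 4 = 64
# values per block and L = 25 blocks.
print(num_blocks((8, 8), (2, 2), (2, 2), (2, 2), (2, 2)))  # 25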
6203
6680
 
6204
6681
 
6205
- @constexpr
6682
+ @_primexpr
6206
6683
  def _check_unfold_params(param, param_name, param_size):
6207
6684
  """Check the parameters of unfold op."""
6208
6685
  validator.check_value_type(param_name, param, [int, tuple, list], 'unfold')
@@ -6216,32 +6693,60 @@ def _check_unfold_params(param, param_name, param_size):
6216
6693
 
6217
6694
 
6218
6695
  def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
6219
- """
6220
- Reshapes a tensor of format (N, C, H, W) by extracting sliding local blocks from the input Tensor
6221
- and concatenating them along a new dimension.
6696
+ r"""
6697
+ Extracts sliding local blocks from a batched input tensor.
6698
+
6699
+ Consider a batched input tensor of shape :math:`(N, C, *)`,
6700
+ where :math:`N` is the batch dimension, :math:`C` is the channel dimension,
6701
+ and :math:`*` represents arbitrary spatial dimensions. This operation flattens
6702
+ each sliding `kernel_size`-sized block within the spatial dimensions
6703
+ of input `x` into a column (i.e., last dimension) of a 3-D output
6704
+ tensor of shape :math:`(N, C \times \prod(\text{kernel_size}), L)`, where
6705
+ :math:`C \times \prod(\text{kernel_size})` is the total number of values
6706
+ within each block (a block has :math:`\prod(\text{kernel_size})` spatial
6707
+ locations each containing a `C`-channeled vector), and :math:`L` is
6708
+ the total number of such blocks:
6709
+
6710
+ .. math::
6711
+ L = \prod_d \left\lfloor\frac{\text{spatial_size}[d] + 2 \times \text{pads}[d] %
6712
+ - \text{dilations}[d] \times (\text{kernel_size}[d] - 1) - 1}{\text{strides}[d]} + 1\right\rfloor,
6713
+
6714
+ where :math:`\text{spatial_size}` is formed by the spatial dimensions
6715
+ of input `x` (:math:`*` above), and :math:`d` is over all spatial
6716
+ dimensions.
6717
+
6718
+ Therefore, indexing `output` at the last dimension (column dimension)
6719
+ gives all values within a certain block.
6720
+
6721
+ The `dilation`, `padding` and `stride` arguments specify
6722
+ how the sliding blocks are retrieved.
6723
+
6724
+ .. warning::
6725
+ - The output is a 3-dimensional Tensor whose shape is
6726
+ :math:`(N, C \times \prod(\text{kernel_size}), L)` .
6222
6727
 
6223
6728
  .. warning::
6224
- - Currently, only 4-D input tensors (batched image-like tensors) are supported.
6729
+ This is an experimental API that is subject to change or deletion.
6225
6730
 
6226
6731
  Args:
6227
- input (Tensor): 4-D Tensor. Support all real number data type.
6732
+ input (Tensor): 4-D Tensor, supported dtypes: float16, float32, float64, complex64 and complex128.
6228
6733
  kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two int
6229
6734
  for height and width. If type is int, it means that height equal with width. Must be specified.
6230
6735
  dilation (Union[int, tuple[int], list[int]], optional): The dilation of the window, should be two int
6231
- for height and width. If type is int, it means that height equal with width. Default: 1.
6736
+ for height and width. If type is int, it means that height equal with width. Default: ``1`` .
6232
6737
  padding (Union[int, tuple[int], list[int]], optional): The pad of the window, that must be
6233
6738
  a tuple/list of one or two `int` for height and width.
6234
6739
  If one int, pad_height = pad_width.
6235
6740
  If two int, pad_height = padding[0], pad_width = padding[1].
6236
- Default: 0.
6741
+ Default: ``0`` .
6237
6742
  stride (Union[int, tuple[int], list[int]], optional): The stride of the window, should be two int
6238
- for height and width. If type is int, it means that height equal with width. Default: 1.
6743
+ for height and width. If type is int, it means that height equal with width. Default: ``1`` .
6239
6744
 
6240
6745
  Returns:
6241
- A Tensor, with same type as `input`.
6746
+ A Tensor, with same type as `input` , whose shape is as described above.
6242
6747
 
6243
6748
  Raises:
6244
- TypeError: If any data type of `kernel_size`, `stride`, `dilation`, `kernel_size` is not int, tuple or list.
6749
+ TypeError: If any data type of `kernel_size`, `stride`, `dilation`, `padding` is not int, tuple or list.
6245
6750
  ValueError: If `kernel_size`, `dilation`, `stride` value is not
6246
6751
  greater than zero or elements number more than `2`.
6247
6752
  ValueError: If `padding` value is less than zero.
@@ -6250,23 +6755,30 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
6250
6755
  ``Ascend`` ``GPU`` ``CPU``
6251
6756
 
6252
6757
  Examples:
6758
+ >>> import mindspore
6759
+ >>> import numpy as np
6760
+ >>> from mindspore import Tensor, ops
6253
6761
  >>> x = Tensor(np.random.rand(4, 4, 32, 32), mindspore.float64)
6254
6762
  >>> output = ops.unfold(x, kernel_size=3, dilation=1, stride=1)
6255
6763
  >>> print(output.shape)
6256
- (4, 4, 9, 900)
6764
+ (4, 36, 900)
6257
6765
  """
6258
6766
  kernel_size = _check_unfold_params(kernel_size, "kernel_size", [1, 2])
6259
6767
  dilation = _check_unfold_params(dilation, "dilation", [1, 2])
6260
- padding = _check_unfold_params(padding, "padding", [1, 2, 4])
6768
+ padding = _check_unfold_params(padding, "padding", [1, 2])
6261
6769
  stride = _check_unfold_params(stride, "stride", [1, 2])
6262
6770
  unfold_op = _get_cache_prim(Im2Col)(ksizes=kernel_size,
6263
6771
  strides=stride,
6264
6772
  dilations=dilation,
6265
6773
  pads=padding)
6266
- return unfold_op(input)
6774
+ tmp = unfold_op(input)
6775
+ tmp_shape = ops.shape(tmp)
6776
+ out_shape = tmp_shape[:1] + (-1,) + tmp_shape[-1:]
6777
+ out = ops.reshape(tmp, out_shape)
6778
+ return out
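The (4, 36, 900) shape in the docstring example follows directly from the formula above; a quick arithmetic check (plain Python, no framework):

n, c, k = 4, 4, 3
# Per spatial dim: (32 + 2*0 - 1*(3 - 1) - 1) // 1 + 1 = 30 block positions.
h = (32 + 2 * 0 - 1 * (k - 1) - 1) // 1 + 1
print((n, c * k * k, h * h))  # (4, 36, 900)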
6267
6779
 
6268
6780
 
6269
- @constexpr
6781
+ @_primexpr
6270
6782
  def _check_diagonal_axes(dim1, dim2, x_ndim):
6271
6783
  """Check the parameters of unfold op."""
6272
6784
  axes = validator.check_axis_valid((dim1, dim2), x_ndim)
@@ -6286,13 +6798,13 @@ def diagonal(input, offset=0, dim1=0, dim2=1):
6286
6798
  Args:
6287
6799
  input (Tensor): Array from which the diagonals are taken.
6288
6800
  offset (int, optional): Offset of the diagonal from the main diagonal.
6289
- Can be positive or negative. Defaults: 0.
6801
+ Can be positive or negative. Default: ``0`` .
  dim1 (int, optional): Axis to be used as the first axis of the 2-D
  sub-arrays from which the diagonals should be taken. Defaults to
- first axis (0).
+ first axis (0). Default: ``0`` .
  dim2 (int, optional): Axis to be used as the second axis of the 2-D
  sub-arrays from which the diagonals should be taken. Defaults to
- second axis (1).
+ second axis (1). Default: ``1`` .

  Returns:
  Tensor, if `input` is 2-D, a 1-D array containing the diagonal. If
@@ -6300,12 +6812,15 @@ def diagonal(input, offset=0, dim1=0, dim2=1):
  and a new axis inserted at the end corresponding to the diagonal.

  Raises:
+ TypeError: If `dim1` or `dim2` is not an int.
  ValueError: if the input tensor has less than two dimensions.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> from mindspore import Tensor, ops
+ >>> from mindspore import dtype as mstype
  >>> x = Tensor([[0, 1], [2, 3]], mstype.float32)
  >>> output = ops.diagonal(x)
  >>> print(output)
@@ -6314,11 +6829,13 @@ def diagonal(input, offset=0, dim1=0, dim2=1):
  x_ndim = input.ndim
  if x_ndim < 2:
  raise ValueError(f"ops.diagonal requires an array of at least two dimensions")
+ _check_attr_dtype("dim1", dim1, [int], "diagonal")
+ _check_attr_dtype("dim2", dim2, [int], "diagonal")
  dtype = input.dtype

  axes = _check_diagonal_axes(dim1, dim2, x_ndim)
  perm = ()
- for i in np.arange(x_ndim):
+ for i in ms_arrange(x_ndim):
  if i not in axes:
  perm += (i,)
  perm += axes
@@ -6327,39 +6844,106 @@ def diagonal(input, offset=0, dim1=0, dim2=1):
  x_shape = input.shape
  n, m = x_shape[-2:]

- fill_op = _get_cache_prim(P.Fill)()
- e = _get_cache_prim(P.Eye)()(n, m, dtype)
+ e = ops.eye(n, m, dtype)
  if offset >= m or offset <= -n:
- e = fill_op(dtype, (n, m), 0)
- elif offset != 0:
+ zero_shape = x_shape[:-2] + (0,)
+ return ops.zeros(zero_shape, dtype)
+ if offset != 0:
  e = e.astype(mstype.float32)
  if offset > 0:
- e_left = fill_op(mstype.float32, (n, offset), 0)
+ e_left = ops.fill(mstype.float32, (n, offset), 0)
  e_right = e[..., 0:m - offset:1]
- e = _get_cache_prim(P.Concat)(1)((e_left, e_right)).astype(dtype)
+ e = ops.cat((e_left, e_right), 1).astype(dtype)
  elif offset < 0:
- e_upper = fill_op(mstype.float32, (-offset, m), 0)
+ e_upper = ops.fill(mstype.float32, (-offset, m), 0)
  e_lower = e[0:n + offset:1, ...]
- e = _get_cache_prim(P.Concat)(0)((e_upper, e_lower)).astype(dtype)
- e = F.broadcast_to(e, x_shape)
+ e = ops.cat((e_upper, e_lower), 0).astype(dtype)
+ e = ops.broadcast_to(e, x_shape)

- prod_val = _get_cache_prim(P.Mul)()(input, e)
- res = _get_cache_prim(P.ReduceSum)()(prod_val.astype(mstype.float32), -1)
+ prod_val = ops.mul(input, e)
+ res = ops.ReduceSum()(prod_val.astype(mstype.float32), -1)

  begin = ()
- for _ in np.arange((x_ndim - 2)):
+ for _ in ms_arrange(x_ndim - 2):
  begin += (0,)
- last_dim_begin = np.max((0, -offset)).astype(np.int64)
+ last_dim_begin = builtins.max(0, -offset)
  begin += (last_dim_begin,)
  res_size = res.shape[:-1]
- last_dim_end = np.min((x_shape[-2], np.max((0, (x_shape[-1] - offset))))) - last_dim_begin
+ last_dim_end = builtins.min(x_shape[-2], builtins.max(0, x_shape[-1] - offset)) - last_dim_begin
  if last_dim_end <= 0:
  return Tensor([])
  res_size += (last_dim_end,)
- res = _get_cache_prim(P.Slice)()(res, begin, res_size)
+ res = ops.slice(res, begin, res_size)
  return res.astype(dtype)


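The refactor above keeps the same mask-and-reduce algorithm: build an `offset`-shifted identity mask, multiply it into the input, reduce-sum the last axis, and slice the result down to the true diagonal length. A minimal NumPy sketch of that idea (illustrative only; `diagonal_via_mask` is a hypothetical name, not a MindSpore API):

    import numpy as np

    def diagonal_via_mask(x, offset=0):
        # Shifted identity: e[i, j] == 1 exactly when j == i + offset.
        n, m = x.shape[-2:]
        e = np.eye(n, m, k=offset, dtype=x.dtype)
        # Keep only the diagonal entries, then collapse the last axis,
        # mirroring the Mul + ReduceSum pair in the code above.
        res = (x * e).sum(axis=-1)
        # Trim to the actual diagonal length, as the final slice does.
        start = max(0, -offset)
        length = min(n, max(0, m - offset)) - start
        return res[..., start:start + length]

    x = np.arange(9).reshape(3, 3)
    print(diagonal_via_mask(x, 1))  # [1 5]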
+ def _check_is_tensor(param_name, input, cls_name):
+ """Checks that input is a Tensor and raises TypeError otherwise."""
+ if not isinstance(input, Tensor):
+ raise TypeError(f"For {cls_name}, {param_name} must be a Tensor, but got {type(input)}.")
+
+
+ @_primexpr
+ def _check_diagonal_scatter_shape(diag_shape, src_shape):
+ if diag_shape != src_shape:
+ raise ValueError(f"For diagonal_scatter, the shape of src should be equal to the shape of the input diagonal, "
+ f"but got src.shape {src_shape} and diagonal shape {diag_shape}.")
+
+
+ def diagonal_scatter(input, src, offset=0, dim1=0, dim2=1):
+ """
+ `dim1` and `dim2` specify the two dimensions of `input`; the elements in these
+ two dimensions are treated as elements of a matrix, and `src` is embedded on
+ the diagonal of that matrix.
+
+ Args:
+ input (Tensor): Input Tensor, whose dimension is larger than 1.
+ src (Tensor): The source Tensor to embed.
+ offset (int, optional): `offset` controls which diagonal to choose. Default: ``0`` .
+
+ - When `offset` is zero, the diagonal chosen is the main diagonal.
+ - When `offset` is a positive integer, the diagonal chosen is above the main diagonal.
+ - When `offset` is a negative integer, the diagonal chosen is below the main diagonal.
+
+ dim1 (int, optional): Axis to be used as the first axis of the 2-D
+ sub-arrays from which the diagonals should be taken. Default: ``0`` .
+ dim2 (int, optional): Axis to be used as the second axis of the 2-D
+ sub-arrays from which the diagonals should be taken. Default: ``1`` .
+
+ Returns:
+ Tensor after embedding, has the same shape and dtype as `input`.
+
+ Raises:
+ TypeError: If `input` or `src` is not a Tensor.
+ TypeError: If `offset`, `dim1` or `dim2` is not an integer.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore as ms
+ >>> input = ms.ops.zeros((3,3))
+ >>> src = ms.ops.ones(2)
+ >>> out = ms.ops.diagonal_scatter(input, src, 1, dim1=1, dim2=0)
+ >>> print(out)
+ [[0. 0. 0.]
+ [1. 0. 0.]
+ [0. 1. 0.]]
+ """
+ _check_is_tensor("input", input, "diagonal_scatter")
+ _check_is_tensor("src", src, "diagonal_scatter")
+ _check_is_int(offset, "offset", "diagonal_scatter")
+ _check_is_int(dim1, "dim1", "diagonal_scatter")
+ _check_is_int(dim2, "dim2", "diagonal_scatter")
+ input_diag = input.diagonal(offset, dim1, dim2)
+ _check_diagonal_scatter_shape(input_diag.shape, src.shape)
+ embed = ones_like(src)
+ embed = ops.diag_embed(embed, offset, dim1, dim2)
+ embed = input * embed
+ src = ops.diag_embed(src, offset, dim1, dim2)
+ return input + src - embed
+
+
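`diagonal_scatter` is implemented purely arithmetically: a ones mask embedded by `diag_embed` zeroes out the old diagonal, and the embedded `src` is added in, i.e. `out = input + diag_embed(src) - input * diag_embed(ones_like(src))`. A hedged 2-D NumPy sketch of the same identity (`diagonal_scatter_2d` is a hypothetical helper for the default `dim1=0, dim2=1` case):

    import numpy as np

    def diagonal_scatter_2d(x, src, offset=0):
        n, m = x.shape
        # Ones exactly where src lands: diag_embed(ones_like(src)).
        mask = np.eye(n, m, k=offset, dtype=x.dtype)
        # diag_embed(src): src written onto the chosen diagonal of zeros.
        embedded = np.zeros_like(x)
        idx = np.arange(len(src))
        embedded[idx + max(0, -offset), idx + max(0, offset)] = src
        # input + diag_embed(src) - input * diag_embed(ones_like(src))
        return x + embedded - x * mask

    print(diagonal_scatter_2d(np.zeros((3, 3)), np.ones(2), offset=-1))
    # same layout as the docstring example above, which reaches the lower
    # diagonal by swapping dim1 and dim2 instead of negating offset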
  def lstsq(input, A):
  r"""
  Computes the solutions of the least squares and minimum norm problems of full-rank
@@ -6405,6 +6989,9 @@ def lstsq(input, A):
  ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([[2,1,5],[3,5,1],[1,1,1]]),mindspore.float32)
  >>> a = Tensor(np.array([[10,5],[15,8],[7,4]]),mindspore.float32)
  >>> output = ops.lstsq(x, a)
@@ -6449,6 +7036,9 @@ def mvlgamma(input, p):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([[3, 4, 5], [4, 2, 6]]), mindspore.float32)
  >>> y = ops.mvlgamma(x, p=3)
  >>> print(y)
@@ -6491,18 +7081,19 @@ def argwhere(input):

  def column_stack(tensors):
  """
- Stacks 1-D tensors as columns into a 2-D tensor. 2-D tensors are stacked as-is,
- like ops.hstack.
+ Stacks 1-D tensors as columns into a 2-D tensor. Tensors of other dimensions are
+ stacked as-is, like :func:`mindspore.ops.hstack`.

  Args:
- tensors (Union[Tensor, tuple, list]): A sequence of 1-D or 2-D tensors. All
+ tensors (Union[tuple[Tensor], list[Tensor]]): A sequence of tensors. All
  of them must have the same shape except the axis to be concatenated.

  Returns:
  2-D Tensor, formed by stacking the given tensors.

  Raises:
- TypeError: If `tensors` is not Tensor, list or tuple.
+ TypeError: If `tensors` is not a list or tuple.
+ TypeError: If an element in `tensors` is not a Tensor.
  ValueError: If `tensors` is empty.

  Supported Platforms:
@@ -6519,11 +7110,13 @@ def column_stack(tensors):
  [1 2]]
  """
  if not isinstance(tensors, (list, tuple)):
- raise TypeError(f"For column_stack, the input must be list or tuple or tensor, but got {type(tensors)}.")
+ raise TypeError(f"For column_stack, the input must be a list or tuple of tensors, but got {type(tensors)}.")

  trans_x = ()
  _expand_dims = _get_cache_prim(P.ExpandDims)()
  for tensor in tensors:
+ if not isinstance(tensor, Tensor):
+ raise TypeError(f"For column_stack, each input element must be a Tensor, but got {type(tensor)}.")
  if tensor.ndim < 1:
  tensor = _expand_dims(tensor, 0)
  if tensor.ndim == 1:
@@ -6531,7 +7124,7 @@ def column_stack(tensors):
  trans_x += (tensor,)
  if not trans_x:
  raise ValueError(f"For column_stack, the input must have at least 1 tensor, but got 0.")
- _concat = _get_cache_prim(P.Concat)(-1)
+ _concat = _get_cache_prim(P.Concat)(1)
  return _concat(trans_x)


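A note on the `Concat` axis fix above: now that `column_stack` accepts tensors of any rank, concatenating along axis `-1` would join a higher-rank input on its last axis instead of its columns; axis `1` pins the join to the column dimension. The difference, sketched with NumPy (shapes are illustrative):

    import numpy as np

    a, b = np.zeros((2, 3, 4)), np.zeros((2, 3, 4))
    # axis=1 joins the column dimension, as column_stack intends:
    print(np.concatenate((a, b), axis=1).shape)   # (2, 6, 4)
    # axis=-1 would silently join the trailing axis instead:
    print(np.concatenate((a, b), axis=-1).shape)  # (2, 3, 8)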
@@ -6542,7 +7135,7 @@ def hstack(tensors):
  where it concatenates along the first axis.

  Args:
- tensors (Union[Tensor, tuple, list]): A sequence of 1-D or 2-D tensors. The
+ tensors (Union[tuple[Tensor], list[Tensor]]): A sequence of tensors. The
  tensors must have the same shape along all but the second axis, except
  1-D tensors which can be any length.

@@ -6550,7 +7143,8 @@ def hstack(tensors):
  Stacked Tensor, formed by stacking the given tensors.

  Raises:
- TypeError: If `tensors` is not Tensor, list or tuple.
+ TypeError: If `tensors` is not a list or tuple.
+ TypeError: If an element in `tensors` is not a Tensor.
  ValueError: If `tensors` is empty.

  Supported Platforms:
@@ -6569,6 +7163,8 @@ def hstack(tensors):

  tuple_of_tensor = ()
  for tensor in tensors:
+ if not isinstance(tensor, Tensor):
+ raise TypeError(f"For hstack, each input element must be a Tensor, but got {type(tensor)}.")
  if tensor.ndim < 1:
  tensor = expand_dims_(tensor, 0)
  tuple_of_tensor += (tensor,)
@@ -6588,7 +7184,7 @@ def _check_axis_valid(axis, ndim):
  to the built-in operator (non-negative, int or tuple).
  """
  if axis is None:
- axis = F.make_range(ndim)
+ axis = ops.make_range(ndim)
  return axis
  if isinstance(axis, (tuple, list)):
  axis = tuple(map(lambda x: _check_check_axis_in_range(x, ndim), axis))
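For context, `_check_axis_valid` normalizes whatever the caller passed (None, an int, or a sequence) into the non-negative tuple the downstream primitives expect. A rough pure-Python equivalent, with the range checks elided (`check_axis_valid_py` is a hypothetical name):

    def check_axis_valid_py(axis, ndim):
        # None means "use every axis".
        if axis is None:
            return tuple(range(ndim))
        # Sequences and ints are wrapped into [0, ndim); negative axes
        # count from the end, as in NumPy. The real helper also raises
        # on out-of-range values, which this sketch omits.
        if isinstance(axis, (tuple, list)):
            return tuple(a % ndim for a in axis)
        return (axis % ndim,)

    print(check_axis_valid_py(-1, 3))    # (2,)
    print(check_axis_valid_py(None, 3))  # (0, 1, 2)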
@@ -6630,16 +7226,17 @@ def movedim(x, source, destination):

  Args:
  x (Tensor): The tensor array whose axes should be reordered.
+ The dimension of `x` must not be 0.
  source (Union[int, sequence[int]]): Original positions of the
- axis to move. These must be unique.
+ axes to move. The lengths of `source` and `destination` must be the same.
  destination (Union[int, sequence[int]]): Destination positions
- for each of the original axis. These must also be unique.
+ for each of the original axes. The lengths of `source` and `destination` must be the same.

  Returns:
  Tensor, array with moved axes.

  Raises:
- ValueError: If axis are out of the range of `[-a.ndim, a.ndim)`, or
+ ValueError: If the axes are out of the range of `[-x.ndim, x.ndim)`, or
  if the axes contain duplicates.

  Supported Platforms:
@@ -6661,7 +7258,7 @@ def movedim(x, source, destination):
  >>> print(output.shape)
  (4, 3, 5)
  """
- ndim = F.rank(x)
+ ndim = ops.rank(x)
  source = _check_axis_valid(source, ndim)
  destination = _check_axis_valid(destination, ndim)
  if len(source) != len(destination):
@@ -6693,7 +7290,7 @@ def moveaxis(x, source, destination):
  return movedim(x, source, destination)


- @constexpr
+ @_primexpr
  def _check_swapaxes_axis(axes, ndim):
  return validator.check_swapaxes_axis(axes, ndim)

@@ -6725,7 +7322,7 @@ def swapaxes(input, axis0, axis1):
  >>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
  >>> output = ops.swapaxes(input, 0, 2)
  >>> print(output.shape)
- (4,3,2)
+ (4, 3, 2)
  '''
  if not isinstance(input, Tensor):
  raise TypeError(f'For ops.swapaxes, parameter `input` must be Tensor, but got {type(input)}')
@@ -6736,7 +7333,7 @@ def swapaxes(input, axis0, axis1):
  if axis0 > axis1:
  axis0, axis1 = axis1, axis0

- perm = F.make_range(0, input.ndim)
+ perm = ops.make_range(0, input.ndim)
  if axis1 + 1 < input.ndim:
  new_perm = perm[0:axis0] + perm[axis1:axis1 + 1] + \
  perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1] + perm[axis1 + 1:]
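The tuple-slice arithmetic above assembles the transpose permutation without any mutation, which keeps the construction foldable in graph mode. A pure-Python sketch of the branch shown (assuming, as the code guarantees at this point, `axis0 < axis1` and `axis1 + 1 < input.ndim`):

    def build_perm(ndim, axis0, axis1):
        # Swap axis0 and axis1 by splicing tuple slices together.
        perm = tuple(range(ndim))
        return (perm[0:axis0] + perm[axis1:axis1 + 1] +
                perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1] +
                perm[axis1 + 1:])

    print(build_perm(4, 1, 2))  # (0, 2, 1, 3)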
@@ -6775,9 +7372,9 @@ def swapdims(input, dim0, dim1):
  >>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
  >>> output = ops.swapdims(input, 0, 2)
  >>> print(output.shape)
- (4,3,2)
+ (4, 3, 2)
  '''
- return F.swapaxes(input, dim0, dim1)
+ return ops.swapaxes(input, dim0, dim1)


  @constexpr
@@ -6786,9 +7383,9 @@ def _check_is_int(arg_value, arg_name, op_name):
  return arg_value


- @constexpr
+ @_primexpr
  def _check_positive_int(arg_value, arg_name, op_name):
- arg_value = validator.check_positive_int(arg_value, arg_name, op_name)
+ arg_value = validator.check_int_range(arg_value, 0, 2147483647, validator.INC_RIGHT, arg_name, op_name)
  return arg_value

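The replacement check above trades "any positive int" for an explicit int32 bound. Assuming `INC_RIGHT` denotes the left-open, right-closed interval, the accepted range is `(0, 2147483647]`, roughly:

    INT32_MAX = 2 ** 31 - 1

    def check_positive_int32(value):
        # Sketch of check_int_range(value, 0, INT32_MAX, INC_RIGHT, ...):
        # left-open, right-closed, so 0 itself is still rejected.
        if not 0 < value <= INT32_MAX:
            raise ValueError(f"expected an int in (0, {INT32_MAX}], got {value}")
        return value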
@@ -6805,7 +7402,7 @@ def _cal_repeat_dims(x_rank, rep, expand_axis):
  return tuple(rep_dims)


- @constexpr
+ @_primexpr
  def _cal_reshape(x_shape, rep, axis):
  x_reshape = list(x_shape)
  x_reshape[axis] *= rep
@@ -6819,9 +7416,9 @@ def repeat_interleave(input, repeats, axis=None):
  Args:
  input (Tensor): The tensor to repeat values for. Must be of type: float16,
  float32, int8, uint8, int16, int32, or int64.
- repeats (int): The number of times to repeat, must be positive.
- axis (int, optional): The axis along which to repeat, default: None. if dims is None, the input Tensor will be
- flattened and the output will alse be flattened.
+ repeats (Union[int, tuple, list, Tensor]): The number of times to repeat, must be positive.
+ axis (int, optional): The axis along which to repeat. Default: ``None``. If `axis` is None,
+ the input Tensor will be flattened and the output will also be flattened.

  Returns:
  One tensor with values repeated along the specified axis. If input has shape
@@ -6832,6 +7429,9 @@ def repeat_interleave(input, repeats, axis=None):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> input = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
  >>> output = ops.repeat_interleave(input, repeats=2, axis=0)
  >>> print(output)
@@ -6843,7 +7443,10 @@ def repeat_interleave(input, repeats, axis=None):
  if axis is None:
  input = input.reshape(-1)
  axis = 0
- return repeat_elements(input, repeats, axis)
+ if isinstance(repeats, Tensor):
+ repeats = TensorToList()(repeats)
+ output = input.repeat(repeats, axis)
+ return output

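With the dispatch change above, `repeats` may now vary per element: a Tensor is first lowered to a Python list via `TensorToList` and handed to `Tensor.repeat`. A hedged usage sketch (values illustrative; semantics follow NumPy-style `repeat`):

    import mindspore as ms

    x = ms.Tensor([1, 2, 3], ms.int32)
    # Scalar repeats: every element twice -> [1 1 2 2 3 3]
    print(ms.ops.repeat_interleave(x, 2, axis=0))
    # Per-element repeats: 1 once, 2 twice, 3 three times -> [1 2 2 3 3 3]
    print(ms.ops.repeat_interleave(x, ms.Tensor([1, 2, 3], ms.int32), axis=0))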
  def repeat_elements(x, rep, axis=0):
@@ -6854,7 +7457,7 @@ def repeat_elements(x, rep, axis=0):
  x (Tensor): The tensor to repeat values for. Must be of type: float16,
  float32, int8, uint8, int16, int32, or int64.
  rep (int): The number of times to repeat, must be positive.
- axis (int): The axis along which to repeat, default 0.
+ axis (int): The axis along which to repeat. Default: 0.

  Returns:
  One tensor with values repeated along the specified axis. If x has shape
@@ -6865,6 +7468,9 @@ def repeat_elements(x, rep, axis=0):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> # case 1 : repeat on axis 0
  >>> x = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
  >>> output = ops.repeat_elements(x, rep = 2, axis = 0)
@@ -6880,7 +7486,7 @@ def repeat_elements(x, rep, axis=0):
  [[0 0 1 1 2 2]
  [3 3 4 4 5 5]]
  """
- const_utils.check_type_valid(F.dtype(x), mstype.number_type, 'input x')
+ const_utils.check_type_valid(ops.dtype(x), mstype.number_type, 'input x')
  rep = _check_positive_int(rep, "rep", "repeat_elements")
  axis = _check_is_int(axis, "axis", "repeat_elements")
  shape_op = P.Shape()
@@ -6922,7 +7528,7 @@ def sequence_mask(lengths, maxlen=None):
  lengths (Tensor): Tensor to calculate the mask for. All values in this tensor should be
  less than or equal to `maxlen`. Values greater than `maxlen` will be treated as `maxlen`.
  maxlen (int): size of the last dimension of returned tensor. Must be positive and same
- type as elements in `lengths`. Default is None.
+ type as elements in `lengths`. Default is ``None`` .

  Returns:
  One mask tensor of shape `lengths.shape + (maxlen,)` .
@@ -6936,6 +7542,8 @@ def sequence_mask(lengths, maxlen=None):
  ``GPU`` ``CPU``

  Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> # case 1: When maxlen is assigned
  >>> x = Tensor(np.array([1, 2, 3, 4]))
  >>> output = ops.sequence_mask(x, 5)
@@ -6970,7 +7578,7 @@ def sequence_mask(lengths, maxlen=None):
  to_tensor_op = P.ScalarToTensor()
  shape_op = P.Shape()

- const_utils.check_type_valid(F.dtype(lengths), [mstype.int64, mstype.int32], 'lengths')
+ const_utils.check_type_valid(ops.dtype(lengths), [mstype.int64, mstype.int32], 'lengths')
  _check_sequence_mask_input_len(shape_op(lengths), "sequence_mask")

  if maxlen is None:
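Stripped of the primitives, a sequence mask is a broadcast comparison of a `[0, maxlen)` range row against the expanded lengths. A NumPy rendering of that construction (illustrative only, not the MindSpore kernel):

    import numpy as np

    def sequence_mask_np(lengths, maxlen):
        # mask[..., j] is True exactly when j < lengths[...]
        return np.arange(maxlen) < np.expand_dims(lengths, -1)

    print(sequence_mask_np(np.array([1, 2, 3, 4]), 5))
    # row i keeps its first lengths[i] positions True, the rest False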
@@ -6996,6 +7604,35 @@ def top_k(input_x, k, sorted=True):
  return top_k_(input_x, k)


+ def deepcopy(input_x):
+ """
+ Returns a deep copy of the input tensor.
+
+ Args:
+ input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
+
+ Returns:
+ Tensor, a deep copy of `input_x`.
+
+ Raises:
+ TypeError: If `input_x` is not a Tensor.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
+ >>> output = ops.deepcopy(input)
+ >>> print(output)
+ [[0 1]
+ [2 1]]
+ """
+ _deepcopy = _get_cache_prim(P.Identity)()
+ return _deepcopy(input_x)
+
+
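`ops.deepcopy` rides on the `Identity` primitive, which materializes a fresh tensor rather than aliasing the input object. A hedged contrast with plain assignment (values illustrative):

    import mindspore as ms

    a = ms.Tensor([1, 2, 3], ms.int32)
    alias = a                  # plain assignment: the same Python object
    copy = ms.ops.deepcopy(a)  # Identity: an independent tensor
    print(alias is a, copy is a)  # True False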
  __all__ = [
  'unique',
  'unique_with_pad',
@@ -7004,7 +7641,6 @@ __all__ = [
  'matrix_band_part',
  'padding',
  'fill',
- 'fill_',
  'fills',
  'tile',
  'size',
@@ -7031,6 +7667,8 @@ __all__ = [
  'tensor_slice',
  'strided_slice',
  'slice',
+ 'slice_scatter',
+ 'select_scatter',
  'cat',
  'concat',
  'stack',
@@ -7085,6 +7723,7 @@ __all__ = [
  'tril',
  'triu',
  'nonzero',
+ 'is_nonzero',
  'matrix_diag',
  'matrix_diag_part',
  'matrix_set_diag',
@@ -7112,6 +7751,7 @@ __all__ = [
  'fold',
  'unfold',
  'diagonal',
+ 'diagonal_scatter',
  'lstsq',
  'mvlgamma',
  'swapaxes',
@@ -7128,6 +7768,7 @@ __all__ = [
  'moveaxis',
  'aminmax',
  'sort',
- 'top_k'
+ 'top_k',
+ 'deepcopy'
  ]
  __all__.sort()