mindspore 2.0.0rc1__cp38-none-any.whl → 2.2.0__cp38-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mindspore might be problematic.

Files changed (870)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Third_Party_Open_Source_Software_Notice +2 -2
  3. mindspore/__init__.py +5 -2
  4. mindspore/_akg/akg/build_module.py +5 -6
  5. mindspore/_akg/akg/composite/build_module.py +49 -16
  6. mindspore/_akg/akg/composite/split_stitch.py +10 -11
  7. mindspore/_akg/akg/config/repository.json +195 -0
  8. mindspore/_akg/akg/global_configs.py +5 -1
  9. mindspore/_akg/akg/ms/info_version_adapt.py +67 -1
  10. mindspore/_akg/akg/tvm/api.py +4 -3
  11. mindspore/_akg/akg/tvm/autotvm/__init__.py +1 -2
  12. mindspore/_akg/akg/tvm/autotvm/graph_tuner/base_graph_tuner.py +1 -5
  13. mindspore/_akg/akg/tvm/autotvm/measure/__init__.py +1 -1
  14. mindspore/_akg/akg/tvm/autotvm/measure/measure.py +1 -10
  15. mindspore/_akg/akg/tvm/autotvm/measure/measure_methods.py +1 -372
  16. mindspore/_akg/akg/tvm/build_module.py +16 -1
  17. mindspore/_akg/akg/tvm/contrib/graph_runtime.py +0 -53
  18. mindspore/_akg/akg/tvm/hybrid/parser.py +7 -6
  19. mindspore/_akg/akg/tvm/ir_builder.py +1 -1
  20. mindspore/_akg/akg/tvm/module.py +1 -2
  21. mindspore/_akg/akg/tvm/stmt.py +2 -2
  22. mindspore/_akg/akg/utils/composite_op_helper.py +9 -10
  23. mindspore/_akg/akg/utils/kernel_exec.py +58 -260
  24. mindspore/_akg/akg/utils/op_dsl.py +17 -1
  25. mindspore/_akg/akg/utils/result_analysis.py +4 -24
  26. mindspore/_akg/akg/utils/tbe_codegen_utils.py +198 -0
  27. mindspore/_c_dataengine.cpython-38-aarch64-linux-gnu.so +0 -0
  28. mindspore/_c_expression.cpython-38-aarch64-linux-gnu.so +0 -0
  29. mindspore/_c_mindrecord.cpython-38-aarch64-linux-gnu.so +0 -0
  30. mindspore/_check_jit_forbidden_api.py +5 -1
  31. mindspore/_checkparam.py +79 -62
  32. mindspore/_extends/graph_kernel/__init__.py +0 -1
  33. mindspore/_extends/graph_kernel/model/graph_split.py +2 -0
  34. mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
  35. mindspore/_extends/graph_kernel/splitter.py +1 -9
  36. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +128 -21
  37. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +2 -2
  38. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
  39. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +18 -13
  40. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +13 -9
  41. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
  42. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
  43. mindspore/_extends/parse/__init__.py +19 -17
  44. mindspore/_extends/parse/namespace.py +7 -36
  45. mindspore/_extends/parse/parser.py +375 -189
  46. mindspore/_extends/parse/resources.py +36 -41
  47. mindspore/_extends/parse/standard_method.py +350 -245
  48. mindspore/_extends/parse/trope.py +2 -12
  49. mindspore/_extends/remote/kernel_build_server.py +24 -7
  50. mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
  51. mindspore/_install_custom.py +43 -0
  52. mindspore/_mindspore_offline_debug.cpython-38-aarch64-linux-gnu.so +0 -0
  53. mindspore/amp.py +85 -19
  54. mindspore/bin/cache_admin +0 -0
  55. mindspore/bin/cache_server +0 -0
  56. mindspore/boost/base.py +2 -2
  57. mindspore/boost/boost.py +27 -32
  58. mindspore/boost/boost_cell_wrapper.py +37 -13
  59. mindspore/boost/grad_accumulation.py +1 -1
  60. mindspore/boost/grad_freeze.py +34 -6
  61. mindspore/boost/group_loss_scale_manager.py +15 -14
  62. mindspore/boost/less_batch_normalization.py +28 -3
  63. mindspore/common/__init__.py +15 -11
  64. mindspore/common/_auto_dynamic.py +68 -0
  65. mindspore/common/_jit_fallback_utils.py +111 -0
  66. mindspore/common/_register_for_adapter.py +17 -5
  67. mindspore/common/_register_for_tensor.py +2 -2
  68. mindspore/common/_stub_tensor.py +18 -15
  69. mindspore/common/_utils.py +31 -7
  70. mindspore/common/api.py +269 -101
  71. mindspore/common/auto_dynamic_shape.py +498 -0
  72. mindspore/common/dtype.py +61 -21
  73. mindspore/common/dump.py +9 -7
  74. mindspore/common/initializer.py +106 -76
  75. mindspore/common/jit_config.py +35 -14
  76. mindspore/common/lazy_inline.py +187 -0
  77. mindspore/common/mindir_util.py +101 -0
  78. mindspore/common/mutable.py +10 -13
  79. mindspore/common/parameter.py +246 -55
  80. mindspore/common/seed.py +13 -7
  81. mindspore/common/sparse_tensor.py +29 -33
  82. mindspore/common/tensor.py +907 -251
  83. mindspore/communication/__init__.py +7 -4
  84. mindspore/communication/_comm_helper.py +84 -4
  85. mindspore/communication/management.py +160 -88
  86. mindspore/config/op_info.config +99 -75
  87. mindspore/config/super_bar_config.json +36 -4
  88. mindspore/context.py +526 -219
  89. mindspore/dataset/__init__.py +9 -46
  90. mindspore/dataset/audio/__init__.py +4 -19
  91. mindspore/dataset/audio/transforms.py +545 -233
  92. mindspore/dataset/audio/utils.py +21 -18
  93. mindspore/dataset/callback/ds_callback.py +42 -13
  94. mindspore/dataset/core/config.py +158 -100
  95. mindspore/dataset/core/validator_helpers.py +1 -63
  96. mindspore/dataset/debug/debug_hook.py +45 -13
  97. mindspore/dataset/debug/pre_defined_hook.py +5 -5
  98. mindspore/dataset/engine/__init__.py +0 -5
  99. mindspore/dataset/engine/cache_client.py +38 -15
  100. mindspore/dataset/engine/datasets.py +615 -278
  101. mindspore/dataset/engine/datasets_audio.py +154 -283
  102. mindspore/dataset/engine/datasets_standard_format.py +104 -116
  103. mindspore/dataset/engine/datasets_text.py +443 -326
  104. mindspore/dataset/engine/datasets_user_defined.py +251 -164
  105. mindspore/dataset/engine/datasets_vision.py +839 -1443
  106. mindspore/dataset/engine/iterators.py +11 -4
  107. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +7 -3
  108. mindspore/dataset/engine/obs/util.py +3 -0
  109. mindspore/dataset/engine/offload.py +6 -6
  110. mindspore/dataset/engine/queue.py +15 -14
  111. mindspore/dataset/engine/samplers.py +39 -23
  112. mindspore/dataset/engine/serializer_deserializer.py +22 -6
  113. mindspore/dataset/engine/validators.py +21 -331
  114. mindspore/dataset/text/__init__.py +5 -33
  115. mindspore/dataset/text/transforms.py +334 -165
  116. mindspore/dataset/text/utils.py +215 -145
  117. mindspore/dataset/transforms/__init__.py +1 -1
  118. mindspore/dataset/transforms/c_transforms.py +3 -2
  119. mindspore/dataset/transforms/py_transforms_util.py +40 -12
  120. mindspore/dataset/transforms/transforms.py +174 -71
  121. mindspore/dataset/utils/browse_dataset.py +25 -17
  122. mindspore/dataset/utils/line_reader.py +24 -21
  123. mindspore/dataset/vision/__init__.py +5 -26
  124. mindspore/dataset/vision/c_transforms.py +177 -165
  125. mindspore/dataset/vision/py_transforms.py +114 -119
  126. mindspore/dataset/vision/py_transforms_util.py +54 -51
  127. mindspore/dataset/vision/transforms.py +1127 -381
  128. mindspore/dataset/vision/utils.py +54 -38
  129. mindspore/dataset/vision/validators.py +12 -2
  130. mindspore/experimental/map_parameter.py +38 -4
  131. mindspore/{dataset/datapreprocess → experimental/optim}/__init__.py +14 -4
  132. mindspore/experimental/optim/adam.py +192 -0
  133. mindspore/experimental/optim/adamw.py +181 -0
  134. mindspore/experimental/optim/lr_scheduler.py +1427 -0
  135. mindspore/experimental/optim/optimizer.py +252 -0
  136. mindspore/experimental/optim/sgd.py +147 -0
  137. mindspore/gen_ops.py +273 -0
  138. mindspore/include/OWNERS +1 -2
  139. mindspore/include/api/context.h +21 -1
  140. mindspore/include/api/data_type.h +2 -1
  141. mindspore/include/api/graph.h +0 -15
  142. mindspore/include/api/kernel.h +2 -0
  143. mindspore/include/api/kernel_api.h +37 -12
  144. mindspore/include/api/model.h +29 -42
  145. mindspore/include/api/model_group.h +14 -3
  146. mindspore/include/api/model_parallel_runner.h +18 -2
  147. mindspore/include/api/serialization.h +26 -0
  148. mindspore/include/api/status.h +1 -0
  149. mindspore/include/api/types.h +38 -4
  150. mindspore/include/c_api/ms/abstract.h +67 -0
  151. mindspore/include/c_api/ms/attribute.h +197 -0
  152. mindspore/include/c_api/ms/base/handle_types.h +43 -0
  153. mindspore/include/c_api/ms/base/macros.h +32 -0
  154. mindspore/include/c_api/ms/base/status.h +33 -0
  155. mindspore/include/c_api/ms/base/types.h +282 -0
  156. mindspore/include/c_api/ms/context.h +102 -0
  157. mindspore/include/c_api/ms/graph.h +160 -0
  158. mindspore/include/c_api/ms/node.h +606 -0
  159. mindspore/include/c_api/ms/tensor.h +161 -0
  160. mindspore/include/c_api/ms/value.h +84 -0
  161. mindspore/include/c_api/status_c.h +3 -0
  162. mindspore/include/dataset/constants.h +6 -12
  163. mindspore/include/dataset/execute.h +23 -13
  164. mindspore/include/dataset/text.h +26 -26
  165. mindspore/include/dataset/transforms.h +25 -31
  166. mindspore/include/dataset/vision.h +60 -60
  167. mindspore/include/dataset/vision_ascend.h +5 -6
  168. mindspore/include/dataset/vision_lite.h +17 -17
  169. mindspore/include/mindapi/base/format.h +0 -1
  170. mindspore/include/mindapi/base/type_id.h +2 -1
  171. mindspore/include/mindapi/base/types.h +5 -1
  172. mindspore/lib/libdnnl.so.2 +0 -0
  173. mindspore/lib/libjemalloc.so.2 +0 -0
  174. mindspore/lib/libmindspore.so +0 -0
  175. mindspore/lib/libmindspore_backend.so +0 -0
  176. mindspore/lib/libmindspore_common.so +0 -0
  177. mindspore/lib/libmindspore_core.so +0 -0
  178. mindspore/lib/libmindspore_glog.so.0 +0 -0
  179. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  180. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  181. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  182. mindspore/lib/libmindspore_shared_lib.so +0 -0
  183. mindspore/lib/libmpi_adapter.so +0 -0
  184. mindspore/lib/libnnacl.so +0 -0
  185. mindspore/lib/libopencv_core.so.4.5 +0 -0
  186. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  187. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  188. mindspore/lib/libps_cache.so +0 -0
  189. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
  190. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  191. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +9000 -0
  192. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  193. mindspore/lib/plugin/ascend/libakg.so +0 -0
  194. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  195. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  196. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  197. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  198. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  199. mindspore/lib/plugin/cpu/libakg.so +0 -0
  200. mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
  201. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  202. mindspore/log.py +9 -6
  203. mindspore/mindrecord/filereader.py +33 -4
  204. mindspore/mindrecord/filewriter.py +70 -35
  205. mindspore/mindrecord/mindpage.py +40 -34
  206. mindspore/mindrecord/shardreader.py +1 -1
  207. mindspore/mindrecord/shardsegment.py +1 -1
  208. mindspore/mindrecord/tools/cifar100_to_mr.py +25 -18
  209. mindspore/mindrecord/tools/cifar10_to_mr.py +25 -18
  210. mindspore/mindrecord/tools/csv_to_mr.py +29 -13
  211. mindspore/mindrecord/tools/imagenet_to_mr.py +24 -10
  212. mindspore/mindrecord/tools/mnist_to_mr.py +24 -11
  213. mindspore/mindrecord/tools/tfrecord_to_mr.py +31 -26
  214. mindspore/nn/cell.py +463 -169
  215. mindspore/nn/dynamic_lr.py +47 -43
  216. mindspore/nn/layer/activation.py +225 -82
  217. mindspore/nn/layer/basic.py +121 -79
  218. mindspore/nn/layer/channel_shuffle.py +21 -21
  219. mindspore/nn/layer/combined.py +33 -26
  220. mindspore/nn/layer/container.py +277 -22
  221. mindspore/nn/layer/conv.py +441 -304
  222. mindspore/nn/layer/dense.py +19 -13
  223. mindspore/nn/layer/embedding.py +62 -49
  224. mindspore/nn/layer/flash_attention.py +264 -0
  225. mindspore/nn/layer/image.py +50 -39
  226. mindspore/nn/layer/math.py +62 -51
  227. mindspore/nn/layer/normalization.py +219 -167
  228. mindspore/nn/layer/padding.py +58 -70
  229. mindspore/nn/layer/pooling.py +334 -287
  230. mindspore/nn/layer/rnn_cells.py +53 -38
  231. mindspore/nn/layer/rnns.py +59 -56
  232. mindspore/nn/layer/thor_layer.py +52 -44
  233. mindspore/nn/layer/timedistributed.py +6 -4
  234. mindspore/nn/layer/transformer.py +284 -164
  235. mindspore/nn/learning_rate_schedule.py +34 -25
  236. mindspore/nn/loss/__init__.py +3 -2
  237. mindspore/nn/loss/loss.py +554 -311
  238. mindspore/nn/optim/ada_grad.py +12 -9
  239. mindspore/nn/optim/adadelta.py +14 -11
  240. mindspore/nn/optim/adafactor.py +19 -16
  241. mindspore/nn/optim/adam.py +62 -47
  242. mindspore/nn/optim/adamax.py +13 -10
  243. mindspore/nn/optim/adasum.py +12 -8
  244. mindspore/nn/optim/asgd.py +10 -9
  245. mindspore/nn/optim/ftrl.py +20 -17
  246. mindspore/nn/optim/lamb.py +16 -12
  247. mindspore/nn/optim/lars.py +8 -6
  248. mindspore/nn/optim/lazyadam.py +25 -20
  249. mindspore/nn/optim/momentum.py +10 -7
  250. mindspore/nn/optim/optimizer.py +61 -9
  251. mindspore/nn/optim/proximal_ada_grad.py +14 -13
  252. mindspore/nn/optim/rmsprop.py +17 -13
  253. mindspore/nn/optim/rprop.py +30 -17
  254. mindspore/nn/optim/sgd.py +40 -23
  255. mindspore/nn/optim/thor.py +24 -26
  256. mindspore/nn/probability/bijector/bijector.py +11 -11
  257. mindspore/nn/probability/bijector/exp.py +1 -1
  258. mindspore/nn/probability/bijector/gumbel_cdf.py +3 -3
  259. mindspore/nn/probability/bijector/invert.py +1 -1
  260. mindspore/nn/probability/bijector/power_transform.py +29 -29
  261. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  262. mindspore/nn/probability/bijector/softplus.py +5 -5
  263. mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +4 -2
  264. mindspore/nn/probability/bnn_layers/conv_variational.py +13 -13
  265. mindspore/nn/probability/bnn_layers/dense_variational.py +12 -12
  266. mindspore/nn/probability/bnn_layers/layer_distribution.py +9 -8
  267. mindspore/nn/probability/distribution/_utils/custom_ops.py +19 -3
  268. mindspore/nn/probability/distribution/_utils/utils.py +1 -1
  269. mindspore/nn/probability/distribution/bernoulli.py +9 -9
  270. mindspore/nn/probability/distribution/beta.py +8 -8
  271. mindspore/nn/probability/distribution/categorical.py +23 -15
  272. mindspore/nn/probability/distribution/cauchy.py +5 -6
  273. mindspore/nn/probability/distribution/distribution.py +3 -3
  274. mindspore/nn/probability/distribution/exponential.py +4 -4
  275. mindspore/nn/probability/distribution/gamma.py +10 -10
  276. mindspore/nn/probability/distribution/geometric.py +8 -8
  277. mindspore/nn/probability/distribution/gumbel.py +8 -9
  278. mindspore/nn/probability/distribution/half_normal.py +5 -5
  279. mindspore/nn/probability/distribution/laplace.py +5 -5
  280. mindspore/nn/probability/distribution/log_normal.py +12 -11
  281. mindspore/nn/probability/distribution/logistic.py +8 -8
  282. mindspore/nn/probability/distribution/normal.py +6 -5
  283. mindspore/nn/probability/distribution/poisson.py +10 -11
  284. mindspore/nn/probability/distribution/student_t.py +8 -9
  285. mindspore/nn/probability/distribution/transformed_distribution.py +5 -5
  286. mindspore/nn/probability/distribution/uniform.py +11 -11
  287. mindspore/nn/reinforcement/tensor_array.py +2 -2
  288. mindspore/nn/sparse/sparse.py +9 -9
  289. mindspore/nn/wrap/cell_wrapper.py +188 -63
  290. mindspore/nn/wrap/grad_reducer.py +21 -12
  291. mindspore/nn/wrap/loss_scale.py +136 -49
  292. mindspore/numpy/__init__.py +4 -4
  293. mindspore/numpy/array_creations.py +55 -56
  294. mindspore/numpy/array_ops.py +134 -35
  295. mindspore/numpy/logic_ops.py +66 -20
  296. mindspore/numpy/math_ops.py +142 -139
  297. mindspore/numpy/utils_const.py +2 -2
  298. mindspore/offline_debug/convert_async.py +2 -2
  299. mindspore/ops/_grad_experimental/__init__.py +7 -5
  300. mindspore/ops/_grad_experimental/grad_array_ops.py +231 -348
  301. mindspore/ops/{_grad → _grad_experimental}/grad_base.py +1 -33
  302. mindspore/ops/{_grad → _grad_experimental}/grad_comm_ops.py +25 -13
  303. mindspore/ops/{_grad/__init__.py → _grad_experimental/grad_debug_ops.py} +15 -7
  304. mindspore/ops/{_grad → _grad_experimental}/grad_implementations.py +17 -11
  305. mindspore/ops/_grad_experimental/grad_inner_ops.py +33 -52
  306. mindspore/ops/_grad_experimental/grad_math_ops.py +151 -1224
  307. mindspore/ops/_grad_experimental/grad_nn_ops.py +141 -414
  308. mindspore/ops/{_grad → _grad_experimental}/grad_quant_ops.py +10 -6
  309. mindspore/ops/_grad_experimental/grad_sparse.py +317 -2
  310. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -13
  311. mindspore/ops/{_grad → _grad_experimental}/taylor_rule.py +1 -1
  312. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
  313. mindspore/ops/_op_impl/_custom_op/flash_attention/__init__.py +0 -0
  314. mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +406 -0
  315. mindspore/{_extends/graph_kernel/expanders/complex/__init__.py → ops/_op_impl/_custom_op/flash_attention/constants.py} +27 -8
  316. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +467 -0
  317. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +563 -0
  318. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +193 -0
  319. mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +435 -0
  320. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
  321. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +45 -0
  322. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +67 -0
  323. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +62 -0
  324. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
  325. mindspore/ops/_op_impl/aicpu/__init__.py +41 -1
  326. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
  327. mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
  328. mindspore/ops/_op_impl/aicpu/cast.py +52 -0
  329. mindspore/ops/_op_impl/aicpu/coalesce.py +2 -0
  330. mindspore/ops/_op_impl/aicpu/col2im.py +3 -1
  331. mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
  332. mindspore/ops/_op_impl/aicpu/dropout_genmask.py +6 -0
  333. mindspore/ops/_op_impl/aicpu/eps.py +32 -0
  334. mindspore/ops/_op_impl/aicpu/eye.py +4 -4
  335. mindspore/ops/_op_impl/aicpu/fft_with_size.py +6 -0
  336. mindspore/ops/_op_impl/aicpu/fill_diagonal.py +5 -0
  337. mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
  338. mindspore/ops/_op_impl/aicpu/im2col.py +3 -5
  339. mindspore/ops/_op_impl/aicpu/lgamma.py +1 -0
  340. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
  341. mindspore/ops/_op_impl/aicpu/lu.py +39 -0
  342. mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
  343. mindspore/ops/_op_impl/aicpu/masked_scatter.py +1 -0
  344. mindspore/ops/_op_impl/aicpu/masked_select_grad.py +3 -0
  345. mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
  346. mindspore/ops/_op_impl/aicpu/matrix_power.py +6 -1
  347. mindspore/ops/_op_impl/aicpu/median.py +1 -0
  348. mindspore/ops/_op_impl/aicpu/multinomial.py +9 -9
  349. mindspore/ops/_op_impl/aicpu/not_equal.py +0 -5
  350. mindspore/ops/_op_impl/aicpu/pad_v3.py +3 -1
  351. mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +2 -0
  352. mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
  353. mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
  354. mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
  355. mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
  356. mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
  357. mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +0 -1
  358. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +0 -6
  359. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +0 -7
  360. mindspore/ops/_op_impl/aicpu/scatter_nd.py +2 -0
  361. mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
  362. mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
  363. mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
  364. mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
  365. mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -4
  366. mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -4
  367. mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
  368. mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
  369. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
  370. mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
  371. mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
  372. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +14 -6
  373. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +22 -8
  374. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +11 -6
  375. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +21 -10
  376. mindspore/ops/_op_impl/tbe/__init__.py +6 -4
  377. mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
  378. mindspore/ops/_op_impl/tbe/avg_pool.py +2 -2
  379. mindspore/ops/_op_impl/tbe/avg_pool_3d.py +3 -3
  380. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +4 -4
  381. mindspore/ops/_op_impl/tbe/avg_pool_ds.py +2 -2
  382. mindspore/ops/_op_impl/tbe/avg_pool_grad.py +3 -3
  383. mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +3 -3
  384. mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
  385. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +2 -2
  386. mindspore/ops/_op_impl/tbe/bn_infer.py +2 -2
  387. mindspore/ops/_op_impl/tbe/bn_infer_ds.py +3 -2
  388. mindspore/ops/_op_impl/tbe/broadcast_to.py +1 -1
  389. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +3 -3
  390. mindspore/ops/_op_impl/tbe/expand_dims.py +1 -1
  391. mindspore/ops/_op_impl/tbe/gather_v2.py +56 -0
  392. mindspore/ops/_op_impl/tbe/im2col.py +4 -4
  393. mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
  394. mindspore/ops/_op_impl/tbe/mem_set.py +38 -0
  395. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +3 -0
  396. mindspore/ops/_op_impl/tbe/scatter_nd_d.py +1 -1
  397. mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
  398. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +2 -2
  399. mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
  400. mindspore/ops/_primitive_cache.py +1 -1
  401. mindspore/ops/_tracefunc.py +241 -0
  402. mindspore/ops/_utils/utils.py +10 -2
  403. mindspore/ops/_vmap/vmap_array_ops.py +5 -3
  404. mindspore/ops/_vmap/vmap_base.py +5 -4
  405. mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
  406. mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
  407. mindspore/ops/_vmap/vmap_grad_nn_ops.py +11 -6
  408. mindspore/ops/_vmap/vmap_math_ops.py +5 -2
  409. mindspore/ops/_vmap/vmap_nn_ops.py +135 -11
  410. mindspore/ops/arg_dtype_cast.py +54 -0
  411. mindspore/ops/composite/__init__.py +7 -5
  412. mindspore/ops/composite/base.py +78 -34
  413. mindspore/ops/composite/math_ops.py +5 -695
  414. mindspore/ops/composite/multitype_ops/_compile_utils.py +403 -97
  415. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +28 -22
  416. mindspore/ops/composite/multitype_ops/add_impl.py +69 -7
  417. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  418. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  419. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -0
  420. mindspore/ops/composite/multitype_ops/div_impl.py +1 -0
  421. mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -0
  422. mindspore/ops/composite/multitype_ops/getitem_impl.py +48 -10
  423. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +2 -0
  424. mindspore/ops/composite/multitype_ops/greater_impl.py +2 -0
  425. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -0
  426. mindspore/ops/composite/multitype_ops/less_equal_impl.py +2 -0
  427. mindspore/ops/composite/multitype_ops/less_impl.py +2 -0
  428. mindspore/ops/composite/multitype_ops/logic_not_impl.py +2 -2
  429. mindspore/ops/composite/multitype_ops/mod_impl.py +1 -0
  430. mindspore/ops/composite/multitype_ops/mul_impl.py +1 -0
  431. mindspore/ops/composite/multitype_ops/negative_impl.py +1 -0
  432. mindspore/ops/composite/multitype_ops/not_in_impl.py +1 -0
  433. mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
  434. mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
  435. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -0
  436. mindspore/ops/composite/multitype_ops/setitem_impl.py +10 -7
  437. mindspore/ops/composite/multitype_ops/sub_impl.py +1 -0
  438. mindspore/ops/composite/multitype_ops/uadd_impl.py +2 -0
  439. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
  440. mindspore/ops/deprecated.py +304 -0
  441. mindspore/ops/function/__init__.py +41 -4
  442. mindspore/ops/function/array_func.py +1108 -467
  443. mindspore/ops/function/clip_func.py +94 -27
  444. mindspore/ops/function/debug_func.py +3 -1
  445. mindspore/ops/function/grad/grad_func.py +82 -73
  446. mindspore/ops/function/image_func.py +28 -12
  447. mindspore/ops/function/linalg_func.py +135 -39
  448. mindspore/ops/function/math_func.py +3779 -894
  449. mindspore/ops/function/nn_func.py +1584 -657
  450. mindspore/ops/function/parameter_func.py +13 -3
  451. mindspore/ops/function/random_func.py +247 -153
  452. mindspore/ops/function/sparse_func.py +14 -11
  453. mindspore/ops/function/sparse_unary_func.py +173 -47
  454. mindspore/ops/function/spectral_func.py +8 -4
  455. mindspore/ops/function/vmap_func.py +8 -7
  456. mindspore/ops/functional.py +47 -16
  457. mindspore/ops/op_info_register.py +346 -86
  458. mindspore/ops/operations/__init__.py +38 -22
  459. mindspore/ops/operations/_grad_ops.py +145 -149
  460. mindspore/ops/operations/_inner_ops.py +298 -56
  461. mindspore/ops/operations/_ms_kernel.py +3 -3
  462. mindspore/ops/operations/_quant_ops.py +24 -28
  463. mindspore/ops/operations/_rl_inner_ops.py +9 -7
  464. mindspore/ops/operations/_scalar_ops.py +115 -0
  465. mindspore/ops/operations/_sequence_ops.py +148 -10
  466. mindspore/ops/operations/_tensor_array.py +1 -1
  467. mindspore/ops/operations/_thor_ops.py +2 -2
  468. mindspore/ops/operations/array_ops.py +1239 -561
  469. mindspore/ops/operations/comm_ops.py +166 -90
  470. mindspore/ops/operations/control_ops.py +3 -3
  471. mindspore/ops/operations/custom_ops.py +124 -102
  472. mindspore/ops/operations/debug_ops.py +24 -11
  473. mindspore/ops/operations/image_ops.py +86 -71
  474. mindspore/ops/operations/inner_ops.py +18 -13
  475. mindspore/ops/operations/linalg_ops.py +30 -11
  476. mindspore/ops/operations/math_ops.py +1730 -435
  477. mindspore/ops/operations/nn_ops.py +1953 -943
  478. mindspore/ops/operations/other_ops.py +65 -43
  479. mindspore/ops/operations/random_ops.py +258 -98
  480. mindspore/ops/operations/rl_ops.py +4 -36
  481. mindspore/ops/operations/sparse_ops.py +38 -33
  482. mindspore/ops/operations/spectral_ops.py +8 -4
  483. mindspore/ops/primitive.py +66 -44
  484. mindspore/ops/signature.py +5 -5
  485. mindspore/parallel/_auto_parallel_context.py +80 -19
  486. mindspore/parallel/_cost_model_context.py +42 -0
  487. mindspore/parallel/_offload_context.py +162 -72
  488. mindspore/parallel/_parallel_serialization.py +2 -2
  489. mindspore/parallel/_ps_context.py +16 -4
  490. mindspore/parallel/_recovery_context.py +2 -1
  491. mindspore/parallel/_tensor.py +15 -13
  492. mindspore/parallel/_transformer/layers.py +8 -6
  493. mindspore/parallel/_transformer/loss.py +1 -0
  494. mindspore/parallel/_transformer/moe.py +7 -7
  495. mindspore/parallel/_transformer/op_parallel_config.py +12 -1
  496. mindspore/parallel/_transformer/transformer.py +34 -14
  497. mindspore/parallel/_utils.py +36 -14
  498. mindspore/parallel/algo_parameter_config.py +114 -20
  499. mindspore/parallel/checkpoint_transform.py +16 -18
  500. mindspore/parallel/shard.py +16 -13
  501. mindspore/profiler/__init__.py +1 -1
  502. mindspore/profiler/common/struct_type.py +3 -3
  503. mindspore/profiler/common/util.py +3 -2
  504. mindspore/profiler/envprofiling.py +11 -4
  505. mindspore/profiler/parser/aicpu_data_parser.py +5 -3
  506. mindspore/profiler/parser/ascend_flops_generator.py +94 -0
  507. mindspore/profiler/parser/ascend_fpbp_generator.py +76 -0
  508. mindspore/profiler/parser/ascend_hccl_generator.py +288 -0
  509. mindspore/profiler/parser/ascend_msprof_exporter.py +213 -0
  510. mindspore/profiler/parser/ascend_msprof_generator.py +199 -0
  511. mindspore/profiler/parser/ascend_op_generator.py +276 -0
  512. mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
  513. mindspore/profiler/parser/ascend_timeline_generator.py +110 -54
  514. mindspore/profiler/parser/base_timeline_generator.py +11 -7
  515. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +45 -46
  516. mindspore/profiler/parser/flops_parser.py +15 -11
  517. mindspore/profiler/parser/framework_parser.py +92 -73
  518. mindspore/profiler/parser/hccl_parser.py +16 -12
  519. mindspore/profiler/parser/integrator.py +22 -11
  520. mindspore/profiler/parser/memory_usage_parser.py +36 -11
  521. mindspore/profiler/parser/minddata_analyzer.py +12 -14
  522. mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
  523. mindspore/profiler/parser/msadvisor_parser.py +8 -4
  524. mindspore/profiler/parser/op_intermediate_parser.py +5 -2
  525. mindspore/profiler/parser/optime_parser.py +1 -1
  526. mindspore/profiler/parser/profiler_info.py +4 -5
  527. mindspore/profiler/parser/step_trace_parser.py +11 -14
  528. mindspore/profiler/profiling.py +678 -377
  529. mindspore/rewrite/api/node.py +211 -54
  530. mindspore/rewrite/api/node_type.py +5 -0
  531. mindspore/rewrite/api/pattern_engine.py +22 -23
  532. mindspore/rewrite/api/scoped_value.py +20 -17
  533. mindspore/rewrite/api/symbol_tree.py +252 -106
  534. mindspore/rewrite/api/tree_node_helper.py +3 -0
  535. mindspore/rewrite/ast_helpers/__init__.py +2 -1
  536. mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
  537. mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
  538. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +97 -46
  539. mindspore/rewrite/common/rewrite_elog.py +5 -1
  540. mindspore/rewrite/namer.py +51 -51
  541. mindspore/rewrite/namespace.py +14 -5
  542. mindspore/{ops/bprop_mindir → rewrite/node}/__init__.py +9 -4
  543. mindspore/rewrite/node/call_function.py +79 -0
  544. mindspore/rewrite/node/cell_container.py +135 -0
  545. mindspore/rewrite/node/control_flow.py +88 -0
  546. mindspore/rewrite/{node.py → node/node.py} +313 -247
  547. mindspore/rewrite/node/node_manager.py +254 -0
  548. mindspore/rewrite/node/node_topological_manager.py +243 -0
  549. mindspore/rewrite/parsers/arguments_parser.py +22 -21
  550. mindspore/rewrite/parsers/assign_parser.py +225 -239
  551. mindspore/rewrite/parsers/attribute_parser.py +9 -7
  552. mindspore/rewrite/parsers/class_def_parser.py +179 -218
  553. mindspore/rewrite/parsers/constant_parser.py +9 -6
  554. mindspore/rewrite/parsers/container_parser.py +9 -7
  555. mindspore/rewrite/parsers/for_parser.py +36 -15
  556. mindspore/rewrite/parsers/function_def_parser.py +23 -20
  557. mindspore/rewrite/parsers/if_parser.py +28 -24
  558. mindspore/rewrite/parsers/module_parser.py +202 -25
  559. mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
  560. mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
  561. mindspore/rewrite/parsers/return_parser.py +6 -6
  562. mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
  563. mindspore/rewrite/sparsify/sparsify.py +4 -1
  564. mindspore/rewrite/sparsify/utils.py +11 -5
  565. mindspore/rewrite/symbol_tree.py +577 -732
  566. mindspore/rewrite/symbol_tree_builder.py +9 -175
  567. mindspore/rewrite/symbol_tree_dumper.py +2 -2
  568. mindspore/run_check/_check_version.py +46 -39
  569. mindspore/run_check/run_check.py +3 -2
  570. mindspore/{scipy/sparse → safeguard}/__init__.py +4 -5
  571. mindspore/safeguard/rewrite_obfuscation.py +517 -0
  572. mindspore/scipy/__init__.py +1 -1
  573. mindspore/scipy/linalg.py +67 -61
  574. mindspore/scipy/ops.py +5 -41
  575. mindspore/scipy/ops_grad.py +3 -2
  576. mindspore/scipy/ops_wrapper.py +5 -5
  577. mindspore/scipy/optimize/line_search.py +8 -8
  578. mindspore/scipy/optimize/linear_sum_assignment.py +4 -4
  579. mindspore/scipy/optimize/minimize.py +16 -12
  580. mindspore/scipy/utils.py +1 -52
  581. mindspore/scipy/utils_const.py +4 -4
  582. mindspore/train/__init__.py +4 -4
  583. mindspore/train/_utils.py +13 -5
  584. mindspore/train/amp.py +410 -148
  585. mindspore/train/anf_ir_pb2.py +16 -4
  586. mindspore/train/callback/_backup_and_restore.py +8 -11
  587. mindspore/train/callback/_callback.py +80 -3
  588. mindspore/train/callback/_checkpoint.py +82 -51
  589. mindspore/train/callback/_early_stop.py +12 -15
  590. mindspore/train/callback/_history.py +1 -1
  591. mindspore/train/callback/_lambda_callback.py +13 -13
  592. mindspore/train/callback/_landscape.py +21 -17
  593. mindspore/train/callback/_loss_monitor.py +9 -10
  594. mindspore/train/callback/_on_request_exit.py +16 -33
  595. mindspore/train/callback/_reduce_lr_on_plateau.py +21 -24
  596. mindspore/train/callback/_summary_collector.py +44 -30
  597. mindspore/train/callback/_time_monitor.py +62 -12
  598. mindspore/train/data_sink.py +10 -16
  599. mindspore/train/dataset_helper.py +154 -86
  600. mindspore/train/loss_scale_manager.py +14 -9
  601. mindspore/train/metrics/__init__.py +10 -2
  602. mindspore/train/metrics/accuracy.py +1 -1
  603. mindspore/train/metrics/auc.py +1 -1
  604. mindspore/train/metrics/bleu_score.py +2 -2
  605. mindspore/train/metrics/confusion_matrix.py +14 -14
  606. mindspore/train/metrics/cosine_similarity.py +3 -3
  607. mindspore/train/metrics/dice.py +1 -1
  608. mindspore/train/metrics/fbeta.py +1 -1
  609. mindspore/train/metrics/hausdorff_distance.py +8 -6
  610. mindspore/train/metrics/mean_surface_distance.py +5 -4
  611. mindspore/train/metrics/metric.py +49 -17
  612. mindspore/train/metrics/occlusion_sensitivity.py +4 -4
  613. mindspore/train/metrics/perplexity.py +1 -1
  614. mindspore/train/metrics/precision.py +2 -2
  615. mindspore/train/metrics/recall.py +2 -3
  616. mindspore/train/metrics/roc.py +7 -7
  617. mindspore/train/metrics/root_mean_square_surface_distance.py +5 -4
  618. mindspore/train/metrics/topk.py +7 -4
  619. mindspore/train/mind_ir_pb2.py +193 -48
  620. mindspore/train/model.py +377 -133
  621. mindspore/train/serialization.py +697 -245
  622. mindspore/train/summary/_summary_adapter.py +5 -2
  623. mindspore/train/summary/_writer_pool.py +4 -3
  624. mindspore/train/summary/summary_record.py +25 -23
  625. mindspore/train/train_thor/convert_utils.py +39 -23
  626. mindspore/train/train_thor/dataset_helper.py +4 -3
  627. mindspore/train/train_thor/model_thor.py +8 -8
  628. mindspore/version.py +1 -1
  629. {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/METADATA +7 -8
  630. {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/RECORD +633 -804
  631. {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/entry_points.txt +0 -1
  632. mindspore/_akg/akg/tvm/contrib/debugger/__init__.py +0 -16
  633. mindspore/_akg/akg/tvm/contrib/debugger/debug_result.py +0 -274
  634. mindspore/_akg/akg/tvm/contrib/debugger/debug_runtime.py +0 -259
  635. mindspore/_akg/akg/tvm/contrib/peak.py +0 -341
  636. mindspore/_akg/akg/tvm/contrib/rpc.py +0 -25
  637. mindspore/_akg/akg/tvm/contrib/xcode.py +0 -257
  638. mindspore/_akg/akg/tvm/exec/__init__.py +0 -17
  639. mindspore/_akg/akg/tvm/exec/autotvm_log_editor.py +0 -60
  640. mindspore/_akg/akg/tvm/exec/measure_peak.py +0 -48
  641. mindspore/_akg/akg/tvm/exec/query_rpc_tracker.py +0 -48
  642. mindspore/_akg/akg/tvm/exec/rpc_proxy.py +0 -98
  643. mindspore/_akg/akg/tvm/exec/rpc_server.py +0 -88
  644. mindspore/_akg/akg/tvm/exec/rpc_tracker.py +0 -62
  645. mindspore/_akg/akg/tvm/rpc/__init__.py +0 -29
  646. mindspore/_akg/akg/tvm/rpc/base.py +0 -182
  647. mindspore/_akg/akg/tvm/rpc/client.py +0 -436
  648. mindspore/_akg/akg/tvm/rpc/proxy.py +0 -595
  649. mindspore/_akg/akg/tvm/rpc/server.py +0 -413
  650. mindspore/_akg/akg/tvm/rpc/tornado_util.py +0 -121
  651. mindspore/_akg/akg/tvm/rpc/tracker.py +0 -431
  652. mindspore/_extends/graph_kernel/expander.py +0 -80
  653. mindspore/_extends/graph_kernel/expanders/__init__.py +0 -57
  654. mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
  655. mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
  656. mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
  657. mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
  658. mindspore/_extends/graph_kernel/expanders/bias_add_grad.py +0 -49
  659. mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
  660. mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
  661. mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
  662. mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
  663. mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
  664. mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
  665. mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
  666. mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
  667. mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
  668. mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
  669. mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
  670. mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
  671. mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
  672. mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
  673. mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
  674. mindspore/_extends/graph_kernel/expanders/gather.py +0 -43
  675. mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
  676. mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
  677. mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
  678. mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
  679. mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
  680. mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
  681. mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
  682. mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
  683. mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
  684. mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
  685. mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
  686. mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
  687. mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
  688. mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
  689. mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
  690. mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
  691. mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
  692. mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
  693. mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
  694. mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
  695. mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
  696. mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
  697. mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
  698. mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
  699. mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
  700. mindspore/_extends/graph_kernel/expanders/tile.py +0 -54
  701. mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
  702. mindspore/_extends/parse/jit_fallback_modules.py +0 -51
  703. mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
  704. mindspore/dataset/engine/graphdata.py +0 -1586
  705. mindspore/include/api/net.h +0 -142
  706. mindspore/ops/_grad/grad_array_ops.py +0 -1347
  707. mindspore/ops/_grad/grad_clip_ops.py +0 -84
  708. mindspore/ops/_grad/grad_debug_ops.py +0 -68
  709. mindspore/ops/_grad/grad_inner_ops.py +0 -235
  710. mindspore/ops/_grad/grad_math_ops.py +0 -1684
  711. mindspore/ops/_grad/grad_nn_ops.py +0 -1529
  712. mindspore/ops/_grad/grad_other_ops.py +0 -89
  713. mindspore/ops/_grad/grad_sequence_ops.py +0 -296
  714. mindspore/ops/_grad/grad_sparse.py +0 -323
  715. mindspore/ops/_grad_experimental/grad_image_ops.py +0 -249
  716. mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -195
  717. mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
  718. mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
  719. mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
  720. mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +0 -19
  721. mindspore/ops/bprop_mindir/Argmax_bprop.mindir +0 -15
  722. mindspore/ops/bprop_mindir/Argmin_bprop.mindir +0 -15
  723. mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +0 -19
  724. mindspore/ops/bprop_mindir/Assign_bprop.mindir +0 -17
  725. mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +0 -150
  726. mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +0 -66
  727. mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
  728. mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -15
  729. mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
  730. mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +0 -28
  731. mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
  732. mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +0 -33
  733. mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +0 -306
  734. mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -13
  735. mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
  736. mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
  737. mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +0 -240
  738. mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +0 -247
  739. mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +0 -247
  740. mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +0 -315
  741. mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +0 -278
  742. mindspore/ops/bprop_mindir/DType_bprop.mindir +0 -14
  743. mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +0 -58
  744. mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -13
  745. mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +0 -23
  746. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
  747. mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +0 -15
  748. mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
  749. mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
  750. mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +0 -25
  751. mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +0 -18
  752. mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +0 -27
  753. mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
  754. mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
  755. mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
  756. mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +0 -14
  757. mindspore/ops/bprop_mindir/Elu_bprop.mindir +0 -16
  758. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  759. mindspore/ops/bprop_mindir/Equal_bprop.mindir +0 -19
  760. mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +0 -58
  761. mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +0 -16
  762. mindspore/ops/bprop_mindir/Flatten_bprop.mindir +0 -54
  763. mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +0 -19
  764. mindspore/ops/bprop_mindir/GatherD_bprop.mindir +0 -26
  765. mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +0 -57
  766. mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
  767. mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +0 -19
  768. mindspore/ops/bprop_mindir/Greater_bprop.mindir +0 -19
  769. mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +0 -16
  770. mindspore/ops/bprop_mindir/HSwish_bprop.mindir +0 -16
  771. mindspore/ops/bprop_mindir/IOU_bprop.mindir +0 -19
  772. mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
  773. mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +0 -15
  774. mindspore/ops/bprop_mindir/IsInf_bprop.mindir +0 -15
  775. mindspore/ops/bprop_mindir/IsNan_bprop.mindir +0 -15
  776. mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +0 -126
  777. mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +0 -15
  778. mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +0 -30
  779. mindspore/ops/bprop_mindir/LRN_bprop.mindir +0 -43
  780. mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
  781. mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +0 -19
  782. mindspore/ops/bprop_mindir/Less_bprop.mindir +0 -19
  783. mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +0 -23
  784. mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -13
  785. mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +0 -23
  786. mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +0 -19
  787. mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +0 -15
  788. mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +0 -21
  789. mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +0 -74
  790. mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +0 -74
  791. mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +0 -75
  792. mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +0 -65
  793. mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
  794. mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
  795. mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
  796. mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +0 -27
  797. mindspore/ops/bprop_mindir/Mish_bprop.mindir +0 -35
  798. mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
  799. mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
  800. mindspore/ops/bprop_mindir/NonZero_bprop.mindir +0 -14
  801. mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +0 -19
  802. mindspore/ops/bprop_mindir/OneHot_bprop.mindir +0 -26
  803. mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +0 -14
  804. mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
  805. mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
  806. mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
  807. mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +0 -29
  808. mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +0 -82
  809. mindspore/ops/bprop_mindir/Range_bprop.mindir +0 -22
  810. mindspore/ops/bprop_mindir/Rank_bprop.mindir +0 -14
  811. mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +0 -16
  812. mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
  813. mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +0 -19
  814. mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +0 -19
  815. mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +0 -20
  816. mindspore/ops/bprop_mindir/Reshape_bprop.mindir +0 -60
  817. mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +0 -29
  818. mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +0 -89
  819. mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +0 -52
  820. mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +0 -22
  821. mindspore/ops/bprop_mindir/Round_bprop.mindir +0 -15
  822. mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
  823. mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
  824. mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +0 -22
  825. mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +0 -24
  826. mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -22
  827. mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
  828. mindspore/ops/bprop_mindir/SeLU_bprop.mindir +0 -21
  829. mindspore/ops/bprop_mindir/Select_bprop.mindir +0 -31
  830. mindspore/ops/bprop_mindir/Shape_bprop.mindir +0 -14
  831. mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +0 -21
  832. mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
  833. mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +0 -16
  834. mindspore/ops/bprop_mindir/Sign_bprop.mindir +0 -15
  835. mindspore/ops/bprop_mindir/Slice_bprop.mindir +0 -26
  836. mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +0 -36
  837. mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  838. mindspore/ops/bprop_mindir/Softplus_bprop.mindir +0 -16
  839. mindspore/ops/bprop_mindir/Softsign_bprop.mindir +0 -33
  840. mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
  841. mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +0 -28
  842. mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +0 -23
  843. mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
  844. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  845. mindspore/ops/bprop_mindir/Split_bprop.mindir +0 -22
  846. mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +0 -54
  847. mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +0 -95
  848. mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +0 -98
  849. mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -29
  850. mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
  851. mindspore/ops/bprop_mindir/Tanh_bprop.mindir +0 -66
  852. mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +0 -22
  853. mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +0 -29
  854. mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +0 -14
  855. mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
  856. mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
  857. mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -23
  858. mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +0 -19
  859. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -20
  860. mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -16
  861. mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -22
  862. mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +0 -32
  863. mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +0 -38
  864. mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +0 -15
  865. mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
  866. mindspore/rewrite/node_visitor.py +0 -44
  867. mindspore/rewrite/topological_manager.py +0 -203
  868. mindspore/scipy/sparse/linalg.py +0 -192
  869. {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/WHEEL +0 -0
  870. {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/top_level.txt +0 -0
mindspore/ops/operations/array_ops.py
@@ -29,6 +29,7 @@ from mindspore.common._utils import is_shape_unknown, is_dim_unknown
 from mindspore.ops.primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op
 from mindspore import _checkparam as validator
 from mindspore._checkparam import _check_3d_int_or_tuple
+from mindspore.ops._tracefunc import PackFunc
 from mindspore.common import dtype as mstype
 from mindspore.common._decorator import deprecated
 from mindspore.common.parameter import Parameter
@@ -174,27 +175,11 @@ def _check_infer_attr_reduce(axis, keep_dims, prim_name):
 
 class Expand(Primitive):
     """
-    Expands the Tensor along singleton dimensions(dim with size 1) to match given desired shape.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Refer to :func:`mindspore.ops.expand` for more details.
-
-    Supported Platforms:
-        ``Ascend`` ``CPU``
-
-    Examples:
-        >>> x = Tensor(np.array([[1], [2], [3]]), mindspore.float32)
-        >>> shape = Tensor(np.array([3,4]), mindspore.int32)
-        >>> expand = ops.Expand()
-        >>> y = expand(x, shape)
-        >>> print(y)
-        [[1. 1. 1. 1.]
-         [2. 2. 2. 2.]
-         [3. 3. 3. 3.]]
+    :class:`mindspore.ops.Expand` will be deprecated in the future.
+    Please use :class:`mindspore.ops.BroadcastTo` instead.
     """
 
+    @deprecated("2.1", "BroadcastTo", False)
     @prim_attr_register
     def __init__(self):
         """Initialize Expand."""
@@ -204,14 +189,28 @@ class Expand(Primitive):
 
 class ExpandDims(PrimitiveWithCheck):
     """
-    Adds an additional dimension to `input_x` at the given axis.
+    Adds an additional dimension to `input_x` at the given axis, the dimension of
+    `input_x` should be greater than or equal to 1.
 
     Refer to :func:`mindspore.ops.expand_dims` for more details.
 
+    Inputs:
+        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
+        - **axis** (int) - Specifies the dimension index at which to expand
+          the shape of `input_x`. The value of axis must be in the range
+          `[-input_x.ndim-1, input_x.ndim]`. Only constant value is allowed.
+
+    Outputs:
+        Tensor, the shape of tensor is :math:`(1, x_1, x_2, ..., x_R)` if the
+        value of `axis` is 0. It has the same data type as `input_x`.
+
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
         >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
         >>> expand_dims = ops.ExpandDims()
         >>> output = expand_dims(input_tensor, 0)
@@ -249,6 +248,9 @@ class DType(Primitive):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
         >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
         >>> output = ops.DType()(input_tensor)
         >>> print(output)
@@ -302,6 +304,10 @@ class Cast(PrimitiveWithCheck):
     """
     Returns a tensor with the new specified data type.
 
+    Note:
+        When converting complex numbers to boolean type, the imaginary part of the complex number is not
+        taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.
+
     Inputs:
         - **input_x** (Union[Tensor, Number]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
           The tensor to be cast.
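
A small sketch of the rule stated in the new Note (illustrative only; the printed result follows the documented rule, where only the real part decides the boolean value):

```python
# Illustrative: casting complex values to bool ignores the imaginary part.
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([1 + 2j, 0 + 3j], dtype=np.complex64))
out = ops.Cast()(x, mindspore.bool_)
print(out)
# Real parts are 1 and 0, so per the Note: [ True False]
```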
@@ -318,6 +324,9 @@ class Cast(PrimitiveWithCheck):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
         >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
         >>> input_x = Tensor(input_np)
         >>> type_dst = mindspore.int32
@@ -340,7 +349,7 @@
                 data = x.data
                 if data.dtype == dtype:
                     return (True, x)
-            if isinstance(x, Tensor) and x.dtype == dtype:
+            if isinstance(x, Tensor) and x.dtype == dtype and not PackFunc.is_tracing():
                 x = Tensor(x)
                 x.set_cast_dtype()
                 return (True, x)
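
A plausible reading of this change (inferred, not stated in the diff): while a function is being traced through the new `mindspore.ops._tracefunc.PackFunc` machinery (imported in the first hunk), a same-dtype `Cast` must still be recorded rather than eliminated early, hence the extra guard. A sketch that only mirrors the condition shown above:

```python
# Mirrors the guard in Cast.check_elim above; names other than
# PackFunc.is_tracing() are illustrative.
from mindspore.ops._tracefunc import PackFunc

def can_eliminate_cast(x_dtype, dst_dtype):
    # A same-dtype cast is only folded away when no trace is being recorded.
    return x_dtype == dst_dtype and not PackFunc.is_tracing()
```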
@@ -353,20 +362,20 @@
             return None
         src_type = mstype.get_py_obj_dtype(x)
         validator.check_subclass("input_x", src_type,
-                                 [mstype.tensor, mstype.number], self.name)
+                                 [mstype.tensor_type, mstype.number], self.name)
         validator.check_subclass("type", dst_type, mstype.number, self.name)
 
-        if isinstance(src_type, type(mstype.tensor)):
+        if isinstance(src_type, type(mstype.tensor_type)):
             src_type = src_type.element_type()
-        if isinstance(dst_type, type(mstype.tensor)):
+        if isinstance(dst_type, type(mstype.tensor_type)):
             dst_type = dst_type.element_type()
 
         value = None
         np_dst_type = mstype.dtype_to_nptype(dst_type)
         if isinstance(x, (int, float)):
-            value = Tensor(np.array(x).astype(np_dst_type))
+            value = Tensor(np.array(x).astype(np_dst_type), dtype=dst_type)
         else:
-            value = Tensor(x.asnumpy().astype(np_dst_type))
+            value = Tensor(x.asnumpy().astype(np_dst_type), dtype=dst_type)
         return value
 
 
@@ -402,15 +411,19 @@ class Im2Col(Primitive):
402
411
  Note:
403
412
  Currently, only 4-D input tensors (batched image-like tensors) are supported.
404
413
 
414
+ .. warning::
415
+ This is an experimental API that is subject to change or deletion.
416
+
405
417
  Args:
406
418
  ksizes (Union[int, tuple[int], list[int]]): The size of the kernel, should be two int
407
419
  for height and width. If type is int, the height equals the width. Must be specified.
408
420
  strides (Union[int, tuple[int], list[int]], optional): The stride of the window, should be two int
409
- for height and width. If type is int, it means that height equal with width. Default: 1.
421
+ for height and width. If type is int, the height equals the width. Default: ``1`` .
410
422
  dilations (Union[int, tuple[int], list[int]], optional): The dilation of the window, should be two int
411
- for height and width. If type is int, it means that height equal with width. Default: 1.
423
+ for height and width. If type is int, the height equals the width. Default: ``1`` .
424
+
412
425
  pads (Union[int, tuple[int], list[int]], optional): The pad of the window, that must be a tuple of
413
- one or two `int` for height and width. Default: 0.
426
+ one or two `int` for height and width. Default: ``0`` .
414
427
 
415
428
  - If one int, :math:`pad\_height = pad\_width`.
416
429
  - If two int, :math:`pad\_height = pads[0]`, :math:`pad\_width = pads[1]`.
@@ -418,7 +431,6 @@ class Im2Col(Primitive):
418
431
 
419
432
  Inputs:
420
433
  - **x** (Tensor) - input tensor, only 4-D input tensors (batched image-like tensors) are supported.
421
- support all real number data type.
422
434
 
423
435
  Outputs:
424
436
  Tensor, a 4-D Tensor with same type of input `x`.
@@ -437,6 +449,9 @@ class Im2Col(Primitive):
437
449
  ``Ascend`` ``GPU`` ``CPU``
438
450
 
439
451
  Examples:
452
+ >>> import numpy as np
453
+ >>> from mindspore import Tensor, ops
454
+ >>> from mindspore import dtype as mstype
440
455
  >>> x = Tensor(input_data=np.random.rand(4, 4, 32, 32), dtype=mstype.float64)
441
456
  >>> im2col = ops.Im2Col(ksizes=3, strides=1, dilations=1)
442
457
  >>> y = im2col(x)
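A hedged continuation of the doctest above: with ksizes=3, strides=1, dilations=1 and the default pads=0 on a (4, 4, 32, 32) input, the documented patch extraction yields 4*3*3 = 36 channels of 30x30 positions, so the expected shape would be:
>>> print(y.shape)
(4, 36, 30, 30)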
@@ -503,15 +518,15 @@ class Col2Im(Primitive):
503
518
  kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two positive int
504
519
  for height and width. If type is int, the height equals the width. Must be specified.
505
520
  dilation (Union[int, tuple[int], list[int]], optional): The size of the dilation, should be two positive int
506
- for height and width. If type is int, it means that height equal with width. Default: 1.
521
+ for height and width. If type is int, the height equals the width. Default: ``1`` .
507
522
  padding (Union[int, tuple[int], list[int]], optional): The size of the padding, should be two int
508
- for height and width. If type is int, it means that height equal with width. Default: 0.
523
+ for height and width. If type is int, the height equals the width. Default: ``0`` .
509
524
  stride (Union[int, tuple[int], list[int]], optional): The size of the stride, should be two positive int
510
- for height and width. If type is int, it means that height equal with width. Default: 1.
525
+ for height and width. If type is int, the height equals the width. Default: ``1`` .
511
526
 
512
527
  Inputs:
513
- - **x** (Tensor) - 4D tensor with data type float16 or float32.
514
- - **output_size** (Tensor) - 1D tensor with 2 elements of data type int32.
528
+ - **x** (Tensor) - 4D input Tensor.
529
+ - **output_size** (Tensor) - 1D tensor with 2 elements of data type int32 or int64.
515
530
 
516
531
  Outputs:
517
532
  Tensor, a 4-D Tensor with same type of input `x`.
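A minimal sketch of the inverse relationship with Im2Col, adapted from the functional :func:`mindspore.ops.col2im` example (shapes assume the parameters shown):
>>> x = Tensor(np.random.rand(16, 16, 4, 25), mstype.float32)
>>> output_size = Tensor([8, 8], mstype.int32)
>>> col2im = ops.Col2Im(kernel_size=[2, 2], dilation=[2, 2], padding=[2, 2], stride=[2, 2])
>>> print(col2im(x, output_size).shape)
(16, 16, 8, 8)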
@@ -574,10 +589,21 @@ class Reshape(PrimitiveWithCheck):
574
589
 
575
590
  Refer to :func:`mindspore.ops.reshape` for more details.
576
591
 
592
+ Inputs:
593
+ - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
594
+ - **input_shape** (tuple[int]) - The input tuple is constructed by multiple
595
+ integers, i.e., :math:`(y_1, y_2, ..., y_S)`.
596
+
597
+ Outputs:
598
+ Tensor, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
599
+
577
600
  Supported Platforms:
578
601
  ``Ascend`` ``GPU`` ``CPU``
579
602
 
580
603
  Examples:
604
+ >>> import mindspore
605
+ >>> import numpy as np
606
+ >>> from mindspore import Tensor, ops
581
607
  >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
582
608
  >>> reshape = ops.Reshape()
583
609
  >>> output = reshape(input_x, (3, 2))
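Since `input_shape` may contain a single -1 whose size is inferred from the remaining dimensions, a short sketch per the :func:`mindspore.ops.reshape` semantics:
>>> print(reshape(input_x, (2, -1)).shape)
(2, 3)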
@@ -597,8 +623,9 @@ class Reshape(PrimitiveWithCheck):
597
623
  # for shape is not constant
598
624
  if shape is None or self.none_in_tuple_or_list(shape) or x is None:
599
625
  return None
626
+
600
627
  if isinstance(shape, (Tensor, Tensor_)):
601
- validator.check_tensor_dtype_valid("shape", mstype.tensor_type(shape.dtype),
628
+ validator.check_tensor_dtype_valid("shape", mstype.TensorType(shape.dtype),
602
629
  [mstype.int32, mstype.int64], self.name)
603
630
  shape = shape.asnumpy().tolist()
604
631
  else:
@@ -644,10 +671,20 @@ class Shape(Primitive):
644
671
 
645
672
  Refer to :func:`mindspore.ops.shape` for more details.
646
673
 
674
+ Inputs:
675
+ - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
676
+
677
+ Outputs:
678
+ tuple[int], the output tuple is constructed by multiple integers,
679
+ :math:`(x_1, x_2, ..., x_R)`.
680
+
647
681
  Supported Platforms:
648
682
  ``Ascend`` ``GPU`` ``CPU``
649
683
 
650
684
  Examples:
685
+ >>> import mindspore
686
+ >>> import numpy as np
687
+ >>> from mindspore import Tensor, ops
651
688
  >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
652
689
  >>> shape = ops.Shape()
653
690
  >>> output = shape(input_x)
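For completeness, the expected result under the documented semantics is the static shape as a Python tuple:
>>> print(output)
(3, 2, 1)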
@@ -673,6 +710,9 @@ class TensorShape(Primitive):
673
710
  ``Ascend`` ``GPU`` ``CPU``
674
711
 
675
712
  Examples:
713
+ >>> import mindspore
714
+ >>> import numpy as np
715
+ >>> from mindspore import Tensor, ops
676
716
  >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
677
717
  >>> shape = ops.TensorShape()
678
718
  >>> output = shape(input_x)
@@ -695,33 +735,30 @@ class Unsqueeze(PrimitiveWithCheck):
695
735
  self.axis = axis
696
736
 
697
737
 
698
- class DynamicShape(Primitive):
699
- """
700
- Same as operator TensorShape. DynamicShape will be deprecated in the future.
701
- Please use TensorShape instead.
702
-
703
- Supported Platforms:
704
- Deprecated
705
- """
706
-
707
- @deprecated("1.7", "TensorShape", True)
708
- @prim_attr_register
709
- def __init__(self, dtype=9):
710
- """init Shape"""
711
- self.init_prim_io_names(inputs=['tensor'], outputs=['output'])
712
- self.add_prim_attr('is_dynamic_shape', True)
713
-
714
-
715
738
  class Squeeze(Primitive):
716
739
  """
717
740
  Return the Tensor after deleting the dimension of size 1 in the specified `axis`.
718
741
 
719
742
  Refer to :func:`mindspore.ops.squeeze` for more details.
720
743
 
744
+ Args:
745
+ axis (Union[int, tuple(int)]): Specifies the dimension indexes of shape to be removed, which will remove
746
+ all the dimensions of size 1 in the given axis parameter. If specified, it must be int32 or int64.
747
+ Default: ``()`` .
748
+
749
+ Inputs:
750
+ - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
751
+
752
+ Outputs:
753
+ Tensor, the shape of tensor is :math:`(x_1, x_2, ..., x_S)`.
754
+
721
755
  Supported Platforms:
722
756
  ``Ascend`` ``GPU`` ``CPU``
723
757
 
724
758
  Examples:
759
+ >>> import mindspore
760
+ >>> import numpy as np
761
+ >>> from mindspore import Tensor, ops
725
762
  >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
726
763
  >>> squeeze = ops.Squeeze(2)
727
764
  >>> output = squeeze(input_x)
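The expected result, following the Outputs description above (axis 2 has size 1 and is removed):
>>> print(output.shape)
(3, 2)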
@@ -750,10 +787,23 @@ class Transpose(Primitive):
750
787
 
751
788
  Refer to :func:`mindspore.ops.transpose` for more details.
752
789
 
790
+ Inputs:
791
+ - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
792
+ - **input_perm** (tuple[int]) - The permutation to be converted. The elements in `input_perm` are composed of
793
+ the indexes of each dimension of `input_x`. The length of `input_perm` and the shape of `input_x` must be
794
+ the same. Only constant value is allowed. Must be in the range [0, rank(input_x)).
795
+
796
+ Outputs:
797
+ Tensor, the type of output tensor is the same as `input_x` and the shape of output tensor is decided by the
798
+ shape of `input_x` and the value of `input_perm`.
799
+
753
800
  Supported Platforms:
754
801
  ``Ascend`` ``GPU`` ``CPU``
755
802
 
756
803
  Examples:
804
+ >>> import mindspore
805
+ >>> import numpy as np
806
+ >>> from mindspore import Tensor, ops
757
807
  >>> input_x = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
758
808
  >>> input_perm = (0, 2, 1)
759
809
  >>> transpose = ops.Transpose()
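A hedged continuation: permuting axes 1 and 2 of the (2, 2, 3) input should give a (2, 3, 2) result:
>>> print(transpose(input_x, input_perm).shape)
(2, 3, 2)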
@@ -847,6 +897,9 @@ class Unique(Primitive):
847
897
  ``Ascend`` ``GPU`` ``CPU``
848
898
 
849
899
  Examples:
900
+ >>> import mindspore
901
+ >>> import numpy as np
902
+ >>> from mindspore import Tensor, ops, nn
850
903
  >>> input_x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
851
904
  >>> output = ops.Unique()(input_x)
852
905
  >>> print(output)
@@ -889,10 +942,33 @@ class UniqueConsecutive(Primitive):
889
942
 
890
943
  Refer to :func:`mindspore.ops.unique_consecutive` for more details.
891
944
 
945
+ Args:
946
+ return_idx (bool, optional): Whether to return the index of where the element in the original input
947
+ maps to the position in the output. Default: ``False`` .
948
+ return_counts (bool, optional): Whether to return the counts of each unique element. Default: ``False`` .
949
+ axis (int, optional): The dimension to apply unique. If ``None`` , the unique of the flattened input is
950
+ returned. If specified, it must be int32 or int64. Default: ``None`` .
951
+
952
+ Inputs:
953
+ - **x** (Tensor) - The input tensor.
954
+
955
+ Outputs:
956
+ A tensor or a tuple of tensors containing tensor objects (`output`, `idx`, `counts`).
957
+
958
+ - `output` has the same type as `x` and is used to represent the output list of unique scalar elements.
959
+ - If `return_idx` is True, there will be an additional returned tensor, `idx`,
960
+ which has the same shape as `x` and represents
961
+ the index of where the element in the original input maps to the position in the output.
962
+ - If `return_counts` is True, there will be an additional returned tensor, `counts`,
963
+ which represents the number of occurrences for each unique value or tensor.
964
+
892
965
  Supported Platforms:
893
966
  ``Ascend`` ``GPU`` ``CPU``
894
967
 
895
968
  Examples:
969
+ >>> import numpy as np
970
+ >>> from mindspore import Tensor, ops
971
+ >>> from mindspore import dtype as mstype
896
972
  >>> x = Tensor(np.array([1, 1, 2, 2, 3, 1, 1, 2]), mstype.int32)
897
973
  >>> unique_consecutive = ops.UniqueConsecutive(True, True, None)
898
974
  >>> output, idx, counts = unique_consecutive(x)
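Per the Outputs description above, only consecutive duplicates collapse while repeats across separate runs are kept, so the expected values are:
>>> print(output)
[1 2 3 1 2]
>>> print(counts)
[2 2 1 2 1]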
@@ -920,18 +996,30 @@ class Gather(Primitive):
920
996
  r"""
921
997
  Returns the slice of the input tensor corresponding to the elements of `input_indices` on the specified `axis`.
922
998
 
923
- The following figure shows the calculation process of Gather commonly:
999
+ Refer to :func:`mindspore.ops.gather` for more details.
924
1000
 
925
- .. image:: Gather.png
1001
+ Args:
1002
+ batch_dims (int, optional): Specifies the number of batch dimensions.
1003
+ It must be less than or equal to the rank of `input_indices`. Default: ``0`` .
926
1004
 
927
- where params represents the input `input_params`, and indices represents the index to be sliced `input_indices`.
1005
+ Inputs:
1006
+ - **input_params** (Tensor) - The original Tensor. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
1007
+ - **input_indices** (Tensor) - Index tensor to be sliced, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
1008
+ Specifies the indices of elements of the original Tensor. The data type can be int32 or int64.
1009
+ - **axis** (Union(int, Tensor[int])) - Specifies the dimension index to gather indices.
1010
+ When axis is Tensor, the size must be 1.
928
1011
 
929
- Refer to :func:`mindspore.ops.gather` for more details.
1012
+ Outputs:
1013
+ Tensor, the shape of tensor is
1014
+ :math:`input\_params.shape[:axis] + input\_indices.shape + input\_params.shape[axis + 1:]`.
930
1015
 
931
1016
  Supported Platforms:
932
1017
  ``Ascend`` ``GPU`` ``CPU``
933
1018
 
934
1019
  Examples:
1020
+ >>> import mindspore
1021
+ >>> import numpy as np
1022
+ >>> from mindspore import Tensor, ops
935
1023
  >>> # case1: input_indices is a Tensor with shape (5, ).
936
1024
  >>> input_params = Tensor(np.array([1, 2, 3, 4, 5, 6, 7]), mindspore.float32)
937
1025
  >>> input_indices = Tensor(np.array([0, 2, 4, 2, 6]), mindspore.int32)
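A hedged continuation of case 1, gathering along axis 0 (the values follow directly from the documented shape rule):
>>> output = ops.Gather()(input_params, input_indices, 0)
>>> print(output)
[1. 3. 5. 3. 7.]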
@@ -988,7 +1076,7 @@ class GatherV2(PrimitiveWithCheck):
988
1076
  self.init_prim_io_names(inputs=['params', 'indices', 'axis'], outputs=['output'])
989
1077
 
990
1078
  def __check__(self, params, indices, axis):
991
- validator.check_subclass("params", params['dtype'], mstype.tensor, self.name)
1079
+ validator.check_subclass("params", params['dtype'], mstype.tensor_type, self.name)
992
1080
  validator.check_tensor_dtype_valid("indices", indices['dtype'], mstype.int_type, self.name)
993
1081
  validator.check_subclass("axis", axis['dtype'], [mstype.number], self.name)
994
1082
  axis_v = axis['value']
@@ -997,7 +1085,7 @@ class GatherV2(PrimitiveWithCheck):
997
1085
  validator.check_int_range(axis_v, -rank, rank, validator.INC_LEFT, "axis", self.name)
998
1086
 
999
1087
 
1000
- class SparseGatherV2(PrimitiveWithCheck):
1088
+ class SparseGatherV2(Primitive):
1001
1089
  """
1002
1090
  Returns a slice of input tensor based on the specified indices and axis.
1003
1091
 
@@ -1006,18 +1094,19 @@ class SparseGatherV2(PrimitiveWithCheck):
1006
1094
  - **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
1007
1095
  Specifies the indices of elements of the original Tensor, must be in the range
1008
1096
  `[0, input_params.shape[axis])`.
1009
- - **axis** (int) - Specifies the dimension index to gather indices.
1097
+ - **axis** (Union(int, Tensor[int])) - Specifies the dimension index to gather indices.
1098
+ When axis is Tensor, the size must be 1.
1010
1099
 
1011
1100
  Outputs:
1012
1101
  Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.
1013
1102
 
1014
- Raises:
1015
- TypeError: If `axis` is not an int.
1016
-
1017
1103
  Supported Platforms:
1018
1104
  ``Ascend`` ``GPU``
1019
1105
 
1020
1106
  Examples:
1107
+ >>> import mindspore
1108
+ >>> import numpy as np
1109
+ >>> from mindspore import Tensor, ops
1021
1110
  >>> input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32)
1022
1111
  >>> input_indices = Tensor(np.array([1, 2]), mindspore.int32)
1023
1112
  >>> axis = 1
@@ -1034,15 +1123,6 @@ class SparseGatherV2(PrimitiveWithCheck):
1034
1123
  self.init_prim_io_names(inputs=['params', 'indices', 'axis'], outputs=['output'])
1035
1124
  self.add_prim_attr('bprop_return_sparse', True)
1036
1125
 
1037
- def __check__(self, params, indices, axis):
1038
- validator.check_subclass("params", params['dtype'], mstype.tensor, self.name)
1039
- validator.check_tensor_dtype_valid("indices", indices['dtype'], mstype.int_type, self.name)
1040
- validator.check_subclass("axis", axis['dtype'], [mstype.number], self.name)
1041
- axis_v = axis['value']
1042
- validator.check_value_type('axis', axis_v, [int], self.name)
1043
- rank = len(params['shape'])
1044
- validator.check_int_range(axis_v, -rank, rank, validator.INC_LEFT, "axis", self.name)
1045
-
1046
1126
 
1047
1127
  class Padding(Primitive):
1048
1128
  """
@@ -1050,10 +1130,24 @@ class Padding(Primitive):
1050
1130
 
1051
1131
  Refer to :func:`mindspore.ops.padding` for more details.
1052
1132
 
1133
+ Args:
1134
+ pad_dim_size (int, optional): The value of the last dimension of `x` to be
1135
+ extended, which must be positive. Default: ``8`` .
1136
+
1137
+ Inputs:
1138
+ - **x** (Tensor) - Input Tensor of 2D or higher-dimensional.
1139
+ The last dimension of `x` must be 1. The data type is Number.
1140
+
1141
+ Outputs:
1142
+ Tensor, the padded Tensor.
1143
+
1053
1144
  Supported Platforms:
1054
1145
  ``Ascend`` ``GPU`` ``CPU``
1055
1146
 
1056
1147
  Examples:
1148
+ >>> import mindspore
1149
+ >>> import numpy as np
1150
+ >>> from mindspore import Tensor, ops
1057
1151
  >>> x = Tensor(np.array([[8], [10]]), mindspore.float32)
1058
1152
  >>> pad_dim_size = 4
1059
1153
  >>> output = ops.Padding(pad_dim_size)(x)
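The last dimension (size 1) is extended to `pad_dim_size` with zeros, so the expected result is:
>>> print(output)
[[ 8.  0.  0.  0.]
 [10.  0.  0.  0.]]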
@@ -1082,10 +1176,23 @@ class UniqueWithPad(Primitive):
1082
1176
 
1083
1177
  Refer to :func:`mindspore.ops.unique_with_pad` for more details.
1084
1178
 
1179
+ Inputs:
1180
+ - **x** (Tensor) - The tensor to be deduplicated. Must be a 1-D tensor of type int32 or int64.
1181
+ - **pad_num** (int) - Pad num. The data type is an int.
1182
+
1183
+ Outputs:
1184
+ tuple(Tensor), tuple of 2 tensors, `y` and `idx`.
1185
+
1186
+ - y (Tensor) - The unique elements filled with pad_num, with the same shape and data type as `x`.
1187
+ - idx (Tensor) - The index of each value of `x` in the unique output `y`, with the same shape and data type as `x`.
1188
+
1085
1189
  Supported Platforms:
1086
1190
  ``Ascend`` ``GPU`` ``CPU``
1087
1191
 
1088
1192
  Examples:
1193
+ >>> import mindspore
1194
+ >>> import numpy as np
1195
+ >>> from mindspore import Tensor, ops
1089
1196
  >>> x = Tensor(np.array([1, 1, 2, 2, 3, 3, 4, 5]), mindspore.int32)
1090
1197
  >>> pad_num = 8
1091
1198
  >>> output = ops.UniqueWithPad()(x, pad_num)
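Per the Outputs description, `y` keeps the input shape by filling with `pad_num`, and `idx` maps each input element into `y`; the expected values are:
>>> y, idx = output
>>> print(y)
[1 2 3 4 5 8 8 8]
>>> print(idx)
[0 0 1 1 2 2 3 4]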
@@ -1107,20 +1214,24 @@ class Split(Primitive):
1107
1214
  Refer to :func:`mindspore.ops.split` for more details.
1108
1215
 
1109
1216
  Args:
1110
- axis (int): Index of the split position. Default: 0.
1111
- output_num (int): The number of output tensors. Must be positive int. Default: 1.
1217
+ axis (int): Index of the split position. Default: ``0`` .
1218
+ output_num (int): The number of output tensors. Must be positive int. Default: ``1`` .
1112
1219
 
1113
1220
  Inputs:
1114
- - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
1221
+ - **input_x** (Tensor) - The shape of tensor is :math:`(x_0, x_1, ..., x_{R-1})`, R >= 1.
1115
1222
 
1116
1223
  Outputs:
1117
1224
  tuple[Tensor], the shape of each output tensor is the same, which is
1118
- :math:`(y_1, y_2, ..., y_S)`. And the data type is the same with `input_x`.
1225
+ :math:`(x_0, x_1, ..., x_{axis}/output\_num, ..., x_{R-1})`.
1226
+ And the data type is the same as `input_x`.
1119
1227
 
1120
1228
  Supported Platforms:
1121
1229
  ``Ascend`` ``GPU`` ``CPU``
1122
1230
 
1123
1231
  Examples:
1232
+ >>> import mindspore
1233
+ >>> import numpy as np
1234
+ >>> from mindspore import Tensor, ops
1124
1235
  >>> split = ops.Split(1, 2)
1125
1236
  >>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32)
1126
1237
  >>> print(x)
@@ -1168,6 +1279,9 @@ class Rank(Primitive):
1168
1279
  ``Ascend`` ``GPU`` ``CPU``
1169
1280
 
1170
1281
  Examples:
1282
+ >>> import mindspore
1283
+ >>> import numpy as np
1284
+ >>> from mindspore import Tensor, ops
1171
1285
  >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
1172
1286
  >>> rank = ops.Rank()
1173
1287
  >>> output = rank(input_tensor)
@@ -1194,10 +1308,21 @@ class Size(Primitive):
1194
1308
 
1195
1309
  Refer to :func:`mindspore.ops.size` for more details.
1196
1310
 
1311
+ Inputs:
1312
+ - **input_x** (Tensor) - Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
1313
+ `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
1314
+
1315
+ Outputs:
1316
+ int. A scalar representing the number of elements contained in `input_x`,
1317
+ that is, :math:`size=x_1*x_2*...*x_R`. The data type is an int.
1318
+
1197
1319
  Supported Platforms:
1198
1320
  ``Ascend`` ``GPU`` ``CPU``
1199
1321
 
1200
1322
  Examples:
1323
+ >>> import mindspore
1324
+ >>> import numpy as np
1325
+ >>> from mindspore import Tensor, ops
1201
1326
  >>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
1202
1327
  >>> size = ops.Size()
1203
1328
  >>> output = size(input_x)
@@ -1211,7 +1336,7 @@ class Size(Primitive):
1211
1336
 
1212
1337
 
1213
1338
  class MatrixDiagV3(Primitive):
1214
- """
1339
+ r"""
1215
1340
  Constructs a diagonal matrix or a batch of diagonal matrices from a given input Tensor.
1216
1341
 
1217
1342
  .. warning::
@@ -1219,10 +1344,58 @@ class MatrixDiagV3(Primitive):
1219
1344
 
1220
1345
  Refer to :func:`mindspore.ops.matrix_diag` for more details.
1221
1346
 
1347
+ Args:
1348
+ align (str, optional): specifies how superdiagonals and subdiagonals should be aligned.
1349
+ Supported values: ``"RIGHT_LEFT"`` , ``"LEFT_RIGHT"`` , ``"LEFT_LEFT"`` , ``"RIGHT_RIGHT"`` .
1350
+ Default: ``"RIGHT_LEFT"`` .
1351
+
1352
+ - When set to ``"RIGHT_LEFT"`` , the alignment of superdiagonals will be towards the right side
1353
+ (padding the row on the left), while subdiagonals will be towards the left side
1354
+ (padding the row on the right)
1355
+ - When set to ``"LEFT_RIGHT"`` , the alignment of superdiagonals will be towards the left side
1356
+ (padding the row on the right), while subdiagonals will be towards the right side
1357
+ (padding the row on the left)
1358
+ - When set to ``"LEFT_LEFT"`` , the alignment of both superdiagonals and subdiagonals will be towards
1359
+ the left side (padding the row on the right).
1360
+ - When set to ``"RIGHT_RIGHT"`` , the alignment of both superdiagonals and subdiagonals will be towards
1361
+ the right side (padding the row on the left).
1362
+
1363
+ Inputs:
1364
+ - **x** (Tensor) - The diagonal Tensor.
1365
+ - **k** (Union[int, Tensor], optional) - Diagonal offsets.
1366
+ A Tensor of type int32. Positive value means superdiagonal,
1367
+ 0 refers to the main diagonal, and negative value means subdiagonals. `k` can be a single integer
1368
+ (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band.
1369
+ k[0] must not be larger than k[1]. The value must be in the range of the given or derived `num_rows`
1370
+ and `num_cols`, meaning value of k must be in (-num_rows, num_cols). Default: ``0`` .
1371
+ - **num_rows** (Union[int, Tensor], optional) - The number of rows of the output Tensor.
1372
+ A Tensor of type int32 with only one value. If `num_rows` is -1, indicating that the innermost
1373
+ matrix of the output Tensor is a square
1374
+ matrix, and the real number of rows will be derived from other inputs. That is
1375
+ :math:`num\_rows = x.shape[-1] - min(k[1], 0)`. Otherwise, the value must be equal to or greater than
1376
+ :math:`x.shape[-1] - min(k[1], 0)`. Default: ``-1`` .
1377
+ - **num_cols** (Union[int, Tensor], optional) - The number of columns of
1378
+ the output Tensor. A Tensor of type int32 with only one value.
1379
+ If `num_cols` is -1, indicating that the innermost matrix of the output
1380
+ Tensor is a square matrix, and the real number of columns will be derived from other inputs.
1381
+ That is :math:`num\_cols = x.shape[-1] + max(k[0], 0)`. Otherwise, the value must be equal to or
1382
+ greater than :math:`x.shape[-1] - min(k[1], 0)`. Default: ``-1`` .
1383
+ - **padding_value** (Union[int, float, Tensor], optional) - The number to fill the area outside the specified
1384
+ diagonal band. A Tensor with only one value, which must have the same dtype as `x`. Default: ``0`` .
1385
+
1386
+ Outputs:
1387
+ A Tensor. Has the same type as `x`.
1388
+ Suppose `x` has r dimensions with shape :math:`(I, J, ..., M, N)` . The output Tensor has rank r + 1 with shape
1389
+ :math:`(I, J, ..., M, num\_rows, num\_cols)` when only one diagonal is given (k is an integer or k[0] == k[1]).
1390
+ Otherwise, it has rank r with shape :math:`(I, J, ..., num\_rows, num\_cols)` .
1391
+
1222
1392
  Supported Platforms:
1223
1393
  ``Ascend`` ``GPU`` ``CPU``
1224
1394
 
1225
1395
  Examples:
1396
+ >>> import mindspore
1397
+ >>> import numpy as np
1398
+ >>> from mindspore import Tensor, ops
1226
1399
  >>> x = Tensor(np.array([[8, 9, 0],
1227
1400
  ... [1, 2, 3],
1228
1401
  ... [0, 4, 5]]), mindspore.float32)
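A minimal sketch of the simplest case, building the main diagonal (k = 0) with a square output inferred (num_rows = num_cols = -1); the five documented inputs are assumed to be passed positionally, and the expected result follows from the semantics above:
>>> diag = Tensor([1., 2., 3.], mindspore.float32)
>>> k = Tensor(0, mindspore.int32)
>>> out = ops.MatrixDiagV3()(diag, k, Tensor(-1, mindspore.int32), Tensor(-1, mindspore.int32), Tensor(0., mindspore.float32))
>>> print(out)
[[1. 0. 0.]
 [0. 2. 0.]
 [0. 0. 3.]]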
@@ -1257,10 +1430,46 @@ class MatrixDiagPartV3(Primitive):
1257
1430
 
1258
1431
  Refer to :func:`mindspore.ops.matrix_diag_part` for more details.
1259
1432
 
1433
+ Args:
1434
+ align (str, optional): specifies how superdiagonals and subdiagonals should be aligned.
1435
+ Supported values: ``"RIGHT_LEFT"`` , ``"LEFT_RIGHT"`` , ``"LEFT_LEFT"`` , ``"RIGHT_RIGHT"`` .
1436
+ Default: ``"RIGHT_LEFT"`` .
1437
+
1438
+ - When set to ``"RIGHT_LEFT"`` , the alignment of superdiagonals will be towards the right side
1439
+ (padding the row on the left), while subdiagonals will be towards the left side
1440
+ (padding the row on the right)
1441
+ - When set to ``"LEFT_RIGHT"`` , the alignment of superdiagonals will be towards the left side
1442
+ (padding the row on the right), while subdiagonals will be towards the right side
1443
+ (padding the row on the left)
1444
+ - When set to ``"LEFT_LEFT"`` , the alignment of both superdiagonals and subdiagonals will be towards
1445
+ the left side (padding the row on the right).
1446
+ - When set to ``"RIGHT_RIGHT"`` , the alignment of both superdiagonals and subdiagonals will be towards
1447
+ the right side (padding the row on the left).
1448
+
1449
+ Inputs:
1450
+ - **x** (Tensor) - Rank r, where r >= 2.
1451
+ - **k** (Tensor) - A Tensor of type int32. Diagonal offset(s). Positive value means superdiagonal, 0 refers to
1452
+ the main diagonal, and negative value means subdiagonals. k can be a single integer (for a single diagonal) or
1453
+ a pair of integers specifying the low and high ends of a matrix band. k[0] must not be larger than k[1]. The
1454
+ value of k is restricted, meaning the value of k must be in (-x.shape[-2], x.shape[-1]).
1455
+ - **padding_value** (Tensor) - A Tensor with the same dtype as `x`. The number to fill the area outside the
1456
+ specified diagonal band with. There must be only one value.
1457
+
1458
+ Outputs:
1459
+ A Tensor. Has the same type as `x`.
1460
+ Assume `x` has r dimensions :math:`(I, J, ..., M, N)` . Let `max_diag_len` be the maximum length among all
1461
+ diagonals to be extracted, :math:`max\_diag\_len = min(M + min(k[1], 0), N + min(-k[0], 0))` .
1462
+ Let `num_diags` be the number of diagonals to extract, :math:`num\_diags = k[1] - k[0] + 1`.
1463
+ If :math:`num\_diags == 1`, the output tensor is of rank r - 1 with shape :math:`(I, J, ..., L, max\_diag\_len)` .
1464
+ Otherwise, the output tensor has rank r with dimensions :math:`(I, J, ..., L, num\_diags, max\_diag\_len)` .
1465
+
1260
1466
  Supported Platforms:
1261
1467
  ``Ascend`` ``GPU`` ``CPU``
1262
1468
 
1263
1469
  Examples:
1470
+ >>> import mindspore
1471
+ >>> import numpy as np
1472
+ >>> from mindspore import Tensor, ops
1264
1473
  >>> x = Tensor(np.array([[1, 2, 3, 4],
1265
1474
  ... [5, 6, 7, 8],
1266
1475
  ... [9, 8, 7, 6]]), mindspore.float32)
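A minimal sketch extracting just the main diagonal (k = 0) of the matrix above; per the documented semantics the expected result is:
>>> part = ops.MatrixDiagPartV3()
>>> out = part(x, Tensor(0, mindspore.int32), Tensor(0., mindspore.float32))
>>> print(out)
[1. 6. 7.]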
@@ -1310,18 +1519,18 @@ class MatrixSetDiagV3(Primitive):
1310
1519
 
1311
1520
  Args:
1312
1521
  align (str, optional): specifies how superdiagonals and subdiagonals should be aligned.
1313
- Supported values:"RIGHT_LEFT", "LEFT_RIGHT", "LEFT_LEFT", "RIGHT_RIGHT".
1314
- Default: "RIGHT_LEFT".
1522
+ Supported values: ``"RIGHT_LEFT"`` , ``"LEFT_RIGHT"`` , ``"LEFT_LEFT"`` , ``"RIGHT_RIGHT"`` .
1523
+ Default: ``"RIGHT_LEFT"`` .
1315
1524
 
1316
- - When set to "RIGHT_LEFT", the alignment of superdiagonals will be towards the right side
1525
+ - When set to ``"RIGHT_LEFT"`` , the alignment of superdiagonals will be towards the right side
1317
1526
  (padding the row on the left), while subdiagonals will be towards the left side
1318
1527
  (padding the row on the right)
1319
- - When set to "LEFT_RIGHT", the alignment of superdiagonals will be towards the left side
1528
+ - When set to ``"LEFT_RIGHT"`` , the alignment of superdiagonals will be towards the left side
1320
1529
  (padding the row on the right), while subdiagonals will be towards the right side
1321
1530
  (padding the row on the left)
1322
- - When set to "LEFT_LEFT", the alignment of both superdiagonals and subdiagonals will be towards
1531
+ - When set to ``"LEFT_LEFT"`` , the alignment of both superdiagonals and subdiagonals will be towards
1323
1532
  the left side (padding the row on the right).
1324
- - When set to "RIGHT_RIGHT", the alignment of both superdiagonals and subdiagonals will be towards
1533
+ - When set to ``"RIGHT_RIGHT"`` , the alignment of both superdiagonals and subdiagonals will be towards
1325
1534
  the right side (padding the row on the left).
1326
1535
 
1327
1536
  Inputs:
@@ -1355,15 +1564,19 @@ class MatrixSetDiagV3(Primitive):
1355
1564
  ValueError: If `k[1]` is not greater than or equal to `k[0]` in case the size of `k` is 2.
1356
1565
  ValueError: If the `diagonal` rank size does not match the input `x` rank size.
1357
1566
  ValueError: If the `diagonal` shape value does not match the input `x` shape value.
1358
- ValueError: If the diagonal :math:`shape[-2]` is not equal to num_diags calculated by :math:`k[1] - k[0] + 1` .
1567
+ ValueError: If the diagonal :math:`shape[-2]` is not equal to num_diags calculated by
1568
+ :math:`num\_diags = k[1] - k[0] + 1` .
1359
1569
  ValueError: If the value of `k` is not in :math:`(-x.shape[-2], x.shape[-1])`.
1360
1570
  ValueError: If the diagonal :math:`shape[-1]` is not equal to the max_diag_len calculated by
1361
- :math:`min(x.shape[-2] + min(k[1], 0), x.shape[-1] + min(-k[0], 0))` .
1571
+ :math:`max\_diag\_len = min(x.shape[-2] + min(k[1], 0), x.shape[-1] + min(-k[0], 0))` .
1362
1572
 
1363
1573
  Supported Platforms:
1364
1574
  ``Ascend`` ``GPU`` ``CPU``
1365
1575
 
1366
1576
  Examples:
1577
+ >>> import mindspore
1578
+ >>> import numpy as np
1579
+ >>> from mindspore import Tensor, ops
1367
1580
  >>> x = Tensor(np.array([[7, 7, 7, 7],
1368
1581
  ... [7, 7, 7, 7],
1369
1582
  ... [7, 7, 7, 7]]), mindspore.float32)
@@ -1403,10 +1616,25 @@ class MatrixBandPart(Primitive):
1403
1616
 
1404
1617
  Refer to :func:`mindspore.ops.matrix_band_part` for more details.
1405
1618
 
1406
- Supported Platforms:
1619
+ .. warning::
1620
+ This is an experimental API that is subject to change or deletion.
1621
+
1622
+ Inputs:
1623
+ - **x** (Tensor) - Input tensor. :math:`(*, m, n)` where :math:`*` means any number of additional dimensions.
1624
+ - **lower** (Union[int, Tensor]) - Number of subdiagonals to keep. The data type must be int32 or int64.
1625
+ If negative, keep entire lower triangle.
1626
+ - **upper** (Union[int, Tensor]) - Number of superdiagonals to keep. The data type must be int32 or int64.
1627
+ If negative, keep entire upper triangle.
1407
1628
 
1629
+ Outputs:
1630
+ Tensor, has the same type and shape as `x`.
1631
+
1632
+ Supported Platforms:
1633
+ ``Ascend`` ``GPU`` ``CPU``
1408
1634
 
1409
1635
  Examples:
1636
+ >>> import numpy as np
1637
+ >>> from mindspore import Tensor, ops
1410
1638
  >>> matrix_band_part = ops.MatrixBandPart()
1411
1639
  >>> x = np.ones([2, 4, 4]).astype(np.float32)
1412
1640
  >>> output = matrix_band_part(Tensor(x), 2, 1)
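Per the documented semantics (keep 2 subdiagonals and 1 superdiagonal), the expected result for each 4x4 slice is:
>>> print(output[0])
[[1. 1. 0. 0.]
 [1. 1. 1. 0.]
 [1. 1. 1. 1.]
 [0. 1. 1. 1.]]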
@@ -1455,7 +1683,7 @@ class Fill(PrimitiveWithCheck):
1455
1683
  if isinstance(x, Tensor):
1456
1684
  x = x.asnumpy()
1457
1685
  ret = np.full(dims, x, x_nptype)
1458
- return Tensor(ret)
1686
+ return Tensor(ret, dtype=dtype)
1459
1687
 
1460
1688
  def infer_value(self, dtype, dims, x):
1461
1689
  x_nptype = mstype.dtype_to_nptype(dtype)
@@ -1465,7 +1693,7 @@ class Fill(PrimitiveWithCheck):
1465
1693
  if isinstance(x, Tensor):
1466
1694
  x = x.asnumpy()
1467
1695
  ret = np.full(dims, x, x_nptype)
1468
- return Tensor(ret)
1696
+ return Tensor(ret, dtype=dtype)
1469
1697
  return None
1470
1698
 
1471
1699
 
@@ -1515,6 +1743,8 @@ class FillV2(PrimitiveWithCheck):
1515
1743
  ``Ascend`` ``GPU`` ``CPU``
1516
1744
 
1517
1745
  Examples:
1746
+ >>> import mindspore
1747
+ >>> from mindspore import Tensor, ops
1518
1748
  >>> fillV2 = ops.FillV2()
1519
1749
  >>> output = fillV2(Tensor([2, 3], mindspore.int32), Tensor(1, mindspore.float32))
1520
1750
  >>> print(output)
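The expected result, a (2, 3) tensor filled with the scalar value:
[[1. 1. 1.]
 [1. 1. 1.]]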
@@ -1532,15 +1762,19 @@ class FillV2(PrimitiveWithCheck):
1532
1762
  """Initialize FillV2"""
1533
1763
  self.init_prim_io_names(inputs=['shape', 'value'], outputs=['y'])
1534
1764
 
1765
+ def check_elim(self, dims, x):
1766
+ if x is None or (not isinstance(x, (Tensor, Tensor_))) or (x.shape != ()) or\
1767
+ dims is None or (isinstance(dims, (tuple, list)) and dims) or\
1768
+ isinstance(dims, (Tensor, Tensor_)):
1769
+ return (False, None)
1770
+ return (True, x)
1771
+
1535
1772
  def infer_value(self, dims, x):
1536
- if isinstance(dims, (Tensor, Tensor_)):
1537
- dims = dims.asnumpy()
1538
- if isinstance(x, (Tensor, Tensor_)):
1539
- x = x.asnumpy()
1540
- if dims is not None and None not in dims and x is not None:
1541
- ret = np.full(dims, x)
1542
- return Tensor(ret)
1543
- return None
1773
+ if x is None or dims is None or\
1774
+ (isinstance(dims, (tuple, list)) and dims) or\
1775
+ isinstance(dims, (Tensor, Tensor_)):
1776
+ return None
1777
+ return x
1544
1778
 
1545
1779
 
1546
1780
  class Ones(Primitive):
@@ -1560,6 +1794,8 @@ class Ones(Primitive):
1560
1794
  ``Ascend`` ``GPU`` ``CPU``
1561
1795
 
1562
1796
  Examples:
1797
+ >>> import mindspore
1798
+ >>> from mindspore import ops
1563
1799
  >>> ones = ops.Ones()
1564
1800
  >>> output = ones((2, 2), mindspore.float32)
1565
1801
  >>> print(output)
@@ -1601,6 +1837,8 @@ class Zeros(Primitive):
1601
1837
  Deprecated
1602
1838
 
1603
1839
  Examples:
1840
+ >>> import mindspore
1841
+ >>> from mindspore import ops
1604
1842
  >>> zeros = ops.Zeros()
1605
1843
  >>> output = zeros((2, 2), mindspore.float32)
1606
1844
  >>> print(output)
@@ -1620,10 +1858,18 @@ class OnesLike(Primitive):
1620
1858
 
1621
1859
  Refer to :func:`mindspore.ops.ones_like` for more details.
1622
1860
 
1861
+ Inputs:
1862
+ - **input_x** (Tensor) - Tensor of any dimension.
1863
+
1864
+ Outputs:
1865
+ Tensor, has the same shape and type as `input_x` but filled with ones.
1866
+
1623
1867
  Supported Platforms:
1624
1868
  ``Ascend`` ``GPU`` ``CPU``
1625
1869
 
1626
1870
  Examples:
1871
+ >>> import numpy as np
1872
+ >>> from mindspore import Tensor, ops
1627
1873
  >>> oneslike = ops.OnesLike()
1628
1874
  >>> input_x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
1629
1875
  >>> output = oneslike(input_x)
@@ -1643,7 +1889,7 @@ class ZerosLike(Primitive):
1643
1889
  Returns a Tensor with a value of 0 and its shape and data type is the same as the input.
1644
1890
 
1645
1891
  Inputs:
1646
- - **input_x** (Tensor) - Input Tensor of any dimension. The data type is Number.
1892
+ - **input_x** (Tensor) - Input Tensor of any dimension.
1647
1893
 
1648
1894
  Outputs:
1649
1895
  Tensor, has the same shape and data type as `input_x` but filled with zeros.
@@ -1655,6 +1901,8 @@ class ZerosLike(Primitive):
1655
1901
  ``Ascend`` ``GPU`` ``CPU``
1656
1902
 
1657
1903
  Examples:
1904
+ >>> import numpy as np
1905
+ >>> from mindspore import Tensor, ops
1658
1906
  >>> zeroslike = ops.ZerosLike()
1659
1907
  >>> input_x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
1660
1908
  >>> output = zeroslike(input_x)
@@ -1675,10 +1923,18 @@ class TupleToArray(PrimitiveWithInfer):
1675
1923
 
1676
1924
  Refer to :func:`mindspore.ops.tuple_to_array` for more details.
1677
1925
 
1926
+ Inputs:
1927
+ - **input_x** (tuple) - A tuple of numbers. These numbers have the same type.
1928
+ The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.
1929
+
1930
+ Outputs:
1931
+ Tensor, if the input tuple contains `N` numbers, then the shape of the output tensor is :math:`(N,)`.
1932
+
1678
1933
  Supported Platforms:
1679
1934
  ``Ascend`` ``GPU`` ``CPU``
1680
1935
 
1681
1936
  Examples:
1937
+ >>> from mindspore import ops
1682
1938
  >>> input_x = (1,2,3)
1683
1939
  >>> print(type(input_x))
1684
1940
  <class 'tuple'>
@@ -1707,7 +1963,8 @@ class TupleToArray(PrimitiveWithInfer):
1707
1963
  ret = np.array(x, np.float32)
1708
1964
  return Tensor(ret)
1709
1965
 
1710
- def __call__(self, x):
1966
+ def __call__(self, *args):
1967
+ x, = args
1711
1968
  args = list()
1712
1969
  if isinstance(x, range):
1713
1970
  args.append(tuple(x))
@@ -1716,34 +1973,26 @@ class TupleToArray(PrimitiveWithInfer):
1716
1973
  return _run_op(self, self.name, args)
1717
1974
 
1718
1975
 
1719
- class ScalarToArray(PrimitiveWithInfer):
1720
- """
1721
- The `ScalarToArray` primitive is deprecated. Please use the :class:`mindspore.ops.ScalarToTensor` instead.
1722
- """
1723
- @deprecated("2.0", "ops.scalar_to_tensor", False)
1724
- @prim_attr_register
1725
- def __init__(self):
1726
- pass
1727
-
1728
- def infer_value(self, x):
1729
- validator.check_value_type("x", x, [int, float], self.name)
1730
- if isinstance(x, int):
1731
- ret = np.array(x, np.int32)
1732
- else:
1733
- ret = np.array(x, np.float32)
1734
- return Tensor(ret)
1735
-
1736
-
1737
1976
  class ScalarToTensor(PrimitiveWithInfer):
1738
1977
  """
1739
1978
  Converts a scalar to a `Tensor`, and converts the data type to the specified type.
1740
1979
 
1741
1980
  Refer to :func:`mindspore.ops.scalar_to_tensor` for more details.
1742
1981
 
1982
+ Inputs:
1983
+ - **input_x** (Union[int, float]) - The input is a scalar. Only constant value is allowed.
1984
+ - **dtype** (mindspore.dtype) - The target data type. Default: ``mindspore.float32`` . Only
1985
+ constant value is allowed.
1986
+
1987
+ Outputs:
1988
+ Tensor. 0-D Tensor and the content is the input.
1989
+
1743
1990
  Supported Platforms:
1744
1991
  ``Ascend`` ``GPU`` ``CPU``
1745
1992
 
1746
1993
  Examples:
1994
+ >>> import mindspore
1995
+ >>> from mindspore import ops
1747
1996
  >>> op = ops.ScalarToTensor()
1748
1997
  >>> data = 1
1749
1998
  >>> output = op(data, mindspore.float32)
@@ -1759,7 +2008,7 @@ class ScalarToTensor(PrimitiveWithInfer):
1759
2008
  validator.check_value_type("x", x, [bool, int, float], self.name)
1760
2009
  validator.check_subclass("dtype", dtype, mstype.number, self.name)
1761
2010
  data_type = mstype.dtype_to_nptype(dtype)
1762
- return Tensor(np.array(x, data_type))
2011
+ return Tensor(np.array(x, data_type), dtype=dtype)
1763
2012
 
1764
2013
 
1765
2014
  class InvertPermutation(PrimitiveWithInfer):
@@ -1794,6 +2043,7 @@ class InvertPermutation(PrimitiveWithInfer):
1794
2043
  ``Ascend`` ``GPU`` ``CPU``
1795
2044
 
1796
2045
  Examples:
2046
+ >>> from mindspore import ops
1797
2047
  >>> invert = ops.InvertPermutation()
1798
2048
  >>> input_data = (3, 4, 0, 2, 1)
1799
2049
  >>> output = invert(input_data)
@@ -1804,12 +2054,11 @@ class InvertPermutation(PrimitiveWithInfer):
1804
2054
  @prim_attr_register
1805
2055
  def __init__(self):
1806
2056
  """Initialize InvertPermutation"""
1807
- self.set_const_prim(True)
1808
2057
 
1809
2058
  def __infer__(self, x):
1810
2059
  x_shp = x['shape']
1811
2060
  x_value = x['value']
1812
- if mstype._issubclass_(x['dtype'], mstype.tensor): # pylint: disable=W0212
2061
+ if mstype._issubclass_(x['dtype'], mstype.tensor_type): # pylint: disable=W0212
1813
2062
  raise ValueError(f"For \'{self.name}\', the value of 'input_x' must be non-Tensor, but got {x['dtype']}")
1814
2063
  if x_value is None:
1815
2064
  raise ValueError(f"For '{self.name}', the value of 'input_x' can not be None, but got {x_value}.")
@@ -1843,22 +2092,18 @@ class InvertPermutation(PrimitiveWithInfer):
1843
2092
 
1844
2093
  class Argmax(Primitive):
1845
2094
  """
1846
- Returns the indices of the maximum value of a tensor across the axis.
2095
+ Returns the indices of the maximum value along a specified `axis` of a Tensor.
1847
2096
 
1848
2097
  Refer to :func:`mindspore.ops.argmax` for more details.
1849
2098
 
1850
2099
  Args:
1851
- axis (int): Axis where the Argmax operation applies to. Default: -1.
1852
- output_type (:class:`mindspore.dtype`): An optional data type of `mindspore.dtype.int32`.
1853
- Default: `mindspore.dtype.int32`.
2100
+ axis (int): Axis where the Argmax operation applies to. Default: ``-1`` .
2101
+ output_type (:class:`mindspore.dtype`): Output data type.
2102
+ Supported types: ``mstype.int32`` , ``mstype.int64`` . Default: ``mstype.int32`` .
1854
2103
 
1855
2104
  Inputs:
1856
- - **input_x** (Tensor) - Input tensor. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
1857
- Support data type list as follows:
1858
-
1859
- - Ascend: Float16, Float32.
1860
- - GPU: Float16, Float32.
1861
- - CPU: Float16, Float32, Float64.
2105
+ - **input_x** (Tensor) - The input tensor. :math:`(N, *)` where :math:`*` means any number of additional
2106
+ dimensions.
1862
2107
 
1863
2108
  Outputs:
1864
2109
  Tensor, indices of the max value of input tensor across the axis.
@@ -1867,6 +2112,9 @@ class Argmax(Primitive):
1867
2112
  ``Ascend`` ``GPU`` ``CPU``
1868
2113
 
1869
2114
  Examples:
2115
+ >>> import mindspore
2116
+ >>> import numpy as np
2117
+ >>> from mindspore import Tensor, ops
1870
2118
  >>> input_x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
1871
2119
  >>> output = ops.Argmax(output_type=mindspore.int32)(input_x)
1872
2120
  >>> print(output)
@@ -1885,22 +2133,20 @@ class Argmax(Primitive):
1885
2133
 
1886
2134
  class Argmin(Primitive):
1887
2135
  """
1888
- Returns the indices of the minimum value of a tensor across the axis.
2136
+ Returns the indices of the minimum value along a specified `axis` of a Tensor.
1889
2137
 
1890
2138
  If the shape of input tensor is :math:`(x_1, ..., x_N)`, the shape of the output tensor is
1891
2139
  :math:`(x_1, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
1892
2140
 
1893
2141
  Args:
1894
- axis (int): Axis where the Argmin operation applies to. Default: -1.
1895
- output_type (:class:`mindspore.dtype`): An optional data type of `mindspore.dtype.int32` and
1896
- `mindspore.dtype.int64`. Default: `mindspore.dtype.int32`.
2142
+ axis (int): Axis where the Argmin operation applies to. Default: ``-1`` .
2143
+ output_type (:class:`mindspore.dtype`): Output data type.
2144
+ Supported types: ``mstype.int32`` , ``mstype.int64`` . Default: ``mstype.int32`` .
1897
2145
 
1898
2146
  Inputs:
1899
2147
  - **input_x** (Tensor) - Input tensor.
1900
2148
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
1901
2149
 
1902
- - Ascend: Float16, Float32, Float64, Int8, Int16, Int32, Int64, UInt8, UInt16, UInt32, UInt64.
1903
-
1904
2150
  Outputs:
1905
2151
  Tensor, whose dtype is determined by `output_type`.
1906
2152
 
@@ -1912,6 +2158,9 @@ class Argmin(Primitive):
1912
2158
  ``Ascend`` ``GPU`` ``CPU``
1913
2159
 
1914
2160
  Examples:
2161
+ >>> import mindspore
2162
+ >>> import numpy as np
2163
+ >>> from mindspore import Tensor, ops
1915
2164
  >>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)
1916
2165
  >>> index = ops.Argmin()(input_x)
1917
2166
  >>> print(index)
@@ -1942,7 +2191,7 @@ class ArgminV2(Primitive):
1942
2191
  Inputs:
1943
2192
  - **x** (Tensor) - Input tensor.
1944
2193
  The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
1945
- - **axis** (int) - Axis where the Argmin operator applies to. Default: -1.
2194
+ - **axis** (int) - Axis where the Argmin operator applies to. Default: ``-1`` .
1946
2195
 
1947
2196
  Outputs:
1948
2197
  Tensor, indices of the min value of input tensor across the axis.
@@ -1999,12 +2248,12 @@ class ArgMaxWithValue(Primitive):
1999
2248
  - If there are multiple maximum values, the index of the first maximum value is used.
2000
2249
  - The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "x".
2001
2250
 
2002
- Also see: func: `mindspore.ops.max`.
2251
+ Also see :func:`mindspore.ops.max`.
2003
2252
 
2004
2253
  Args:
2005
- axis (int): The dimension to reduce. Default: 0.
2006
- keep_dims (bool): Whether to reduce dimension, if true, the output will keep same dimension with the input,
2007
- the output will reduce dimension if false. Default: False.
2254
+ axis (int): The dimension to reduce. Default: ``0`` .
2255
+ keep_dims (bool): Whether to reduce dimension, if ``True`` , the output will keep the same dimension as the
2256
+ input, the output will reduce dimension if ``False`` . Default: ``False`` .
2008
2257
 
2009
2258
  Inputs:
2010
2259
  - **x** (Tensor) - The input tensor, can be any dimension. Set the shape of input tensor as
@@ -2015,7 +2264,7 @@ class ArgMaxWithValue(Primitive):
2015
2264
  tensor.
2016
2265
 
2017
2266
  - **index** (Tensor) - The index for the maximum value of the input tensor, with dtype int32. If `keep_dims`
2018
- is true, the shape of output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`.
2267
+ is ``True`` , the shape of output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`.
2019
2268
  Otherwise, the shape is :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
2020
2269
  - **values** (Tensor) - The maximum value of input tensor, with the same shape as index, and same dtype as x.
2021
2270
 
@@ -2028,6 +2277,9 @@ class ArgMaxWithValue(Primitive):
2028
2277
  ``Ascend`` ``GPU`` ``CPU``
2029
2278
 
2030
2279
  Examples:
2280
+ >>> import mindspore
2281
+ >>> import numpy as np
2282
+ >>> from mindspore import Tensor, ops
2031
2283
  >>> input_x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
2032
2284
  >>> index, output = ops.ArgMaxWithValue()(input_x)
2033
2285
  >>> print(index, output)
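Given the input above, the maximum 0.7 sits at index 3, so the expected doctest output is:
3 0.7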
@@ -2060,12 +2312,12 @@ class ArgMinWithValue(Primitive):
2060
2312
  - If there are multiple minimum values, the index of the first minimum value is used.
2061
2313
  - The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "x".
2062
2314
 
2063
- Also see: func: `mindspore.ops.min`.
2315
+ Also see :func:`mindspore.ops.min`.
2064
2316
 
2065
2317
  Args:
2066
- axis (int): The dimension to reduce. Default: 0.
2067
- keep_dims (bool): Whether to reduce dimension, if true the output will keep the same dimension as the input,
2068
- the output will reduce dimension if false. Default: False.
2318
+ axis (int): The dimension to reduce. Default: ``0`` .
2319
+ keep_dims (bool): Whether to reduce dimension, if ``True`` the output will keep the same dimension as the
2320
+ input, the output will reduce dimension if ``False`` . Default: ``False`` .
2069
2321
 
2070
2322
  Inputs:
2071
2323
  - **x** (Tensor) - The input tensor, can be any dimension. Set the shape of input tensor as
@@ -2076,7 +2328,7 @@ class ArgMinWithValue(Primitive):
2076
2328
  tensor.
2077
2329
 
2078
2330
  - **index** (Tensor) - The index for the minimum value of the input tensor, with dtype int32. If `keep_dims`
2079
- is true, the shape of output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`.
2331
+ is ``True`` , the shape of output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`.
2080
2332
  Otherwise, the shape is :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
2081
2333
  - **values** (Tensor) - The minimum value of input tensor, with the same
2082
2334
  shape as `index`, and same dtype as `x`.
@@ -2090,6 +2342,9 @@ class ArgMinWithValue(Primitive):
2090
2342
  ``Ascend`` ``GPU`` ``CPU``
2091
2343
 
2092
2344
  Examples:
2345
+ >>> import mindspore
2346
+ >>> import numpy as np
2347
+ >>> from mindspore import Tensor, ops
2093
2348
  >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
2094
2349
  >>> index, output = ops.ArgMinWithValue()(x)
2095
2350
  >>> print(index, output)
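Given the input above, the minimum 0.0 sits at index 0, so the expected doctest output is:
0 0.0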
@@ -2116,10 +2371,32 @@ class Tile(PrimitiveWithInfer):
2116
2371
 
2117
2372
  Refer to :func:`mindspore.ops.tile` for more details.
2118
2373
 
2374
+ Inputs:
2375
+ - **input_x** (Tensor) - 1-D or higher dimensional Tensor. Set the shape of input tensor as
2376
+ :math:`(x_1, x_2, ..., x_S)` .
2377
+ - **multiples** (tuple[int]) - The parameter that specifies the number of replications,
2378
+ the parameter type is tuple, and the data type is int, i.e., :math:`(y_1, y_2, ..., y_S)`.
2379
+ The length of `multiples` cannot be smaller than the length of the shape of `input_x`.
2380
+ Only constant value is allowed.
2381
+
2382
+ Outputs:
2383
+ Tensor, has the same data type as the `input_x`. Suppose the length of `multiples` is `d`,
2384
+ the dimension of `input_x` is `input_x.dim`, and the shape of `input_x` is :math:`(x_1, x_2, ..., x_S)`.
2385
+
2386
+ - If `input_x.dim = d`, then the shape of their corresponding positions can be multiplied, and
2387
+ the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_S)`.
2388
+ - If `input_x.dim < d`, prepend 1s to the shape of `input_x` until their
2389
+ lengths are consistent. For example, set the shape of `input_x` to :math:`(1, ..., x_1, x_2, ..., x_S)`,
2390
+ then the shapes at corresponding positions can be multiplied, and the shape of the output is
2391
+ :math:`(1*y_1, ..., x_R*y_R, x_S*y_S)`.
2392
+
2119
2393
  Supported Platforms:
2120
2394
  ``Ascend`` ``GPU`` ``CPU``
2121
2395
 
2122
2396
  Examples:
2397
+ >>> import mindspore
2398
+ >>> import numpy as np
2399
+ >>> from mindspore import Tensor, ops
2123
2400
  >>> tile = ops.Tile()
2124
2401
  >>> input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
2125
2402
  >>> multiples = (2, 3)
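A hedged continuation: with `multiples` (2, 3), each dimension of the (2, 2) input is multiplied accordingly:
>>> output = tile(input_x, multiples)
>>> print(output.shape)
(4, 6)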
@@ -2151,7 +2428,10 @@ class Tile(PrimitiveWithInfer):
2151
2428
  """Initialize Tile"""
2152
2429
  self.init_prim_io_names(inputs=['x', 'multiples'], outputs=['output'])
2153
2430
 
2154
- def check_elim(self, base_tensor, multiplier):
2431
+ def check_elim(self, *args):
2432
+ base_tensor, multiplier = args
2433
+ if PackFunc.is_tracing() and not PackFunc.current.is_pynative_mode:
2434
+ return (False, None)
2155
2435
  if not isinstance(base_tensor, Tensor):
2156
2436
  raise TypeError(f"For '{self.name}', the type of 'input_x' must be Tensor, "
2157
2437
  f"but got {type(base_tensor).__name__}.")
@@ -2206,6 +2486,8 @@ class Tile(PrimitiveWithInfer):
2206
2486
  raise ValueError(f'For \'{self.name}\', the dim of multiples must be 1.')
2207
2487
  rank = max(len(x['shape']), shape[0])
2208
2488
  out_shape = [-1] * rank
2489
+ if -2 in x['shape']:
2490
+ out_shape = [-2]
2209
2491
  return {
2210
2492
  'shape': out_shape,
2211
2493
  'dtype': x['dtype'],
@@ -2224,7 +2506,7 @@ class Tile(PrimitiveWithInfer):
2224
2506
  validator.check_positive_int(
2225
2507
  multiple, "multiples[%d]" % i, self.name)
2226
2508
  validator.check_value_type(
2227
- "x[\'dtype\']", x["dtype"], mstype.tensor_type, self.name)
2509
+ "x[\'dtype\']", x["dtype"], mstype.TensorType, self.name)
2228
2510
  out_shp, value = self._get_shape_and_range(x, multiples)
2229
2511
  shp = out_shp.get('shape', None)
2230
2512
  out = {'shape': shp,
@@ -2239,6 +2521,16 @@ class UnsortedSegmentSum(Primitive):
2239
2521
 
2240
2522
  Refer to :func:`mindspore.ops.unsorted_segment_sum` for more details.
2241
2523
 
2524
+ Inputs:
2525
+ - **input_x** (Tensor) - Input Tensor contains the data to be summed.
2526
+ The shape is :math:`(x_1, x_2, ..., x_R)`.
2527
+ - **segment_ids** (Tensor) - The label indicates the segment to which each element belongs.
2528
+ Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
2529
+ - **num_segments** (int) - Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
2530
+
2531
+ Outputs:
2532
+ Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
2533
+
2242
2534
  Supported Platforms:
2243
2535
  ``Ascend`` ``GPU`` ``CPU``
2244
2536
 
@@ -2272,6 +2564,16 @@ class UnsortedSegmentMin(PrimitiveWithCheck):
2272
2564
 
2273
2565
  Refer to :func:`mindspore.ops.unsorted_segment_min` for more details.
2274
2566
 
2567
+ Inputs:
2568
+ - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
2569
+ The data type must be float16, float32 or int32.
2570
+ - **segment_ids** (Tensor) - The label indicates the segment to which each element belongs.
2571
+ Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
2572
+ - **num_segments** (int) - The value specifies the number of distinct `segment_ids`.
2573
+
2574
+ Outputs:
2575
+ Tensor, with `num_segments` denoted as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
2576
+
2275
2577
  Supported Platforms:
2276
2578
  ``Ascend`` ``GPU`` ``CPU``
2277
2579
 
@@ -2305,7 +2607,7 @@ class UnsortedSegmentMin(PrimitiveWithCheck):
2305
2607
  # support vmap : segment_ids_shape support batch rank
2306
2608
  if not hasattr(self, 'batch_rank'):
2307
2609
  if not is_dim_unknown(x_shape) and not is_dim_unknown(segment_ids_shape):
2308
- validator.check_equal_int(len(segment_ids_shape), 1, "rank of segment_ids_shape", self.name)
2610
+ validator.check_int(len(segment_ids_shape), 1, validator.GE, "rank of segment_ids_shape", self.name)
2309
2611
 
2310
2612
  num_segments_type = num_segments['dtype']
2311
2613
  validator.check_subclass("num_segments", num_segments_type, [mstype.number], self.name)
@@ -2324,6 +2626,16 @@ class UnsortedSegmentMax(PrimitiveWithCheck):
2324
2626
 
2325
2627
  Refer to :func:`mindspore.ops.unsorted_segment_max` for more details.
2326
2628
 
2629
+ Inputs:
2630
+ - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
2631
+ The data type must be float16, float32 or int32.
2632
+ - **segment_ids** (Tensor) - The label indicates the segment to which each element belongs.
2633
+ Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
2634
+ - **num_segments** (int) - The value specifies the number of distinct `segment_ids`.
2635
+
2636
+ Outputs:
2637
+ Tensor, with `num_segments` denoted as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
2638
+
2327
2639
  Supported Platforms:
2328
2640
  ``Ascend`` ``GPU`` ``CPU``
2329
2641
 
@@ -2415,7 +2727,7 @@ class UnsortedSegmentMax(PrimitiveWithCheck):
2415
2727
  # support vmap : segment_ids_shape support batch rank
2416
2728
  if not hasattr(self, 'batch_rank'):
2417
2729
  if not is_dim_unknown(x_shape) and not is_dim_unknown(segment_ids_shape):
2418
- validator.check_equal_int(len(segment_ids_shape), 1, "rank of segment_ids_shape", self.name)
2730
+ validator.check_int(len(segment_ids_shape), 1, validator.GE, "rank of segment_ids_shape", self.name)
2419
2731
 
2420
2732
  num_segments_type = num_segments['dtype']
2421
2733
  validator.check_subclass("num_segments", num_segments_type, [mstype.number], self.name)
@@ -2424,8 +2736,9 @@ class UnsortedSegmentMax(PrimitiveWithCheck):
2424
2736
  validator.check(f'first shape of input_x', x_shape[0],
2425
2737
  'length of segments_id', segment_ids_shape[0], validator.EQ, self.name)
2426
2738
  num_segments_v = num_segments['value']
2427
- validator.check_value_type('num_segments', num_segments_v, [int], self.name)
2428
- validator.check_positive_int(num_segments_v, "num_segments", self.name)
2739
+ if num_segments_v is not None:
2740
+ validator.check_value_type('num_segments', num_segments_v, [int], self.name)
2741
+ validator.check_positive_int(num_segments_v, "num_segments", self.name)
2429
2742
 
2430
2743
 
2431
2744
  class UnsortedSegmentProd(Primitive):
@@ -2434,6 +2747,17 @@ class UnsortedSegmentProd(Primitive):
2434
2747
 
2435
2748
  Refer to :func:`mindspore.ops.unsorted_segment_prod` for more details.
2436
2749
 
2750
+ Inputs:
2751
+ - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
2752
+ With float16, float32 or int32 data type.
2753
+ - **segment_ids** (Tensor) - A `1-D` tensor whose shape is :math:`(x_1)`, the values must be non-negative.
2754
+ Data type must be int32.
2755
+ - **num_segments** (int) - The value specifies the number of distinct `segment_ids`,
2756
+ must be greater than 0.
2757
+
2758
+ Outputs:
2759
+ Tensor, with `num_segments` denoted as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
2760
+
2437
2761
  Supported Platforms:
2438
2762
  ``Ascend`` ``GPU`` ``CPU``
2439
2763
 
@@ -2463,10 +2787,26 @@ class Concat(PrimitiveWithCheck):

  Refer to :func:`mindspore.ops.concat` for more details.

+ Args:
+ axis (int, optional): The specified axis. Default: ``0`` .
+
+ Inputs:
+ - **input_x** (Union[tuple, list]) - A tuple or a list of input tensors.
+ Suppose there are two tensors in this tuple or list, namely x1 and x2.
+ To perform `Concat` in the axis 0 direction, except for the 0th axis, all other axes should be equal,
+ that is, :math:`x1.shape[1] == x2.shape[1], x1.shape[2] == x2.shape[2], ..., x1.shape[R] == x2.shape[R]`,
+ where :math:`R` indicates the last axis.
+
+ Outputs:
+ - Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
+ The data type is the same as `input_x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> input_x1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
  >>> input_x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
  >>> op = ops.Concat()
@@ -2586,6 +2926,8 @@ class ParallelConcat(Primitive):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> data1 = Tensor(np.array([[0, 1]]).astype(np.int32))
  >>> data2 = Tensor(np.array([[2, 1]]).astype(np.int32))
  >>> op = ops.ParallelConcat()
@@ -2604,11 +2946,12 @@ def _get_stack_shape(value, x_shape, x_type, axis, prim_name):
  """for stack output shape"""
  validator.check_value_type("shape", x_shape, [tuple, list], prim_name)
  validator.check_int(len(x_shape), 1, validator.GE, "len of input_x", prim_name)
- validator.check_subclass("input_x[0]", x_type[0], mstype.tensor, prim_name)
+ validator.check_subclass("input_x[0]", x_type[0], mstype.tensor_type, prim_name)

  out_n = len(x_shape)
  for i in range(1, out_n):
- validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0], validator.EQ, prim_name, TypeError)
+ if x_type[i] != x_type[i-1]:
+ raise TypeError(f"For {prim_name}, all types should be same, but got {x_type}")

  new_x_shape = []
  for i, shp in enumerate(x_shape):
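The rewritten check above raises an explicit `TypeError` for mixed element types rather than going through the validator. A quick sketch of the observable effect, assuming the 2.2.0 wheel:

    import numpy as np
    from mindspore import Tensor, ops

    a = Tensor(np.array([0, 1]).astype(np.float32))
    b = Tensor(np.array([2, 3]).astype(np.int32))
    try:
        ops.Stack()((a, b))  # mixed dtypes in one stack call
    except Exception as e:
        # per the check above, expected: TypeError "For Stack, all types should be same ..."
        print(type(e).__name__, e)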
@@ -2630,8 +2973,8 @@ def _get_stack_shape(value, x_shape, x_type, axis, prim_name):
  for j in range(0, rank_base):
  if new_x_shape[i]["shape"][j] != new_x_shape[0]["shape"][j] and \
  new_x_shape[i]["shape"][j] != -1 and new_x_shape[0]["shape"][j] != -1:
- raise ValueError("For \'{}\' element {} shape in input can not pack with first element".format(
- prim_name, new_x_shape[i]['id']))
+ raise ValueError(f"For {prim_name} element {new_x_shape[i]['id']} shape"
+ f" in input can not pack with first element")

  validator.check_int_range(axis, -rank_base - 1, rank_base, validator.INC_BOTH, 'axis', prim_name)
  if axis < 0:
@@ -2647,40 +2990,28 @@ def _get_stack_shape(value, x_shape, x_type, axis, prim_name):
  return out_shape


- class Pack(PrimitiveWithInfer):
- """
- Same as operator Stack. Pack will be deprecated in the future.
- Please use Stack instead.
- """
-
- @deprecated("1.1", "Stack", True)
- @prim_attr_register
- def __init__(self, axis=0):
- """Initialize Pack"""
- validator.check_value_type("axis", axis, [int], self.name)
- self.axis = axis
-
- def __infer__(self, value):
- x_shape = value['shape']
- x_type = value['dtype']
- self.add_prim_attr('num', len(x_shape))
- all_shape = _get_stack_shape(value, x_shape, x_type, self.axis, self.name)
- out = {'shape': all_shape,
- 'dtype': x_type[0],
- 'value': None}
- return out
-
-
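`Pack`, deprecated since 1.1, is deleted outright here; any remaining callers must construct `Stack` instead. A minimal migration sketch:

    import numpy as np
    from mindspore import Tensor, ops

    data1 = Tensor(np.array([0, 1]).astype(np.float32))
    data2 = Tensor(np.array([2, 3]).astype(np.float32))
    # Before (<= 2.0.0rc1): op = ops.Pack(axis=0)
    op = ops.Stack(axis=0)           # drop-in replacement, same semantics
    print(op([data1, data2]).shape)  # (2, 2)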
  class Stack(PrimitiveWithInfer):
  r"""
  Stacks a list of tensors in specified axis.

  Refer to :func:`mindspore.ops.stack` for more details.

+ Args:
+ axis (int, optional): Dimension to stack. The range is [-(R+1), R+1). Default: ``0`` .
+
+ Inputs:
+ - **input_x** (Union[tuple, list]) - A Tuple or list of Tensor objects with the same shape and type.
+
+ Outputs:
+ Tensor. A stacked Tensor with the same type as `input_x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> import numpy as np
  >>> data1 = Tensor(np.array([0, 1]).astype(np.float32))
  >>> data2 = Tensor(np.array([2, 3]).astype(np.float32))
  >>> stack = ops.Stack()
@@ -2723,75 +3054,30 @@ class Stack(PrimitiveWithInfer):
  return unpack(x[0])
  return x

- if 'shape_value' in value and value['shape_value'] is not None:
- input_shape_value = []
- for item in value['shape_value']:
- item = unpack(item)
- item = np.array(item)
- input_shape_value.append(item)
- infered_shape_value = np.stack(input_shape_value, axis=self.axis)
- infered_shape_value = tuple(infered_shape_value.tolist())
- out['shape_value'] = infered_shape_value
- return out
-
-
- class Unpack(PrimitiveWithInfer):
- """
- Same as operator Unstack. Unpack will be deprecated in the future.
- Please use Unstack instead.
- """
-
- @deprecated("1.1", "Unstack", True)
- @prim_attr_register
- def __init__(self, axis=0):
- """Initialize Unpack"""
- validator.check_value_type("axis", axis, [int], self.name)
- self.axis = axis
-
- def __infer__(self, x):
- validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
- x_shape = list(x['shape'])
- dim = len(x_shape)
- validator.check_int_range(self.axis, -dim, dim, validator.INC_LEFT, 'axis value', self.name)
- if self.axis < 0:
- self.axis = self.axis + dim
- output_num = x_shape[self.axis]
- validator.check_value_type("num", output_num, [int], self.name)
- validator.check_positive_int(output_num, "output_num", self.name)
- self.add_prim_attr('num', output_num)
- output_valid_check = x_shape[self.axis] - output_num
- validator.check_int(output_valid_check, 0, validator.EQ,
- "The dimension which to unstack divides output_num", self.name)
- out_shapes = []
- out_dtypes = []
- out_shape = x_shape[:self.axis] + x_shape[self.axis + 1:]
- for _ in range(output_num):
- out_shapes.append(tuple(out_shape))
- out_dtypes.append(x['dtype'])
- out_shapes = tuple(out_shapes)
- out_dtypes = tuple(out_dtypes)
- out = {'shape': out_shapes,
- 'dtype': out_dtypes,
- 'value': None}
+ if 'shape_value' in value and value['shape_value'] is not None:
+ input_shape_value = []
+ for item in value['shape_value']:
+ item = unpack(item)
+ item = np.array(item)
+ input_shape_value.append(item)
+ infered_shape_value = np.stack(input_shape_value, axis=self.axis)
+ infered_shape_value = tuple(infered_shape_value.tolist())
+ out['shape_value'] = infered_shape_value
  return out


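`Unpack` is removed alongside `Pack`; its replacement keeps the same `axis` semantics and adds an optional `num`. A minimal migration sketch, assuming the 2.2.0 wheel:

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]).astype(np.float32))
    # Before (<= 2.0.0rc1): unpack = ops.Unpack(axis=0)
    unstack = ops.Unstack(axis=0)
    out = unstack(x)
    print(len(out), out[0].shape)  # 2 (4,)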
  class Unstack(Primitive):
  r"""
- Unstacks tensor in specified axis.
-
- Unstacks a tensor of rank `R` along axis dimension, output tensors will have rank `(R-1)`.
+ Unstacks tensor in specified axis; this is the opposite of ops.Stack.
+ Assuming input is a tensor of rank `R`, output tensors will have rank `(R-1)`.

- Given a tensor of shape :math:`(x_1, x_2, ..., x_R)`. If :math:`0 \le axis`,
- the shape of tensor in output is :math:`(x_1, x_2, ..., x_{axis}, x_{axis+2}, ..., x_R)`.
-
- This is the opposite of pack.
+ Refer to :func:`mindspore.ops.unstack` for more details.

  Args:
- axis (int): Dimension along which to unpack. Default: 0.
+ axis (int): Dimension along which to unpack. Default: ``0`` .
  Negative values wrap around. The range is [-R, R).
  num (Union[None, int]): The number of output tensors.
- Automatically inferred by input_x and axis if None. Default: None.
+ Automatically inferred by input_x and axis if ``None`` . Default: ``None`` .

  Inputs:
  - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
@@ -2799,14 +3085,15 @@ class Unstack(Primitive):

  Outputs:
  A tuple of tensors, the shape of each object is the same.
-
- Raises:
- ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)).
+ Given a tensor of shape :math:`(x_1, x_2, ..., x_R)`. If :math:`0 \le axis`,
+ the shape of tensor in output is :math:`(x_1, x_2, ..., x_{axis}, x_{axis+2}, ..., x_R)`.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> unstack = ops.Unstack()
  >>> input_x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))
  >>> output = unstack(input_x)
@@ -2829,6 +3116,15 @@ class Slice(Primitive):

  Refer to :func:`mindspore.ops.slice` for more details.

+ Inputs:
+ - **input_x** (Tensor) - The target tensor.
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
+ - **begin** (Union[tuple, list]) - The beginning of the slice. Only constant values (>= 0) are allowed.
+ - **size** (Union[tuple, list]) - The size of the slice. Only constant values are allowed.
+
+ Outputs:
+ Tensor, whose shape is given by `size`, with the same data type as `input_x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

@@ -2930,7 +3226,7 @@ class ReverseV2(Primitive):
  axis (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.

  Inputs:
- - **input_x** (Tensor) - The target tensor. The data type is Number except float64.
+ - **input_x** (Tensor) - The target tensor.
  The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.

  Outputs:
@@ -2945,6 +3241,9 @@
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
  >>> op = ops.ReverseV2(axis=[1])
  >>> output = op(input_x)
@@ -2965,6 +3264,9 @@ class ReverseV2(Primitive):
  for i, each in enumerate(axis):
  validator.check_value_type(f'axis[{i}]', each, [int], self.name)
  self.axis = axis
+ if isinstance(axis, list):
+ self.axis = tuple(axis)
+ self.add_prim_attr('axis', self.axis)
  self.init_prim_io_names(inputs=['x'], outputs=['output'])


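The three added lines normalize a list `axis` to a tuple before it is registered as a primitive attribute, so both spellings serialize identically. A sketch:

    import mindspore
    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
    op_list = ops.ReverseV2(axis=[1])    # list input ...
    op_tuple = ops.ReverseV2(axis=(1,))  # ... and tuple input now register the same attr
    print(op_list(x))
    print(op_tuple(x))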
@@ -2973,10 +3275,8 @@ class Rint(Primitive):
  Returns an integer that is closest to `input_x` element-wise.

  Inputs:
- - **input_x** (Tensor) - The target tensor, which must be one of the following types:
- float16, float32, float64. The shape is :math:`(N,*)` where :math:`*` means
- any number of additional dimensions.
-
+ - **input_x** (Tensor) - Input tensor of any dimension, which must be one of the following types:
+ float16, float32, float64.
  Outputs:
  Tensor, has the same shape and type as `input_x`.

@@ -2987,6 +3287,9 @@
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> input_x = Tensor(np.array([-1.6, -0.1, 1.5, 2.0]), mindspore.float32)
  >>> op = ops.Rint()
  >>> output = op(input_x)
@@ -3037,6 +3340,8 @@ class Select(Primitive):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
  >>> select = ops.Select()
  >>> input_cond = Tensor([True, False])
  >>> input_x = Tensor([2,3], mindspore.float32)
@@ -3059,11 +3364,11 @@ class StridedSliceV2(Primitive):
  Refer to class StridedSlice for more details.

  Args:
- begin_mask (int): Starting index of the slice. Default: 0.
- end_mask (int): Ending index of the slice. Default: 0.
- ellipsis_mask (int): An int mask. Default: 0.
- new_axis_mask (int): An int mask. Default: 0.
- shrink_axis_mask (int): An int mask. Default: 0.
+ begin_mask (int): Starting index of the slice. Default: ``0`` .
+ end_mask (int): Ending index of the slice. Default: ``0`` .
+ ellipsis_mask (int): An int mask. Default: ``0`` .
+ new_axis_mask (int): An int mask. Default: ``0`` .
+ shrink_axis_mask (int): An int mask. Default: ``0`` .

  Inputs:
  - **input_x** (Tensor) - The input Tensor.
@@ -3113,10 +3418,30 @@ class StridedSlice(PrimitiveWithInfer):

  Refer to :func:`mindspore.ops.strided_slice` for more details.

+ Args:
+ begin_mask (int, optional): Starting index of the slice. Default: ``0`` .
+ end_mask (int, optional): Ending index of the slice. Default: ``0`` .
+ ellipsis_mask (int, optional): An int mask, ignore slicing operation when set to 1. Default: ``0`` .
+ new_axis_mask (int, optional): An int mask for adding new dims. Default: ``0`` .
+ shrink_axis_mask (int, optional): An int mask for shrinking dims. Default: ``0`` .
+
+ Inputs:
+ - **input_x** (Tensor) - The input Tensor to be extracted from.
+ - **begin** (tuple[int]) - A tuple which represents the location where to start.
+ - **end** (tuple[int]) - A tuple which represents the maximum location where to end.
+ - **strides** (tuple[int]) - A tuple which represents the stride that is continuously added
+ before reaching the maximum location. Only int is allowed; it can be negative,
+ which results in reversed slicing.
+
+ Outputs:
+ Tensor, the strided slice extracted from `input_x` based on the `begin`/`end` indices and `strides`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
  >>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
  ... [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
  >>> # [[[1. 1. 1.]
@@ -3127,7 +3452,7 @@ class StridedSlice(PrimitiveWithInfer):
  >>> #
  >>> # [[5. 5. 5.]
  >>> # [6. 6. 6.]]]
- >>> # In order to visually view the multi-dimensional array, write the above as follows
+ >>> # In order to visually view the multi-dimensional array, write the above as follows
  >>> # [
  >>> # [
  >>> # [1,1,1]
@@ -3218,10 +3543,10 @@ class StridedSlice(PrimitiveWithInfer):
  end_v, end_len = self._check_and_get_value(end, 'end')
  strides_v, strides_len = self._check_and_get_value(strides, 'strides')

- is_dynamic_tuple = (self._is_none_in_tuple(begin_v['value'])
- or self._is_none_in_tuple(end_v['value'])
- or self._is_none_in_tuple(strides_v['value']))
- is_dynamic = None in (begin_v['value'], end_v['value'], strides_v['value'])
+ is_dynamic_tuple = (self._is_none_in_tuple(begin_v.get('value'))
+ or self._is_none_in_tuple(end_v.get('value'))
+ or self._is_none_in_tuple(strides_v.get('value')))
+ is_dynamic = None in (begin_v.get('value'), end_v.get('value'), strides_v.get('value'))

  if not is_dynamic and (begin_len != strides_len or end_len != strides_len):
  raise ValueError(
@@ -3341,8 +3666,7 @@ class StridedSlice(PrimitiveWithInfer):
  strides_value = strides_v['shape_value']
  return begin_value, end_value, strides_value

- @staticmethod
- def _is_none_in_tuple(x):
+ def _is_none_in_tuple(self, x):
  return isinstance(x, tuple) and None in x

  def _compute_slicing_length(self, begin, end, stride, x_dim):
@@ -3569,10 +3893,17 @@ class Diag(PrimitiveWithCheck):

  Refer to :func:`mindspore.ops.diag` for more details.

+ Inputs:
+ - **input_x** (Tensor) - The input tensor.
+
+ Outputs:
+ Tensor, has the same dtype as the `input_x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> from mindspore import Tensor, ops
  >>> input_x = Tensor([1, 2, 3, 4]).astype('int32')
  >>> diag = ops.Diag()
  >>> output = diag(input_x)
@@ -3602,9 +3933,9 @@ class DiagPart(PrimitiveWithCheck):

  Extracts the diagonal elements from the given Tensor.

- If the input_x is a Tensor of shape :math:`[D_1,..., D_k, D_1,..., D_k]`, then the
+ If the `input_x` is a Tensor of shape :math:`[D_1,..., D_k, D_1,..., D_k]`, then the
  output will be a Tensor of rank k of shape :math:`[D_1,..., D_k]` where:
- :math:`output[i_1,..., i_k] = input_x[i_1,..., i_k, i_1,..., i_k]`.
+ :math:`output[i_1,..., i_k] = input\_x[i_1,..., i_k, i_1,..., i_k]`.

  Inputs:
  - **input_x** (Tensor) - The rank of input tensor is 2k (k > 0).
@@ -3654,10 +3985,25 @@ class Mvlgamma(Primitive):

  Refer to :func:`mindspore.ops.mvlgamma` for more details.

+ Args:
+ p (int): The number of dimensions. The value of `p` must be greater than or equal to 1.
+
+ Inputs:
+ - **x** (Tensor) - The tensor to compute the multivariate log-gamma function,
+ which must be one of the following types: float32, float64.
+ The shape is :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
+ The value of any element in `x` must be greater than :math:`(p - 1) / 2`.
+
+ Outputs:
+ Tensor, has the same shape and type as `x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([[3, 4, 5], [4, 2, 6]]), mindspore.float32)
  >>> op = ops.Mvlgamma(p=3)
  >>> y = op(x)
@@ -3684,8 +4030,7 @@ class Eye(Primitive):
  - **n** (int) - The number of rows of returned tensor. Constant value only.
  - **m** (int) - The number of columns of returned tensor. Constant value only.
  - **t** (mindspore.dtype) - MindSpore's dtype, the data type of the returned tensor.
- The data type can be bool or Number.
- Default: None, the data type of the returned tensor is mindspore.float32.
+ Default: ``None`` , the data type of the returned tensor is mindspore.float32.

  Outputs:
  Tensor, a tensor with ones on the diagonal and the rest of elements are zero. The shape of `output` depends on
@@ -3695,6 +4040,8 @@
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> from mindspore import ops
  >>> eye = ops.Eye()
  >>> output = eye(2, 2, mindspore.int32)
  >>> print(output)
@@ -3719,17 +4066,29 @@ class ScatterNd(Primitive):
  r"""
  Scatters a tensor into a new tensor depending on the specified indices.

- The following figure shows the calculation process of inserting two slices in the first dimension of a rank-3
- with two matrices of new values:
+ Refer to :func:`mindspore.ops.scatter_nd` for more details.

- .. image:: ScatterNd.png
+ Inputs:
+ - **indices** (Tensor) - The index of scattering in the new tensor with int32 or int64 data type.
+ The rank of indices must be at least 2 and `indices_shape[-1] <= len(shape)`.
+ - **updates** (Tensor) - The source Tensor to be scattered.
+ It has shape `indices_shape[:-1] + shape[indices_shape[-1]:]`.
+ - **shape** (tuple[int]) - Defines the shape of the output tensor, has the same data type as indices.
+ The shape of `shape` is :math:`(x_1, x_2, ..., x_R)`, and the length of `shape` is greater than or equal to 2.
+ In other words, the shape of `shape` is at least :math:`(x_1, x_2)`.
+ And the value of any element in `shape` must be greater than or equal to 1.
+ In other words, :math:`x_1` >= 1, :math:`x_2` >= 1.

- Refer to :func:`mindspore.ops.scatter_nd` for more details.
+ Outputs:
+ Tensor, the new tensor, has the same type as `updates` and the same shape as `shape`.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> op = ops.ScatterNd()
  >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
  >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2],
3803
4162
  Args:
3804
4163
  size (Union[tuple, list]): The target size. The dimension of size must be 2.
3805
4164
  align_corners (bool): Whether the centers of the 4 corner pixels of the input
3806
- and output tensors are aligned. Default: False.
4165
+ and output tensors are aligned. Default: ``False`` .
3807
4166
 
3808
4167
  Inputs:
3809
4168
  - **input_x** (Tensor) - The input tensor. The shape of the tensor is :math:`(N, C, H, W)`.
@@ -3851,33 +4210,24 @@ class ResizeNearestNeighborV2(Primitive):
  values of neighboring points at all, yielding a piecewise-constant interpolant.

  Args:
- align_corners (bool, optional): If true, the centers of the 4 corner pixels of the input and output
- tensors are aligned, preserving the values at the corner pixels. Defaults: False.
- half_pixel_centers (bool, optional): Whether half pixel center. If set to True,
- `align_corners` should be False. Default: False.
- data_format (str, optional): An optional `string` that describes the
- format of the input `x`. Default: `NHWC`.
+ align_corners (bool, optional): If ``True`` , the centers of the 4 corner pixels of the input and output
+ tensors are aligned, preserving the values at the corner pixels. Default: ``False`` .
+ half_pixel_centers (bool, optional): Whether to use half-pixel centers. If set to ``True`` ,
+ `align_corners` should be False. Default: ``False`` .

  Inputs:
- - **x** (Tensor) - 4-D with shape :math:`(batch, height, width, channels)`
- or :math:`(batch, channels, height, width)` depending on the attr 'data_format'. Support
- type [`int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `float16`, `float32`, `float64`].
+ - **x** (Tensor) - 4-D with shape :math:`(batch, channels, height, width)` .
  - **size** (Tensor) - The new size for the images. A 1-D int32 Tensor
  of 2 elements: [`new_height, new_width`].

  Outputs:
  - **y** (Tensor) - The resized images. A 4-D with shape
- :math:`(batch, new\_height, new\_width, channels)`
- or :math:`(batch, channels, new\_height, new\_width)`
- depending on the attr `data_format`. It has the same dtype as `x`.
+ :math:`(batch, channels, new\_height, new\_width)`. It has the same dtype as `x`.

  Raises:
  TypeError: If `x` or `size` is not a Tensor.
- TypeError: If the data type of `x` is not in supported list.
  TypeError: If the data type of `size` is not int32.
  TypeError: If `align_corners` or `half_pixel_centers` is not bool.
- TypeError: If `data_format` is not string.
- ValueError: If `data_format` not in [`NHWC`, `NCHW`].
  ValueError: If any value of `size` is non-positive.
  ValueError: If the dimension of `x` is not 4.
  ValueError: If the dimension of `size` is not 1.
@@ -3888,29 +4238,26 @@ class ResizeNearestNeighborV2(Primitive):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>> input_tensor = Tensor(np.ones((1, 4, 4, 1)), mstype.float32)
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> from mindspore import dtype as mstype
+ >>> input_tensor = Tensor(np.ones((1, 1, 4, 4)), mstype.float32)
  >>> size = Tensor([2, 2], mstype.int32)
  >>> resize = ops.ResizeNearestNeighborV2()
  >>> output = resize(input_tensor, size)
  >>> print(output)
- [[[[1.]
- [1.]]
- [[1.]
- [1.]]]]
+ [[[[1. 1.]
+ [1. 1.]]]]
  >>> print(output.shape)
- (1, 2, 2, 1)
+ (1, 1, 2, 2)
  """

  @prim_attr_register
- def __init__(self, align_corners=False, half_pixel_centers=False, data_format='NHWC'):
+ def __init__(self, align_corners=False, half_pixel_centers=False):
  """Initialize ResizeNearestNeighborV2"""
  self.init_prim_io_names(inputs=['x', 'size'], outputs=['y'])
-
  validator.check_bool(align_corners, 'align_corners', self.name)
  validator.check_bool(half_pixel_centers, 'half_pixel_centers', self.name)
- validator.check_value_type('data_format', data_format, [str], self.name)
- self.format = validator.check_string(data_format, ['NHWC', 'NCHW'], 'data_format', self.name)
- self.add_prim_attr('data_format', self.format)


  class GatherNd(Primitive):
  class GatherNd(Primitive):
@@ -3919,10 +4266,20 @@ class GatherNd(Primitive):
3919
4266
 
3920
4267
  Refer to :func:`mindspore.ops.gather_nd` for more details.
3921
4268
 
4269
+ Inputs:
4270
+ - **input_x** (Tensor) - The target tensor to gather values.
4271
+ - **indices** (Tensor) - The index tensor, with int32 or int64 data type.
4272
+
4273
+ Outputs:
4274
+ Tensor, has the same type as `input_x` and the shape is indices_shape[:-1] + x_shape[indices_shape[-1]:].
4275
+
3922
4276
  Supported Platforms:
3923
4277
  ``Ascend`` ``GPU`` ``CPU``
3924
4278
 
3925
4279
  Examples:
4280
+ >>> import mindspore
4281
+ >>> import numpy as np
4282
+ >>> from mindspore import Tensor, ops
3926
4283
  >>> op = ops.GatherNd()
3927
4284
  >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
3928
4285
  >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
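The output-shape rule added to `GatherNd` follows the same tuple arithmetic as `ScatterNd`; for the docstring's own example it reduces to one gathered scalar per index row:

    indices_shape = (2, 2)   # last dim indexes both leading axes of input_x
    x_shape = (2, 3)
    out_shape = indices_shape[:-1] + x_shape[indices_shape[-1]:]
    print(out_shape)         # (2,)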
@@ -3953,11 +4310,11 @@ class ScatterUpdate(Primitive):
  the relatively highest priority data type.

  Args:
- use_locking (bool): Whether to protect the assignment by a lock. Default: True.
+ use_locking (bool): Whether to protect the assignment by a lock. Default: ``True`` .

  Inputs:
  - **input_x** (Parameter) - The target tensor, with data type of Parameter.
- The shape is :math:`(N, *)` where :math:`*` means,any number of additional dimensions.
+ The shape is 0-D or :math:`(N, *)` where :math:`*` means any number of additional dimensions.
  - **indices** (Tensor) - The index of input tensor. With int32 data type.
  If there are duplicates in indices, the order for updating is undefined.
  - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
@@ -3977,6 +4334,9 @@
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> np_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
  >>> input_x = mindspore.Parameter(Tensor(np_x, mindspore.float32), name="x")
  >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
@@ -4022,11 +4382,11 @@ class ScatterNdUpdate(Primitive):
  the relatively highest priority data type.

  Args:
- use_locking (bool): Whether to protect the assignment by a lock. Default: True.
+ use_locking (bool): Whether to protect the assignment by a lock. Default: ``True`` .

  Inputs:
  - **input_x** (Parameter) - The target tensor, with data type of Parameter.
- The shape is :math:`(N, *)` where :math:`*` means,any number of additional dimensions.
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
  - **indices** (Tensor) - The index of input tensor, with int32 or int64 data type.
  - **updates** (Tensor) - N-D (2D or 3D) Tensor. The tensor to be updated to the input tensor,
  has the same type as input. The shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
@@ -4044,6 +4404,9 @@
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> np_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
  >>> input_x = mindspore.Parameter(Tensor(np_x, mindspore.float32), name="x")
  >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
@@ -4081,7 +4444,7 @@ class ScatterMax(_ScatterOpDynamic):
  .. math::

  \text{input_x}[\text{indices}[i, ..., j], :]
- = max(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])
+ = \max(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])

  Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
  If they have different data types, the lower priority data type will be converted to
@@ -4089,11 +4452,11 @@ class ScatterMax(_ScatterOpDynamic):
  when `updates` does not support conversion to the data type required by `input_x`.

  Args:
- use_locking (bool): Whether to protect the assignment by a lock. Default: False.
+ use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .

  Inputs:
  - **input_x** (Parameter) - The target tensor, with data type of Parameter.
- The shape is :math:`(N, *)` where :math:`*` means,any number of additional dimensions.
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
  - **indices** (Tensor) - The index to do max operation whose data type must be mindspore.int32 or
  mindspore.int64.
  - **updates** (Tensor) - The tensor that performs the maximum operation with `input_x`,
@@ -4115,6 +4478,9 @@
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops, Parameter
  >>> input_x = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32),
  ... name="input_x")
  >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
@@ -4139,7 +4505,7 @@ class ScatterMin(_ScatterOpDynamic):
  .. math::

  \text{input_x}[\text{indices}[i, ..., j], :]
- = min(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])
+ = \min(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])

  Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
  If they have different data types, the lower priority data type will be converted to
@@ -4147,11 +4513,11 @@ class ScatterMin(_ScatterOpDynamic):
  when `updates` does not support conversion to the data type required by `input_x`.

  Args:
- use_locking (bool): Whether to protect the assignment by a lock. Default: False.
+ use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .

  Inputs:
  - **input_x** (Parameter) - The target tensor, with data type of Parameter.
- The shape is :math:`(N, *)` where :math:`*` means,any number of additional dimensions.
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
  - **indices** (Tensor) - The index to do min operation whose data type must be mindspore.int32 or
  mindspore.int64.
  - **updates** (Tensor) - The tensor doing the min operation with `input_x`,
@@ -4173,6 +4539,9 @@
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops, Parameter
  >>> input_x = Parameter(Tensor(np.array([[0.0, 1.0, 2.0], [0.0, 0.0, 0.0]]), mindspore.float32),
  ... name="input_x")
  >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
@@ -4207,12 +4576,11 @@ class ScatterAdd(Primitive):

  Args:
  use_locking (bool): Whether to protect the assignment by a lock.
- If true, `input_x` will be protected by the lock.
- Otherwise, the calculation result is undefined. Default: False.
+ If ``True`` , `input_x` will be protected by the lock.
+ Otherwise, the calculation result is undefined. Default: ``False`` .

  Inputs:
  - **input_x** (Parameter) - The target tensor, with data type of Parameter.
- The shape is :math:`(N, *)` where :math:`*` means,any number of additional dimensions.
  - **indices** (Tensor) - The index to do add operation whose data type must be mindspore.int32 or
  mindspore.int64.
  - **updates** (Tensor) - The tensor doing the add operation with `input_x`,
@@ -4232,6 +4600,9 @@
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops, Parameter
  >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name="x")
  >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
  >>> updates = Tensor(np.ones([2, 2, 3]), mindspore.float32)
@@ -4324,7 +4695,7 @@ class ScatterSub(Primitive):
  the relatively highest priority data type.

  Args:
- use_locking (bool): Whether to protect the assignment by a lock. Default: False.
+ use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .

  Inputs:
  - **input_x** (Parameter) - The target tensor, with data type of Parameter.
@@ -4348,6 +4719,9 @@
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops, Parameter
  >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]), mindspore.float32), name="x")
  >>> indices = Tensor(np.array([[0, 1]]), mindspore.int32)
  >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]]), mindspore.float32)
@@ -4429,12 +4803,15 @@ class Triu(Primitive):
  The upper triangular section of the matrix comprises the
  elements present on and above the main diagonal.

+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
  Args:
- diagonal (int, optional): The index of diagonal. Default: 0, indicating the main diagonal.
+ diagonal (int, optional): The index of diagonal. Default: ``0`` , indicating the main diagonal.

  Inputs:
- - **x** (Tensor) - The input tensor with shape :math:`(N, *)`
- where :math:`*` means any number of additional dimensions. The data type is Number.
+ - **x** (Tensor) - The input tensor with shape :math:`(M, N, *)`
+ where :math:`*` means any number of additional dimensions.

  Outputs:
  - **y** (Tensor) - A tensor has the same shape and data type as input.
@@ -4442,12 +4819,14 @@ class Triu(Primitive):
  Raises:
  TypeError: If `x` is not a Tensor.
  TypeError: If `diagonal` is not an int.
- ValueError: If length of shape of x is less than 1.
+ ValueError: If the dimension of `input` is less than 2.

  Supported Platforms:
- ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.array([[ 1, 2, 3, 4],
  ... [ 5, 6, 7, 8],
  ... [10, 11, 12, 13],
@@ -4509,11 +4888,11 @@ class ScatterMul(_ScatterOpDynamic):
  the relatively highest priority data type.

  Args:
- use_locking (bool): Whether to protect the assignment by a lock. Default: False.
+ use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .

  Inputs:
  - **input_x** (Parameter) - The target tensor, with data type of Parameter.
- The shape is :math:`(N, *)` where :math:`*` means,any number of additional dimensions.
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
  - **indices** (Tensor) - The index to do multiply operation whose data type must be mstype.int32 or
  mstype.int64.
  - **updates** (Tensor) - The tensor doing the multiply operation with `input_x`,
@@ -4533,6 +4912,9 @@
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import numpy as np
+ >>> from mindspore import dtype as mstype
+ >>> from mindspore import Tensor, ops, Parameter
  >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mstype.float32), name="x")
  >>> indices = Tensor(np.array([0, 1]), mstype.int32)
  >>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mstype.float32)
@@ -4614,11 +4996,11 @@ class ScatterDiv(_ScatterOpDynamic):
  when `updates` does not support conversion to the data type required by `input_x`.

  Args:
- use_locking (bool): Whether to protect the assignment by a lock. Default: False.
+ use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .

  Inputs:
  - **input_x** (Parameter) - The target tensor, with data type of Parameter.
- The shape is :math:`(N, *)` where :math:`*` means,any number of additional dimensions.
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
  - **indices** (Tensor) - The index to do divide operation whose data type must be mstype.int32 or
  mstype.int64.
  - **updates** (Tensor) - The tensor doing the divide operation with `input_x`,
@@ -4640,6 +5022,9 @@
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import numpy as np
+ >>> from mindspore import dtype as mstype
+ >>> from mindspore import Tensor, ops, Parameter
  >>> input_x = Parameter(Tensor(np.array([[6.0, 6.0, 6.0], [2.0, 2.0, 2.0]]), mstype.float32), name="x")
  >>> indices = Tensor(np.array([0, 1]), mstype.int32)
  >>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mstype.float32)
@@ -4714,10 +5099,27 @@ class ScatterNdAdd(Primitive):

  Refer to :func:`mindspore.ops.scatter_nd_add` for more details.

+ Args:
+ use_locking (bool, optional): Whether to protect the assignment by a lock. Default: ``False`` .
+
+ Inputs:
+ - **input_x** (Parameter) - The target tensor, with data type of Parameter.
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
+ - **indices** (Tensor) - The index to do add operation whose data type must be mindspore.int32.
+ The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
+ - **updates** (Tensor) - The tensor doing the add operation with `input_x`,
+ the data type is the same as `input_x`, the shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
+
+ Outputs:
+ Tensor, the updated `input_x`, has the same shape and type as `input_x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops, Parameter
  >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
  >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
  >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
@@ -4774,10 +5176,27 @@ class ScatterNdSub(Primitive):

  Refer to :func:`mindspore.ops.scatter_nd_sub` for more details.

+ Args:
+ use_locking (bool, optional): Whether to protect the assignment by a lock. Default: ``False`` .
+
+ Inputs:
+ - **input_x** (Parameter) - The target tensor, with data type of Parameter.
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
+ - **indices** (Tensor) - The index to do sub operation whose data type must be mindspore.int32.
+ The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
+ - **updates** (Tensor) - The tensor doing the sub operation with `input_x`,
+ the data type is the same as `input_x`, the shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
+
+ Outputs:
+ Tensor, the updated `input_x`, has the same shape and type as `input_x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops, Parameter
  >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
  >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
  >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
@@ -4838,10 +5257,26 @@ class ScatterNdMul(_ScatterNdOp):

  Refer to :func:`mindspore.ops.scatter_nd_mul` for more details.

+ Args:
+ use_locking (bool, optional): Whether to protect the assignment by a lock. Default: ``False`` .
+
+ Inputs:
+ - **input_x** (Parameter) - The target tensor, with data type of Parameter.
+ - **indices** (Tensor) - The index to do mul operation whose data type must be int32 or int64.
+ The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
+ - **updates** (Tensor) - The tensor to do the mul operation with `input_x`.
+ The data type is the same as `input_x`, and the shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
+
+ Outputs:
+ Tensor, the updated `input_x`, has the same shape and type as `input_x`.
+
  Supported Platforms:
  ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops, Parameter
  >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
  >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
  >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
@@ -4887,10 +5322,26 @@ class ScatterNdDiv(_ScatterNdOp):

  Refer to :func:`mindspore.ops.scatter_nd_div` for more details.

+ Args:
+ use_locking (bool, optional): Whether to protect the assignment by a lock. Default: ``False`` .
+
+ Inputs:
+ - **input_x** (Parameter) - The target tensor, with data type of Parameter.
+ - **indices** (Tensor) - The index to do div operation whose data type must be int32 or int64.
+ The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
+ - **updates** (Tensor) - The tensor to do the div operation with `input_x`.
+ The data type is the same as `input_x`, and the shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
+
+ Outputs:
+ Tensor, the updated `input_x`, has the same shape and type as `input_x`.
+
  Supported Platforms:
  ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops, Parameter
  >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
  >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
  >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
@@ -4936,10 +5387,26 @@ class ScatterNdMax(_ScatterNdOp):

  Refer to :func:`mindspore.ops.scatter_nd_max` for more details.

+ Args:
+ use_locking (bool, optional): Whether to protect the assignment by a lock. Default: ``False`` .
+
+ Inputs:
+ - **input_x** (Parameter) - The target tensor, with data type of Parameter.
+ - **indices** (Tensor) - The index to do maximum operation whose data type must be int32 or int64.
+ The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
+ - **updates** (Tensor) - The tensor to do the max operation with `input_x`.
+ The data type is the same as `input_x`, and the shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
+
+ Outputs:
+ Tensor, the updated `input_x`, has the same shape and type as `input_x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops, Parameter
  >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
  >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
  >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
@@ -4987,10 +5454,26 @@ class ScatterNdMin(_ScatterNdOp):

  Refer to :func:`mindspore.ops.scatter_nd_min` for more details.

+ Args:
+ use_locking (bool, optional): Whether to protect the assignment by a lock. Default: ``False`` .
+
+ Inputs:
+ - **input_x** (Parameter) - The target tensor, with data type of Parameter.
+ - **indices** (Tensor) - The index to do minimum operation whose data type must be int32 or int64.
+ The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
+ - **updates** (Tensor) - The tensor to do the min operation with `input_x`.
+ The data type is the same as `input_x`, and the shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
+
+ Outputs:
+ Tensor, the updated `input_x`, has the same shape and type as `input_x`.
+
  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops, Parameter
  >>> input_x = Parameter(Tensor(np.ones(8) * 10, mindspore.float32), name="x")
  >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
  >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
@@ -5031,59 +5514,6 @@ class ScatterNdMin(_ScatterNdOp):
  super().__init__(use_locking)


- class ScatterNonAliasingAdd(Primitive):
- """
- Applies sparse addition to the input using individual values or slices.
-
- Using given values to update tensor value through the add operation, along with the input indices.
- This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
-
- Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
- If they have different data types, the lower priority data type will be converted to
- the relatively highest priority data type.
-
- Inputs:
- - **input_x** (Parameter) - The target parameter. The data type must be float16, float32 or int32.
- - **indices** (Tensor) - The index to perform the addition operation whose data type must be mindspore.int32.
- - **updates** (Tensor) - The tensor that performs the addition operation with `input_x`,
- the data type is the same as `input_x`, the shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
-
- Outputs:
- Parameter, the updated `input_x`.
-
- Raises:
- TypeError: If dtype of `indices` is not int32.
- TypeError: If dtype of `input_x` is not one of float16, float32, int32.
- ValueError: If the shape of `updates` is not equal to `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
- RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
- is required when data type conversion of Parameter is not supported.
-
- Supported Platforms:
- ``Ascend``
-
- Examples:
- >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
- >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
- >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
- >>> scatter_non_aliasing_add = ops.ScatterNonAliasingAdd()
- >>> output = scatter_non_aliasing_add(input_x, indices, updates)
- >>> print(output)
- [ 1. 10. 9. 4. 12. 6. 7. 17.]
- """
-
- __mindspore_signature__ = (
- sig.make_sig('input_x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
- sig.make_sig('indices', dtype=sig.sig_dtype.T1),
- sig.make_sig('updates', dtype=sig.sig_dtype.T)
- )
-
- @prim_attr_register
- def __init__(self):
- """Initialize ScatterNonAliasingAdd"""
- self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])
- self.add_prim_attr('side_effect_mem', True)
-
-
  class SpaceToDepth(Primitive):
  r"""
  Rearrange blocks of spatial data into depth.
@@ -5116,6 +5546,9 @@ class SpaceToDepth(Primitive):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.random.rand(1,3,2,2), mindspore.float32)
  >>> block_size = 2
  >>> space_to_depth = ops.SpaceToDepth(block_size)
@@ -5169,6 +5602,9 @@ class DepthToSpace(Primitive):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> x = Tensor(np.random.rand(1, 12, 1, 1), mindspore.float32)
  >>> block_size = 2
  >>> depth_to_space = ops.DepthToSpace(block_size)
@@ -5297,6 +5733,9 @@ class BatchToSpace(PrimitiveWithInfer):
  ``Ascend`` ``GPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
  >>> block_size = 2
  >>> crops = [[0, 0], [0, 0]]
  >>> batch_to_space = ops.BatchToSpace(block_size, crops)
@@ -5437,59 +5876,42 @@ class SpaceToBatchND(Primitive):
5437
5876
  self.paddings = paddings
5438
5877
 
5439
5878
 
5440
- class BatchToSpaceND(Primitive):
5879
+ class BatchToSpaceNDV2(Primitive):
5441
5880
  r"""
5442
- `ops.BatchToSpaceND` is deprecated from version 2.0 and will be removed in a future version,
5443
- use `ops.batch_to_space_nd` instead.
5444
-
5445
- Supported Platforms:
5446
- ``Ascend`` ``GPU`` ``CPU``
5447
-
5448
- Examples:
5449
- >>> block_size = 2
5450
- >>> crops = [[0, 0], [0, 0]]
5451
- >>> batch_to_space = ops.BatchToSpaceND(block_size, crops)
5452
- >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
5453
- >>> output = batch_to_space(input_x)
5454
- >>> print(output)
5455
- [[[[1. 2.]
5456
- [3. 4.]]]]
5457
- """
5458
-
5459
- @deprecated("2.0", "ops.batch_to_space_nd", False)
5460
- @prim_attr_register
5461
- def __init__(self, block_shape, crops):
5462
- """Initialize BatchToSpaceND"""
5463
- if isinstance(block_shape, int):
5464
- block_shape = (block_shape,) * np.array(crops).shape[0]
5465
- self.add_prim_attr("block_shape", block_shape)
5466
- validator.check_value_type('block_shape type', block_shape, [list, tuple], self.name)
5467
- validator.check('block_shape shape', len(np.array(block_shape).shape), '', 1, validator.EQ, self.name)
5468
- block_rank = len(block_shape)
5469
- if context.get_context("device_target") == "Ascend":
5470
- validator.check('block_shape length', block_rank, '', 2, validator.EQ, self.name)
5471
- for elem in block_shape:
5472
- validator.check('block_shape element', elem, '', 1, validator.GE, self.name)
5473
- validator.check_value_type('block_shape element', elem, [int], self.name)
5474
- self.block_shape = block_shape
5881
+ Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.
5475
5882
 
5476
- validator.check_value_type('crops type', crops, [list, tuple], self.name)
5477
- validator.check('crops length', len(crops), '', 1, validator.GE, self.name)
5478
- validator.check('crops shape', np.array(crops).shape, '', (block_rank, 2), validator.EQ, self.name)
5479
- for elem in itertools.chain(*crops):
5480
- validator.check_non_negative_int(elem, 'crops element', self.name)
5481
- validator.check_value_type('crops element', elem, [int], self.name)
5482
- self.crops = crops
5883
+ Refer to :func:`mindspore.ops.batch_to_space_nd` for more details.
5483
5884
 
5885
+ .. warning::
5886
+ This is an experimental API that is subject to change or deletion.
5484
5887
 
5485
- class BatchToSpaceNDV2(Primitive):
5486
- r"""
5487
- Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.
5888
+ Inputs:
5889
+ - **input_x** (Tensor) - The input tensor. It must be at least a 2-D
+ tensor (exactly 4-D on Ascend), and its batch dimension must be divisible by the product of `block_shape`.
+ - **block_shape** (Tensor) - The block shape of dividing blocks, with every value greater
+ than or equal to 1. If `block_shape` is a tuple or list, its length M corresponds
+ to the number of spatial dimensions. If `block_shape` is an int, the block size of all M dimensions
+ is the same, equal to `block_shape`. On Ascend, M must be 2.
+ - **crops** (Union[list(int), tuple(int)]) - The crop values for the spatial dimensions, containing
+ M sub-lists, each containing 2 integer values. All values must be >= 0. crops[i] specifies
+ the crop values for spatial dimension i, which corresponds to input dimension i + offset,
+ where offset = N-M, and N is the number of input dimensions. It is required that
+ :math:`input\_shape[i+offset]*block\_shape[i] > crops[i][0]+crops[i][1]`
5488
5900
 
5489
- Refer to :func:`mindspore.ops.batch_to_space_nd` for more details.
5901
+ Outputs:
5902
+ Tensor, contains the result of batch division and rearrangement of the original Tensor.
5490
5903
 
5491
5904
  Supported Platforms:
5492
5905
  ``Ascend``
5906
+
5907
+ Examples:
5908
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> block_shape = Tensor(np.array([2, 2]), mindspore.int32)
+ >>> crops = [[0, 0], [0, 0]]
+ >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
+ >>> output = ops.BatchToSpaceNDV2()(input_x, block_shape, crops)
5912
+ >>> print(output)
5913
+ [[[[1. 2.]
5914
+ [3. 4.]]]]
5493
5915
  """
5494
5916
 
5495
5917
  @prim_attr_register
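To make the `crops` constraint above concrete, here is a small framework-free sketch of the output-shape arithmetic that :func:`mindspore.ops.batch_to_space_nd` performs (plain Python; the helper name is hypothetical):

>>> def batch_to_space_nd_shape(in_shape, block_shape, crops):
...     # out_batch = batch / prod(block_shape); each spatial dim i becomes
...     # in_shape[i + offset] * block_shape[i] - crops[i][0] - crops[i][1]
...     m = len(block_shape)
...     offset = len(in_shape) - m
...     prod = 1
...     for b in block_shape:
...         prod *= b
...     out = [in_shape[0] // prod] + list(in_shape[1:offset])
...     for i in range(m):
...         out.append(in_shape[i + offset] * block_shape[i] - crops[i][0] - crops[i][1])
...     return tuple(out)
>>> batch_to_space_nd_shape((4, 1, 1, 1), (2, 2), [[0, 0], [0, 0]])
(1, 1, 2, 2)

This reproduces the shape of the example output above: four batch entries are folded back into one 2x2 spatial block.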
@@ -5505,10 +5927,22 @@ class BroadcastTo(PrimitiveWithCheck):
5505
5927
 
5506
5928
  Refer to :func:`mindspore.ops.broadcast_to` for more details.
5507
5929
 
5930
+ Args:
5931
+ shape (tuple): The target shape to broadcast. Can be fully specified, or have -1 in one position
5932
+ where it will be substituted by the input tensor's shape in that position, see example.
5933
+
5934
+ Inputs:
5935
+ - **input_x** (Tensor) - The input tensor of any dimension.
5936
+
5937
+ Outputs:
5938
+ Tensor, with the given `shape` and the same data type as `input_x`.
5939
+
5508
5940
  Supported Platforms:
5509
5941
  ``Ascend`` ``GPU`` ``CPU``
5510
5942
 
5511
5943
  Examples:
5944
+ >>> import numpy as np
5945
+ >>> from mindspore import Tensor, ops
5512
5946
  >>> shape = (2, 3)
5513
5947
  >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
5514
5948
  >>> output = ops.BroadcastTo(shape=shape)(x)
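The Args above describe the `-1` substitution; as a further illustration consistent with that description, broadcasting the same `x` with a `-1` in the trailing position should give:

>>> output = ops.BroadcastTo(shape=(2, -1))(x)
>>> print(output)
[[1. 2. 3.]
 [1. 2. 3.]]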
@@ -5547,12 +5981,13 @@ class Meshgrid(PrimitiveWithInfer):
5547
5981
  Refer to :func:`mindspore.ops.meshgrid` for more details.
5548
5982
 
5549
5983
  Args:
5550
- indexing (str, optional): Cartesian ('xy', default) or
5984
+ indexing (str, optional): Cartesian ('xy') or
5551
5985
matrix ('ij') indexing of output. Valid options: 'xy' or 'ij'. In the 2-D case with
5552
- inputs of length `M` and `N`, the outputs are of shape `(N, M)`
5553
- for 'xy' indexing and `(M, N)` for 'ij' indexing. In the 3-D
5986
+ inputs of length `M` and `N`, the outputs are of shape :math:`(N, M)`
5987
+ for 'xy' indexing and :math:`(M, N)` for 'ij' indexing. In the 3-D
5554
5988
  case with inputs of length `M`, `N` and `P`, outputs are of shape
5555
- `(N, M, P)` for 'xy' indexing and `(M, N, P)` for 'ij' indexing.
5989
+ :math:`(N, M, P)` for 'xy' indexing and :math:`(M, N, P)` for 'ij' indexing.
5990
+ Default: 'xy'.
5556
5991
 
5557
5992
  Inputs:
5558
5993
  - **input** (Union[tuple]) - A Tuple of N 1-D Tensor objects.
@@ -5565,6 +6000,8 @@ class Meshgrid(PrimitiveWithInfer):
5565
6000
  ``Ascend`` ``GPU`` ``CPU``
5566
6001
 
5567
6002
  Examples:
6003
+ >>> import numpy as np
6004
+ >>> from mindspore import Tensor, ops
5568
6005
  >>> x = Tensor(np.array([1, 2, 3, 4]).astype(np.int32))
5569
6006
  >>> y = Tensor(np.array([5, 6, 7]).astype(np.int32))
5570
6007
  >>> z = Tensor(np.array([8, 9, 0, 1, 2]).astype(np.int32))
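For these inputs (lengths M=4, N=3, P=5), the indexing choice only permutes the two leading output dimensions, as the Args describe; a quick shape check:

>>> inputs = (x, y, z)
>>> output = ops.Meshgrid(indexing='xy')(inputs)
>>> print(output[0].shape)
(3, 4, 5)
>>> output = ops.Meshgrid(indexing='ij')(inputs)
>>> print(output[0].shape)
(4, 3, 5)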
@@ -5634,7 +6071,7 @@ class Meshgrid(PrimitiveWithInfer):
5634
6071
  return out_shape
5635
6072
 
5636
6073
  def infer_dtype(self, x_type):
5637
- validator.check_subclass("input[0]", x_type[0], mstype.tensor, self.name)
6074
+ validator.check_subclass("input[0]", x_type[0], mstype.tensor_type, self.name)
5638
6075
  n = len(x_type)
5639
6076
  for i in range(1, n):
5640
6077
  validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0], validator.EQ, self.name, TypeError)
@@ -5642,12 +6079,12 @@ class Meshgrid(PrimitiveWithInfer):
5642
6079
 
5643
6080
 
5644
6081
  class ReverseSequence(PrimitiveWithInfer):
5645
- """
6082
+ r"""
5646
6083
  Reverses variable length slices.
5647
6084
 
5648
6085
  Args:
5649
6086
  seq_dim (int): The dimension where reversal is performed. Required.
5650
- batch_dim (int): The input is sliced in this dimension. Default: 0.
6087
+ batch_dim (int): The input is sliced in this dimension. Default: ``0`` .
5651
6088
 
5652
6089
  Inputs:
5653
6090
  - **x** (Tensor) - The input to reverse, supporting all number types including bool.
@@ -5658,12 +6095,20 @@ class ReverseSequence(PrimitiveWithInfer):
5658
6095
 
5659
6096
  Raises:
5660
6097
  TypeError: If `seq_dim` or `batch_dim` is not an int.
5661
- ValueError: If value of `batch_dim` is equal to or greater than length of shape of `x` .
6098
+ ValueError: If :math:`len(seq\_lengths) != x.shape[batch\_dim]`.
6099
+ ValueError: If :math:`batch\_dim == seq\_dim`.
6100
+ ValueError: If :math:`seq\_dim < 0` or :math:`seq\_dim >= len(x.shape)`.
6101
+ ValueError: If :math:`batch\_dim < 0` or :math:`batch\_dim >= len(x.shape)`.
6102
+ RuntimeError: If any value of `seq_lengths` is less than 0.
6103
+ RuntimeError: If any value of `seq_lengths` is larger than `x.shape[seq_dim]`.
5662
6104
 
5663
6105
  Supported Platforms:
5664
6106
  ``Ascend`` ``GPU`` ``CPU``
5665
6107
 
5666
6108
  Examples:
6109
+ >>> import mindspore
6110
+ >>> import numpy as np
6111
+ >>> from mindspore import Tensor, ops
5667
6112
  >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
5668
6113
  >>> seq_lengths = Tensor(np.array([1, 2, 3]))
5669
6114
  >>> reverse_sequence = ops.ReverseSequence(seq_dim=1)
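Each row `i` reverses its first `seq_lengths[i]` elements along `seq_dim`, so completing this example should print:

>>> output = reverse_sequence(x, seq_lengths)
>>> print(output)
[[1. 2. 3.]
 [5. 4. 6.]
 [9. 8. 7.]]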
@@ -5741,7 +6186,7 @@ class EditDistance(Primitive):
5741
6186
  calling this API.
5742
6187
 
5743
6188
  Args:
5744
- normalize (bool): If true, edit distances are normalized by length of truth. Default: True.
6189
+ normalize (bool): If ``True`` , edit distances are normalized by length of truth. Default: ``True`` .
5745
6190
 
5746
6191
  Inputs:
5747
6192
  - **hypothesis_indices** (Tensor) - The indices of the hypothesis list SparseTensor. With int64 data type.
@@ -5798,7 +6243,6 @@ class EditDistance(Primitive):
5798
6243
  def __init__(self, normalize=True):
5799
6244
  """Initialize EditDistance"""
5800
6245
  self.normalize = validator.check_value_type("normalize", normalize, [bool], self.name)
5801
- self.set_const_input_indexes([2, 5])
5802
6246
 
5803
6247
 
5804
6248
  class TransShape(PrimitiveWithInfer):
@@ -5837,12 +6281,13 @@ class Sort(Primitive):
5837
6281
  Using Float32 might cause loss of accuracy.
5838
6282
 
5839
6283
  Args:
5840
- axis (int): The dimension to sort along. Default: -1.
5841
- descending (bool): Controls the sort order. If descending is True then the elements
5842
- are sorted in descending order by value. Default: False.
6284
+ axis (int, optional): The dimension to sort along. Default: ``-1`` , which means the last dimension.
6285
+ The Ascend backend only supports sorting the last dimension.
6286
+ descending (bool, optional): Controls the sort order. If descending is ``True`` then the elements
6287
+ are sorted in descending order by value. Default: ``False`` .
5843
6288
 
5844
6289
  Inputs:
5845
- - **x** (Tensor) - The input tensor of any dimension, with a type of float16 or float32.
6290
+ - **x** (Tensor) - The input tensor.
5846
6291
 
5847
6292
  Outputs:
5848
6293
  - **y1** (Tensor) - A tensor whose values are the sorted values, with the same shape and data type as input.
@@ -5851,13 +6296,15 @@ class Sort(Primitive):
5851
6296
  Raises:
5852
6297
  TypeError: If `axis` is not an int.
5853
6298
  TypeError: If `descending` is not a bool.
5854
- TypeError: If dtype of `x` is neither float16 nor float32.
5855
6299
  ValueError: If `axis` is not in range of [-len(x.shape), len(x.shape)).
5856
6300
 
5857
6301
  Supported Platforms:
5858
6302
  ``Ascend`` ``GPU`` ``CPU``
5859
6303
 
5860
6304
  Examples:
6305
+ >>> import mindspore
6306
+ >>> import numpy as np
6307
+ >>> from mindspore import Tensor, ops
5861
6308
  >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
5862
6309
  >>> sort = ops.Sort()
5863
6310
  >>> output = sort(x)
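The values component of the result matches NumPy's ascending sort along the last axis, which gives a quick way to sanity-check the example:

>>> np.sort(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), axis=-1)
array([[1, 2, 8],
       [3, 5, 9],
       [4, 6, 7]])

The indices component records where each sorted value came from, row by row: [[2, 1, 0], [2, 0, 1], [0, 1, 2]].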
@@ -5880,7 +6327,7 @@ class Sort(Primitive):
5880
6327
  self.init_prim_io_names(inputs=['x'], outputs=['y1', 'y2'])
5881
6328
 
5882
6329
 
5883
- class EmbeddingLookup(PrimitiveWithCheck):
6330
+ class EmbeddingLookup(Primitive):
5884
6331
  """
5885
6332
  Returns a slice of input tensor based on the specified indices.
5886
6333
 
@@ -5908,6 +6355,9 @@ class EmbeddingLookup(PrimitiveWithCheck):
5908
6355
  ``Ascend`` ``GPU`` ``CPU``
5909
6356
 
5910
6357
  Examples:
6358
+ >>> import mindspore
6359
+ >>> import numpy as np
6360
+ >>> from mindspore import Tensor, ops
5911
6361
  >>> input_params = Tensor(np.array([[8, 9], [10, 11], [12, 13], [14, 15]]), mindspore.float32)
5912
6362
  >>> input_indices = Tensor(np.array([[5, 2], [8, 5]]), mindspore.int32)
5913
6363
  >>> offset = 4
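The `offset` is subtracted from each index before the lookup, and adjusted indices falling outside `[0, input_params.shape[0])` yield zero vectors; finishing the example under that rule:

>>> output = ops.EmbeddingLookup()(input_params, input_indices, offset)
>>> print(output)
[[[10. 11.]
  [ 0.  0.]]
 [[ 0.  0.]
  [10. 11.]]]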
@@ -5927,19 +6377,6 @@ class EmbeddingLookup(PrimitiveWithCheck):
5927
6377
  outputs=['output'])
5928
6378
  self.add_prim_attr('bprop_return_sparse', True)
5929
6379
 
5930
- def __check__(self, params, indices, offset):
5931
- validator.check_subclass("params", params['dtype'], mstype.tensor, self.name)
5932
- validator.check_tensor_dtype_valid("indices", indices['dtype'], mstype.int_type, self.name)
5933
- validator.check_subclass("offset", offset['dtype'], mstype.int_, self.name)
5934
- indices_shp = indices['shape']
5935
- if not indices_shp:
5936
- raise ValueError(f"For '{self.name}', the dimension of 'input_indices' should not "
5937
- f"be zero, but got {len(indices_shp)}.")
5938
- params_shp = params['shape']
5939
- if len(params_shp) > 2:
5940
- raise ValueError(f"For '{self.name}', the dimension of 'input_params' must <= 2, "
5941
- f"but got {len(params_shp)}.")
5942
-
5943
6380
 
5944
6381
  class GatherD(Primitive):
5945
6382
  """
@@ -5947,10 +6384,22 @@ class GatherD(Primitive):
5947
6384
 
5948
6385
  Refer to :func:`mindspore.ops.gather_elements` for more details.
5949
6386
 
6387
+ Inputs:
6388
+ - **x** (Tensor) - The input tensor.
6389
+ - **dim** (int) - The axis along which to index. It must be int32 or int64.
6390
+ - **index** (Tensor) - The indices of elements to gather. It can be one of the following data types:
6391
+ int32, int64. The value range of each index element is [-x.shape[dim], x.shape[dim]).
6392
+
6393
+ Outputs:
6394
+ Tensor, has the same data type with `x`.
6395
+
5950
6396
  Supported Platforms:
5951
6397
  ``Ascend`` ``GPU`` ``CPU``
5952
6398
 
5953
6399
  Examples:
6400
+ >>> import mindspore
6401
+ >>> import numpy as np
6402
+ >>> from mindspore import Tensor, ops
5954
6403
  >>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
5955
6404
  >>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
5956
6405
  >>> dim = 1
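With `dim = 1`, the gather rule is `output[i][j] = x[i][index[i][j]]`, so completing the example gives:

>>> output = ops.GatherD()(x, dim, index)
>>> print(output)
[[1 1]
 [4 3]]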
@@ -5968,25 +6417,10 @@ class GatherD(Primitive):
5968
6417
 
5969
6418
  class Identity(Primitive):
5970
6419
  """
5971
- Returns a Tensor with the same shape and contents as input.
5972
-
5973
- Inputs:
5974
- - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is Number.
5975
-
5976
- Outputs:
5977
- Tensor, the shape of tensor and the data type are the same as `input_x`, :math:`(x_1, x_2, ..., x_R)`.
5978
-
5979
- Raises:
5980
- TypeError: If `x` is not a Tensor.
6420
+ The `mindspore.ops.Identity` interface is deprecated; please use :func:`mindspore.ops.deepcopy` instead.
5981
6421
 
5982
6422
  Supported Platforms:
5983
- ``Ascend`` ``GPU`` ``CPU``
5984
-
5985
- Examples:
5986
- >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
5987
- >>> output = ops.Identity()(x)
5988
- >>> print(output)
5989
- [1 2 3 4]
6423
+ Deprecated
5990
6424
  """
5991
6425
 
5992
6426
  @prim_attr_register
@@ -6041,17 +6475,14 @@ class Range(PrimitiveWithCheck):
6041
6475
 
6042
6476
  Args:
6043
6477
  maxlen (int, optional): Memory that can fit `maxlen` many elements
6044
- will be allocated for the output. Optional, must be positive, defaults to 1000000.
6478
+ will be allocated for the output. Must be positive. Default: 1000000.
6045
6479
  If the output has more than `maxlen` elements, a runtime error
6046
6480
  will occur.
6047
6481
 
6048
6482
  Inputs:
6049
- - **start** (Tensor) - A scalar Tensor. The first number in the sequence. Must have
6050
- type: int32 ,int64, float32 or float64.
6051
- - **limit** (Tensor) - A scalar Tensor. Upper limit of the sequence, exclusive. Must
6052
- have type: int32 ,int64, float32 or float64.
6053
- - **delta** (Tensor) - A scalar Tensor. Number that increments `start`. Must have
6054
- type: int32 ,int64, float32 or float64.
6483
+ - **start** (Tensor) - A scalar Tensor. The first number in the sequence.
6484
+ - **limit** (Tensor) - A scalar Tensor. Upper limit of the sequence, exclusive.
6485
+ - **delta** (Tensor) - A scalar Tensor. Number that increments `start`.
6055
6486
 
6056
6487
  Outputs:
6057
6488
  A 1-D Tensor, with the same type as the inputs.
@@ -6060,6 +6491,8 @@ class Range(PrimitiveWithCheck):
6060
6491
  ``GPU`` ``CPU``
6061
6492
 
6062
6493
  Examples:
6494
+ >>> from mindspore import Tensor, ops
6495
+ >>> from mindspore import dtype as mstype
6063
6496
  >>> start = Tensor(0, mstype.int32)
6064
6497
  >>> limit = Tensor(10, mstype.int32)
6065
6498
  >>> delta = Tensor(4, mstype.int32)
@@ -6166,10 +6599,21 @@ class MaskedFill(Primitive):
6166
6599
 
6167
6600
  Refer to :func:`mindspore.ops.masked_fill` for more details.
6168
6601
 
6602
+ Inputs:
6603
+ - **input** (Tensor) - The input Tensor.
6604
+ - **mask** (Tensor[bool]) - The boolean mask.
6605
+ - **value** (Union[float, Tensor]) - The value to fill in with, which dtype is the same as `input`.
6606
+
6607
+ Outputs:
6608
+ Tensor, has the same type and shape as `input`.
6609
+
6169
6610
  Supported Platforms:
6170
6611
  ``Ascend`` ``GPU`` ``CPU``
6171
6612
 
6172
6613
  Examples:
6614
+ >>> import mindspore
6615
+ >>> import numpy as np
6616
+ >>> from mindspore import Tensor, ops
6173
6617
  >>> input = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
6174
6618
  >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
6175
6619
  >>> output = ops.MaskedFill()(input, mask, 0.5)
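Every position where `mask` is True takes the fill value, so the print that completes this example should read:

>>> print(output)
[0.5 0.5 3.  0.5]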
@@ -6184,17 +6628,20 @@ class MaskedFill(Primitive):
6184
6628
 
6185
6629
  class MaskedScatter(Primitive):
6186
6630
  """
6187
- Updates the value in the input with the updates value according to the mask.
6188
- The shapes of `mask` and `x` must be the same or broadcastable.
6631
+ Updates the value in the input with value in `updates` according to the `mask`.
6632
+
6633
+ .. warning::
6634
+ This is an experimental API that is subject to change or deletion.
6189
6635
 
6190
6636
  Inputs:
6191
- - **x** (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
6192
- - **mask** (Tensor[bool]): A bool tensor with a shape broadcastable to x.
6193
- - **updates** (Tensor): A tensor with the same data type as x. The
6637
+ - **x** (Tensor): The input Tensor to be updated.
6638
+ - **mask** (Tensor[bool]): The mask Tensor indicating which elements should be modified or replaced.
6639
+ The shapes of `mask` and `x` must be the same or broadcastable.
6640
+ - **updates** (Tensor): The values to scatter into the target tensor `x`. It has the same data type as `x`. The
6194
6641
  number of elements must be greater than or equal to the number of True's in `mask`.
6195
6642
 
6196
6643
  Outputs:
6197
- Tensor, with the same type and shape as x.
6644
+ Tensor, with the same type and shape as `x`.
6198
6645
 
6199
6646
  Raises:
6200
6647
  TypeError: If `x`, `mask` or `updates` is not a Tensor.
@@ -6202,16 +6649,19 @@ class MaskedScatter(Primitive):
6202
6649
  TypeError: If dtype of `mask` is not bool.
6203
6650
TypeError: If the dim of `x` is less than the dim of `mask`.
ValueError: If `mask` cannot be broadcast to `x`.
- ValueError: If the number of elements in `updates` is less than the number required for the updates.
+ ValueError: If the number of elements in `updates` is less than the number of True's in `mask`.
6206
6653
 
6207
6654
  Supported Platforms:
6208
- ``CPU``
6655
+ ``Ascend`` ``CPU``
6209
6656
 
6210
6657
  Examples:
6211
- >>> x= Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
6658
+ >>> import mindspore
6659
+ >>> import numpy as np
6660
+ >>> from mindspore import Tensor, ops
6661
+ >>> input_x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
6212
6662
  >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
6213
6663
  >>> updates = Tensor(np.array([5., 6., 7.]), mindspore.float32)
6214
- >>> output = ops.MaskedScatter()(input_X, mask, updates)
6664
+ >>> output = ops.MaskedScatter()(input_x, mask, updates)
6215
6665
  >>> print(output)
6216
6666
  [5. 6. 3. 7.]
6217
6667
  """
@@ -6228,8 +6678,8 @@ class MaskedSelect(PrimitiveWithCheck):
6228
6678
  The shapes of the `mask` tensor and the `x` tensor don't need to match, but they must be broadcastable.
6229
6679
 
6230
6680
  Inputs:
6231
- - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
6232
- - **mask** (Tensor[bool]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
6681
+ - **x** (Tensor) - Input Tensor of any dimension.
6682
+ - **mask** (Tensor[bool]) - Boolean mask Tensor, has the same shape as `x`.
6233
6683
 
6234
6684
  Outputs:
6235
6685
  A 1-D Tensor, with the same type as x.
@@ -6242,11 +6692,19 @@ class MaskedSelect(PrimitiveWithCheck):
6242
6692
  ``Ascend`` ``GPU`` ``CPU``
6243
6693
 
6244
6694
  Examples:
6695
+ >>> import mindspore
6696
+ >>> import numpy as np
6697
+ >>> from mindspore import Tensor, ops
6245
6698
  >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int32)
6246
6699
  >>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
6247
6700
  >>> output = ops.MaskedSelect()(x, mask)
6248
6701
  >>> print(output)
6249
6702
  [1 3]
6703
+ >>> x = Tensor(2.1, mindspore.float32)
6704
+ >>> mask = Tensor(True, mindspore.bool_)
6705
+ >>> output = ops.MaskedSelect()(x, mask)
6706
+ >>> print(output)
6707
+ [2.1]
6250
6708
  """
6251
6709
 
6252
6710
  @prim_attr_register
@@ -6273,9 +6731,9 @@ class SearchSorted(Primitive):
6273
6731
 
6274
6732
  Args:
6275
6733
  dtype (:class:`mindspore.dtype`, optional): Output data type. An optional data type of
6276
- `mstype.int32` and `mstype.int64`. Default: `mstype.int64`.
6277
- right (bool, optional): Search Strategy. If True, return the last suitable index found;
6278
- if False, return the first such index. Default: False.
6734
+ ``mstype.int32`` and ``mstype.int64``. Default: ``mstype.int64``.
6735
+ right (bool, optional): Search Strategy. If ``True`` , return the last suitable index found;
6736
+ if ``False`` , return the first such index. Default: ``False`` .
6279
6737
 
6280
6738
  Inputs:
6281
6739
  - **sorted_sequence** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R-1, x_R)` or `(x_1)`.
@@ -6292,6 +6750,9 @@ class SearchSorted(Primitive):
6292
6750
  ``Ascend`` ``GPU`` ``CPU``
6293
6751
 
6294
6752
  Examples:
6753
+ >>> import mindspore
6754
+ >>> import numpy as np
6755
+ >>> from mindspore import Tensor, ops
6295
6756
  >>> sorted_sequence = Tensor(np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]]), mindspore.float32)
6296
6757
  >>> values = Tensor(np.array([[3, 6, 9], [3, 6, 9]]), mindspore.float32)
6297
6758
  >>> output = ops.SearchSorted()(sorted_sequence, values)
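With the default `right=False`, each result is the leftmost insertion point that keeps the row sorted, so the example should print:

>>> print(output)
[[2 4 5]
 [1 2 4]]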
@@ -6358,7 +6819,7 @@ class _TensorScatterOp(PrimitiveWithInfer):
6358
6819
 
6359
6820
 
6360
6821
  class TensorScatterUpdate(_TensorScatterOp):
6361
- """
6822
+ r"""
6362
6823
  Creates a new tensor by updating the positions in `input_x` indicated by
6363
6824
  `indices`, with values from `update`. This operation is almost equivalent to using
6364
6825
  `mindspore.ops.ScatterNdUpdate` , except that the updates are applied on `input_x` instead of a zero tensor.
@@ -6374,14 +6835,17 @@ class TensorScatterUpdate(_TensorScatterOp):
6374
6835
  are multiple index vectors in `indices` that correspond to the same position, the
6375
6836
  value of that position in the output will be nondeterministic.
6376
6837
 
6838
+ .. math::
6839
+ output[indices] = update
6840
+
6377
6841
  Inputs:
6378
- - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
6842
+ - **input_x** (Tensor) - The input tensor. The dimension of input_x must be no less than indices.shape[-1].
6379
6843
The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
6380
6844
  The data type is Number.
6381
6845
  - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
6382
6846
  The rank must be at least 2.
6383
- - **update** (Tensor) - The tensor to update the input tensor, has the same type as input, and
6384
- :math:`update.shape = indices.shape[:-1]+input_x.shape[indices.shape[-1]:]`
6847
+ - **update** (Tensor) - The tensor to update the input tensor, has the same type as `input_x`, and
6848
+ :math:`update.shape = indices.shape[:-1]+input\_x.shape[indices.shape[-1]:]`
6385
6849
 
6386
6850
  Outputs:
6387
6851
  Tensor, has the same shape and type as `input_x`.
@@ -6396,6 +6860,9 @@ class TensorScatterUpdate(_TensorScatterOp):
6396
6860
  ``Ascend`` ``GPU`` ``CPU``
6397
6861
 
6398
6862
  Examples:
6863
+ >>> import mindspore
6864
+ >>> import numpy as np
6865
+ >>> from mindspore import Tensor, ops
6399
6866
  >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
6400
6867
  >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
6401
6868
  >>> update = Tensor(np.array([1.0, 2.2]), mindspore.float32)
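Completing the example: the index vectors [0, 0] and [1, 1] each address a single element of `input_x`, which is replaced by the corresponding entry of `update`:

>>> op = ops.TensorScatterUpdate()
>>> output = op(input_x, indices, update)
>>> print(output)
[[ 1.   0.3  3.6]
 [ 0.4  2.2 -3.2]]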
@@ -6452,10 +6919,23 @@ class TensorScatterMax(Primitive):
6452
6919
 
6453
6920
  Refer to :func:`mindspore.ops.tensor_scatter_max` for more details.
6454
6921
 
6922
+ Inputs:
6923
+ - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
6924
+ - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
6925
+ The rank must be at least 2.
6926
+ - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
6927
+ and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
6928
+
6929
+ Outputs:
6930
+ Tensor, has the same shape and type as `input_x`.
6931
+
6455
6932
  Supported Platforms:
6456
6933
  ``GPU`` ``CPU``
6457
6934
 
6458
6935
  Examples:
6936
+ >>> import mindspore
6937
+ >>> import numpy as np
6938
+ >>> from mindspore import Tensor, ops
6459
6939
  >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
6460
6940
  >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
6461
6941
  >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
@@ -6487,10 +6967,23 @@ class TensorScatterMin(Primitive):
6487
6967
 
6488
6968
  Refer to :func:`mindspore.ops.tensor_scatter_min` for more details.
6489
6969
 
6970
+ Inputs:
6971
+ - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
6972
+ - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
6973
+ The rank must be at least 2.
6974
+ - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
6975
+ and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
6976
+
6977
+ Outputs:
6978
+ Tensor, has the same shape and type as `input_x`.
6979
+
6490
6980
  Supported Platforms:
6491
6981
  ``Ascend`` ``GPU`` ``CPU``
6492
6982
 
6493
6983
  Examples:
6984
+ >>> import mindspore
6985
+ >>> import numpy as np
6986
+ >>> from mindspore import Tensor, ops
6494
6987
  >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
6495
6988
  >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
6496
6989
  >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
@@ -6516,18 +7009,35 @@ class TensorScatterMin(Primitive):
6516
7009
 
6517
7010
 
6518
7011
  class TensorScatterSub(Primitive):
6519
- """
7012
+ r"""
6520
7013
  Creates a new tensor by subtracting the values from the positions in `input_x` indicated by
6521
7014
  `indices`, with values from `updates`. When multiple values are provided for the same
6522
7015
  index, the result of the update will be to subtract these values respectively. This operation is almost
6523
7016
  equivalent to using :class:`mindspore.ops.ScatterNdSub` , except that the updates are applied on output `Tensor`
6524
7017
  instead of input `Parameter`.
7018
+
7019
+ .. math::
7020
+ output[indices] = input\_x - update
7021
+
6525
7022
  Refer to :func:`mindspore.ops.tensor_scatter_sub` for more details.
6526
7023
 
7024
+ Inputs:
7025
+ - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
7026
+ - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
7027
+ The rank must be at least 2.
7028
+ - **updates** (Tensor) - The tensor to update the input tensor, has the same type as `input_x`,
7029
+ and the shape of `updates` should be equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
7030
+
7031
+ Outputs:
7032
+ Tensor, has the same shape and type as `input_x`.
7033
+
6527
7034
  Supported Platforms:
6528
7035
  ``Ascend`` ``GPU`` ``CPU``
6529
7036
 
6530
7037
  Examples:
7038
+ >>> import mindspore
7039
+ >>> import numpy as np
7040
+ >>> from mindspore import Tensor, ops
6531
7041
  >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
6532
7042
  >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
6533
7043
  >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
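Because both index vectors point at position (0, 0), the updates accumulate as described above: -0.1 - 1.0 - 2.2 = -3.3 (printed with float32 rounding), leaving the rest of `input_x` untouched:

>>> op = ops.TensorScatterSub()
>>> output = op(input_x, indices, updates)
>>> print(output)
[[-3.3000002  0.3        3.6      ]
 [ 0.4        0.5       -3.2      ]]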
@@ -6562,10 +7072,23 @@ class TensorScatterAdd(Primitive):
6562
7072
 
6563
7073
  Refer to :func:`mindspore.ops.tensor_scatter_add` for more details.
6564
7074
 
7075
+ Inputs:
7076
+ - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
7077
+ - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
7078
+ The rank must be at least 2.
7079
+ - **updates** (Tensor) - The tensor to update the input tensor, has the same type as `input_x`,
+ and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
7081
+
7082
+ Outputs:
7083
+ Tensor, has the same shape and type as `input_x`.
7084
+
6565
7085
  Supported Platforms:
6566
7086
  ``Ascend`` ``GPU`` ``CPU``
6567
7087
 
6568
7088
  Examples:
7089
+ >>> import mindspore
7090
+ >>> import numpy as np
7091
+ >>> from mindspore import Tensor, ops
6569
7092
  >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
6570
7093
  >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
6571
7094
  >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
@@ -6591,18 +7114,34 @@ class TensorScatterAdd(Primitive):
6591
7114
 
6592
7115
 
6593
7116
  class TensorScatterMul(_TensorScatterOp):
6594
- """
7117
+ r"""
6595
7118
  Creates a new tensor by multiplying the values from the positions in `input_x` indicated by
6596
7119
  `indices`, with values from `updates`. When multiple values are provided for the same
6597
7120
  index, the result of the update will be to multiply these values respectively.
6598
7121
  The updates are applied on output `Tensor` instead of input `Parameter`.
6599
7122
 
7123
+ .. math::
7124
+ output[indices] = input\_x \times update
7125
+
6600
7126
  Refer to :func:`mindspore.ops.tensor_scatter_mul` for more details.
6601
7127
 
7128
+ Inputs:
7129
+ - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
7130
+ - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
7131
+ The rank must be at least 2.
7132
+ - **updates** (Tensor) - The tensor to update the input tensor, has the same type as `input_x`,
7133
+ and the shape of `updates` should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
7134
+
7135
+ Outputs:
7136
+ Tensor, has the same shape and type as `input_x`.
7137
+
6602
7138
  Supported Platforms:
6603
7139
  ``GPU`` ``CPU``
6604
7140
 
6605
7141
  Examples:
7142
+ >>> import mindspore
7143
+ >>> import numpy as np
7144
+ >>> from mindspore import Tensor, ops
6606
7145
  >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
6607
7146
  >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
6608
7147
  >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
@@ -6636,10 +7175,23 @@ class TensorScatterDiv(_TensorScatterOp):
6636
7175
 
6637
7176
  Refer to :func:`mindspore.ops.tensor_scatter_div` for more details.
6638
7177
 
7178
+ Inputs:
7179
+ - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
7180
+ - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
7181
+ The rank must be at least 2.
7182
+ - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
7183
+ and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
7184
+
7185
+ Outputs:
7186
+ Tensor, has the same shape and type as `input_x`.
7187
+
6639
7188
  Supported Platforms:
6640
7189
  ``GPU`` ``CPU``
6641
7190
 
6642
7191
  Examples:
7192
+ >>> import mindspore
7193
+ >>> import numpy as np
7194
+ >>> from mindspore import Tensor, ops
6643
7195
  >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
6644
7196
  >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
6645
7197
  >>> updates = Tensor(np.array([1.0, 2.0]), mindspore.float32)
@@ -6677,8 +7229,8 @@ class ListDiff(Primitive):
6677
7229
 
6678
7230
  Args:
6679
7231
  out_idx (:class:`mindspore.dtype`, optional): The dtype of `idx`,
6680
- an optioanal datatype of `mstype.int32` and `mstype.int64`.
6681
- Default: `mstype.int32`.
7232
+ an optional datatype of ``mstype.int32`` and ``mstype.int64`` .
7233
+ Default: ``mstype.int32`` .
6682
7234
 
6683
7235
  Inputs:
6684
7236
  - **x** - Values to keep. A 1-D `Tensor`.
@@ -6810,7 +7362,9 @@ class SplitV(Primitive):
6810
7362
 
6811
7363
  class TensorScatterElements(Primitive):
6812
7364
  """
6813
- Updates the value of the input Tensor through specified reduction operation.
7365
+ Write all elements in `updates` to the index specified by `indices` in `input_x` according to the reduction
7366
+ operation specified by `reduction`.
7367
+ `axis` controls the direction of the scatter operation.
6814
7368
 
6815
7369
  Refer to :func:`mindspore.ops.tensor_scatter_elements` for more details.
6816
7370
 
@@ -6818,10 +7372,32 @@ class TensorScatterElements(Primitive):
6818
7372
  If there are multiple index vectors in `indices` that correspond to the same position,
6819
7373
  the value of that position in the output will be nondeterministic.
6820
7374
 
7375
+ .. warning::
7376
+ This is an experimental API that is subject to change or deletion.
7377
+
7378
+ Args:
7379
+ axis (int, optional): Specify which axis to do scatter operation. Default: ``0`` .
7380
+ reduction (str, optional): Which reduction operation to scatter, default is ``"none"`` . Other option: "add".
7381
+
7382
+ Inputs:
7383
+ - **data** (Tensor) - The target tensor. Its rank must be at least 1.
7384
+ - **indices** (Tensor) - The index of `data` to do the scatter operation, whose data type must be int32 or
+ int64. It has the same rank as `data`, and the accepted range is [-s, s) where s is the size of `data`
+ along `axis`.
+ - **updates** (Tensor) - The tensor doing the scatter operation with `data`,
+ it has the same type as `data` and the same shape as `indices`.
7390
+
7391
+ Outputs:
7392
+ Tensor, has the same shape and type as `data`.
7393
+
6821
7394
  Supported Platforms:
6822
7395
  ``Ascend`` ``GPU`` ``CPU``
6823
7396
 
6824
7397
  Examples:
7398
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
6825
7401
  >>> op = ops.TensorScatterElements(0, "none")
6826
7402
  >>> data = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
6827
7403
  >>> indices = Tensor(np.array([[1, 0, 2], [0, 2, 1]]), mindspore.int32)
@@ -6831,9 +7407,12 @@ class TensorScatterElements(Primitive):
6831
7407
  [[ 0.0 0.0 3.0]
6832
7408
  [ 0.0 5.0 0.0]
6833
7409
  [ 7.0 0.0 0.0]]
6834
7413
  >>> op = ops.TensorScatterElements(1, "add")
6835
- >>> data = Tensor(np.array([[1, 2, 3, 4, 5]), mindspore.float32)
6836
- >>> indices = Tensor(np.array([[2, 4]), mindspore.int32)
7414
+ >>> data = Tensor(np.array([[1, 2, 3, 4, 5]]), mindspore.float32)
7415
+ >>> indices = Tensor(np.array([[2, 4]]), mindspore.int32)
6837
7416
  >>> updates = Tensor(np.array([[8, 8]]), mindspore.float32)
6838
7417
  >>> output = op(data, indices, updates)
6839
7418
  >>> print(output)
@@ -6849,7 +7428,8 @@ class TensorScatterElements(Primitive):
6849
7428
  self.init_prim_io_names(inputs=['data', 'indices', 'updates'], outputs=['y'])
6850
7429
  target = context.get_context("device_target")
6851
7430
  if reduction != 'none' and target.lower() == "ascend":
6852
- raise ValueError(f"Currently Ascend device_target only support `reduction`='none', "
7431
+ raise ValueError(f"For '{self.name}', "
7432
+ f"Currently Ascend device_target only support `reduction`='none', "
6853
7433
  f"but got {reduction}")
6854
7434
 
6855
7435
 
@@ -6858,6 +7438,9 @@ class ExtractVolumePatches(Primitive):
6858
7438
  Extract patches from input and put them in the "depth" output dimension.
6859
7439
  "depth" dimension is the second dim of output.
6860
7440
 
7441
+ .. warning::
7442
+ This is an experimental API that is subject to change or deletion.
7443
+
6861
7444
  Args:
6862
7445
kernel_size (Union[int, tuple[int], list[int]]): A list of ints whose length is 3 or 5.
6863
7446
  The size of the sliding window for each dimension of input. Must be: :math:`[1, 1, k_d, k_h, k_w]` or
@@ -6865,10 +7448,10 @@ class ExtractVolumePatches(Primitive):
6865
7448
strides (Union[int, tuple[int], list[int]]): A list of ints whose length is 3 or 5.
6866
7449
  How far the centers of two consecutive patches are in input. Must be: :math:`[1, 1, s_d, s_h, s_w]` or
6867
7450
  :math:`[s_d, s_h, s_w]`. If :math:`s_d = s_h = s_w`, you can enter an integer.
6868
- padding (str): A string from: "SAME", "VALID". The type of padding algorithm to use.
7451
+ padding (str): A string from: ``"SAME"`` , ``"VALID"`` . The type of padding algorithm to use.
6869
7452
 
6870
7453
  Inputs:
6871
- - **input_x** (Tensor) - A Tensor. 5-D Tensor with shape :math:`()`.
7454
+ - **input_x** (Tensor) - A Tensor. 5-D Tensor with shape :math:`(x_n, x_c, x_d, x_h, x_w)`.
6872
7455
 
6873
7456
  Outputs:
6874
7457
  Tensor, has the same type as input.
@@ -6882,7 +7465,7 @@ class ExtractVolumePatches(Primitive):
6882
7465
  TypeError: If padding is not str.
6883
7466
  ValueError: If the length of kernel_size is neither 3 nor 5 and kernel_size is not an integer.
6884
7467
  ValueError: If the length of strides is neither 3 nor 5 and strides is not an integer.
6885
- ValueError: If padding is neither "VALID" nor "SAME".
7468
+ ValueError: If padding is neither ``"VALID"`` nor ``"SAME"`` .
6886
7469
  ValueError: If elements of kernel_size or strides are not positive integer.
6887
7470
  ValueError: If input_x is not a tensor in dimension 5.
6888
7471
  ValueError: If input_x's shape has zero.
@@ -6897,10 +7480,13 @@ class ExtractVolumePatches(Primitive):
6897
7480
  ``Ascend`` ``GPU`` ``CPU``
6898
7481
 
6899
7482
  Examples:
7483
+ >>> import numpy as np
7484
+ >>> from mindspore import Tensor, ops
7485
+ >>> from mindspore import dtype as mstype
6900
7486
  >>> kernel_size = (1, 1, 2, 2, 2)
6901
7487
  >>> strides = (1, 1, 1, 1, 1)
6902
7488
  >>> padding = "VALID"
6903
- >>> input_x = P.Reshape()(Tensor(np.arange(1, 28), mstype.float16), (1, 1, 3, 3, 3))
7489
+ >>> input_x = ops.Reshape()(Tensor(np.arange(1, 28), mstype.float16), (1, 1, 3, 3, 3))
6904
7490
  >>> output_y = ops.ExtractVolumePatches(kernel_size, strides, padding)(input_x)
6905
7491
  >>> print(output_y.shape)
6906
7492
  (1, 8, 2, 2, 2)
@@ -6994,14 +7580,14 @@ class Lstsq(Primitive):
6994
7580
  \end{array}
6995
7581
 
6996
7582
  Args:
6997
- fast (bool, optional): Solving algorithm. Default: True.
7583
+ fast (bool, optional): Solving algorithm. Default: ``True`` .
6998
7584
 
6999
7585
  - If `fast` is True, then the solution is computed by solving
7000
7586
  the normal equations using Cholesky decomposition.
7001
7587
  - If `fast` is False, an algorithm based on numerically robust
7002
7588
  completed orthogonal decomposition is used.
7003
7589
 
7004
- l2_regularizer (float, optional): L2 regularization coefficient. Default: 0.0.
7590
+ l2_regularizer (float, optional): L2 regularization coefficient. Default: ``0.0`` .
7005
7591
 
7006
7592
  Inputs:
7007
7593
  - **x** (Tensor) - :math:`(m \times n)` matrix `x`. The input tensor whose data type is
@@ -7050,8 +7636,8 @@ class LowerBound(Primitive):
7050
7636
 
7051
7637
  Args:
7052
7638
  out_type (:class:`mindspore.dtype`, optional): An optional data type of
7053
- `mindspore.dtype.int32` and `mindspore.dtype.int64`.
7054
- Default: `mindspore.dtype.int32`.
7639
+ ``mindspore.dtype.int32`` and ``mindspore.dtype.int64`` .
7640
+ Default: ``mindspore.dtype.int32`` .
7055
7641
 
7056
7642
  Inputs:
7057
7643
  - **sorted_x** (Tensor) - The input tensor whose dtype is real number and
@@ -7106,8 +7692,8 @@ class UpperBound(Primitive):
7106
7692
 
7107
7693
  Args:
7108
7694
  out_type (:class:`mindspore.dtype`, optional): Specified output type.
7109
- Supported types: `mindspore.dtype.int32` and `mindspore.dtype.int64`.
7110
- Default: `mindspore.dtype.int32`.
7695
+ Supported types: ``mindspore.dtype.int32`` and ``mindspore.dtype.int64`` .
7696
+ Default: ``mindspore.dtype.int32`` .
7111
7697
 
7112
7698
  Inputs:
7113
7699
  - **sorted_x** (Tensor) - The input tensor whose dtype is real number. The rank must be 2.
@@ -7158,6 +7744,16 @@ class Cummax(Primitive):
7158
7744
 
7159
7745
  Refer to :func:`mindspore.ops.cummax` for more details.
7160
7746
 
7747
+ Args:
7748
+ axis (int): The axis to accumulate the tensor's value. Must be in the range [-rank(input), rank(input)).
7749
+
7750
+ Inputs:
7751
+ - **input** (Tensor) - The input tensor.
7752
+
7753
+ Outputs:
7754
+ A tuple of 2 Tensors (values, indices), containing the cumulative maximum of elements and their indices.
+ The shape of each output tensor is the same as that of `input`.
7756
+
7161
7757
  Supported Platforms:
7162
7758
  ``GPU`` ``CPU``
7163
7759
 
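A minimal usage sketch, assumed from the documented semantics (values are the running maxima; indices record where each maximum was first attained):

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1, 3, 2, 4]), mindspore.float32)
>>> values, indices = ops.Cummax(axis=0)(x)
>>> print(values)
[1. 3. 3. 4.]
>>> print(indices)
[0 1 1 3]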
@@ -7206,7 +7802,7 @@ class RightShift(Primitive):
7206
7802
 
7207
7803
  Inputs:
7208
7804
  - **input_x** (Tensor) - The target tensor, will be shifted to the right
7209
- by `input_y` bits element-wise.
7805
+ by `input_y` bits element-wise. Support all int and uint types.
7210
7806
  - **input_y** (Tensor) - Number of bits shifted, the tensor must have the same type as `input_x`.
7211
7807
 
7212
7808
  Outputs:
@@ -7220,6 +7816,8 @@ class RightShift(Primitive):
7220
7816
  ``Ascend`` ``GPU`` ``CPU``
7221
7817
 
7222
7818
  Examples:
7819
+ >>> import numpy as np
7820
+ >>> from mindspore import Tensor, ops
7223
7821
  >>> rightshift = ops.RightShift()
7224
7822
  >>> input_x = Tensor(np.array([1, 2, 3]).astype(np.uint8))
7225
7823
  >>> input_y = Tensor(np.array([1, 1, 1]).astype(np.uint8))
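Element-wise the result is `input_x >> input_y`, so completing the example gives:

>>> output = rightshift(input_x, input_y)
>>> print(output)
[0 1 1]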
@@ -7251,11 +7849,10 @@ class LogSpace(Primitive):
7251
7849
  This is an experimental API that is subject to change or deletion.
7252
7850
 
7253
7851
  Args:
7254
- steps (int, optional): The steps must be a non-negative integer. Default: 10.
7255
- base (int, optional): The base must be a non-negative integer. Default: 10.
7256
- dtype (mindspore.dtype, optional): The dtype of output, include mindspore.float16,
7257
- mindspore.float32 or mindspore.float64. Default: mindspore.float32.
7258
-
7852
+ steps (int, optional): The steps must be a non-negative integer. Default: ``10`` .
7853
+ base (int, optional): The base must be a non-negative integer. Default: ``10`` .
7854
+ dtype (mindspore.dtype, optional): The dtype of output, include ``mstype.float16`` ,
7855
+ ``mstype.float32`` or ``mstype.float64`` . Default: ``mstype.float32`` .
7259
7856
 
7260
7857
  Inputs:
7261
7858
  - **start** (Tensor) - Start value of interval, with shape of 0-D,
@@ -7270,8 +7867,8 @@ class LogSpace(Primitive):
7270
7867
  TypeError: If `input` is not a Tensor.
7271
7868
  TypeError: If `steps` is not an int.
7272
7869
  TypeError: If `base` is not an int.
7273
- TypeError: If `dtype` is not mindspore.float16, mindspore.float32 or
7274
- mindspore.float64.
7870
+ TypeError: If `dtype` is not mstype.float16, mstype.float32 or
7871
+ mstype.float64.
7275
7872
  ValueError: If `steps` is not a non-negative integer.
7276
7873
  ValueError: If `base` is not a non-negative integer.
7277
7874
 
@@ -7279,9 +7876,11 @@ class LogSpace(Primitive):
7279
7876
  ``Ascend`` ``GPU`` ``CPU``
7280
7877
 
7281
7878
  Examples:
7282
- >>> logspace = ops.LogSpace(steps = 10, base = 10, dtype=mindspore.float32)
7283
- >>> start = Tensor(1, mindspore.float32)
7284
- >>> end = Tensor(10, mindspore.float32)
7879
+ >>> from mindspore import Tensor, ops
7880
+ >>> from mindspore import dtype as mstype
7881
+ >>> logspace = ops.LogSpace(steps = 10, base = 10, dtype=mstype.float32)
7882
+ >>> start = Tensor(1, mstype.float32)
7883
+ >>> end = Tensor(10, mstype.float32)
7285
7884
  >>> output = logspace(start, end)
7286
7885
  >>> print(output)
7287
7886
  [1.e+01 1.e+02 1.e+03 1.e+04 1.e+05 1.e+06 1.e+07 1.e+08 1.e+09 1.e+10]
@@ -7306,6 +7905,12 @@ class NonZero(Primitive):
7306
7905
 
7307
7906
  Refer to :func:`mindspore.ops.nonzero` for more details.
7308
7907
 
7908
+ Inputs:
7909
+ - **x** (Tensor) - The input Tensor, its rank should be greater than or equal to 1.
7910
+
7911
+ Outputs:
7912
+ - **y** (Tensor), 2-D Tensor of data type int64.
7913
+
7309
7914
  Supported Platforms:
7310
7915
  ``Ascend`` ``GPU`` ``CPU``
7311
7916
 
@@ -7345,12 +7950,12 @@ class Tril(Primitive):
7345
7950
  This is an experimental API that is subject to change or deletion.
7346
7951
 
7347
7952
  Args:
7348
- diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: 0,
7953
+ diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: ``0`` ,
7349
7954
  indicating the main didiagonal.
7350
7955
 
7351
7956
  Inputs:
7352
- - **x** (Tensor) - A Tensor with shape :math:`(x_1, x_2, ..., x_R)`. The rank must be at least 2.
7353
- Supporting all number types including bool.
7957
+ - **x** (Tensor) - The input tensor with shape :math:`(M, N, *)`
7958
+ where :math:`*` means any number of additional dimensions.
7354
7959
 
7355
7960
  Outputs:
7356
7961
  Tensor, the same shape and data type as the input `x`.
@@ -7358,13 +7963,14 @@ class Tril(Primitive):
7358
7963
  Raises:
7359
7964
  TypeError: If `x` is not a Tensor.
7360
7965
  TypeError: If `diagonal` is not an int.
7361
- TypeError: If the type of `x` is neither number nor bool.
7362
7966
  ValueError: If the rank of `x` is less than 2.
7363
7967
 
7364
7968
  Supported Platforms:
7365
7969
  ``Ascend`` ``GPU`` ``CPU``
7366
7970
 
7367
7971
  Examples:
7972
+ >>> import numpy as np
7973
+ >>> from mindspore import Tensor, ops
7368
7974
  >>> x = Tensor(np.array([[ 1, 2, 3, 4],
7369
7975
  ... [ 5, 6, 7, 8],
7370
7976
  ... [10, 11, 12, 13],
@@ -7417,10 +8023,23 @@ class IndexFill(Primitive):
7417
8023
 
7418
8024
  Refer to :func:`mindspore.ops.index_fill` for more details.
7419
8025
 
8026
+ Inputs:
8027
+ - **x** (Tensor) - Input tensor.
8028
+ - **dim** (Union[int, Tensor]) - Dimension along which to fill the input tensor. Only supports
8029
+ a 0-dimensional tensor or an int number.
8030
+ - **index** (Tensor) - Indices of the input tensor to fill in.
8031
+ - **value** (Union[bool, int, float, Tensor]) - Value to fill the input tensor.
8032
+
8033
+ Outputs:
8034
+ Tensor, has the same type and shape as input tensor.
8035
+
7420
8036
  Supported Platforms:
7421
8037
  ``Ascend`` ``GPU`` ``CPU``
7422
8038
 
7423
8039
  Examples:
8040
+ >>> import mindspore
8041
+ >>> import numpy as np
8042
+ >>> from mindspore import Tensor, ops
7424
8043
  >>> index_fill = ops.IndexFill()
7425
8044
  >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32))
7426
8045
  >>> index = Tensor([0, 2], mindspore.int32)
@@ -7440,39 +8059,44 @@ class IndexFill(Primitive):
7440
8059
 
7441
8060
  class IndexPut(Primitive):
7442
8061
  r"""
7443
- According to the index number of indexes, replace the value corresponding to x1 with the value in x2.
8062
+ According to the index number of `indexes`, replace the value corresponding to `x1` with the value in `x2`.
7444
8063
 
7445
8064
  Args:
7446
8065
  accumulate (int): If accumulate is 1, the elements in x2 are added to x1,
7447
- else the elements in x2 replace the corresponding element in x1, should be 0 or 1. Default: 0.
8066
+ else the elements in x2 replace the corresponding elements in x1. `accumulate` should be 0 or 1. Default: ``0`` .
8067
+
7448
8068
  Inputs:
7449
8069
  - **x1** (Tensor) - The assigned target tensor, 1-D or higher dimensional.
7450
- - **x2** (Tensor) - 1-D Tensor of the same type as "x1". if size= 1 will be broadcast
8070
+ - **x2** (Tensor) - 1-D Tensor of the same type as `x1`. If the size of `x2` is 1,
8071
+ it will broadcast to the same size as `x1`.
7451
8072
  - **indices** (tuple[Tensor], list[Tensor]) - the indices of type int32 or int64, used to index into x1.
7452
- The rank of tensors in indices should be 1-D, size of indices should <= x1.rank and the tensors in indices
7453
- should be broadcastable.
8073
+ The tensors in indices should be 1-D, the size of indices should be <= x1.rank, and the tensors in
+ indices should be broadcastable.
7454
8075
 
7455
8076
  Outputs:
7456
- The Tensor to be assigned. Should be of the same type and shape as "x1".
8077
+ Tensor, has the same dtype and shape as `x1`.
7457
8078
 
7458
8079
  Raises:
7459
- TypeError: If the dtype of `x1` is not equal to the dtype of `x2`.
7460
- TypeError: If the dtype of `indices` is not tuple[Tensor], list[Tensor].
7461
- TypeError: If the dtype of tensors in `indices` are not int32 or int64.
7462
- TypeError: If the dtype of tensors in `indices` are inconsistent.
7463
- TypeError: If the dtype of `accumulate` are not int.
7464
- ValueError: If rank(x2) is not 1-D.
7465
- ValueError: If size(x2) is not 1 or max size of the tensors in `indices` when rank(x1) == size(indices).
7466
- ValueError: If size(x2) is not 1 or x1.shape[-1] when rank(x1) > size(indices).
7467
- ValueError: If the rank of tensors in `indices` is not 1-D.
7468
- ValueError: If the tensors in `indices` is not be broadcastable.
7469
- ValueError: If size(indices) > rank(x1).
7470
- ValueError: If `accumulate` is not equal to 0 or 1.
8080
+ TypeError: If the dtype of `x1` is not equal to the dtype of `x2`.
8081
+ TypeError: If `indices` is not tuple[Tensor] or list[Tensor].
8082
+ TypeError: If the dtype of tensors in `indices` is not int32 or int64.
+ TypeError: If the dtypes of tensors in `indices` are inconsistent.
+ TypeError: If the dtype of `accumulate` is not int.
+ ValueError: If `x2` is not 1-D.
+ ValueError: If size(x2) is not 1 or max size of the tensors in `indices` when rank(x1) == size(indices).
+ ValueError: If size(x2) is not 1 or x1.shape[-1] when rank(x1) > size(indices).
+ ValueError: If the tensors in `indices` are not 1-D.
+ ValueError: If the tensors in `indices` are not broadcastable.
8090
+ ValueError: If size(indices) > rank(x1).
8091
+ ValueError: If `accumulate` is not equal to 0 or 1.
7471
8092
 
7472
8093
  Supported Platforms:
7473
- ``CPU``
8094
+ ``Ascend`` ``CPU``
7474
8095
 
7475
8096
  Examples:
8097
+ >>> import mindspore
8098
+ >>> import numpy as np
8099
+ >>> from mindspore import Tensor, ops
7476
8100
  >>> x1 = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
7477
8101
  >>> x2 = Tensor(np.array([3]).astype(np.int32))
7478
8102
  >>> indices = [Tensor(np.array([0, 0]).astype(np.int32)), Tensor(np.array([0, 1]).astype(np.int32))]
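With `accumulate=0`, the size-1 `x2` is broadcast and written at the addressed coordinates (0, 0) and (0, 1), so completing the example gives:

>>> index_put = ops.IndexPut(accumulate=0)
>>> output = index_put(x1, x2, indices)
>>> print(output)
[[3 3 3]
 [4 5 6]]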
@@ -7671,15 +8295,15 @@ class LeftShift(Primitive):
7671
8295
  This is an experimental API that is subject to change or deletion.
7672
8296
 
7673
8297
  Inputs:
7674
- - **x1** (Tensor) - The target tensor whose dtype supports int8, int16, int32, int64,
7675
- uint8, uint16, uint32, uint64, will be shifted to the left by x2 in element-wise.
7676
- - **x2** (Tensor) - The tensor must have the same dtype as x1. And the tensor must have the same shape as x1
7677
- or could be broadcast with x1.
8298
+ - **x1** (Tensor) - The target tensor whose dtype supports all int and uint type,
8299
+ will be shifted to the left by `x2` in element-wise.
8300
+ - **x2** (Tensor) - The tensor must have the same dtype as `x1`.
8301
+ And the tensor must have the same shape as `x1` or could be broadcast with `x1`.
7678
8302
 
7679
8303
  Outputs:
7680
- - **output** (Tensor) - The output tensor, has the same dtype as x1.
7681
- And the shape of the output tensor is the same shape as x1, or the same shape
7682
- as x1 and x2 after broadcasting.
8304
+ - **output** (Tensor) - The output tensor, has the same dtype as `x1`.
8305
+ And the shape of the output tensor is the same shape as `x1`, or the same shape
8306
+ as `x1` and `x2` after broadcasting.
7683
8307
 
7684
8308
  Raises:
7685
8309
  TypeError: If `x1` or `x2` has wrong type.
@@ -7690,12 +8314,14 @@ class LeftShift(Primitive):
7690
8314
  ``Ascend`` ``GPU`` ``CPU``
7691
8315
 
7692
8316
  Examples:
8317
+ >>> import numpy as np
8318
+ >>> from mindspore import Tensor, ops
7693
8319
  >>> left_shift = ops.LeftShift()
7694
8320
  >>> x1 = Tensor(np.array([1, 2, 3]).astype(np.int8))
7695
8321
  >>> x2 = Tensor(np.array([0, 1, -1]).astype(np.int8))
7696
8322
  >>> output = left_shift(x1, x2)
7697
8323
  >>> print(output)
7698
- [1 4 3]
8324
+ [1 4 0]
7699
8325
  """
7700
8326
 
7701
8327
  @prim_attr_register
@@ -7710,22 +8336,23 @@ class FillDiagonal(Primitive):
7710
8336
  The input has at least 2 dimensions, and all dimensions of input must be equal in length
7711
8337
  when the dimension of input is greater than 2.
7712
8338
 
8339
+ .. warning::
8340
+ This is an experimental API that is subject to change or deletion.
8341
+
7713
8342
  Args:
7714
8343
  fill_value (float): The value to fill the diagonal of `input_x`.
7715
8344
  wrap (bool, optional): Controls whether the diagonal elements continue onto the
7716
8345
  remaining rows in case of a tall matrix(A matrix has more rows than columns).
7717
- Examples blow demonstrates how it works on a tall matrix if `wrap` is set True.
7718
- Default: False.
8346
+ Examples blow demonstrates how it works on a tall matrix if `wrap` is set ``True`` .
8347
+ Default: ``False`` .
7719
8348
 
7720
8349
  Inputs:
7721
8350
  - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
7722
- The data type must be float32, int32 or int64.
7723
8351
 
7724
8352
  Outputs:
7725
8353
  - **y** (Tensor) - Tensor, has the same shape and data type as the input `input_x`.
7726
8354
 
7727
8355
  Raises:
7728
- TypeError: If data type of `input_x` is not one of the following: float32, int32, int64.
7729
8356
  ValueError: If the dimension of `input_x` is not greater than 1.
7730
8357
  ValueError: If the size of each dimension is not equal, when the dimension is greater than 2.
7731
8358
 
@@ -7733,6 +8360,8 @@ class FillDiagonal(Primitive):
7733
8360
  ``Ascend`` ``GPU`` ``CPU``
7734
8361
 
7735
8362
  Examples:
8363
+ >>> import numpy as np
8364
+ >>> from mindspore import Tensor, ops
7736
8365
  >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32))
7737
8366
  >>> fill_value = 9.9
7738
8367
  >>> fill_diagonal = ops.FillDiagonal(fill_value)
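Completing the example: the main diagonal of the 3x3 input is overwritten with 9.9 and everything else is untouched:

>>> output = fill_diagonal(x)
>>> print(output)
[[9.9 2.  3. ]
 [4.  9.9 6. ]
 [7.  8.  9.9]]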
@@ -7792,16 +8421,16 @@ class HammingWindow(Primitive):
7792
8421
 
7793
8422
  Args:
7794
8423
  periodic (bool, optional): a flag determines whether the returned window trims off
7795
- the last duplicate value from the symmetric window. Default: True.
8424
+ the last duplicate value from the symmetric window. Default: ``True`` .
7796
8425
 
7797
8426
  - If True, returns a window to be used as periodic function, in above formula,
7798
8427
  :math:`N = \text{length} + 1`.
7799
8428
  - If False, return a symmetric window, :math:`N = \text{length}`.
7800
8429
 
7801
- alpha (float, optional): The coefficient :math:`\alpha` in the equation above. Default: 0.54.
7802
- beta (float, optional): The coefficient :math:`\beta` in the equation above. Default: 0.46.
7803
- dtype (:class:`mindspore.dtype`, optional): An optional data type of `mstype.float16`,
7804
- `mstype.float32` and `mstype.float64`. Default: `mstype.float32`.
8430
+ alpha (float, optional): The coefficient :math:`\alpha` in the equation above. Default: ``0.54`` .
8431
+ beta (float, optional): The coefficient :math:`\beta` in the equation above. Default: ``0.46`` .
8432
+ dtype (:class:`mindspore.dtype`, optional): An optional data type of ``mstype.float16`` ,
8433
+ ``mstype.float32`` and ``mstype.float64`` . Default: ``mstype.float32``.
7805
8434
 
7806
8435
  Inputs:
7807
8436
  - **length** (Tensor) - a positive integer tensor controlling the returned window size, must be 1D.
@@ -7823,6 +8452,8 @@ class HammingWindow(Primitive):
7823
8452
  ``Ascend`` ``GPU`` ``CPU``
7824
8453
 
7825
8454
  Examples:
8455
+ >>> import numpy as np
8456
+ >>> from mindspore import Tensor, ops
7826
8457
  >>> # case 1: periodic=True.
7827
8458
  >>> length = Tensor(np.array([6]).astype(np.int32))
7828
8459
  >>> hamming_window = ops.HammingWindow(periodic=True)
@@ -7864,10 +8495,29 @@ class AffineGrid(Primitive):
7864
8495
 
7865
8496
  Refer to :func:`mindspore.ops.affine_grid` for more details.
7866
8497
 
8498
+ Args:
8499
+ align_corners (bool, optional): Geometrically, each pixel of the input is viewed as a square instead of
+ a dot. If ``True`` , the extrema -1 and 1 are considered to refer to the centers of the pixels rather
+ than the pixel corners. If ``False`` , they refer to the corners of the pixels, so that sampling is
+ independent of the image resolution. Default: ``False`` .
8503
+
8504
+ Inputs:
8505
+ - **theta** (Tensor) - The input tensor of flow field whose dtype is float16, float32.
8506
+ Input batch of affine matrices with shape :math:`(N, 2, 3)` for 2D grid or :math:`(N, 3, 4)` for 3D grid.
8507
+ - **output_size** (tuple[int]) - The target output image size.
8508
+ The value of target output with format :math:`(N, C, H, W)` for 2D grid
8509
+ or :math:`(N, C, D, H, W)` for 3D grid.
8510
+
8511
+ Outputs:
8512
+ Tensor, a tensor whose data type is same as 'theta', and the shape is :math:`(N, H, W, 2)` for 2D grid
8513
+ or :math:`(N, D, H, W, 3)` for 3D grid.
8514
+
7867
8515
  Supported Platforms:
7868
8516
  ``Ascend`` ``GPU`` ``CPU``
7869
8517
 
7870
8518
  Examples:
8519
+ >>> import mindspore
8520
+ >>> from mindspore import Tensor, ops
7871
8521
  >>> affinegrid = ops.AffineGrid(align_corners=False)
7872
8522
  >>> theta = Tensor([[[0.8, 0.5, 0],[-0.5, 0.8, 0]]], mindspore.float32)
7873
8523
  >>> out_size = (1, 3, 2, 3)
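Completing the example: for a 2D grid, an `output_size` of (N, C, H, W) = (1, 3, 2, 3) yields a flow field of shape (N, H, W, 2), as stated in the Outputs section:

>>> output = affinegrid(theta, out_size)
>>> print(output.shape)
(1, 2, 3, 2)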
@@ -8008,10 +8658,19 @@ class PopulationCount(Primitive):
8008
8658
 
8009
8659
  Refer to :func:`mindspore.ops.population_count` for more details.
8010
8660
 
8661
+ Inputs:
8662
+ - **input_x** (Tensor) - Tensor of any dimension. The data type must be int16 or uint16 (Ascend).
8663
+ The data type must be int8, int16, int32, int64, uint8, uint16, uint32, uint64 (CPU and GPU).
8664
+
8665
+ Outputs:
8666
+ Tensor, with the same shape as the input, and the data type is uint8.
8667
+
8011
8668
  Supported Platforms:
8012
8669
  ``Ascend`` ``GPU`` ``CPU``
8013
8670
 
8014
8671
  Examples:
8672
+ >>> import mindspore
8673
+ >>> from mindspore import Tensor, ops
8015
8674
  >>> input_x = Tensor([0, 1, 3], mindspore.int16)
8016
8675
  >>> output = ops.PopulationCount()(input_x)
8017
8676
  >>> print(output)
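For reference, the printed result is [0 1 2]: the values 0, 1 and 3 contain zero, one and two set bits respectively.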
@@ -8047,12 +8706,17 @@ class TopK(Primitive):
8047
8706
  If the two compared elements are the same, the one with the smaller index value is returned first.
8048
8707
 
8049
8708
  Args:
8050
- sorted (bool, optional): If True, the obtained elements will be sorted by the values in descending order.
8051
- If False, the obtained elements will not be sorted. Default: True.
8709
+ sorted (bool, optional): If ``True`` , the obtained elements will be sorted by the values in descending order.
8710
+ If ``False`` , the obtained elements will not be sorted. Default: ``True`` .
8052
8711
 
8053
8712
  Inputs:
8054
- - **input_x** (Tensor) - Input to be computed, data type must be float16, float32 or int32 on CPU,
8055
- and float16 or float32 on GPU.
8713
+ - **input_x** (Tensor) - Input to be computed. 0-D input is supported on GPU, but not on Ascend or CPU.
+ Supported dtypes:
8715
+
8716
+ - Ascend: int8, uint8, int32, int64, float16, float32.
8717
+ - GPU: float16, float32.
8718
+ - CPU: all numeric types.
8719
+
8056
8720
  - **k** (int) - The number of top elements to be computed along the last dimension, constant input is needed.
8057
8721
 
8058
8722
  Outputs:
@@ -8065,7 +8729,7 @@ class TopK(Primitive):
8065
8729
  TypeError: If `sorted` is not a bool.
8066
8730
  TypeError: If `input_x` is not a Tensor.
8067
8731
  TypeError: If `k` is not an int.
8068
- TypeError: If dtype of `input_x` is not one of the following: float16, float32 or int32.
8732
+ TypeError: If dtype of `input_x` is not supported.
8069
8733
 
8070
8734
  Supported Platforms:
8071
8735
  ``Ascend`` ``GPU`` ``CPU``
@@ -8119,6 +8783,9 @@ class Bincount(Primitive):
8119
8783
  ``Ascend`` ``GPU`` ``CPU``
8120
8784
 
8121
8785
  Examples:
8786
+ >>> import mindspore
8787
+ >>> import numpy as np
8788
+ >>> from mindspore import Tensor, ops
8122
8789
  >>> array = Tensor(np.array([1, 2, 2, 3, 3, 3, 4, 4, 4, 4]), mindspore.int32)
8123
8790
  >>> size = Tensor(5, mindspore.int32)
8124
8791
  >>> weights = Tensor(np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), mindspore.float32)
@@ -8141,6 +8808,17 @@ class CountNonZero(Primitive):
8141
8808
 
8142
8809
  Refer to :func:`mindspore.ops.count_nonzero` for more details.
8143
8810
 
8811
+ Args:
8812
+ dims (Union[int, tuple(int), list(int)], optional): The dimensions to reduce.
8813
+ Default: ``None`` , reduce over all dimensions.
8814
+
8815
+ Inputs:
8816
+ - **x** (Tensor) - Input data is used to count non-zero numbers. With shape
8817
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
8818
+
8819
+ Outputs:
8820
+ Tensor, number of nonzero element across axis specified by `dims`.
8821
+
8144
8822
  Supported Platforms:
8145
8823
  ``Ascend`` ``CPU``
8146
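A minimal usage sketch, assuming the constructor takes `dims` as shown in the Args above (illustrative; consistent with the functional :func:`mindspore.ops.count_nonzero`):

>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[0, 1, 0], [1, 1, 0]]).astype(np.float32))
>>> countnonzero = ops.CountNonZero(dims=[1])
>>> y = countnonzero(x)
>>> print(y)
[1 2]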
8824