mindspore-2.2.11-cp39-cp39-win_amd64.whl → mindspore-2.3.0-cp39-cp39-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
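
A file-level comparison like the listing below can be reproduced locally, since wheels are ordinary zip archives. The sketch below assumes both wheels have already been downloaded (e.g. via `pip download mindspore==2.2.11` and `pip download mindspore==2.3.0`); the local paths are assumptions, and comparing uncompressed sizes is only a cheap proxy for content changes (hashing each member would be more faithful, which is presumably what the registry's diff does).

```python
import zipfile

# Hypothetical local paths to the two downloaded wheels.
OLD = "mindspore-2.2.11-cp39-cp39-win_amd64.whl"
NEW = "mindspore-2.3.0-cp39-cp39-win_amd64.whl"

def members(path):
    # Map archive member name -> uncompressed size.
    with zipfile.ZipFile(path) as zf:
        return {info.filename: info.file_size for info in zf.infolist()}

old, new = members(OLD), members(NEW)
added = sorted(set(new) - set(old))
removed = sorted(set(old) - set(new))
# Size inequality is a heuristic: files changed in place with identical
# sizes (e.g. rebuilt .pyd/.dll binaries) can be missed this way.
changed = sorted(n for n in set(old) & set(new) if old[n] != new[n])

print(f"Files changed: {len(added) + len(removed) + len(changed)}")
for name in added:
    print(f"  A {name}")
for name in removed:
    print(f"  D {name}")
for name in changed:
    print(f"  M {name} ({old[name]} -> {new[name]} bytes)")
```
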

Potentially problematic release: this version of mindspore might be problematic.

Files changed (1151)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +7 -5
  3. mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
  6. mindspore/_checkparam.py +76 -18
  7. mindspore/_extends/builtin_operations.py +2 -1
  8. mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
  9. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
  10. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
  11. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
  12. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  13. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
  14. mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
  15. mindspore/_extends/parse/__init__.py +18 -14
  16. mindspore/_extends/parse/compile_config.py +258 -0
  17. mindspore/_extends/parse/namespace.py +2 -2
  18. mindspore/_extends/parse/parser.py +174 -62
  19. mindspore/_extends/parse/resources.py +45 -14
  20. mindspore/_extends/parse/standard_method.py +142 -240
  21. mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _extends/pijit/__init__.py} +6 -16
  22. mindspore/_extends/pijit/pijit_func_white_list.py +343 -0
  23. mindspore/_extends/remote/kernel_build_server.py +2 -0
  24. mindspore/_profiler.py +30 -0
  25. mindspore/amp.py +51 -24
  26. mindspore/avcodec-59.dll +0 -0
  27. mindspore/avdevice-59.dll +0 -0
  28. mindspore/avfilter-8.dll +0 -0
  29. mindspore/avformat-59.dll +0 -0
  30. mindspore/avutil-57.dll +0 -0
  31. mindspore/boost/adasum.py +1 -1
  32. mindspore/boost/base.py +1 -1
  33. mindspore/boost/boost_cell_wrapper.py +2 -2
  34. mindspore/boost/grad_freeze.py +2 -2
  35. mindspore/boost/group_loss_scale_manager.py +1 -1
  36. mindspore/boost/less_batch_normalization.py +9 -6
  37. mindspore/common/__init__.py +15 -4
  38. mindspore/common/_jit_fallback_utils.py +2 -3
  39. mindspore/common/_register_for_adapter.py +7 -0
  40. mindspore/common/_register_for_recompute.py +48 -0
  41. mindspore/common/_register_for_tensor.py +8 -9
  42. mindspore/common/_stub_tensor.py +7 -1
  43. mindspore/common/_utils.py +5 -17
  44. mindspore/common/api.py +411 -106
  45. mindspore/common/auto_dynamic_shape.py +27 -14
  46. mindspore/common/dtype.py +17 -10
  47. mindspore/common/dump.py +6 -8
  48. mindspore/common/file_system.py +48 -0
  49. mindspore/common/generator.py +260 -0
  50. mindspore/common/hook_handle.py +51 -4
  51. mindspore/common/initializer.py +1 -1
  52. mindspore/common/jit_config.py +34 -14
  53. mindspore/common/lazy_inline.py +72 -19
  54. mindspore/common/mindir_util.py +12 -2
  55. mindspore/common/mutable.py +79 -14
  56. mindspore/common/no_inline.py +54 -0
  57. mindspore/common/np_dtype.py +25 -0
  58. mindspore/common/parameter.py +30 -11
  59. mindspore/common/recompute.py +262 -0
  60. mindspore/common/seed.py +9 -9
  61. mindspore/common/sparse_tensor.py +272 -24
  62. mindspore/common/symbol.py +122 -0
  63. mindspore/common/tensor.py +468 -496
  64. mindspore/communication/__init__.py +6 -11
  65. mindspore/communication/_comm_helper.py +5 -0
  66. mindspore/communication/comm_func.py +1140 -0
  67. mindspore/communication/management.py +118 -102
  68. mindspore/config/op_info.config +22 -54
  69. mindspore/context.py +378 -65
  70. mindspore/dataset/__init__.py +5 -5
  71. mindspore/dataset/audio/__init__.py +6 -6
  72. mindspore/dataset/audio/transforms.py +711 -158
  73. mindspore/dataset/callback/ds_callback.py +2 -2
  74. mindspore/dataset/engine/cache_client.py +2 -2
  75. mindspore/dataset/engine/datasets.py +163 -83
  76. mindspore/dataset/engine/datasets_audio.py +14 -14
  77. mindspore/dataset/engine/datasets_standard_format.py +33 -3
  78. mindspore/dataset/engine/datasets_text.py +38 -38
  79. mindspore/dataset/engine/datasets_user_defined.py +78 -59
  80. mindspore/dataset/engine/datasets_vision.py +77 -73
  81. mindspore/dataset/engine/offload.py +5 -7
  82. mindspore/dataset/engine/queue.py +56 -38
  83. mindspore/dataset/engine/validators.py +11 -5
  84. mindspore/dataset/text/__init__.py +3 -3
  85. mindspore/dataset/text/transforms.py +408 -121
  86. mindspore/dataset/text/utils.py +9 -9
  87. mindspore/dataset/transforms/__init__.py +1 -1
  88. mindspore/dataset/transforms/transforms.py +261 -76
  89. mindspore/dataset/utils/browse_dataset.py +9 -9
  90. mindspore/dataset/vision/__init__.py +8 -8
  91. mindspore/dataset/vision/c_transforms.py +10 -10
  92. mindspore/dataset/vision/py_transforms_util.py +3 -3
  93. mindspore/dataset/vision/transforms.py +2844 -549
  94. mindspore/dataset/vision/utils.py +161 -10
  95. mindspore/dataset/vision/validators.py +14 -2
  96. mindspore/dnnl.dll +0 -0
  97. mindspore/experimental/optim/__init__.py +12 -2
  98. mindspore/experimental/optim/adadelta.py +161 -0
  99. mindspore/experimental/optim/adagrad.py +168 -0
  100. mindspore/experimental/optim/adam.py +35 -34
  101. mindspore/experimental/optim/adamax.py +170 -0
  102. mindspore/experimental/optim/adamw.py +40 -16
  103. mindspore/experimental/optim/asgd.py +153 -0
  104. mindspore/experimental/optim/lr_scheduler.py +71 -127
  105. mindspore/experimental/optim/nadam.py +157 -0
  106. mindspore/experimental/optim/optimizer.py +15 -8
  107. mindspore/experimental/optim/radam.py +194 -0
  108. mindspore/experimental/optim/rmsprop.py +154 -0
  109. mindspore/experimental/optim/rprop.py +164 -0
  110. mindspore/experimental/optim/sgd.py +28 -19
  111. mindspore/hal/__init__.py +40 -0
  112. mindspore/hal/_ascend.py +57 -0
  113. mindspore/hal/_base.py +57 -0
  114. mindspore/hal/_cpu.py +56 -0
  115. mindspore/hal/_gpu.py +57 -0
  116. mindspore/hal/device.py +356 -0
  117. mindspore/hal/event.py +179 -0
  118. mindspore/hal/memory.py +326 -0
  119. mindspore/hal/stream.py +339 -0
  120. mindspore/include/api/data_type.h +2 -2
  121. mindspore/include/api/dual_abi_helper.h +16 -3
  122. mindspore/include/api/model.h +4 -3
  123. mindspore/include/api/status.h +14 -0
  124. mindspore/include/c_api/model_c.h +173 -0
  125. mindspore/include/c_api/ms/base/types.h +1 -0
  126. mindspore/include/c_api/types_c.h +19 -0
  127. mindspore/include/dataset/execute.h +1 -3
  128. mindspore/include/dataset/vision.h +54 -2
  129. mindspore/jpeg62.dll +0 -0
  130. mindspore/log.py +2 -2
  131. mindspore/mindrecord/__init__.py +5 -1
  132. mindspore/mindrecord/config.py +809 -0
  133. mindspore/mindrecord/filereader.py +25 -0
  134. mindspore/mindrecord/filewriter.py +76 -58
  135. mindspore/mindrecord/mindpage.py +40 -6
  136. mindspore/mindrecord/shardutils.py +3 -2
  137. mindspore/mindrecord/shardwriter.py +7 -0
  138. mindspore/mindrecord/tools/cifar100_to_mr.py +53 -66
  139. mindspore/mindrecord/tools/cifar10_to_mr.py +48 -63
  140. mindspore/mindrecord/tools/csv_to_mr.py +7 -17
  141. mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
  142. mindspore/mindrecord/tools/mnist_to_mr.py +11 -21
  143. mindspore/mindrecord/tools/tfrecord_to_mr.py +2 -10
  144. mindspore/mindspore_backend.dll +0 -0
  145. mindspore/mindspore_common.dll +0 -0
  146. mindspore/mindspore_core.dll +0 -0
  147. mindspore/mindspore_glog.dll +0 -0
  148. mindspore/mindspore_np_dtype.dll +0 -0
  149. mindspore/mindspore_shared_lib.dll +0 -0
  150. mindspore/mint/__init__.py +1137 -0
  151. mindspore/{rewrite/ast_transformers → mint/linalg}/__init__.py +9 -4
  152. mindspore/mint/nn/__init__.py +512 -0
  153. mindspore/mint/nn/functional.py +573 -0
  154. mindspore/mint/optim/__init__.py +24 -0
  155. mindspore/mint/optim/adamw.py +185 -0
  156. mindspore/multiprocessing/__init__.py +72 -0
  157. mindspore/nn/__init__.py +1 -0
  158. mindspore/nn/cell.py +213 -257
  159. mindspore/nn/dynamic_lr.py +2 -2
  160. mindspore/nn/extend/__init__.py +29 -0
  161. mindspore/nn/extend/basic.py +140 -0
  162. mindspore/nn/extend/embedding.py +143 -0
  163. mindspore/{rewrite/ast_creator_register.py → nn/extend/layer/__init__.py} +9 -19
  164. mindspore/nn/extend/layer/normalization.py +109 -0
  165. mindspore/nn/extend/pooling.py +117 -0
  166. mindspore/nn/layer/activation.py +84 -94
  167. mindspore/nn/layer/basic.py +177 -82
  168. mindspore/nn/layer/channel_shuffle.py +3 -16
  169. mindspore/nn/layer/container.py +3 -3
  170. mindspore/nn/layer/conv.py +75 -66
  171. mindspore/nn/layer/embedding.py +103 -45
  172. mindspore/nn/layer/embedding_service.py +531 -0
  173. mindspore/nn/layer/embedding_service_layer.py +393 -0
  174. mindspore/nn/layer/image.py +4 -7
  175. mindspore/nn/layer/math.py +1 -1
  176. mindspore/nn/layer/normalization.py +52 -66
  177. mindspore/nn/layer/padding.py +30 -39
  178. mindspore/nn/layer/pooling.py +18 -9
  179. mindspore/nn/layer/rnn_cells.py +6 -16
  180. mindspore/nn/layer/rnns.py +6 -5
  181. mindspore/nn/layer/thor_layer.py +1 -2
  182. mindspore/nn/layer/timedistributed.py +1 -1
  183. mindspore/nn/layer/transformer.py +52 -50
  184. mindspore/nn/learning_rate_schedule.py +6 -5
  185. mindspore/nn/loss/loss.py +63 -84
  186. mindspore/nn/optim/ada_grad.py +6 -4
  187. mindspore/nn/optim/adadelta.py +3 -1
  188. mindspore/nn/optim/adafactor.py +1 -1
  189. mindspore/nn/optim/adam.py +102 -181
  190. mindspore/nn/optim/adamax.py +4 -2
  191. mindspore/nn/optim/adasum.py +3 -3
  192. mindspore/nn/optim/asgd.py +4 -2
  193. mindspore/nn/optim/ftrl.py +31 -61
  194. mindspore/nn/optim/lamb.py +5 -3
  195. mindspore/nn/optim/lars.py +2 -2
  196. mindspore/nn/optim/lazyadam.py +6 -4
  197. mindspore/nn/optim/momentum.py +13 -25
  198. mindspore/nn/optim/optimizer.py +6 -3
  199. mindspore/nn/optim/proximal_ada_grad.py +4 -2
  200. mindspore/nn/optim/rmsprop.py +9 -3
  201. mindspore/nn/optim/rprop.py +4 -2
  202. mindspore/nn/optim/sgd.py +7 -4
  203. mindspore/nn/optim/thor.py +2 -2
  204. mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
  205. mindspore/nn/probability/distribution/beta.py +2 -2
  206. mindspore/nn/probability/distribution/categorical.py +4 -6
  207. mindspore/nn/probability/distribution/cauchy.py +2 -2
  208. mindspore/nn/probability/distribution/exponential.py +2 -2
  209. mindspore/nn/probability/distribution/geometric.py +1 -1
  210. mindspore/nn/probability/distribution/gumbel.py +2 -2
  211. mindspore/nn/probability/distribution/logistic.py +1 -1
  212. mindspore/nn/probability/distribution/poisson.py +2 -2
  213. mindspore/nn/probability/distribution/uniform.py +2 -2
  214. mindspore/nn/reinforcement/_tensors_queue.py +13 -1
  215. mindspore/nn/wrap/__init__.py +2 -1
  216. mindspore/nn/wrap/cell_wrapper.py +58 -13
  217. mindspore/nn/wrap/grad_reducer.py +148 -8
  218. mindspore/nn/wrap/loss_scale.py +32 -9
  219. mindspore/numpy/__init__.py +2 -0
  220. mindspore/numpy/array_creations.py +2 -0
  221. mindspore/numpy/array_ops.py +6 -6
  222. mindspore/numpy/dtypes.py +3 -3
  223. mindspore/numpy/fft.py +431 -0
  224. mindspore/numpy/math_ops.py +61 -67
  225. mindspore/numpy/utils.py +3 -0
  226. mindspore/opencv_core452.dll +0 -0
  227. mindspore/opencv_imgcodecs452.dll +0 -0
  228. mindspore/opencv_imgproc452.dll +0 -0
  229. mindspore/ops/__init__.py +8 -4
  230. mindspore/ops/_grad_experimental/grad_array_ops.py +4 -160
  231. mindspore/ops/_grad_experimental/grad_comm_ops.py +93 -36
  232. mindspore/ops/_grad_experimental/grad_inner_ops.py +8 -0
  233. mindspore/ops/_grad_experimental/grad_math_ops.py +92 -287
  234. mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
  235. mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
  236. mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
  237. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
  238. mindspore/ops/_op_impl/__init__.py +0 -1
  239. mindspore/ops/_op_impl/aicpu/__init__.py +1 -0
  240. mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
  241. mindspore/ops/_op_impl/{cpu/concat.py → aicpu/generate_eod_mask.py} +16 -17
  242. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
  243. mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
  244. mindspore/ops/_op_impl/cpu/__init__.py +1 -3
  245. mindspore/ops/_op_impl/cpu/adam.py +2 -2
  246. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
  247. mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
  248. mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
  249. mindspore/ops/_vmap/vmap_array_ops.py +164 -101
  250. mindspore/ops/_vmap/vmap_base.py +8 -1
  251. mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
  252. mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
  253. mindspore/ops/_vmap/vmap_image_ops.py +70 -13
  254. mindspore/ops/_vmap/vmap_math_ops.py +130 -58
  255. mindspore/ops/_vmap/vmap_nn_ops.py +249 -115
  256. mindspore/ops/_vmap/vmap_other_ops.py +1 -1
  257. mindspore/ops/auto_generate/__init__.py +31 -0
  258. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +231 -0
  259. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +250 -0
  260. mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
  261. mindspore/ops/auto_generate/gen_extend_func.py +980 -0
  262. mindspore/ops/auto_generate/gen_ops_def.py +6443 -0
  263. mindspore/ops/auto_generate/gen_ops_prim.py +13167 -0
  264. mindspore/ops/auto_generate/pyboost_inner_prim.py +429 -0
  265. mindspore/ops/composite/__init__.py +5 -2
  266. mindspore/ops/composite/base.py +121 -23
  267. mindspore/ops/composite/math_ops.py +10 -49
  268. mindspore/ops/composite/multitype_ops/_compile_utils.py +191 -618
  269. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +25 -134
  270. mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
  271. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
  272. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
  273. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
  274. mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
  275. mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
  276. mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
  277. mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
  278. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
  279. mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
  280. mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
  281. mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
  282. mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
  283. mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
  284. mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
  285. mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
  286. mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
  287. mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
  288. mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
  289. mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
  290. mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
  291. mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
  292. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
  293. mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
  294. mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
  295. mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
  296. mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
  297. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
  298. mindspore/ops/deprecated.py +14 -3
  299. mindspore/ops/extend/__init__.py +53 -0
  300. mindspore/ops/extend/array_func.py +218 -0
  301. mindspore/ops/extend/math_func.py +76 -0
  302. mindspore/ops/extend/nn_func.py +308 -0
  303. mindspore/ops/function/__init__.py +31 -11
  304. mindspore/ops/function/array_func.py +848 -1736
  305. mindspore/ops/function/clip_func.py +19 -31
  306. mindspore/ops/function/debug_func.py +2 -5
  307. mindspore/ops/function/fft_func.py +31 -0
  308. mindspore/ops/function/grad/grad_func.py +27 -20
  309. mindspore/ops/function/image_func.py +27 -21
  310. mindspore/ops/function/linalg_func.py +30 -53
  311. mindspore/ops/function/math_func.py +916 -2791
  312. mindspore/ops/function/nn_func.py +1445 -889
  313. mindspore/ops/function/other_func.py +6 -7
  314. mindspore/ops/function/parameter_func.py +6 -92
  315. mindspore/ops/function/random_func.py +254 -108
  316. mindspore/ops/function/reshard_func.py +102 -0
  317. mindspore/ops/function/sparse_func.py +4 -4
  318. mindspore/ops/function/sparse_unary_func.py +11 -18
  319. mindspore/ops/function/spectral_func.py +1 -1
  320. mindspore/ops/function/vmap_func.py +15 -14
  321. mindspore/ops/functional.py +342 -343
  322. mindspore/ops/op_info_register.py +16 -43
  323. mindspore/ops/operations/__init__.py +32 -23
  324. mindspore/ops/operations/_embedding_cache_ops.py +1 -1
  325. mindspore/ops/operations/_grad_ops.py +21 -853
  326. mindspore/ops/operations/_infer_ops.py +19 -0
  327. mindspore/ops/operations/_inner_ops.py +155 -511
  328. mindspore/ops/operations/_quant_ops.py +4 -4
  329. mindspore/ops/operations/_rl_inner_ops.py +3 -3
  330. mindspore/ops/operations/_scalar_ops.py +5 -480
  331. mindspore/ops/operations/_sequence_ops.py +6 -36
  332. mindspore/ops/operations/_tensor_array.py +8 -8
  333. mindspore/ops/operations/array_ops.py +112 -2698
  334. mindspore/ops/operations/comm_ops.py +801 -118
  335. mindspore/ops/operations/custom_ops.py +62 -121
  336. mindspore/ops/operations/debug_ops.py +105 -36
  337. mindspore/ops/operations/image_ops.py +3 -219
  338. mindspore/ops/operations/inner_ops.py +54 -40
  339. mindspore/ops/operations/linalg_ops.py +1 -49
  340. mindspore/ops/operations/manually_defined/__init__.py +24 -0
  341. mindspore/ops/operations/manually_defined/_inner.py +61 -0
  342. mindspore/ops/operations/manually_defined/ops_def.py +2016 -0
  343. mindspore/ops/operations/math_ops.py +621 -4654
  344. mindspore/ops/operations/nn_ops.py +316 -2226
  345. mindspore/ops/operations/other_ops.py +53 -45
  346. mindspore/ops/operations/random_ops.py +4 -51
  347. mindspore/ops/operations/reshard_ops.py +53 -0
  348. mindspore/ops/operations/sparse_ops.py +8 -8
  349. mindspore/ops/primitive.py +204 -103
  350. mindspore/ops/silent_check.py +162 -0
  351. mindspore/ops_generate/__init__.py +27 -0
  352. mindspore/ops_generate/arg_dtype_cast.py +250 -0
  353. mindspore/ops_generate/arg_handler.py +197 -0
  354. mindspore/ops_generate/gen_aclnn_implement.py +263 -0
  355. mindspore/ops_generate/gen_ops.py +1084 -0
  356. mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
  357. mindspore/ops_generate/gen_pyboost_func.py +968 -0
  358. mindspore/ops_generate/gen_utils.py +209 -0
  359. mindspore/ops_generate/op_proto.py +138 -0
  360. mindspore/ops_generate/pyboost_utils.py +354 -0
  361. mindspore/ops_generate/template.py +239 -0
  362. mindspore/parallel/__init__.py +7 -4
  363. mindspore/parallel/_auto_parallel_context.py +155 -6
  364. mindspore/parallel/_cell_wrapper.py +16 -9
  365. mindspore/parallel/_cost_model_context.py +1 -1
  366. mindspore/parallel/_dp_allreduce_fusion.py +159 -159
  367. mindspore/parallel/_parallel_serialization.py +62 -14
  368. mindspore/parallel/_ps_context.py +1 -1
  369. mindspore/parallel/_recovery_context.py +1 -1
  370. mindspore/parallel/_tensor.py +18 -9
  371. mindspore/parallel/_transformer/__init__.py +1 -1
  372. mindspore/parallel/_transformer/layers.py +1 -1
  373. mindspore/parallel/_transformer/loss.py +1 -1
  374. mindspore/parallel/_transformer/moe.py +1 -1
  375. mindspore/parallel/_transformer/op_parallel_config.py +1 -1
  376. mindspore/parallel/_transformer/transformer.py +10 -10
  377. mindspore/parallel/_utils.py +161 -6
  378. mindspore/parallel/algo_parameter_config.py +6 -8
  379. mindspore/parallel/checkpoint_transform.py +369 -64
  380. mindspore/parallel/cluster/__init__.py +15 -0
  381. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  382. mindspore/parallel/cluster/process_entity/_api.py +344 -0
  383. mindspore/parallel/cluster/process_entity/_utils.py +126 -0
  384. mindspore/parallel/cluster/run.py +136 -0
  385. mindspore/parallel/mpi/__init__.py +1 -1
  386. mindspore/parallel/mpi/_mpi_config.py +1 -1
  387. mindspore/parallel/parameter_broadcast.py +152 -0
  388. mindspore/parallel/shard.py +128 -17
  389. mindspore/profiler/__init__.py +3 -2
  390. mindspore/profiler/common/process_pool.py +41 -0
  391. mindspore/profiler/common/singleton.py +28 -0
  392. mindspore/profiler/common/util.py +125 -0
  393. mindspore/profiler/envprofiling.py +2 -2
  394. mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
  395. mindspore/profiler/parser/ascend_analysis/constant.py +53 -0
  396. mindspore/profiler/parser/ascend_analysis/file_manager.py +159 -0
  397. mindspore/profiler/parser/ascend_analysis/function_event.py +161 -0
  398. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +131 -0
  399. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +85 -0
  400. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +57 -0
  401. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +116 -0
  402. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  403. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +68 -0
  404. mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
  405. mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
  406. mindspore/profiler/parser/ascend_flops_generator.py +27 -5
  407. mindspore/profiler/parser/ascend_fpbp_generator.py +8 -2
  408. mindspore/profiler/parser/ascend_hccl_generator.py +31 -280
  409. mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
  410. mindspore/profiler/parser/ascend_memory_generator.py +185 -0
  411. mindspore/profiler/parser/ascend_msprof_exporter.py +151 -126
  412. mindspore/profiler/parser/ascend_msprof_generator.py +75 -274
  413. mindspore/profiler/parser/ascend_op_generator.py +94 -36
  414. mindspore/profiler/parser/ascend_timeline_generator.py +297 -131
  415. mindspore/profiler/parser/base_timeline_generator.py +17 -3
  416. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -1
  417. mindspore/profiler/parser/framework_parser.py +11 -4
  418. mindspore/profiler/parser/integrator.py +3 -1
  419. mindspore/profiler/parser/memory_usage_parser.py +8 -2
  420. mindspore/profiler/parser/minddata_analyzer.py +8 -2
  421. mindspore/profiler/parser/minddata_parser.py +73 -4
  422. mindspore/profiler/parser/msadvisor_analyzer.py +5 -3
  423. mindspore/profiler/parser/msadvisor_parser.py +10 -4
  424. mindspore/profiler/parser/profiler_info.py +16 -1
  425. mindspore/profiler/profiling.py +522 -195
  426. mindspore/rewrite/__init__.py +2 -13
  427. mindspore/rewrite/api/node.py +123 -37
  428. mindspore/rewrite/api/pattern_engine.py +2 -3
  429. mindspore/rewrite/api/scoped_value.py +16 -15
  430. mindspore/rewrite/api/symbol_tree.py +46 -30
  431. mindspore/rewrite/ast_helpers/__init__.py +3 -6
  432. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  433. mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
  434. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  435. mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
  436. mindspore/rewrite/common/__init__.py +1 -2
  437. mindspore/rewrite/common/config.py +24 -0
  438. mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
  439. mindspore/rewrite/{namer.py → common/namer.py} +63 -18
  440. mindspore/rewrite/common/namespace.py +118 -0
  441. mindspore/rewrite/node/__init__.py +5 -5
  442. mindspore/rewrite/node/call_function.py +23 -7
  443. mindspore/rewrite/node/cell_container.py +7 -3
  444. mindspore/rewrite/node/control_flow.py +53 -28
  445. mindspore/rewrite/node/node.py +212 -196
  446. mindspore/rewrite/node/node_manager.py +51 -22
  447. mindspore/rewrite/node/node_topological_manager.py +3 -23
  448. mindspore/rewrite/parsers/__init__.py +12 -0
  449. mindspore/rewrite/parsers/arguments_parser.py +8 -9
  450. mindspore/rewrite/parsers/assign_parser.py +637 -413
  451. mindspore/rewrite/parsers/attribute_parser.py +3 -4
  452. mindspore/rewrite/parsers/class_def_parser.py +115 -148
  453. mindspore/rewrite/parsers/constant_parser.py +5 -5
  454. mindspore/rewrite/parsers/container_parser.py +4 -6
  455. mindspore/rewrite/parsers/expr_parser.py +55 -0
  456. mindspore/rewrite/parsers/for_parser.py +31 -98
  457. mindspore/rewrite/parsers/function_def_parser.py +13 -5
  458. mindspore/rewrite/parsers/if_parser.py +28 -10
  459. mindspore/rewrite/parsers/module_parser.py +8 -182
  460. mindspore/rewrite/parsers/parser.py +1 -5
  461. mindspore/rewrite/parsers/parser_register.py +1 -1
  462. mindspore/rewrite/parsers/return_parser.py +5 -10
  463. mindspore/rewrite/parsers/while_parser.py +59 -0
  464. mindspore/rewrite/sparsify/utils.py +1 -1
  465. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  466. mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
  467. mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
  468. mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
  469. mindspore/run_check/_check_version.py +6 -14
  470. mindspore/run_check/run_check.py +1 -1
  471. mindspore/safeguard/rewrite_obfuscation.py +9 -19
  472. mindspore/swresample-4.dll +0 -0
  473. mindspore/swscale-6.dll +0 -0
  474. mindspore/tinyxml2.dll +0 -0
  475. mindspore/train/__init__.py +6 -5
  476. mindspore/train/_utils.py +178 -4
  477. mindspore/train/amp.py +167 -245
  478. mindspore/train/anf_ir_pb2.py +14 -2
  479. mindspore/train/callback/__init__.py +5 -2
  480. mindspore/train/callback/_backup_and_restore.py +5 -5
  481. mindspore/train/callback/_callback.py +4 -4
  482. mindspore/train/callback/_checkpoint.py +151 -37
  483. mindspore/train/callback/_cluster_monitor.py +201 -0
  484. mindspore/train/callback/_early_stop.py +2 -2
  485. mindspore/train/callback/_flops_collector.py +238 -0
  486. mindspore/train/callback/_landscape.py +16 -11
  487. mindspore/train/callback/_loss_monitor.py +2 -2
  488. mindspore/train/callback/_mindio_ttp.py +443 -0
  489. mindspore/train/callback/_on_request_exit.py +2 -2
  490. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  491. mindspore/train/callback/_summary_collector.py +13 -14
  492. mindspore/train/callback/_time_monitor.py +3 -3
  493. mindspore/train/data_sink.py +6 -5
  494. mindspore/train/dataset_helper.py +66 -21
  495. mindspore/train/loss_scale_manager.py +2 -2
  496. mindspore/train/metrics/accuracy.py +7 -7
  497. mindspore/train/metrics/confusion_matrix.py +8 -6
  498. mindspore/train/metrics/cosine_similarity.py +6 -4
  499. mindspore/train/metrics/error.py +2 -2
  500. mindspore/train/metrics/metric.py +3 -3
  501. mindspore/train/metrics/perplexity.py +2 -1
  502. mindspore/train/metrics/topk.py +2 -2
  503. mindspore/train/mind_ir_pb2.py +89 -15
  504. mindspore/train/model.py +298 -56
  505. mindspore/train/serialization.py +501 -221
  506. mindspore/train/summary/_summary_adapter.py +1 -1
  507. mindspore/train/summary/_writer_pool.py +1 -1
  508. mindspore/train/summary/summary_record.py +56 -34
  509. mindspore/train/train_thor/convert_utils.py +3 -3
  510. mindspore/turbojpeg.dll +0 -0
  511. mindspore/version.py +1 -1
  512. {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/METADATA +3 -3
  513. mindspore-2.3.0.dist-info/RECORD +1400 -0
  514. {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/entry_points.txt +1 -0
  515. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
  516. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
  517. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
  518. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
  519. mindspore/gen_ops.py +0 -273
  520. mindspore/nn/layer/flash_attention.py +0 -189
  521. mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
  522. mindspore/ops/_op_impl/tbe/__init__.py +0 -47
  523. mindspore/ops/_op_impl/tbe/abs.py +0 -38
  524. mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
  525. mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
  526. mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
  527. mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
  528. mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
  529. mindspore/ops/_op_impl/tbe/acos.py +0 -37
  530. mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
  531. mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
  532. mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
  533. mindspore/ops/_op_impl/tbe/acosh.py +0 -37
  534. mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
  535. mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
  536. mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
  537. mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
  538. mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
  539. mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
  540. mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
  541. mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
  542. mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
  543. mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
  544. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
  545. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
  546. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
  547. mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
  548. mindspore/ops/_op_impl/tbe/add.py +0 -42
  549. mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
  550. mindspore/ops/_op_impl/tbe/add_n.py +0 -39
  551. mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
  552. mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
  553. mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
  554. mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
  555. mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
  556. mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
  557. mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
  558. mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
  559. mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
  560. mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
  561. mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
  562. mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
  563. mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
  564. mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
  565. mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
  566. mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
  567. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
  568. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
  569. mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
  570. mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
  571. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
  572. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
  573. mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
  574. mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
  575. mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
  576. mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
  577. mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
  578. mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
  579. mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
  580. mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
  581. mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
  582. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
  583. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
  584. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
  585. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
  586. mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
  587. mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
  588. mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
  589. mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
  590. mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
  591. mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
  592. mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
  593. mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
  594. mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
  595. mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
  596. mindspore/ops/_op_impl/tbe/asin.py +0 -37
  597. mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
  598. mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
  599. mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
  600. mindspore/ops/_op_impl/tbe/asinh.py +0 -37
  601. mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
  602. mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
  603. mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
  604. mindspore/ops/_op_impl/tbe/assign.py +0 -79
  605. mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
  606. mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
  607. mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
  608. mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
  609. mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
  610. mindspore/ops/_op_impl/tbe/atan.py +0 -37
  611. mindspore/ops/_op_impl/tbe/atan2.py +0 -38
  612. mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
  613. mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
  614. mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
  615. mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
  616. mindspore/ops/_op_impl/tbe/atanh.py +0 -37
  617. mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
  618. mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
  619. mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
  620. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
  621. mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
  622. mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
  623. mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
  624. mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
  625. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
  626. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
  627. mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
  628. mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
  629. mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
  630. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
  631. mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
  632. mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
  633. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
  634. mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
  635. mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
  636. mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
  637. mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
  638. mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
  639. mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
  640. mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
  641. mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
  642. mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
  643. mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
  644. mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
  645. mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
  646. mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
  647. mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
  648. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
  649. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
  650. mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
  651. mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
  652. mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
  653. mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
  654. mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
  655. mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
  656. mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
  657. mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
  658. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
  659. mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
  660. mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
  661. mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
  662. mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
  663. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
  664. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
  665. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
  666. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
  667. mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
  668. mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
  669. mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
  670. mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
  671. mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
  672. mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
  673. mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
  674. mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
  675. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
  676. mindspore/ops/_op_impl/tbe/cast.py +0 -55
  677. mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
  678. mindspore/ops/_op_impl/tbe/cdist.py +0 -38
  679. mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
  680. mindspore/ops/_op_impl/tbe/ceil.py +0 -37
  681. mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
  682. mindspore/ops/_op_impl/tbe/celu.py +0 -39
  683. mindspore/ops/_op_impl/tbe/centralization.py +0 -39
  684. mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
  685. mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
  686. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
  687. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
  688. mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
  689. mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
  690. mindspore/ops/_op_impl/tbe/concat.py +0 -40
  691. mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
  692. mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
  693. mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
  694. mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
  695. mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
  696. mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
  697. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
  698. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
  699. mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
  700. mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
  701. mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
  702. mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
  703. mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
  704. mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
  705. mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
  706. mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
  707. mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
  708. mindspore/ops/_op_impl/tbe/cos.py +0 -37
  709. mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
  710. mindspore/ops/_op_impl/tbe/cosh.py +0 -37
  711. mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
  712. mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
  713. mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
  714. mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
  715. mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
  716. mindspore/ops/_op_impl/tbe/cummin.py +0 -41
  717. mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
  718. mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
  719. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
  720. mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
  721. mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
  722. mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
  723. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
  724. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
  725. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
  726. mindspore/ops/_op_impl/tbe/diag.py +0 -38
  727. mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
  728. mindspore/ops/_op_impl/tbe/dilation.py +0 -40
  729. mindspore/ops/_op_impl/tbe/div.py +0 -41
  730. mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
  731. mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
  732. mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
  733. mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
  734. mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
  735. mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
  736. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
  737. mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
  738. mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
  739. mindspore/ops/_op_impl/tbe/elu.py +0 -38
  740. mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
  741. mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
  742. mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
  743. mindspore/ops/_op_impl/tbe/equal.py +0 -42
  744. mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
  745. mindspore/ops/_op_impl/tbe/erf.py +0 -37
  746. mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
  747. mindspore/ops/_op_impl/tbe/erfc.py +0 -37
  748. mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
  749. mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
  750. mindspore/ops/_op_impl/tbe/exp.py +0 -40
  751. mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
  752. mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
  753. mindspore/ops/_op_impl/tbe/expm1.py +0 -37
  754. mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
  755. mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
  756. mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
  757. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
  758. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
  759. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
  760. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
  761. mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
  762. mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
  763. mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
  764. mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
  765. mindspore/ops/_op_impl/tbe/fill.py +0 -56
  766. mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
  767. mindspore/ops/_op_impl/tbe/flatten.py +0 -48
  768. mindspore/ops/_op_impl/tbe/floor.py +0 -37
  769. mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
  770. mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
  771. mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
  772. mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
  773. mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
  774. mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
  775. mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
  776. mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
  777. mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
  778. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
  779. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
  780. mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
  781. mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
  782. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  783. mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
  784. mindspore/ops/_op_impl/tbe/gelu.py +0 -37
  785. mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
  786. mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
  787. mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
  788. mindspore/ops/_op_impl/tbe/ger.py +0 -43
  789. mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
  790. mindspore/ops/_op_impl/tbe/greater.py +0 -43
  791. mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
  792. mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
  793. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
  794. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
  795. mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
  796. mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
  797. mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
  798. mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
  799. mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
  800. mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
  801. mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
  802. mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
  803. mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
  804. mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
  805. mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
  806. mindspore/ops/_op_impl/tbe/im2col.py +0 -42
  807. mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
  808. mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
  809. mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
  810. mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
  811. mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
  812. mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
  813. mindspore/ops/_op_impl/tbe/inv.py +0 -38
  814. mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
  815. mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
  816. mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
  817. mindspore/ops/_op_impl/tbe/invert.py +0 -37
  818. mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
  819. mindspore/ops/_op_impl/tbe/iou.py +0 -38
  820. mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
  821. mindspore/ops/_op_impl/tbe/is_close.py +0 -40
  822. mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
  823. mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
  824. mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
  825. mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
  826. mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
  827. mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
  828. mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
  829. mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
  830. mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
  831. mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
  832. mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
  833. mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
  834. mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
  835. mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
  836. mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
  837. mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
  838. mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
  839. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
  840. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
  841. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
  842. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
  843. mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
  844. mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
  845. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
  846. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
  847. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
  848. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
  849. mindspore/ops/_op_impl/tbe/lerp.py +0 -38
  850. mindspore/ops/_op_impl/tbe/less.py +0 -41
  851. mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
  852. mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
  853. mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
  854. mindspore/ops/_op_impl/tbe/log.py +0 -40
  855. mindspore/ops/_op_impl/tbe/log1p.py +0 -37
  856. mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
  857. mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
  858. mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
  859. mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
  860. mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
  861. mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
  862. mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
  863. mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
  864. mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
  865. mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
  866. mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
  867. mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
  868. mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
  869. mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
  870. mindspore/ops/_op_impl/tbe/lrn.py +0 -41
  871. mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
  872. mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
  873. mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
  874. mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
  875. mindspore/ops/_op_impl/tbe/matmul.py +0 -53
  876. mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
  877. mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
  878. mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
  879. mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
  880. mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
  881. mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
  882. mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
  883. mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
  884. mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
  885. mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
  886. mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
  887. mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
  888. mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
  889. mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
  890. mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
  891. mindspore/ops/_op_impl/tbe/maximum.py +0 -39
  892. mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
  893. mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
  894. mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
  895. mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
  896. mindspore/ops/_op_impl/tbe/minimum.py +0 -40
  897. mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
  898. mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
  899. mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
  900. mindspore/ops/_op_impl/tbe/mish.py +0 -37
  901. mindspore/ops/_op_impl/tbe/mod.py +0 -41
  902. mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
  903. mindspore/ops/_op_impl/tbe/mul.py +0 -37
  904. mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
  905. mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
  906. mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
  907. mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
  908. mindspore/ops/_op_impl/tbe/neg.py +0 -39
  909. mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
  910. mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
  911. mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
  912. mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
  913. mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
  914. mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
  915. mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
  916. mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
  917. mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
  918. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
  919. mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
  920. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
  921. mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
  922. mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
  923. mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
  924. mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
  925. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
  926. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
  927. mindspore/ops/_op_impl/tbe/pack.py +0 -58
  928. mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
  929. mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
  930. mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
  931. mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
  932. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
  933. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
  934. mindspore/ops/_op_impl/tbe/pdist.py +0 -36
  935. mindspore/ops/_op_impl/tbe/pooling.py +0 -46
  936. mindspore/ops/_op_impl/tbe/population_count.py +0 -38
  937. mindspore/ops/_op_impl/tbe/pow.py +0 -41
  938. mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
  939. mindspore/ops/_op_impl/tbe/prelu.py +0 -37
  940. mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
  941. mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
  942. mindspore/ops/_op_impl/tbe/range.py +0 -39
  943. mindspore/ops/_op_impl/tbe/real_div.py +0 -38
  944. mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
  945. mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
  946. mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
  947. mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
  948. mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
  949. mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
  950. mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
  951. mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
  952. mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
  953. mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
  954. mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
  955. mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
  956. mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
  957. mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
  958. mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
  959. mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
  960. mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
  961. mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
  962. mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
  963. mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
  964. mindspore/ops/_op_impl/tbe/relu.py +0 -39
  965. mindspore/ops/_op_impl/tbe/relu6.py +0 -38
  966. mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
  967. mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
  968. mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
  969. mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
  970. mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
  971. mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
  972. mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
  973. mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
  974. mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
  975. mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
  976. mindspore/ops/_op_impl/tbe/renorm.py +0 -39
  977. mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
  978. mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
  979. mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
  980. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
  981. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
  982. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
  983. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
  984. mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
  985. mindspore/ops/_op_impl/tbe/rint.py +0 -37
  986. mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
  987. mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
  988. mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
  989. mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
  990. mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
  991. mindspore/ops/_op_impl/tbe/roll.py +0 -42
  992. mindspore/ops/_op_impl/tbe/round.py +0 -38
  993. mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
  994. mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
  995. mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
  996. mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
  997. mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
  998. mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
  999. mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
  1000. mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
  1001. mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
  1002. mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
  1003. mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
  1004. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
  1005. mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
  1006. mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
  1007. mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
  1008. mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
  1009. mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
  1010. mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
  1011. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
  1012. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
  1013. mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
  1014. mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
  1015. mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
  1016. mindspore/ops/_op_impl/tbe/select.py +0 -38
  1017. mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
  1018. mindspore/ops/_op_impl/tbe/selu.py +0 -39
  1019. mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
  1020. mindspore/ops/_op_impl/tbe/sgd.py +0 -62
  1021. mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
  1022. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
  1023. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
  1024. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
  1025. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
  1026. mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
  1027. mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
  1028. mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
  1029. mindspore/ops/_op_impl/tbe/sign.py +0 -38
  1030. mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
  1031. mindspore/ops/_op_impl/tbe/sin.py +0 -37
  1032. mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
  1033. mindspore/ops/_op_impl/tbe/sinh.py +0 -37
  1034. mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
  1035. mindspore/ops/_op_impl/tbe/slice.py +0 -58
  1036. mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
  1037. mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
  1038. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
  1039. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
  1040. mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
  1041. mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
  1042. mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
  1043. mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
  1044. mindspore/ops/_op_impl/tbe/softmax.py +0 -37
  1045. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
  1046. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
  1047. mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
  1048. mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
  1049. mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
  1050. mindspore/ops/_op_impl/tbe/softplus.py +0 -37
  1051. mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
  1052. mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
  1053. mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
  1054. mindspore/ops/_op_impl/tbe/softsign.py +0 -37
  1055. mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
  1056. mindspore/ops/_op_impl/tbe/sort.py +0 -38
  1057. mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
  1058. mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
  1059. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
  1060. mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
  1061. mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
  1062. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
  1063. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
  1064. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
  1065. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
  1066. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
  1067. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
  1068. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
  1069. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
  1070. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
  1071. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
  1072. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
  1073. mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
  1074. mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
  1075. mindspore/ops/_op_impl/tbe/split_d.py +0 -38
  1076. mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
  1077. mindspore/ops/_op_impl/tbe/split_v.py +0 -39
  1078. mindspore/ops/_op_impl/tbe/splitv.py +0 -39
  1079. mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
  1080. mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
  1081. mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
  1082. mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
  1083. mindspore/ops/_op_impl/tbe/square.py +0 -38
  1084. mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
  1085. mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
  1086. mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
  1087. mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
  1088. mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
  1089. mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
  1090. mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
  1091. mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
  1092. mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
  1093. mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
  1094. mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
  1095. mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
  1096. mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
  1097. mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
  1098. mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
  1099. mindspore/ops/_op_impl/tbe/sub.py +0 -39
  1100. mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
  1101. mindspore/ops/_op_impl/tbe/tan.py +0 -38
  1102. mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
  1103. mindspore/ops/_op_impl/tbe/tanh.py +0 -37
  1104. mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
  1105. mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
  1106. mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
  1107. mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
  1108. mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
  1109. mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
  1110. mindspore/ops/_op_impl/tbe/tile.py +0 -37
  1111. mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
  1112. mindspore/ops/_op_impl/tbe/top_k.py +0 -42
  1113. mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
  1114. mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
  1115. mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
  1116. mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
  1117. mindspore/ops/_op_impl/tbe/transpose.py +0 -60
  1118. mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
  1119. mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
  1120. mindspore/ops/_op_impl/tbe/trunc.py +0 -39
  1121. mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
  1122. mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
  1123. mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
  1124. mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
  1125. mindspore/ops/_op_impl/tbe/unpack.py +0 -38
  1126. mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
  1127. mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
  1128. mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
  1129. mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
  1130. mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
  1131. mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
  1132. mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
  1133. mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
  1134. mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
  1135. mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
  1136. mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
  1137. mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
  1138. mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
  1139. mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
  1140. mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
  1141. mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
  1142. mindspore/ops/_tracefunc.py +0 -241
  1143. mindspore/ops/arg_dtype_cast.py +0 -54
  1144. mindspore/rewrite/api/tree_node_helper.py +0 -60
  1145. mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
  1146. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
  1147. mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
  1148. mindspore/rewrite/namespace.py +0 -53
  1149. mindspore-2.2.11.dist-info/RECORD +0 -1920
  1150. {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/WHEEL +0 -0
  1151. {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
- # Copyright 2022 Huawei Technologies Co., Ltd
+ # Copyright 2022-2023 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@ import numpy as np
  import mindspore as ms
  import mindspore.common.dtype as mstype
  from mindspore.ops import operations as P
+ from mindspore.ops import functional as F
  from mindspore.ops.primitive import constexpr
  from mindspore.ops.primitive import _primexpr
  import mindspore.ops as ops
@@ -31,18 +32,19 @@ from mindspore.ops.operations._inner_ops import DynamicBroadcastTo
  from mindspore.ops.operations._sequence_ops import TupleToTensor
  from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
  from mindspore.ops.operations._sequence_ops import TensorToList
+ from mindspore.ops.auto_generate import OnesLikeExt, ZerosLikeExt, FillScalar, FillTensor, Arange, Chunk, UniqueDim,\
+ Unique2, SortExt, NonZero, NonZeroExt
+ from mindspore.ops.auto_generate.gen_ops_prim import SplitTensor
+ from mindspore.ops.auto_generate.gen_ops_prim import SplitWithSize, RepeatInterleaveInt, RepeatInterleaveTensor

  from mindspore.ops.operations.array_ops import (
  UniqueConsecutive,
  SearchSorted,
- NonZero,
  MatrixDiagV3,
  MatrixDiagPartV3,
  MatrixSetDiagV3,
  Fills,
  Col2Im,
- ArgMaxWithValue,
- ArgMinWithValue,
  ScatterNdMax,
  ScatterNdMul,
  IndexFill,
@@ -52,7 +54,9 @@ from mindspore.ops.operations.array_ops import (
  Lstsq,
  Mvlgamma,
  Tril,
- Argmax
+ Argmax,
+ ArgMaxWithValue,
+ ArgMinWithValue
  )
  from mindspore.ops.operations.array_ops import TensorScatterElements
  from mindspore.common import Tensor
@@ -61,53 +65,83 @@ from mindspore import _checkparam as validator
  from mindspore._c_expression import Tensor as Tensor_
  from mindspore.ops._utils.utils import ms_arrange

- tuple_to_tensor_ = TupleToTensor()
+ from mindspore.ops.auto_generate import cat, range, scatter_nd, deepcopy, masked_fill, diagonal, expand_dims, \
+ flip, transpose, triu, unsorted_segment_sum, diag, gather, gather_d, gather_nd, reshape, \
+ broadcast_to, strided_slice, ones, zeros, max_, min_, select
+ from mindspore.ops.auto_generate.gen_ops_prim import scatter_add_ext_op, slice_ext_op
+ from mindspore.ops.operations.manually_defined import tile, rank, scalar_cast
+
+ arg_max_with_value_ = ArgMaxWithValue()
+ arg_min_with_value_ = ArgMinWithValue()
+ batch_to_space_nd_v2_ = P.BatchToSpaceNDV2()
+ cast_ = P.Cast()
+ diag_ = P.Diag()
+ dynamic_broadcast_to_ = DynamicBroadcastTo()
  eye_ = P.Eye()
  fills_ = Fills()
+ fillv2_ = P.FillV2()
+ flatten_ = P.Flatten()
+ gather_ = P.Gather()
+ gather_d_ = P.GatherD()
+ gather_nd_ = P.GatherNd()
+ ger_ = P.Ger()
+ index_fill_ = IndexFill()
+ lstsq_ = Lstsq()
+ masked_select_ = P.MaskedSelect()
+ matrix_band_part_ = P.array_ops.MatrixBandPart()
  ones_ = P.Ones()
- ones_like_ = P.OnesLike()
- tile_ = P.Tile()
- unique_with_pad_ = P.UniqueWithPad()
- size_ = P.Size()
- shape_ = P.Shape()
+ population_count_ = P.PopulationCount()
+ range_ = P.Range()
  rank_ = P.Rank()
- tensor_shape_ = P.TensorShape()
+ reduce_max_ = P.ReduceMax()
+ reduce_min_ = P.ReduceMin()
  reshape_ = P.Reshape()
- tensor_slice = P.Slice()
- expand_dims_ = P.ExpandDims()
- transpose_ = P.Transpose()
+ scalar_to_tensor_ = P.ScalarToTensor()
  scatter_add_ = P.ScatterAdd()
+ scatter_div_ = P.ScatterDiv()
  scatter_max_ = P.ScatterMax()
  scatter_min_ = P.ScatterMin()
  scatter_mul_ = P.ScatterMul()
- scatter_div_ = P.ScatterDiv()
  scatter_nd_ = P.ScatterNd()
- gather_ = P.Gather()
- gather_d_ = P.GatherD()
- gather_nd_ = P.GatherNd()
- nonzero_ = NonZero()
- scalar_cast_ = P.ScalarCast()
+ scatter_update_ = P.ScatterUpdate()
+ shape_ = P.Shape()
+ split_tensor = SplitTensor()
+ split_with_size = SplitWithSize()
+ size_ = P.Size()
  tensor_scatter_add_ = P.TensorScatterAdd()
- tensor_scatter_sub_ = P.TensorScatterSub()
- tensor_scatter_mul_ = P.TensorScatterMul()
  tensor_scatter_div_ = P.TensorScatterDiv()
- tensor_scatter_min_ = P.TensorScatterMin()
  tensor_scatter_max_ = P.TensorScatterMax()
- scalar_to_tensor_ = P.ScalarToTensor()
- tuple_to_array_ = P.TupleToArray()
- masked_select_ = P.MaskedSelect()
- matrix_band_part_ = P.array_ops.MatrixBandPart()
- ger_ = P.Ger()
- diag_ = P.Diag()
- range_ = P.Range()
- zeros_like_ = P.ZerosLike()
- cast_ = P.Cast()
+ tensor_scatter_min_ = P.TensorScatterMin()
+ tensor_scatter_mul_ = P.TensorScatterMul()
+ tensor_scatter_sub_ = P.TensorScatterSub()
  tensor_select_ = P.Select()
- index_fill_ = IndexFill()
+ tensor_shape_ = P.TensorShape()
+ tensor_slice = P.Slice()
+ tile_ = P.Tile()
+ transpose_ = P.Transpose()
+ tuple_to_array_ = P.TupleToArray()
+ tuple_to_tensor_ = TupleToTensor()
+ unique_ = P.Unique()
+ unique_with_pad_ = P.UniqueWithPad()
+ unsorted_segment_max_ = P.UnsortedSegmentMax()
+ unsorted_segment_min_ = P.UnsortedSegmentMin()
+ unsorted_segment_prod_ = P.UnsortedSegmentProd()
  unsorted_segment_sum_ = P.UnsortedSegmentSum()
- population_count_ = P.PopulationCount()
- reduce_max = P.ReduceMax()
- reduce_min = P.ReduceMin()
+ ones_like_ = P.OnesLike()
+ zeros_like_ = P.ZerosLike()
+ ones_like_ext_ = OnesLikeExt()
+ zeros_like_ext_ = ZerosLikeExt()
+ fill_scalar_ = FillScalar()
+ fill_tensor_ = FillTensor()
+ sort_ext_ = SortExt()
+ arange_ = Arange()
+ chunk_ = Chunk()
+ repeat_interleave_int_ = RepeatInterleaveInt()
+ repeat_interleave_tensor_ = RepeatInterleaveTensor()
+ unique_dim_ = UniqueDim()
+ unique2_ = Unique2()
+ non_zero_ = NonZero()
+ non_zero_ext_ = NonZeroExt()


  @_primexpr
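The assignment block above pre-instantiates each primitive once at module import, replacing scattered per-call construction. A minimal sketch of the same pattern, with names chosen purely for illustration:

from mindspore import Tensor
from mindspore.ops import operations as P
import mindspore.common.dtype as mstype

# One shared primitive instance, created at import time (mirrors cast_ above).
cast_prim = P.Cast()

def to_float32(x: Tensor) -> Tensor:
    # Reuse the shared instance instead of constructing P.Cast() per call.
    return cast_prim(x, mstype.float32)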
@@ -187,8 +221,11 @@ def arange(start=0, end=None, step=1, *, dtype=None):

  Keyword Args:
  dtype (mindspore.dtype, optional): The required data type of returned Tensor. Default: ``None`` .
- If the value is not specified or is ``None`` , the type with the highest precision in the
- `start`, `end`, and `step` parameters is inferred.
+ When `dtype` is not specified or ``None``:
+
+ If `start`, `end`, and `step` are all integers, the dtype of the output is int64.
+
+ If `start`, `end`, and `step` contain at least one floating-point number, the dtype of the output is float32.

  Returns:
  A 1-D Tensor, with the same type as the inputs.
@@ -225,7 +262,7 @@ def arange(start=0, end=None, step=1, *, dtype=None):
  >>> print(output)
  [12. 11. 10. 9. 8. 7. 6. 5. 4. 3.]
  >>> print(output.dtype)
- Float64
+ Float32
  """
  if end is None:
  start, end = 0, start
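The dtype rule documented above is easy to check directly; a small sketch, assuming the documented int64/float32 inference:

from mindspore import ops

# All-integer start/end/step: the documented result dtype is int64.
print(ops.arange(0, 5).dtype)       # expected: Int64
# Any float among start/end/step: the documented result dtype is float32.
print(ops.arange(0, 5, 0.5).dtype)  # expected: Float32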
@@ -237,67 +274,84 @@ def arange(start=0, end=None, step=1, *, dtype=None):
  if start.shape != () or end.shape != () or step.shape != ():
  raise ValueError(f"For arange, the input args must be a TensorScalar,"
  f" but got start shape:{start.shape}, end shape:{end.shape}, step shape:{step.shape}")
- range_op = _get_cache_prim(P.Range)()
- data = range_op(start, end, step)
+ data = range_(start, end, step)
  if dtype is not None:
  data = cast_(data, dtype)
  return data


- def cat(tensors, axis=0):
+ def arange_ext(start=0, end=None, step=1, *, dtype=None):
  r"""
- Connect input tensors along with the given axis.
+ Creates a sequence of numbers that begins at `start` and extends by increments of
+ `step` up to but not including `end`.

- The input data is a tuple or a list of tensors. These tensors have the same rank :math:`R`.
- Set the given axis as :math:`m`, and :math:`0 \le m < R`. Set the number of input tensors as :math:`N`.
- For the :math:`i`-th tensor :math:`t_i`, it has the shape of :math:`(x_1, x_2, ..., x_{mi}, ..., x_R)`.
- :math:`x_{mi}` is the :math:`m`-th dimension of the :math:`t_i`. Then, the shape of the output tensor is
+ Args:
+ start (Union[float, int], optional): The start of the interval. Default: ``0`` .
+ end (Union[float, int], optional): The end of the interval, exclusive.
+ Default: ``None`` . If ``None`` , it defaults to the value of `start`, and 0 is used as the starting value.
+ step (Union[float, int], optional): The step size with which the array element increments. Default: ``1`` .

- .. math::
+ Keyword Args:
+ dtype (mindspore.dtype, optional): The required data type of returned Tensor. Default: ``None`` .
+ When `dtype` is not specified or ``None``:

- (x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)
+ If `start`, `end`, and `step` are all integers, the dtype of the output is int64.

- Args:
- tensors (Union[tuple, list]): A tuple or a list of input tensors.
- Suppose there are two tensors in this tuple or list, namely t1 and t2.
- To perform `concat` in the axis 0 direction, except for the :math:`0`-th axis,
- all other dimensions should be equal, that is,
- :math:`t1.shape[1] = t2.shape[1], t1.shape[2] = t2.shape[2], ..., t1.shape[R-1] = t2.shape[R-1]`,
- where :math:`R` represents the rank of tensor.
- axis (int): The specified axis, whose value is in range :math:`[-R, R)`. Default: ``0`` .
+ If `start`, `end`, and `step` contain at least one floating-point number, the dtype of the output is float32.

  Returns:
- Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
- The data type is the same with `tensors`.
+ A 1-D Tensor, cast to `dtype` if provided; the cast may lose precision.

  Raises:
- TypeError: If `axis` is not an int.
- ValueError: If `tensors` have different dimension of tensor.
- ValueError: If `axis` not in range :math:`[-R, R)`.
- RuntimeError: If tensor's shape in `tensors` except for `axis` are different.
+ TypeError: If `start`, `end` or `step` are not of type int or float.
+ ValueError: If `step` = 0.
+ ValueError: If `start` >= `end` when `step` > 0.
+ ValueError: If `start` <= `end` when `step` < 0.

  Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ ``Ascend``

  Examples:
- >>> import mindspore
- >>> import numpy as np
+ >>> import mindspore as ms
  >>> from mindspore import Tensor, ops
- >>> input_x1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
- >>> input_x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
- >>> output = ops.cat((input_x1, input_x2))
+ >>> output = ops.arange_ext(1, 6)
  >>> print(output)
- [[0. 1.]
- [2. 1.]
- [0. 1.]
- [2. 1.]]
- >>> output = ops.cat((input_x1, input_x2), 1)
+ [1 2 3 4 5]
+ >>> print(output.dtype)
+ Int64
+ >>> output = ops.arange_ext(0, 3, 1.2)
+ >>> print(output)
+ [0. 1.2 2.4]
+ >>> print(output.dtype)
+ Float32
+ >>> output = ops.arange_ext(7, 1, -2)
  >>> print(output)
- [[0. 1. 0. 1.]
- [2. 1. 2. 1.]]
+ [7 5 3]
+ >>> print(output.dtype)
+ Int64
+ >>> output = ops.arange_ext(12, 2, -1, dtype=ms.bfloat16)
+ >>> print(output)
+ [12. 11. 10. 9. 8. 7. 6. 5. 4. 3.]
+ >>> print(output.dtype)
+ BFloat16
+ """
+ if end is None:
+ start, end = 0, start
+ return arange_(start, end, step, dtype)
+
+
+ def concat(tensors, axis=0):
+ """
+ Alias for :func:`mindspore.ops.cat()`.
+
+ Tutorial Examples:
+ - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/master/beginner/tensor.html#tensor-operation>`_
+ - `Vision Transformer Image Classification - Building ViT as a whole
+ <https://mindspore.cn/tutorials/application/en/master/cv/vit.html#building-vit-as-a-whole>`_
+ - `Sentiment Classification Implemented by RNN - Dense
+ <https://mindspore.cn/tutorials/application/en/master/nlp/sentiment_analysis.html#dense>`_
  """
- _concat = _get_cache_prim(P.Concat)(axis)
- return _concat(tensors)
+ return cat(tensors, axis)


  def eye(n, m=None, dtype=None):
@@ -305,14 +359,14 @@ def eye(n, m=None, dtype=None):
  Creates a tensor with ones on the diagonal and zeros in the rest.

  Note:
- Combines ReverseV2 operator to get an anti-diagonal Tensor,
- but ReverseV2 only supports Ascend and GPU platforms currently.
+ The data type of returned tensor can be float16, float32, int8, int16, int32, int64, uint8
+ or bool on Ascend platforms.

  Args:
  n (int): The number of rows of returned tensor. Constant value only.
- m (int): The number of columns of returned tensor. Constant value only.
+ m (int, optional): The number of columns of returned tensor. Constant value only.
  Default: ``None`` , if ``None`` , the number of columns is as the same as n.
- dtype (mindspore.dtype): MindSpore's dtype, the data type of the returned tensor.
+ dtype (mindspore.dtype, optional): MindSpore's dtype, the data type of the returned tensor.
  The data type can be bool or Number.
  Default: ``None`` , the data type of the returned tensor is mindspore.float32.

@@ -336,11 +390,11 @@ def eye(n, m=None, dtype=None):
  [0 1]]
  >>> print(output.dtype)
  Int32
- >>> output = ops.eye(1, 2, mindspore.float64)
+ >>> output = ops.eye(1, 2, mindspore.float32)
  >>> print(output)
  [[1. 0.]]
  >>> print(output.dtype)
- Float64
+ Float32
  >>> output = ops.eye(2, dtype=mindspore.int32)
  >>> print(output)
  [[1 0]
@@ -419,25 +473,25 @@ def hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype
  return out


- def where(condition, x, y):
+ def where(condition, input, other):
  r"""
- Selects elements from `x` or `y` based on `condition` and returns a tensor.
+ Selects elements from `input` or `other` based on `condition` and returns a tensor.

  .. math::
- output_i = \begin{cases} x_i,\quad &if\ condition_i \\ y_i,\quad &otherwise \end{cases}
+ output_i = \begin{cases} input_i,\quad &if\ condition_i \\ other_i,\quad &otherwise \end{cases}

  Args:
- condition (Tensor[bool]): If True, yield `x`, otherwise yield `y`.
- x (Union[Tensor, Scalar]): When `condition` is True, values to select from.
- y (Union[Tensor, Scalar]): When `condition` is False, values to select from.
+ condition (Tensor[bool]): If True, yield `input`, otherwise yield `other`.
+ input (Union[Tensor, Scalar]): When `condition` is True, values to select from.
+ other (Union[Tensor, Scalar]): When `condition` is False, values to select from.

  Returns:
- Tensor, elements are selected from `x` and `y`.
+ Tensor, elements are selected from `input` and `other`.

  Raises:
  TypeError: If `condition` is not a Tensor.
- TypeError: If both `x` and `y` are scalars.
- ValueError: If `condition`, `x` and `y` can not broadcast to each other.
+ TypeError: If both `input` and `other` are scalars.
+ ValueError: If `condition`, `input` and `other` can not broadcast to each other.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -454,66 +508,15 @@ def where(condition, input, other):
  [[0. 1.]
  [2. 1.]]
  """
- if not isinstance(condition, Tensor):
- raise TypeError(f"For 'where', 'condition' must be a Tensor, but got {type(condition)}.")
- if isinstance(x, (int, float)):
- if not isinstance(y, Tensor):
- raise TypeError(
- f"For 'where', at least one of 'x' and 'y' should be Tensor, but got x:{type(x)}, y:{type(y)}."
- )
- x = cast_(x, y.dtype)
- elif isinstance(y, (int, float)):
- if not isinstance(x, Tensor):
- raise TypeError(
- f"For 'where', at least one of 'x' and 'y' should be Tensor, but got x:{type(x)}, y:{type(y)}."
- )
- y = cast_(y, x.dtype)
- output_shape = _calc_broadcast_shape(x.shape, y.shape, condition.shape)
- condition = broadcast_to(condition, output_shape)
- x = broadcast_to(x, output_shape)
- y = broadcast_to(y, output_shape)
- _select = P.Select()
- return _select(condition, x, y)
+ return tensor_select_(condition, input, other)


  def reverse(x, axis):
  """
- Reverses specific dimensions of a tensor.
-
- .. warning::
- The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "input_x".
-
- Args:
- x (Tensor): The target tensor.
- The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- axis (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.
-
- Outputs:
- Tensor, has the same shape and type as `x`.
-
- Raises:
- TypeError: If `axis` is neither list nor tuple.
- TypeError: If element of `axis` is not an int.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
- >>> output = ops.reverse(input_x, axis=[1])
- >>> print(output)
- [[4 3 2 1]
- [8 7 6 5]]
- >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
- >>> output = ops.reverse(input_x, axis=[1, 0])
- >>> print(output)
- [[8 7 6 5]
- [4 3 2 1]]
+ :func:`mindspore.ops.reverse` will be deprecated in the future.
+ Please use :func:`mindspore.ops.flip` instead.
  """
- return P.ReverseV2(axis)(x)
+ return flip(x, axis)


  def ravel(input):
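With the rewrite above, `where` forwards straight to the Select primitive; the calling contract from the docstring is unchanged. A short usage sketch based on that docstring:

import numpy as np
import mindspore
from mindspore import Tensor, ops

a = Tensor(np.arange(4).reshape((2, 2)), mindspore.float32)
b = Tensor(np.ones((2, 2)), mindspore.float32)
# Elementwise: take a[i] where the condition holds, otherwise b[i].
print(ops.where(a < 3, a, b))  # expected: [[0. 1.] [2. 1.]]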
@@ -659,8 +662,9 @@ def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
  other locations take value `off_value`.

  Note:
- If the input indices is rank `N`, the output will have rank `N+1`. The new axis is created at dimension `axis`.
- On Ascend, if `on_value` is Int64 dtype, `indices` must be Int64 dtype.
+ If the input `indices` has rank `N`, the output will have rank `N+1`.
+ The new axis is created at dimension `axis`. On Ascend, if `on_value` is int64 dtype, `indices` must be
+ int64 dtype, and the value for `on_value` and `off_value` can only be 1 and 0.

  Args:
  indices(Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
@@ -682,6 +686,7 @@ def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
  Raises:
  TypeError: If `axis` or `depth` is not an int.
  TypeError: If dtype of `indices` is not int32 or int64.
+ TypeError: If dtype of `on_value` is not int32, int64, float16 or float32.
  TypeError: If `indices`, `on_value` or `off_value` is not a Tensor.
  ValueError: If `axis` is not in range [-1, ndim].
  ValueError: If `depth` is less than 0.
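The rank note above (`N` -> `N+1`) means a 1-D index vector produces a 2-D one-hot matrix; a small sketch:

import numpy as np
import mindspore
from mindspore import Tensor, ops

indices = Tensor(np.array([0, 1, 2]), mindspore.int32)
# depth=3 appends a new axis of size 3 at the default axis=-1.
output = ops.one_hot(indices, 3)
print(output.shape)  # expected: (3, 3)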
@@ -715,8 +720,8 @@ def fill(type, shape, value): # pylint: disable=redefined-outer-name

  Args:
  type (mindspore.dtype): The specified type of output tensor. The data type only supports
- `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ and
- `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ and
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ .
  shape (Union(Tensor, tuple[int])): The specified shape of output tensor.
  value (Union(Tensor, number.Number, bool)): Value to fill the returned tensor.
@@ -743,7 +748,7 @@ def fill(type, shape, value): # pylint: disable=redefined-outer-name
  [0. 0. 0.]]
  """
  value = cast_(value, type)
- return _get_cache_prim(P.FillV2)()(shape, value)
+ return fillv2_(shape, value)


  def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-name
@@ -791,6 +796,45 @@ def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-na
  return ops.fill(dtype, size, fill_value)


+ def full_ext(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-name
+ """
+ Create a Tensor of the specified shape and fill it with the specified value.
+
+ Args:
+ size (Union(tuple[int], list[int])): The specified shape of output tensor.
+ fill_value (number.Number): Value to fill the returned tensor. Complex numbers are not supported for now.
+
+ Keyword Args:
+ dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported; for details,
+ please refer to :class:`mindspore.dtype` . Default: ``None`` .
+
+ Returns:
+ Tensor.
+
+ Raises:
+ TypeError: If `size` is not a tuple or list.
+ ValueError: If an element in `size` is less than 0.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> from mindspore import ops
+ >>> output = ops.full((2, 2), 1)
+ >>> print(output)
+ [[1. 1.]
+ [1. 1.]]
+ >>> output = ops.full((3, 3), 0)
+ >>> print(output)
+ [[0. 0. 0.]
+ [0. 0. 0.]
+ [0. 0. 0.]]
+ """
+ if isinstance(fill_value, Tensor):
+ return fill_tensor_(size, fill_value, dtype)
+ return fill_scalar_(size, fill_value, dtype)
+
+
  def full_like(input, fill_value, *, dtype=None):
  """
  Return a Tensor of the same shape as `input` and filled with `fill_value`.
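The new `full_ext` above routes Tensor fill values to FillTensor and scalars to FillScalar; note that its Examples block exercises the public `ops.full` entry point, which keeps the same contract. A usage sketch echoing those examples:

from mindspore import ops

# Same call shape as the Examples above: a (2, 2) tensor filled with 1.
print(ops.full((2, 2), 1))  # expected per the docstring: [[1. 1.] [1. 1.]]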
@@ -883,24 +927,63 @@ def chunk(input, chunks, axis=0):
  length_along_dim = arr_shape[arr_axis]

  if chunks > length_along_dim:
- res = P.Split(arr_axis, length_along_dim)(input)
+ res = _get_cache_prim(P.Split)(arr_axis, length_along_dim)(input)
  elif length_along_dim % chunks == 0:
- res = P.Split(arr_axis, chunks)(input)
+ res = _get_cache_prim(P.Split)(arr_axis, chunks)(input)
  else:
  block_size = int(np.ceil(length_along_dim / chunks))
  true_chunks = int(length_along_dim // block_size)
  length1 = true_chunks * block_size
  length2 = length_along_dim - length1
- start1 = _list_comprehensions(rank(input), 0, True)
+ start1 = _list_comprehensions(rank_(input), 0, True)
  size1 = _tuple_setitem(arr_shape, arr_axis, length1)
  start2 = _tuple_setitem(start1, arr_axis, length1)
  size2 = _tuple_setitem(arr_shape, arr_axis, length2)
- res = P.Split(arr_axis, true_chunks)(tensor_slice(input, start1, size1))
+ res = _get_cache_prim(P.Split)(arr_axis, true_chunks)(tensor_slice(input, start1, size1))
  if length2:
- res += P.Split(arr_axis, 1)(tensor_slice(input, start2, size2))
+ res += _get_cache_prim(P.Split)(arr_axis, 1)(tensor_slice(input, start2, size2))
  return res


+ def chunk_ext(input, chunks, dim=0):
+ """
+ Cut the input Tensor into `chunks` sub-tensors along the specified axis.
+
+ Note:
+ This function may return fewer than the specified number of chunks!
+
+ Args:
+ input (Tensor): A Tensor to be cut.
+ chunks (int): Number of sub-tensors to cut.
+ dim (int, optional): Specify the dimension along which to split. Default: ``0`` .
+
+ Returns:
+ A tuple of sub-tensors.
+
+ Raises:
+ TypeError: If argument `input` is not Tensor.
+ TypeError: If argument `chunks` is not int.
+ TypeError: If argument `dim` is not int.
+ ValueError: If argument `dim` is out of range of :math:`[-input.ndim, input.ndim)` .
+ ValueError: If argument `chunks` is not a positive number.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> input_x = np.arange(9).astype("float32")
+ >>> output = mindspore.mint.chunk(Tensor(input_x), 3)
+ >>> print(output)
+ (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
+ Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
+ Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
+ """
+ return chunk_(input, chunks, dim)


  def fills(x, value):
  """
  `fills` is deprecated, please use `ops.fill` instead.
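The chunk edits above wrap every `P.Split` construction in `_get_cache_prim`, which hands back one primitive instance per distinct constructor-argument tuple instead of building a new Split on every call. A simplified sketch of that caching idea (illustrative restatement, not the MindSpore implementation of `_get_cache_prim`):

# Illustrative re-statement of the caching idea behind _get_cache_prim.
_PRIM_CACHE = {}

def get_cache_prim_sketch(prim_cls, *args):
    key = (prim_cls, args)
    if key not in _PRIM_CACHE:
        _PRIM_CACHE[key] = prim_cls(*args)  # construct once per argument tuple
    return _PRIM_CACHE[key]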
@@ -920,50 +1003,6 @@ def fills(x, value):
  return fills_(x, value_)


- def ones(shape, dtype=None): # pylint: disable=redefined-outer-name
- r"""
- Creates a tensor filled with value ones.
-
- Creates a tensor with shape described by the first argument and fills it with value ones in type of the second
- argument.
-
- Args:
- shape (Union[tuple[int], int, Tensor]): The specified shape of output tensor. Only positive integer or
- tuple or Tensor containing positive integers are allowed. If it is a Tensor,
- it must be a 0-D or 1-D Tensor with int32 or int64 dtypes.
- dtype (:class:`mindspore.dtype`): The specified type of output tensor. If `dtype` is ``None`` ,
- `mindspore.float32` will be used. Default: ``None`` .
-
- Returns:
- Tensor, has the same type and shape as input shape value.
-
- Raises:
- TypeError: If `shape` is not tuple, int or Tensor.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> from mindspore import ops
- >>> output = ops.ones((2, 2), mindspore.float32)
- >>> print(output)
- [[1. 1.]
- [1. 1.]]
- """
- _dtype = mstype.float32 if dtype is None else dtype
- ones_op = _get_cache_prim(P.FillV2)()
- value = Tensor(1, _dtype)
- if isinstance(shape, int):
- shape = tuple([shape])
- elif isinstance(shape, list):
- shape = Tensor(shape, dtype=mstype.int64)
- elif isinstance(shape, Tensor) and shape.ndim == 0 and shape.size == 1:
- shape = shape.reshape(1)
- output = ones_op(shape, value)
- return output
-
-
  def ones_like(input, *, dtype=None):
  """
  Returns a Tensor with a value of 1 and its shape is the same as the input.
@@ -993,57 +1032,15 @@ def ones_like(input, *, dtype=None):
  [[1 1]
  [1 1]]
  """
- ones_like_op = _get_cache_prim(P.OnesLike)()
- output = ones_like_op(input)
+ output = ones_like_(input)
  _dtype = input.dtype if dtype is None else dtype
  output = cast_(output, _dtype)
  return output


- def zeros(size, dtype=None): # pylint: disable=redefined-outer-name
- r"""
- Creates a tensor filled with 0 with shape described by `shape` and fills it with value 0 in type of `dtype`.
-
- Args:
- size (Union[tuple[int], int, Tensor]): The specified shape of output tensor. Only positive integer or
- tuple or Tensor containing positive integers are allowed. If it is a Tensor,
- it must be a 0-D or 1-D Tensor with int32 or int64 dtypes.
- dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor. If `dtype` is ``None`` ,
- mindspore.float32 will be used. Default: ``None`` .
-
- Returns:
- Tensor, has the same dtype and size as input.
-
- Raises:
- TypeError: If `size` is not tuple, int or Tensor.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> from mindspore import ops
- >>> output = ops.zeros((2, 2), mindspore.float32)
- >>> print(output)
- [[0. 0.]
- [0. 0.]]
- """
- zero_op = _get_cache_prim(P.FillV2)()
- _dtype = mstype.float32 if dtype is None else dtype
- value = Tensor(0, _dtype)
- if isinstance(size, int):
- size = tuple([size])
- elif isinstance(size, list):
- size = Tensor(size, dtype=mstype.int64)
- elif isinstance(size, Tensor) and size.ndim == 0 and size.size == 1:
- size = size.reshape(1)
- output = zero_op(size, value)
- return output
-
-
  def zeros_like(input, *, dtype=None):
  r"""
- Creates a tensor filled with 0, with the same size as x, and the given dtype.
+ Creates a tensor filled with 0, with the same size as input, and the given dtype.

  If `dtype = None`, the tensor will have the same dtype as input `input`.
@@ -1074,125 +1071,78 @@ def zeros_like(input, *, dtype=None):
  [0. 0.]]
  """
  _dtype = input.dtype if dtype is None else dtype
- _zeros_like = _get_cache_prim(P.ZerosLike)()
- _cast = _get_cache_prim(P.Cast)()
- output = _zeros_like(input)
- output = _cast(output, _dtype)
+ output = zeros_like_(input)
+ output = cast_(output, _dtype)
  return output


- def tile(input, multiples):
- r"""
- Replicates an input tensor with given multiples times.
-
- Creates a new tensor by replicating `input` `multiples` times. The i'th dimension of
- output tensor has `input.shape[i] * multiples[i]` elements, and the values of `input`
- are replicated `multiples[i]` times along the i'th dimension.
+ def ones_like_ext(input, *, dtype=None):
+ """
+ Creates a tensor filled with 1, with the same shape as input, and its data type is determined by the given dtype.

- Note:
- The length of `multiples` must be greater or equal to the length of dimension in `input`.
+ If `dtype = None`, the tensor will have the same dtype as input `input`.

  Args:
- input (Tensor): 1-D or higher dimensional Tensor. Set the shape of input tensor as
- :math:`(x_1, x_2, ..., x_S)` .
+ input (Tensor): Tensor of any dimension.

- multiples (tuple[int]): The parameter that specifies the number of replications,
- the parameter type is tuple, and the data type is int, i.e., :math:`(y_1, y_2, ..., y_S)`.
- The length of `multiples` cannot be smaller than the length of the shape of `input`.
- Only constant value is allowed.
+ Keyword Args:
+ dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is ``None`` ,
+ the dtype of the input tensor will be used. Default: ``None`` .

  Returns:
- Tensor, has the same data type as the `input`. Suppose the length of `multiples` is `d`,
- the dimension of `input` is `input.dim`, and the shape of `input` is :math:`(x_1, x_2, ..., x_S)`.
-
- - If `input.dim = d`, then the shape of their corresponding positions can be multiplied, and
- the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_S)`.
- - If `input.dim < d`, fill in multiple 1 in the length of the shape of `input` until their
- lengths are consistent. Such as set the shape of `input` as :math:`(1, ..., x_1, x_2, ..., x_S)`,
- then the shape of their corresponding positions can be multiplied, and the shape of Outputs is
- :math:`(1*y_1, ..., x_R*y_R, x_S*y_S)`.
+ Tensor, has the same shape as `input` but filled with ones.

  Raises:
- TypeError: If `multiples` is not a tuple or its elements are not all int.
- ValueError: If the elements of `multiples` are not all greater than 0.
- ValueError: If the length of `multiples` are smaller than the length of dimension in `input`.
+ TypeError: If `input` is not a Tensor.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>> import mindspore
  >>> import numpy as np
  >>> from mindspore import Tensor, ops
- >>> input = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
- >>> multiples = (2, 3)
- >>> output = ops.tile(input, multiples)
- >>> print(output)
- [[1. 2. 1. 2. 1. 2.]
- [3. 4. 3. 4. 3. 4.]
- [1. 2. 1. 2. 1. 2.]
- [3. 4. 3. 4. 3. 4.]]
- >>> multiples = (2, 3, 2)
- >>> output = ops.tile(input, multiples)
- >>> print(output)
- [[[1. 2. 1. 2.]
- [3. 4. 3. 4.]
- [1. 2. 1. 2.]
- [3. 4. 3. 4.]
- [1. 2. 1. 2.]
- [3. 4. 3. 4.]]
- [[1. 2. 1. 2.]
- [3. 4. 3. 4.]
- [1. 2. 1. 2.]
- [3. 4. 3. 4.]
- [1. 2. 1. 2.]
- [3. 4. 3. 4.]]]
- """
- tile_op = _get_cache_prim(P.Tile)()
- return tile_op(input, multiples)
-
-
- def range(start, end, step):
+ >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
+ >>> output = ops.function.array_func.ones_like_ext(x)
+ >>> print(output)
+ [[1 1]
+ [1 1]]
+ """
+ return ones_like_ext_(input, dtype)
+
+
+ def zeros_like_ext(input, *, dtype=None):
  r"""
- Creates a sequence of numbers that begins at `start` and extends by increments of
- `limit` up to but not including `end`.
+ Creates a tensor filled with 0, with the same size as input. Its data type is determined by the given dtype.

- The types of all 3 inputs must be the same. The type of the resulting tensor is
- the same as the type of the inputs.
+ If `dtype = None`, the tensor will have the same dtype as input `input`.

  Args:
- start (Tensor): A scalar Tensor. The first number in the sequence. Must have
- type: int32 ,int64, float32 or float64.
- end (Tensor): A scalar Tensor. Upper limit of the sequence, exclusive. Must
- have type: int32 ,int64, float32 or float64.
- step (Tensor): A scalar Tensor. Number that increments `start`. Must have
- type: int32 ,int64, float32 or float64.
+ input (Tensor): Tensor of any dimension.
+
+ Keyword Args:
+ dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is ``None`` ,
+ the dtype of the input tensor will be used. Default: ``None`` .

  Returns:
- A 1-D Tensor, with the same type as the inputs.
+ Tensor, filled with 0.

  Raises:
- TypeError: If `start`, `end` or `step` is not scalar Tensor.
- TypeError: If datatype of `start`, `end` or `step` is not same.
- TypeError: If datatype of `start`, `end` or `step` is not supported.
- ValueError: If `step` = 0.
- ValueError: If `start` >= `end` when `step` > 0.
- ValueError: If `start` <= `end` when `step` < 0.
+ TypeError: If dtype is not a MindSpore dtype.

  Supported Platforms:
- ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
  >>> from mindspore import Tensor, ops
- >>> from mindspore import dtype as mstype
- >>> start = Tensor(0, mstype.int32)
- >>> end = Tensor(10, mstype.int32)
- >>> step = Tensor(4, mstype.int32)
- >>> output = ops.range(start, end, step)
+ >>> x = Tensor(np.arange(4).reshape(2, 2))
+ >>> output = ops.function.array_func.zeros_like_ext(x, dtype=mindspore.float32)
  >>> print(output)
- [0 4 8]
+ [[0. 0.]
+ [0. 0.]]
  """
- return range_(start, end, step)
+ return zeros_like_ext_(input, dtype)


  ##############################
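The hand-written `tile` and `range` wrappers removed above are superseded by the `tile`/`rank`/`scalar_cast` import from `mindspore.ops.operations.manually_defined` and the `range_` primitive instantiated earlier in this diff; public call sites keep the same shape. A brief sketch:

import mindspore
from mindspore import Tensor, ops

x = Tensor([[1, 2], [3, 4]], mindspore.float32)
# ops.tile keeps its public signature after the wrapper removal above.
print(ops.tile(x, (2, 1)).shape)  # expected: (4, 2)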
@@ -1228,7 +1178,70 @@ def unique(input):
  TypeError: If `input` is not a Tensor.

  Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, nn
+ >>> from mindspore import ops
+ >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
+ >>> output = ops.unique(x)
+ >>> print(output)
+ (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))
+ >>> y = output[0]
+ >>> print(y)
+ [1 2 5]
+ >>> idx = output[1]
+ >>> print(idx)
+ [0 1 2 1]
+ """
+ shape_x = input.shape
+ length_x = get_x_shape(shape_x)
+ input = reshape_(input, length_x)
+ y, idx = unique_(input)
+ idx = reshape_(idx, shape_x)
+ return y, idx
+
+
+ def unique_ext(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
+ """
+ Returns the unique elements of input tensor.
+
+ When `return_inverse=True`, it also returns a tensor containing the index of each value of the input
+ tensor corresponding to the output unique tensor.
+ When `return_counts=True`, it also returns a tensor containing the number of occurrences of each
+ unique value or tensor.
+
+ Args:
+ input (Tensor): The input tensor.
+ sorted(bool): Whether to sort the unique elements in ascending order before returning as output.
+ Default: ``True`` .
+ return_inverse(bool): Whether to also return the indices for where elements in the original input ended up in
+ the returned unique list. Default: ``False`` .
+ return_counts(bool): Whether to also return the counts for each unique element. Default: ``False`` .
+ dim(int): The dimension to operate upon. If ``None``, the unique of the flattened input is returned.
+ Otherwise, each of the tensors indexed by the given dimension is treated as one of the elements to apply the
+ unique operation upon. Default: ``None`` .
+
+ Returns:
+ A tensor or a tuple of tensors (`output`, `inverse_indices`, `counts`).
+
+ - output(Tensor) - The output tensor including the unique elements of input tensor; it has the same dtype as input.
+ - inverse_indices(Tensor) - Returned when ``return_inverse`` is True. It represents the indices for where
+ elements in the original input map to in the output. When ``dim`` is ``None``, it has the same shape as input;
+ otherwise, the shape is input.shape[dim].
+ - counts(Tensor) - Returned when ``return_counts`` is True. It represents the number of occurrences for each
+ unique value or tensor. When ``dim`` is ``None``, it has the same shape as output; otherwise, the shape is
+ output.shape[dim].
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+
+ Supported Platforms:
+ ``Ascend``

  Examples:
  >>> import mindspore
@@ -1236,9 +1249,9 @@ def unique(input):
  >>> from mindspore import Tensor, nn
  >>> from mindspore import ops
  >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
- >>> output = ops.unique(x)
+ >>> output = ops.unique_ext(x, return_inverse=True)
  >>> print(output)
- (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))
+ (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int64, value= [0, 1, 2, 1]))
  >>> y = output[0]
  >>> print(y)
  [1 2 5]
@@ -1246,16 +1259,20 @@ def unique(input):
  >>> print(idx)
  [0 1 2 1]
  """
-
- unique_op = _get_cache_prim(P.Unique)()
- reshape_op = _get_cache_prim(P.Reshape)()
-
- shape_x = input.shape
- length_x = get_x_shape(shape_x)
- input = reshape_op(input, length_x)
- y, idx = unique_op(input)
- idx = reshape_op(idx, shape_x)
- return y, idx
+ if not F.isconstant(return_inverse) or not F.isconstant(return_counts):
+ raise ValueError(f"For 'unique_ext', 'return_inverse' and 'return_counts' cannot be mutable")
+ if dim is None:
+ y, inverse, counts = unique2_(input, sorted, return_inverse, return_counts)
+ else:
+ validator.check_value_type("return_counts", return_counts, [bool], "unique_ext")
+ y, inverse, counts = unique_dim_(input, sorted, return_inverse, dim)
+ if return_inverse and return_counts:
+ return y, inverse, counts
+ if return_inverse:
+ return y, inverse
+ if return_counts:
+ return y, counts
+ return y


  def unique_with_pad(x, pad_num):
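The flag dispatch at the end of `unique_ext` returns only what was requested; a usage sketch mirroring the docstring example (assuming `unique_ext` is reachable as shown there):

import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
# return_inverse=True selects the (y, inverse) branch of the dispatch.
y, inverse = ops.unique_ext(x, return_inverse=True)
print(y)        # expected: [1 2 5]
print(inverse)  # expected: [0 1 2 1]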
@@ -1363,7 +1380,7 @@ def unique_consecutive(input, return_idx=False, return_counts=False, axis=None):
  return output


- def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):
+ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, sorter=None):
  """
  Return the position indices such that after inserting the values into the `sorted_sequence`, the order of innermost
  dimension of the `sorted_sequence` remains unchanged.
@@ -1378,16 +1395,24 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):
  if ``False`` , the output datatype will be int64. Default: ``False`` .
  right (bool, optional): Search Strategy. If ``True`` , return the last suitable index found;
  if ``False`` , return the first such index. Default: ``False`` .
+ side (str, optional): The same as `right`, but preferred. ``"left"`` corresponds to ``False`` for `right`,
+ and ``"right"`` corresponds to ``True`` for `right`. An error is reported if this parameter is
+ set to ``"left"`` while `right` is ``True``. Default: ``None`` .
+ sorter (Tensor, optional): If provided, a tensor matching the shape of the unsorted `sorted_sequence`,
+ containing a sequence of indices that sort it in ascending order on the innermost
+ dimension; its type must be int64. Default: ``None`` .

  Returns:
  Tensor containing the indices from the innermost dimension of `sorted_sequence` such that,
- if insert the corresponding value in the `values` tensor, the order of `sorted_sequence` would be preserved,
+ if insert the corresponding value in the `values` Tensor, the order of `sorted_sequence` would be preserved,
  whose datatype is int32 if out_int32 is ``True`` , otherwise int64, and shape is the same as the shape of
  `values`.

  Raises:
  ValueError: If the dimension of `sorted_sequence` isn't 1 and all dimensions except the last dimension of
  `sorted_sequence` and `values` are different.
+ ValueError: If `sorted_sequence` value is a scalar.
+ ValueError: If `values` is a scalar when `sorted_sequence` dimension is not 1.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -1404,10 +1429,16 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):
  [1 2 4]]
  """

- _check_attr_dtype("out_int32", out_int32, [bool], "search_sorted")
- dtype = mstype.int64 if not out_int32 else mstype.int32
+ validator.check_value_type("out_int32", out_int32, [bool], "search_sorted")
+ validator.check_value_type("right", right, [bool], "search_sorted")
+ dtype = mstype.int32 if bool(out_int32) else mstype.int64
+ if (side == "left" and right is True):
+ raise ValueError(f"For 'searchsorted', side and right can't be set to opposites, "
+ f"got side of left while right was True.")
+ if side == "right":
+ right = True
  search_sorted_ = SearchSorted(dtype, right)
- return search_sorted_(sorted_sequence, values)
+ return search_sorted_(sorted_sequence, values, sorter)


  def ger(input, vec2):
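In the updated `searchsorted`, `side="right"` simply forces `right=True`, while `side="left"` combined with `right=True` raises; a short sketch:

import numpy as np
import mindspore
from mindspore import Tensor, ops

seq = Tensor(np.array([1, 3, 5, 7, 9]), mindspore.int32)
vals = Tensor(np.array([3, 6, 9]), mindspore.int32)
print(ops.searchsorted(seq, vals))                # first suitable indices
print(ops.searchsorted(seq, vals, side="right"))  # same as right=True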
@@ -1457,7 +1488,7 @@ def size(input_x):

  Args:
  input_x (Tensor): Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
- `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.

  Returns:
  int. A scalar representing the elements' size of `input_x`, tensor is the number of elements
@@ -1538,76 +1569,6 @@ def dyn_shape(input_x):
  return tensor_shape_(input_x)


- def rank(input_x):
- """
- Returns the rank of a tensor.
-
- Returns a 0-D int32 Tensor representing the rank of input; the rank of a tensor
- is the number of indices required to uniquely select each element of the tensor.
-
- Args:
- input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is Number.
-
- Returns:
- Tensor. 0-D int32 Tensor representing the rank of input, i.e., :math:`R`. The data type is an int.
-
- Raises:
- TypeError: If `input_x` is not a Tensor.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
- >>> output = ops.rank(input_tensor)
- >>> print(output)
- 2
- >>> print(type(output))
- <class 'int'>
- """
- return rank_(input_x)
-
-
- def reshape(input, shape):
- """
- Rearranges the input Tensor based on the given shape.
-
- The 'shape' can only have one -1 at most, in which case it's inferred from the remaining dimensions and
- the number of elements in the input.
-
- Args:
- input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
- shape (Union[tuple[int], Tensor[int]]): Constructed by multiple
- integers, i.e., :math:`(y_1, y_2, ..., y_S)`. Only constant value is allowed.
-
- Returns:
- Tensor, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
-
- Raises:
- ValueError: Given a shape tuple, if it has several -1; or if the product
- of its elements is less than or equal to 0 or cannot be divided by the product
- of the input tensor shape; or if it does not match the input's array size.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
- >>> output = ops.reshape(input, (3, 2))
- >>> print(output)
- [[-0.1 0.3]
- [ 3.6 0.4]
- [ 0.5 -3.2]]
- """
- return reshape_(input, shape)
-
-
  def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
  r"""
  Reverses variable length slices.
@@ -1672,7 +1633,7 @@ def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
1672
1633
  [[4. 3. 2. 1.]
1673
1634
  [8. 7. 6. 5.]]
1674
1635
  """
1675
- return P.ReverseSequence(seq_dim=seq_dim, batch_dim=batch_dim)(x, seq_lengths)
1636
+ return _get_cache_prim(P.ReverseSequence)(seq_dim=seq_dim, batch_dim=batch_dim)(x, seq_lengths)
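`_get_cache_prim` reuses one primitive instance per attribute combination instead of constructing `P.ReverseSequence` on every functional call; a rough, hypothetical sketch of that caching idea (not MindSpore's actual implementation):

# Hypothetical illustration only: cache one primitive per (class, attributes) key
# so repeated functional-API calls avoid re-creating the operator.
_prim_cache = {}

def get_cache_prim_sketch(prim_cls, *args, **kwargs):
    key = (prim_cls, args, tuple(sorted(kwargs.items())))
    if key not in _prim_cache:
        _prim_cache[key] = prim_cls(*args, **kwargs)
    return _prim_cache[key]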
1676
1637
 
1677
1638
 
1678
1639
  def flatten(input, order='C', *, start_dim=1, end_dim=-1):
@@ -1696,7 +1657,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
1696
1657
  Raises:
1697
1658
  TypeError: If `input` is not a Tensor.
1698
1659
  TypeError: If `order` is not string type.
1699
- ValueError: If `order` is string type, but not 'C' or 'F'.
1660
+ ValueError: If `order` is string type, but not ``'C'`` or ``'F'``.
1700
1661
  TypeError: If `start_dim` or `end_dim` is not int.
1701
1662
  ValueError: If `start_dim` is greater than `end_dim` after canonicalized.
1702
1663
  ValueError: If `start_dim` or `end_dim` is not in range of [-input.dim, input.dim-1].
@@ -1741,7 +1702,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
1741
1702
  return reshape_(input, (-1,))
1742
1703
  perm = ops.make_range(0, x_rank)
1743
1704
  new_order = ops.tuple_reversed(perm)
1744
- input = _get_cache_prim(P.Transpose)()(input, new_order)
1705
+ input = transpose_(input, new_order)
1745
1706
 
1746
1707
  # Handle the default case.
1747
1708
  x_shape = shape_(input)
@@ -1749,7 +1710,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
1749
1710
  if start_dim == 1 and end_dim == -1:
1750
1711
  if x_rank in (0, 1):
1751
1712
  return reshape_(input, (-1,))
1752
- return _get_cache_prim(P.Flatten)()(input)
1713
+ return flatten_(input)
1753
1714
 
1754
1715
  # Check axis.
1755
1716
  start_dim = canonicalize_axis(start_dim, x_rank)
@@ -1771,341 +1732,6 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
1771
1732
  return reshape_(input, new_shape)
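A short usage sketch of the axis-collapsing behavior implemented above (standard `mindspore.ops.flatten` API):

import mindspore as ms
import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.ones((2, 3, 4, 5)), ms.float32)
print(ops.flatten(x).shape)                          # (2, 60): default start_dim=1, end_dim=-1
print(ops.flatten(x, start_dim=1, end_dim=2).shape)  # (2, 12, 5): only dims 1..2 are merged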
1772
1733
 
1773
1734
 
1774
- @constexpr
1775
- def _check_select_type_match(scalar, tensor_type, scalar_name, tensor_name):
1776
- if isinstance(scalar, int) and tensor_type != mstype.int32:
1777
- raise TypeError(f"For functional operator[select], the input[{scalar_name}] is int, "
1778
- f"then the input[{tensor_name}] must be a Tensor of int32.")
1779
- if isinstance(scalar, float) and tensor_type != mstype.float32:
1780
- raise TypeError(f"For functional operator[select], the input[{scalar_name}] is float, "
1781
- f"then the input[{tensor_name}] must be a Tensor of float32.")
1782
-
1783
-
1784
- @_primexpr
1785
- def _check_select_shape_match(input_shape, cond_shape, tensor_name):
1786
- if input_shape != cond_shape:
1787
- raise ValueError(f"For functional operator[select], the cond shape must be same as {tensor_name} shape.")
1788
-
1789
-
1790
- @constexpr
1791
- def _check_select_type(is_cond_tensor, is_x_scalar, is_y_scalar, is_x_tensor, is_y_tensor):
1792
- if not is_cond_tensor:
1793
- raise TypeError(f"For functional operator[select], the input[cond] must be a Tensor.")
1794
- if is_x_scalar and not is_y_tensor:
1795
- raise TypeError(f"For functional operator[select], the input[x] is int or float, "
1796
- f"then the input[y] must be a Tensor.")
1797
- if is_y_scalar and not is_x_tensor:
1798
- raise TypeError(f"For functional operator[select], the input[y] is int or float, "
1799
- f"then the input[x] must be a Tensor.")
1800
-
1801
-
1802
- @constexpr
1803
- def _check_select_shape_same(cond_shape, x_shape, y_shape):
1804
- """Check if input of select has same shape."""
1805
- return cond_shape == x_shape and x_shape == y_shape and cond_shape == y_shape
1806
-
1807
-
1808
- @constexpr
1809
- def get_max_value(x, y, z):
1810
- """Get the maximum value of x, y and z."""
1811
- if x >= y and x >= z:
1812
- return x
1813
- if y >= x and y >= z:
1814
- return y
1815
- return z
1816
-
1817
-
1818
- @constexpr
1819
- def _calc_broadcast_shape(cond_shape, x_shape, y_shape):
1820
- """Calculate broadcast shape for select"""
1821
- converted_shape = []
1822
- cond_reverse = cond_shape[::-1]
1823
- x_reverse = x_shape[::-1]
1824
- y_reverse = y_shape[::-1]
1825
- max_len = get_max_value(len(cond_reverse), len(x_reverse), len(y_reverse))
1826
- i = 0
1827
- while i < max_len:
1828
- cond_element = 1 if i >= len(cond_reverse) else cond_reverse[i]
1829
- x_element = 1 if i >= len(x_reverse) else x_reverse[i]
1830
- y_element = 1 if i >= len(y_reverse) else y_reverse[i]
1831
- broadcast_element = get_max_value(cond_element, x_element, y_element)
1832
- if cond_element not in (1, broadcast_element):
1833
- raise ValueError(f"For select, condition input can not broadcast at index {i}")
1834
- if x_element not in (1, broadcast_element):
1835
- raise ValueError(f"For select, x input can not broadcast at index {i}")
1836
- if y_element not in (1, broadcast_element):
1837
- raise ValueError(f"For select, y input can not broadcast at index {i}")
1838
- converted_shape.append(broadcast_element)
1839
- i = i + 1
1840
- converted_shape.reverse()
1841
- return tuple(converted_shape)
1842
-
1843
-
1844
- def select(cond, x, y):
1845
- r"""
1846
- The conditional tensor determines whether the corresponding element in the output must be
1847
- selected from `x` (if true) or `y` (if false) based on the value of each element.
1848
-
1849
- It can be defined as:
1850
-
1851
- .. math::
1852
- out_i = \begin{cases}
1853
- x_i, & \text{if } cond_i \\
1854
- y_i, & \text{otherwise}
1855
- \end{cases}
1856
-
1857
- Args:
1858
- cond (Tensor[bool]): The condition tensor, decides which element is chosen.
1859
- The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
1860
- x (Union[Tensor, int, float]): The first Tensor or number to be selected.
1861
- If x is a Tensor, the shape is or can be broadcast to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
1862
- If x is an int or a float, it will be cast to the type of int32 or float32,
1863
- and broadcast to the same shape as y. One of x and y must be a Tensor.
1864
- y (Union[Tensor, int, float]): The second Tensor or number to be selected.
1865
- If y is a Tensor, the shape is or can be broadcast to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
1866
- If y is an int or a float, it will be cast to the type of int32 or float32,
1867
- and broadcast to the same shape as x. One of x and y must be a Tensor.
1868
-
1869
- Returns:
1870
- Tensor, has the same shape as `cond`.
1871
-
1872
- Raises:
1873
- TypeError: If `x` or `y` is not a Tensor, int or float.
1874
- ValueError: The shapes of inputs can not be broadcast.
1875
-
1876
- Supported Platforms:
1877
- ``Ascend`` ``GPU`` ``CPU``
1878
-
1879
- Examples:
1880
- >>> import mindspore
1881
- >>> from mindspore import Tensor, ops
1882
- >>> # 1) Both inputs are Tensor
1883
- >>>
1884
- >>> cond = Tensor([True, False])
1885
- >>> x = Tensor([2,3], mindspore.float32)
1886
- >>> y = Tensor([1,2], mindspore.float32)
1887
- >>> output = ops.select(cond, x, y)
1888
- >>> print(output)
1889
- [2. 2.]
1890
- >>> # 2) y is a float
1891
- >>> cond = Tensor([True, False])
1892
- >>> x = Tensor([2,3], mindspore.float32)
1893
- >>> y = 2.0
1894
- >>> output = ops.select(cond, x, y)
1895
- >>> print(output)
1896
- [2. 2.]
1897
- """
1898
- is_x_scalar = isinstance(x, (int, float))
1899
- is_y_scalar = isinstance(y, (int, float))
1900
- is_x_tensor = isinstance(x, Tensor)
1901
- is_y_tensor = isinstance(y, Tensor)
1902
- is_cond_tensor = isinstance(cond, Tensor)
1903
- _check_select_type(is_cond_tensor, is_x_scalar, is_y_scalar, is_x_tensor, is_y_tensor)
1904
- input_x = x
1905
- input_y = y
1906
- if is_x_scalar:
1907
- _check_select_shape_match(y.shape, cond.shape, "y")
1908
- _check_select_type_match(x, y.dtype, "x", "y")
1909
- input_x = zeros_like_(y) + x
1910
- if isinstance(x, int):
1911
- input_x = cast_(input_x, mstype.int32)
1912
- else:
1913
- input_x = cast_(input_x, mstype.float32)
1914
-
1915
- if is_y_scalar:
1916
- _check_select_shape_match(x.shape, cond.shape, "x")
1917
- _check_select_type_match(y, x.dtype, "y", "x")
1918
- input_y = zeros_like_(x) + y
1919
- if isinstance(y, int):
1920
- input_y = cast_(input_y, mstype.int32)
1921
- else:
1922
- input_y = cast_(input_y, mstype.float32)
1923
-
1924
- if is_x_tensor and is_y_tensor and is_cond_tensor:
1925
- x_shape = ops.shape(x)
1926
- y_shape = ops.shape(y)
1927
- cond_shape = ops.shape(cond)
1928
- all_constant = ops.isconstant(cond_shape) and ops.isconstant(x_shape) and ops.isconstant(y_shape)
1929
- if all_constant and not _check_select_shape_same(cond_shape, x_shape, y_shape):
1930
- broadcast_shape = _calc_broadcast_shape(cond_shape, x_shape, y_shape)
1931
- new_cond = ops.broadcast_to(cond, broadcast_shape)
1932
- new_x = ops.broadcast_to(x, broadcast_shape)
1933
- new_y = ops.broadcast_to(y, broadcast_shape)
1934
- return tensor_select_(new_cond, new_x, new_y)
1935
-
1936
- return tensor_select_(cond, input_x, input_y)
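The three-way broadcast computed by `_calc_broadcast_shape` is ordinary right-aligned NumPy-style broadcasting; a framework-free restatement for reference (hypothetical helper name):

# Hypothetical restatement: pad shapes on the left with 1s, then every axis
# must be either 1 or the maximum of the three sizes at that position.
def broadcast3(*shapes):
    n = max(len(s) for s in shapes)
    padded = [(1,) * (n - len(s)) + tuple(s) for s in shapes]
    out = []
    for dims in zip(*padded):
        m = max(dims)
        if any(d not in (1, m) for d in dims):
            raise ValueError(f"shapes not broadcastable: {dims}")
        out.append(m)
    return tuple(out)

assert broadcast3((2, 1), (1, 3), (3,)) == (2, 3)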
1937
-
1938
-
1939
- def strided_slice(input_x,
1940
- begin,
1941
- end,
1942
- strides,
1943
- begin_mask=0,
1944
- end_mask=0,
1945
- ellipsis_mask=0,
1946
- new_axis_mask=0,
1947
- shrink_axis_mask=0):
1948
- r"""
1949
- Extracts a strided slice of a Tensor based on `begin/end` index and `strides`.
1950
-
1951
- This operation extracts a fragment of size (end-begin)/strides from the given 'input_tensor'.
1952
- Starting from the beginning position, the fragment continues adding strides to the index until
1953
- all dimensions are not less than the ending position.
1954
-
1955
- Note:
1956
- - `begin` , `end` and `strides` must have the same shape.
1957
- - `begin` , `end` and `strides` are all 1-D Tensor, and their shape size
1958
- must not greater than the dim of `input_x`.
1959
-
1960
- During the slicing process, the fragment (end-begin)/strides are extracted from each dimension.
1961
-
1962
- Example: For Tensor `input_x` with shape :math:`(5, 6, 7)`,
1963
- set `begin`, `end` and `strides` to (1, 3, 2), (3, 5, 6),
1964
- (1, 1, 2) respectively, then elements from index 1 to 3 are extracted for dim 0, index 3 to 5
- are extracted for dim 1 and index 2 to 6 with a stride of 2 are extracted for dim 2; this
1966
- process is equivalent to a pythonic slice `input_x[1:3, 3:5, 2:6:2]`.
1967
-
1968
- If the length of `begin`, `end` and `strides` is smaller than the dim of `input_x`,
- then all elements are extracted from the missing dims; it behaves as if the missing entries
- of `begin` are filled with zeros, of `end` with the size of that dim, and of `strides` with ones.
1971
-
1972
- Example: For Tensor `input_x` with shape :math:`(5, 6, 7)`,
1973
- set `begin`, `end` and `strides` to (1, 3),
1974
- (3, 5), (1, 1) respectively, then elements from index 1 to 3 are extracted
- for dim 0, index 3 to 5 are extracted for dim 1 and all elements are extracted
1976
- for dim 2, this process is equivalent to a pythonic slice `input_x[1:3, 3:5, 0:7]`.
1977
-
1978
- Here's how a mask works:
1979
- For each specific mask, it will be converted to a binary representation internally, and then
1980
- reverse the result to start the calculation. For Tensor `input_x` with
1981
- shape :math:`(5, 6, 7)`. Given mask value of 3 which
1982
- can be represented as 0b011. Reversing that, we get 0b110, which implies that the first and second dim of the
- original Tensor will be affected by this mask. See examples below; for simplicity, all masks mentioned
- below are in their reversed binary form:
1985
-
1986
- - `begin_mask` and `end_mask`
1987
-
1988
- If the ith bit of `begin_mask` is 1, `begin[i]` is ignored and the fullest
1989
- possible range in that dimension is used instead. `end_mask` is analogous,
1990
- except with the end range. For Tensor `input_x` with shape :math:`(5, 6, 7, 8)`, if `begin_mask`
1991
- is 0b110, `end_mask` is 0b011, the slice `input_x[0:3, 0:6, 2:7:2]` is produced.
1992
-
1993
- - `ellipsis_mask`
1994
-
1995
- If the ith bit of `ellipsis_mask` is 1, as many unspecified dimensions as needed
1996
- will be inserted between other dimensions. Only one non-zero bit is allowed
1997
- in `ellipsis_mask`. For Tensor `input_x` with shape :math:`(5, 6, 7, 8)`, `input_x[2:,...,:6]`
1998
- is equivalent to `input_x[2:5,:,:,0:6]` , `input_x[2:,...]` is equivalent
1999
- to `input_x[2:5,:,:,:]`.
2000
-
2001
- - `new_axis_mask`
2002
-
2003
- If the ith bit of `new_axis_mask` is 1, `begin`, `end` and `strides` are
2004
- ignored and a new length 1 dimension is added at the specified position
2005
- in the output Tensor. For Tensor `input_x` with shape :math:`(5, 6, 7)`, if `new_axis_mask`
2006
- is 0b110, a new dim is added to the second dim, which will produce
2007
- a Tensor with shape :math:`(5, 1, 6, 7)`.
2008
-
2009
- - `shrink_axis_mask`
2010
-
2011
- If the ith bit of `shrink_axis_mask` is 1, `begin`, `end` and `strides`
2012
- are ignored and dimension i will be shrunk to 0.
2013
- For Tensor `input_x` with shape :math:`(5, 6, 7)`,
2014
- if `shrink_axis_mask` is 0b010, it is equivalent to slice `x[:, 5, :]`
2015
- and results in an output shape of :math:`(5, 7)`.
2016
-
2017
- Note:
2018
- `new_axis_mask` and `shrink_axis_mask` are not recommended to
2019
- use at the same time, it might incur unexpected result.
2020
-
2021
- Args:
2022
- input_x (Tensor): The input Tensor to be extracted from.
2023
- begin (tuple[int]): A tuple which represents the location where to start.
2024
- end (tuple[int]): A tuple which represents the maximum location where to end.
- strides (tuple[int]): A tuple which represents the stride that is repeatedly added
- to the index until the maximum location is reached. Only int is allowed; it can be negative,
2027
- which results in reversed slicing.
2028
- begin_mask (int, optional): Starting index of the slice. Default: ``0`` .
2029
- end_mask (int, optional): Ending index of the slice. Default: ``0`` .
2030
- ellipsis_mask (int, optional): An int mask, ignore slicing operation when set to 1. Default: ``0`` .
2031
- new_axis_mask (int, optional): An int mask for adding new dims. Default: ``0`` .
2032
- shrink_axis_mask (int, optional): An int mask for shrinking dims. Default: ``0`` .
2033
-
2034
- Returns:
2035
- Tensor, return the extracts a strided slice of a Tensor based on `begin/end` index and `strides`.
2036
-
2037
- Raises:
2038
- TypeError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or
2039
- `shrink_axis_mask` is not an int.
2040
- TypeError: If `begin`, `end` or `strides` is not tuple[int].
2041
- ValueError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or
2042
- `shrink_axis_mask` is less than 0.
2043
- ValueError: If `begin`, `end` and `strides` have different shapes.
2044
-
2045
- Supported Platforms:
2046
- ``Ascend`` ``GPU`` ``CPU``
2047
-
2048
- Examples:
2049
- >>> import mindspore
2050
- >>> from mindspore import Tensor, ops
2051
- >>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
2052
- ... [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
2053
- >>> output = ops.strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1))
2054
- >>> # Take this " output = strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1)) " as an example,
2055
- >>> # start = [1, 0, 2] , end = [3, 1, 3], strides = [1, 1, 1], Find a segment of (start, end),
2056
- >>> # note that end is an open interval
2057
- >>> # To facilitate understanding, this operator can be divided into three steps:
2058
- >>> # Step 1: Calculation of the first dimension:
2059
- >>> # start = 1, end = 3, strides = 1, So can take 1st, 2nd rows, and then gets the final output at this time.
2060
- >>> # output_1th =
2061
- >>> # [
2062
- >>> # [
2063
- >>> # [3,3,3]
2064
- >>> # [4,4,4]
2065
- >>> # ]
2066
- >>> # [
2067
- >>> # [5,5,5]
2068
- >>> # [6,6,6]
2069
- >>> # ]
2070
- >>> # ]
2071
- >>> # Step 2: Calculation of the second dimension
2072
- >>> # 2nd dimension, start = 0, end = 1, strides = 1. So only 0th rows
2073
- >>> # can be taken, and the output at this time.
2074
- >>> # output_2nd =
2075
- >>> # [
2076
- >>> # [
2077
- >>> # [3,3,3]
2078
- >>> # ]
2079
- >>> # [
2080
- >>> # [5,5,5]
2081
- >>> # ]
2082
- >>> # ]
2083
- >>> # Step 3: Calculation of the third dimension
2084
- >>> # 3rd dimension, start = 2, end = 3, strides = 1. So can take the 2nd col,
2085
- >>> # and you get the final output at this time.
2086
- >>> # output_3ed =
2087
- >>> # [
2088
- >>> # [
2089
- >>> # [3]
2090
- >>> # ]
2091
- >>> # [
2092
- >>> # [5]
2093
- >>> # ]
2094
- >>> # ]
2095
- >>> # The final output after finishing is:
2096
- >>> print(output)
2097
- [[[3.]]
2098
- [[5.]]]
2099
- >>> # another example:
- >>> output = ops.strided_slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
2101
- >>> print(output)
2102
- [[[3. 3. 3.]]]
2103
- """
2104
- strided_slice_ = _get_cache_prim(P.StridedSlice)(
2105
- begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask)
2106
- return strided_slice_(input_x, begin, end, strides)
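The mask bits are easiest to see in isolation; a tiny framework-free sketch of how a set `begin_mask` bit overrides the corresponding `begin` entry (hypothetical helper, mirroring the bit semantics described in the docstring):

# Hypothetical illustration: if bit i of begin_mask is 1, begin[i] is ignored and
# the slice starts at the beginning of dimension i.
def apply_begin_mask(begin, begin_mask):
    return tuple(0 if (begin_mask >> i) & 1 else b for i, b in enumerate(begin))

assert apply_begin_mask((1, 3, 2), 0b110) == (1, 0, 0)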
2107
-
2108
-
2109
1735
  def slice(input_x, begin, size):
2110
1736
  r"""
2111
1737
  Slices a tensor in the specified shape.
@@ -2160,20 +1786,6 @@ def slice(input_x, begin, size):
2160
1786
  return tensor_slice(input_x, begin, size)
2161
1787
 
2162
1788
 
2163
- def concat(tensors, axis=0):
2164
- """
2165
- Alias for :func:`mindspore.ops.cat()`.
2166
-
2167
- Tutorial Examples:
2168
- - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/r2.2/beginner/tensor.html#tensor-operation>`_
2169
- - `Vision Transformer Image Classification - Building ViT as a whole
2170
- <https://mindspore.cn/tutorials/application/en/r2.2/cv/vit.html#building-vit-as-a-whole>`_
2171
- - `Sentiment Classification Implemented by RNN - Dense
2172
- <https://mindspore.cn/tutorials/application/en/r2.2/nlp/sentiment_analysis.html#dense>`_
2173
- """
2174
- return cat(tensors, axis)
2175
-
2176
-
2177
1789
  def stack(tensors, axis=0):
2178
1790
  r"""
2179
1791
  Stacks a list of tensors in specified axis.
@@ -2284,45 +1896,6 @@ def unbind(input, dim=0):
2284
1896
  return _unstack(input)
2285
1897
 
2286
1898
 
2287
- def expand_dims(input_x, axis):
2288
- """
2289
- Adds an additional dimension to `input_x` at the given axis, the dimension
2290
- of `input_x` should be greater than or equal to 1.
2291
-
2292
- Note:
2293
- If the specified axis is a negative number, the index is counted
2294
- backward from the end and starts at 1.
2295
-
2296
- Args:
2297
- input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
2298
- axis (int): Specifies the dimension index at which to expand
2299
- the shape of `input_x`. The value of axis must be in the range
2300
- `[-input_x.ndim-1, input_x.ndim]`. Only constant value is allowed.
2301
-
2302
- Returns:
2303
- Tensor, the shape of tensor is :math:`(1, x_1, x_2, ..., x_R)` if the
2304
- value of `axis` is 0. It has the same data type as `input_x`.
2305
-
2306
- Raises:
2307
- TypeError: If `axis` is not an int.
2308
- ValueError: If `axis` is not in the valid range :math:`[-a.ndim-1, a.ndim]`.
2309
-
2310
- Supported Platforms:
2311
- ``Ascend`` ``GPU`` ``CPU``
2312
-
2313
- Examples:
2314
- >>> import mindspore
2315
- >>> import numpy as np
2316
- >>> from mindspore import Tensor, ops
2317
- >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
2318
- >>> output = ops.expand_dims(input_tensor, 0)
2319
- >>> print(output)
2320
- [[[2. 2.]
2321
- [2. 2.]]]
2322
- """
2323
- return expand_dims_(input_x, axis)
2324
-
2325
-
2326
1899
  def unsqueeze(input, dim):
2327
1900
  """
2328
1901
  Adds an additional dimension to `input` at the given dim.
@@ -2354,7 +1927,7 @@ def unsqueeze(input, dim):
2354
1927
  [[[2. 2.]
2355
1928
  [2. 2.]]]
2356
1929
  """
2357
- return expand_dims_(input, dim)
1930
+ return expand_dims(input, dim)
2358
1931
 
2359
1932
 
2360
1933
  def squeeze(input, axis=None):
@@ -2411,57 +1984,6 @@ def squeeze(input, axis=None):
2411
1984
  return squeeze_(input)
2412
1985
 
2413
1986
 
2414
- def transpose(input, input_perm):
2415
- """
2416
- Permutes the dimensions of the input tensor according to input permutation.
2417
-
2418
- For a 1-D array this has no effect, as a transposed vector is simply the same vector.
2419
- To convert a 1-D array into a 2-D column vector, please refer to the class mindspore.ops.ExpandDims.
2420
- For a 2-D array, this is a standard matrix transpose. For an n-D array, if axes are given,
2421
- their order indicates how the axes are permuted (see Examples).
2422
- If axes are not provided and a.shape is :math:`(i[0], i[1], ... i[n-2], i[n-1])`,
2423
- then a.transpose().shape is :math:`(i[n-1], i[n-2], ... i[1], i[0])`.
2424
-
2425
- Note:
2426
- On GPU and CPU, if the value of `input_perm` is negative, its actual value is `input_perm[i] + rank(input)`.
2427
- Negative value of `input_perm` is not supported on Ascend.
2428
-
2429
- Args:
2430
- input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
2431
- input_perm (tuple[int]): The permutation to be converted. The elements in `input_perm` are composed of
2432
- the indexes of each dimension of `input`. The length of `input_perm` and the shape of `input` must be
2433
- the same. Only constant value is allowed. Must be in the range [-rank(input), rank(input)).
2434
-
2435
- Returns:
2436
- Tensor, the type of output tensor is the same as `input` and the shape of output tensor is decided by the
2437
- shape of `input` and the value of `input_perm`.
2438
-
2439
- Raises:
2440
- TypeError: If `input_perm` is not a tuple.
2441
- ValueError: If length of shape of `input` is not equal to length of shape of `input_perm`.
2442
- ValueError: If the same element exists in `input_perm`.
2443
-
2444
- Supported Platforms:
2445
- ``Ascend`` ``GPU`` ``CPU``
2446
-
2447
- Examples:
2448
- >>> import mindspore
2449
- >>> import numpy as np
2450
- >>> from mindspore import Tensor, ops
2451
- >>> input = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
2452
- >>> input_perm = (0, 2, 1)
2453
- >>> output = ops.transpose(input, input_perm)
2454
- >>> print(output)
2455
- [[[ 1. 4.]
2456
- [ 2. 5.]
2457
- [ 3. 6.]]
2458
- [[ 7. 10.]
2459
- [ 8. 11.]
2460
- [ 9. 12.]]]
2461
- """
2462
- return transpose_(input, input_perm)
2463
-
2464
-
2465
1987
  def scatter_mul(input_x, indices, updates):
2466
1988
  r"""
2467
1989
  Using given values to update tensor value through the mul operation, along with the input indices.
@@ -2792,111 +2314,6 @@ def scatter_div(input_x, indices, updates):
2792
2314
  return scatter_div_(input_x, indices, updates)
2793
2315
 
2794
2316
 
2795
- def scatter_nd(indices, updates, shape):
2796
- r"""
2797
- Scatters a tensor into a new tensor depending on the specified indices.
2798
-
2799
- Creates an empty tensor with the given `shape`, and set values by scattering the update tensor
2800
- depending on indices. The empty tensor has rank :math:`P` and `indices` has rank :math:`Q`.
2801
-
2802
- The `shape` is :math:`(s_0, s_1, ..., s_{P-1})`, where :math:`P \ge 1`.
2803
-
2804
- `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)`, where :math:`Q \ge 2` and :math:`N \le P`.
2805
-
2806
- The last dimension of `indices` (with length :math:`N` ) indicates slices along the :math:`N` th dimension of the
2807
- empty tensor.
2808
-
2809
- `updates` is a tensor of rank :math:`Q-1+P-N`, and
2810
- its shape is :math:`(i_0, i_1, ..., i_{Q-2}, s_N, s_{N+1}, ..., s_{P-1})`.
2811
-
2812
- If `indices` contains duplicates, the duplicate `updates` are summed.
2813
-
2814
- The following figure shows the calculation process of inserting two new value matrices into the first dimension
2815
- with rank-3:
2816
-
2817
- .. image:: ScatterNd.png
2818
-
2819
- Args:
2820
- indices (Tensor): Define the index of scattering in the new tensor with int32 or int64 data type.
2821
- The rank of `indices` must be at least 2 and `indices.shape[-1] <= len(shape)`.
2822
- updates (Tensor): Define the source Tensor to be updated.
2823
- It has shape `indices.shape[:-1] + shape[indices.shape[-1]:]`.
2824
- shape (tuple[int]): Define the shape of the output tensor, has the same data type as indices.
2825
- `shape` can not be empty, and the elements in `shape` must be greater than or equal to 1.
2826
-
2827
- Returns:
2828
- Tensor, the new tensor, has the same type as `update` and the same shape as `shape`.
2829
-
2830
- Raises:
2831
- TypeError: If `shape` is not a tuple.
2832
- ValueError: If any element of `shape` is less than 1.
2833
-
2834
- Supported Platforms:
2835
- ``Ascend`` ``GPU`` ``CPU``
2836
-
2837
- Examples:
2838
- >>> import mindspore
2839
- >>> import numpy as np
2840
- >>> from mindspore import Tensor, ops
2841
- >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
2842
- >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2],
2843
- ... [3, 3, 3, 3], [4, 4, 4, 4]],
2844
- ... [[1, 1, 1, 1], [2, 2, 2, 2],
2845
- ... [3, 3, 3, 3], [4, 4, 4, 4]]]), mindspore.float32)
2846
- >>> shape = (4, 4, 4)
2847
- >>> output = ops.scatter_nd(indices, updates, shape)
2848
- >>> print(output)
2849
- [[[1. 1. 1. 1.]
2850
- [2. 2. 2. 2.]
2851
- [3. 3. 3. 3.]
2852
- [4. 4. 4. 4.]]
2853
- [[0. 0. 0. 0.]
2854
- [0. 0. 0. 0.]
2855
- [0. 0. 0. 0.]
2856
- [0. 0. 0. 0.]]
2857
- [[1. 1. 1. 1.]
2858
- [2. 2. 2. 2.]
2859
- [3. 3. 3. 3.]
2860
- [4. 4. 4. 4.]]
2861
- [[0. 0. 0. 0.]
2862
- [0. 0. 0. 0.]
2863
- [0. 0. 0. 0.]
2864
- [0. 0. 0. 0.]]]
2865
- >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
2866
- >>> updates = Tensor(np.array([3.2, 1.1]), mindspore.float32)
2867
- >>> shape = (3, 3)
2868
- >>> output = ops.scatter_nd(indices, updates, shape)
2869
- >>> # In order to facilitate understanding, explain the operator pseudo-operation process step by step:
2870
- >>> # Step 1: Generate an empty Tensor of the specified shape according to the shape
2871
- >>> # [
2872
- >>> # [0. 0. 0.]
2873
- >>> # [0. 0. 0.]
2874
- >>> # [0. 0. 0.]
2875
- >>> # ]
2876
- >>> # Step 2: Modify the data at the specified location according to the indicators
2877
- >>> # 0th row of indices is [0, 1], 0th row of updates is 3.2.
2878
- >>> # means that the empty tensor in the 0th row and 1st col set to 3.2
2879
- >>> # [
2880
- >>> # [0. 3.2. 0.]
2881
- >>> # [0. 0. 0.]
2882
- >>> # [0. 0. 0.]
2883
- >>> # ]
2884
- >>> # 1th row of indices is [1, 1], 1th row of updates is 1.1.
2885
- >>> # means that the empty tensor in the 1th row and 1st col set to 1.1
2886
- >>> # [
2887
- >>> # [0. 3.2. 0.]
2888
- >>> # [0. 1.1 0.]
2889
- >>> # [0. 0. 0.]
2890
- >>> # ]
2891
- >>> # The final result is as follows:
2892
- >>> print(output)
2893
- [[0. 3.2 0.]
2894
- [0. 1.1 0.]
2895
- [0. 0. 0.]]
2896
- """
2897
- return scatter_nd_(indices, updates, shape)
2898
-
2899
-
2900
2317
  def scatter_update(input_x, indices, updates):
2901
2318
  r"""
2902
2319
  Updates tensor values by using input indices and value.
@@ -2946,8 +2363,7 @@ def scatter_update(input_x, indices, updates):
2946
2363
  [[2. 1.2 1.]
2947
2364
  [3. 1.2 1.]]
2948
2365
  """
2949
- scatter_update_inner = _get_cache_prim(P.ScatterUpdate)()
2950
- return scatter_update_inner(input_x, indices, updates)
2366
+ return scatter_update_(input_x, indices, updates)
2951
2367
 
2952
2368
 
2953
2369
  def scatter_nd_add(input_x, indices, updates, use_locking=False):
@@ -3414,8 +2830,8 @@ def sort(input_x, axis=-1, descending=False):
3414
2830
  are sorted in descending order, or else sorted in ascending order. Default: ``False`` .
3415
2831
 
3416
2832
  .. warning::
3417
- Currently, the data types of Float16, UInt8, Int8, Int16, Int32, Int64 are well supported.
3418
- If use Float32, it may cause loss of accuracy.
2833
+ Currently, the data types of float16, uint8, int8, int16, int32, int64 are well supported.
2834
+ If float32 is used, it may cause loss of accuracy.
3419
2835
 
3420
2836
  Returns:
3421
2837
 
@@ -3452,129 +2868,72 @@ def sort(input_x, axis=-1, descending=False):
3452
2868
  return _sort(input_x)
3453
2869
 
3454
2870
 
3455
- def argsort(input, axis=-1, descending=False):
2871
+ def sort_ext(input, *, dim=-1, descending=False, stable=False):
3456
2872
  r"""
3457
- Sorts the input tensor along the given dimension in specified order and return the sorted indices.
2873
+ Sorts the elements of the input tensor along the given dimension in the specified order.
2874
+
2875
+ .. warning::
2876
+ Currently, the data types of float16, uint8, int8, int16, int32, int64 are well supported.
2877
+ If float32 is used, it may cause loss of accuracy.
3458
2878
 
3459
2879
  Args:
3460
2880
  input(Tensor): The input tensor to sort.
3461
- axis (int): The axis to sort along. Default: ``-1`` , means the last dimension.
3462
- The Ascend backend only supports sorting the last dimension.
3463
- descending (bool): The sort order. If `descending` is True then the elements
3464
- are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
3465
-
3466
- Returns:
3467
- Tensor, the indices of sorted input tensor. Data type is int32.
3468
-
3469
- Supported Platforms:
3470
- ``Ascend`` ``GPU`` ``CPU``
3471
-
3472
- Examples:
3473
- >>> import mindspore
3474
- >>> import numpy as np
3475
- >>> from mindspore import Tensor, ops
3476
- >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
3477
- >>> sort = ops.argsort(x)
3478
- >>> print(sort)
3479
- [[2 1 0]
3480
- [2 0 1]
3481
- [0 1 2]]
3482
- """
3483
- _sort = _get_cache_prim(P.Sort)(axis, descending)
3484
- _, arg_sort = _sort(input)
3485
- return arg_sort
3486
-
3487
-
3488
- def gather(input_params, input_indices, axis, batch_dims=0):
3489
- r"""
3490
- Returns the slice of the input tensor corresponding to the elements of `input_indices` on the specified `axis`.
3491
-
3492
- The following figure shows the calculation process of Gather commonly:
3493
-
3494
- .. image:: Gather.png
3495
-
3496
- where params represents the input `input_params`, and indices represents the index to be sliced `input_indices`.
3497
-
3498
- .. note::
3499
- 1. The value of input_indices must be in the range of `[0, input_param.shape[axis])`.
3500
- On CPU and GPU, an error is raised if an out of bound indice is found. On Ascend, the results may be
3501
- undefined.
3502
-
3503
- 2. The data type of input_params cannot be
3504
- `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ on Ascend
3505
- platform currently.
2881
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
3506
2882
 
3507
- Args:
3508
- input_params (Tensor): The original Tensor. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
3509
- input_indices (Tensor): Index tensor to be sliced, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
3510
- Specifies the indices of elements of the original Tensor. The data type can be int32 or int64.
3511
- axis (Union(int, Tensor[int])): Specifies the dimension index to gather indices.
3512
- It must be greater than or equal to `batch_dims`.
3513
- When `axis` is a Tensor, the size must be 1.
3514
- batch_dims (int): Specifies the number of batch dimensions. It must be less than or euqal to the rank
3515
- of `input_indices`. Default: ``0`` .
2883
+ Keyword Args:
2884
+ dim (int, optional): The dimension to sort along. Default: ``-1``, means the last dimension.
2885
+ descending (bool, optional): Controls the sort order. If `descending` is True, the elements
2886
+ are sorted in descending order, or else sorted in ascending order. Default: ``False`` .
2887
+ stable (bool, optional): Controls the sort stability. If `stable` is True, the sorting routine
+ becomes stable, preserving the order of equivalent elements. Default: ``False`` .
3516
2889
 
3517
2890
  Returns:
3518
- Tensor, the shape of tensor is
3519
- :math:`input\_params.shape[:axis] + input\_indices.shape[batch\_dims:] + input\_params.shape[axis + 1:]`.
2891
+ - y1, a tensor whose values are the sorted values, with the same shape and data type as input.
2892
+ - y2, a tensor that consists of the indices of the elements in the original input tensor.
2893
+ Data type is int64.
3520
2894
 
3521
2895
  Raises:
3522
- TypeError: If `axis` is not an int or Tensor.
3523
- ValueError: If `axis` is a Tensor and its size is not 1.
3524
- TypeError: If `input_params` is not a tensor.
3525
- TypeError: If `input_indices` is not a tensor of type int.
3526
- RuntimeError: If `input_indices` is out of range `[0, input_param.shape[axis])` on CPU or GPU.
2896
+ TypeError: If `dim` is not an int.
2897
+ TypeError: If `descending` is not a bool.
2898
+ TypeError: If the dtype of `input` is not one of: float16, float32, uint8, int8, int16, int32, int64, bfloat16.
2899
+ TypeError: If `stable` is not a bool.
2900
+ ValueError: If `dim` is not in range of [-input.ndim, input.ndim).
3527
2901
 
3528
2902
  Supported Platforms:
3529
- ``Ascend`` ``GPU`` ``CPU``
2903
+ ``Ascend``
3530
2904
 
3531
- Examples:
3532
- >>> import mindspore
3533
- >>> import numpy as np
3534
- >>> from mindspore import Tensor, ops
3535
- >>> # case1: input_indices is a Tensor with shape (5, ).
3536
- >>> input_params = Tensor(np.array([1, 2, 3, 4, 5, 6, 7]), mindspore.float32)
3537
- >>> input_indices = Tensor(np.array([0, 2, 4, 2, 6]), mindspore.int32)
3538
- >>> axis = 0
3539
- >>> output = ops.gather(input_params, input_indices, axis)
3540
- >>> print(output)
3541
- [1. 3. 5. 3. 7.]
3542
- >>> # case2: input_indices is a Tensor with shape (2, 2). When the input_params has one dimension,
3543
- >>> # the output shape is equal to the input_indices shape.
3544
- >>> input_indices = Tensor(np.array([[0, 2], [2, 6]]), mindspore.int32)
3545
- >>> axis = 0
3546
- >>> output = ops.gather(input_params, input_indices, axis)
3547
- >>> print(output)
3548
- [[1. 3.]
3549
- [3. 7.]]
3550
- >>> # case3: input_indices is a Tensor with shape (2, ) and
3551
- >>> # input_params is a Tensor with shape (3, 4) and axis is 0.
3552
- >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
3553
- >>> input_indices = Tensor(np.array([0, 2]), mindspore.int32)
3554
- >>> axis = 0
3555
- >>> output = ops.gather(input_params, input_indices, axis)
3556
- >>> print(output)
3557
- [[ 1. 2. 3. 4.]
3558
- [ 9. 10. 11. 12.]]
3559
- >>> # case4: input_indices is a Tensor with shape (2, ) and
3560
- >>> # input_params is a Tensor with shape (3, 4) and axis is 1, batch_dims is 1.
3561
- >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
3562
- >>> input_indices = Tensor(np.array([0, 2, 1]), mindspore.int32)
3563
- >>> axis = 1
3564
- >>> batch_dims = 1
3565
- >>> output = ops.gather(input_params, input_indices, axis, batch_dims)
2905
+ Examples:
2906
+ >>> import mindspore
2907
+ >>> import numpy as np
2908
+ >>> from mindspore import Tensor, ops
2909
+ >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
2910
+ >>> output = ops.function.array_func.sort_ext(x)
2911
+ >>> # The output below is based on the Ascend platform.
3566
2912
  >>> print(output)
3567
- [ 1. 7. 10.]
2913
+ (Tensor(shape=[3, 3], dtype=Float16, value=
2914
+ [[ 1.0000e+00, 2.0000e+00, 8.0000e+00],
2915
+ [ 3.0000e+00, 5.0000e+00, 9.0000e+00],
2916
+ [ 4.0000e+00, 6.0000e+00, 7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int64, value=
2917
+ [[2, 1, 0],
2918
+ [2, 0, 1],
2919
+ [0, 1, 2]]))
3568
2920
  """
3569
- _gather = _get_cache_prim(P.Gather)(batch_dims)
3570
- return _gather(input_params, input_indices, axis)
2921
+ return sort_ext_(input, dim, descending, stable)
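What `stable=True` buys is the usual stable-sort guarantee; a framework-free reminder using Python's `sorted`, which is always stable:

# With a stable sort, elements that compare equal keep their original order.
pairs = [(3, "a"), (1, "b"), (3, "c"), (1, "d")]
print(sorted(pairs, key=lambda p: p[0]))
# [(1, 'b'), (1, 'd'), (3, 'a'), (3, 'c')]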
3571
2922
 
3572
2923
 
3573
- def gather_d(x, dim, index):
3574
- """
3575
- Gathers elements along an axis specified by dim.
2924
+ def argsort(input, axis=-1, descending=False):
2925
+ r"""
2926
+ Sorts the input tensor along the given dimension in specified order and return the sorted indices.
2927
+
2928
+ Args:
2929
+ input(Tensor): The input tensor to sort.
2930
+ axis (int): The axis to sort along. Default: ``-1`` , means the last dimension.
2931
+ The Ascend backend only supports sorting the last dimension.
2932
+ descending (bool): The sort order. If `descending` is True then the elements
2933
+ are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
3576
2934
 
3577
- Refer to :func:`mindspore.ops.gather_elements` for more detail.
2935
+ Returns:
2936
+ Tensor, the indices of sorted input tensor. Data type is int32.
3578
2937
 
3579
2938
  Supported Platforms:
3580
2939
  ``Ascend`` ``GPU`` ``CPU``
@@ -3583,15 +2942,16 @@ def gather_d(x, dim, index):
3583
2942
  >>> import mindspore
3584
2943
  >>> import numpy as np
3585
2944
  >>> from mindspore import Tensor, ops
3586
- >>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
3587
- >>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
3588
- >>> dim = 1
3589
- >>> output = ops.gather_d(x, dim, index)
3590
- >>> print(output)
3591
- [[1 1]
3592
- [4 3]]
2945
+ >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
2946
+ >>> sort = ops.argsort(x)
2947
+ >>> print(sort)
2948
+ [[2 1 0]
2949
+ [2 0 1]
2950
+ [0 1 2]]
3593
2951
  """
3594
- return gather_d_(x, dim, index)
2952
+ _sort = _get_cache_prim(P.Sort)(axis, descending)
2953
+ _, arg_sort = _sort(input)
2954
+ return arg_sort
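As a sanity check on the relationship between the two functions, gathering with the indices from `argsort` reproduces the sorted values (standard ops API):

import mindspore as ms
import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.array([[8, 2, 1], [5, 9, 3]]), ms.float16)
idx = ops.argsort(x)                    # int32 indices that sort each row
print(ops.gather_elements(x, 1, idx))   # same values as ops.sort(x)[0]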
3595
2955
 
3596
2956
 
3597
2957
  def gather_elements(input, dim, index):
@@ -3608,26 +2968,29 @@ def gather_elements(input, dim, index):
3608
2968
 
3609
2969
  output[i][j][k] = x[i][j][index[i][j][k]] # if dim == 2
3610
2970
 
3611
- `input` and `index` have the same length of dimensions, and all dimensions except `dim` have the same size.
3612
- If `dim` = i, `input` is an n-D tensor with shape :math:`(z_0, z_1, ..., z_i, ..., z_{n-1})`,
3613
- the `index` must be an n-D tensor with shape :math:`(z_0, z_1, ..., y, ..., z_{n-1})`
3614
- where `y`>=1 and the output will have the same shape with `index`.
2971
+ `input` and `index` have the same number of dimensions, and `index.shape[axis] <= input.shape[axis]`
+ holds for every axis of `input` other than `dim`.
2973
+
2974
+ .. warning::
2975
+ On Ascend, the behavior is unpredictable in the following cases:
2976
+
2977
+ - the value of `index` is not in the range `[-input.shape[dim], input.shape[dim])` in forward;
2978
+ - the value of `index` is not in the range `[0, input.shape[dim])` in backward.
3615
2979
 
3616
2980
  Args:
3617
2981
  input (Tensor): The input tensor.
3618
- dim (int): The axis along which to index. It must be int32 or int64. The value range is [-input.ndim,
3619
- input.ndim).
2982
+ dim (int): The axis along which to index. It must be int32 or int64. The value range is `[-input.ndim,
2983
+ input.ndim)`.
3620
2984
  index (Tensor): The indices of elements to gather. It can be one of the following data types:
3621
- int32, int64. The value range of each index element is [-input.shape(dim), input.shape(dim)).
2985
+ int32, int64. The value range of each index element is `[-input.shape(dim), input.shape(dim))`.
3622
2986
 
3623
2987
  Returns:
3624
- Tensor, has the same shape as index tensor, the shape of tensor is :math:`(z_0, z_1, ..., y, ..., z_{n-1})`,
3625
- and has the same data type with `input`.
2988
+ Tensor, has the same shape as `index` and has the same data type with `input`.
3626
2989
 
3627
2990
  Raises:
3628
2991
  TypeError: If dtype of `dim` or `index` is neither int32 nor int64.
3629
2992
  ValueError: If length of shape of `input` is not equal to length of shape of `index`.
3630
- ValueError: If the size of the dimension except `dim` is not equal between `input` and `index`.
2993
+ ValueError: If, in any dimension other than `dim`, the size of `index` is greater than the size of `input`.
3631
2994
  ValueError: If the value of `dim` is not in the expected range.
3632
2995
 
3633
2996
  Supported Platforms:
@@ -3648,48 +3011,6 @@ def gather_elements(input, dim, index):
3648
3011
  return gather_d_(input, dim, index)
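The indexing formula in the docstring, replayed as an explicit NumPy loop for the `dim == 1` case (NumPy used purely for illustration):

import numpy as np

x = np.array([[1, 2], [3, 4]])
index = np.array([[0, 0], [1, 0]])
out = np.empty_like(index)
for i in range(index.shape[0]):
    for j in range(index.shape[1]):
        out[i][j] = x[i][index[i][j]]   # output[i][j] = input[i][index[i][j]] when dim == 1
print(out)  # [[1 1]
            #  [4 3]]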
3649
3012
 
3650
3013
 
3651
- def gather_nd(input_x, indices):
3652
- r"""
3653
- Gathers slices from a tensor by indices.
3654
-
3655
- Using given indices to gather slices from a tensor with a specified shape.
3656
-
3657
- `indices` is an K-dimensional integer tensor. Supposes it as a (K-1)-dimensional tensor and each element of it
3658
- defines a slice of `input_x`:
3659
-
3660
- .. math::
3661
- output[(i_0, ..., i_{K-2})] = input\_x[indices[(i_0, ..., i_{K-2})]]
3662
-
3663
- The last dimension of `indices` can not be greater than the rank of `input_x`:
3664
- :math:`indices.shape[-1] <= input\_x.rank`.
3665
-
3666
- Args:
3667
- input_x (Tensor): The target tensor to gather values.
3668
- indices (Tensor): The index tensor, with int32 or int64 data type.
3669
-
3670
- Returns:
3671
- Tensor, has the same type as `input_x` and the shape is
3672
- :math:`indices\_shape[:-1] + input\_x\_shape[indices\_shape[-1]:]`.
3673
-
3674
- Raises:
3675
- ValueError: If length of shape of `input_x` is less than the last dimension of `indices`.
3676
-
3677
- Supported Platforms:
3678
- ``Ascend`` ``GPU`` ``CPU``
3679
-
3680
- Examples:
3681
- >>> import mindspore
3682
- >>> import numpy as np
3683
- >>> from mindspore import Tensor, ops
3684
- >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
3685
- >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
3686
- >>> output = ops.gather_nd(input_x, indices)
3687
- >>> print(output)
3688
- [-0.1 0.5]
3689
- """
3690
- return gather_nd_(input_x, indices)
3691
-
3692
-
3693
3014
  def tensor_scatter_add(input_x, indices, updates):
3694
3015
  r"""
3695
3016
  Creates a new tensor by adding the values from the positions in `input_x` indicated by
@@ -3700,7 +3021,7 @@ def tensor_scatter_add(input_x, indices, updates):
3700
3021
 
3701
3022
  The last axis of `indices` is the depth of each index vectors. For each index vector,
3702
3023
  there must be a corresponding value in `updates`. The shape of `updates` should be
3703
- equal to the shape of `input_x[indices]`. For more details, see use cases.
3024
+ equal to the shape of `input_x[indices]`. For more details, see Examples.
3704
3025
 
3705
3026
  .. math::
3706
3027
  output\left [indices \right ] = input\_x + update
@@ -3758,7 +3079,7 @@ def tensor_scatter_sub(input_x, indices, updates):
3758
3079
 
3759
3080
  The last axis of `indices` is the depth of each index vectors. For each index vector,
3760
3081
  there must be a corresponding value in `updates`. The shape of `updates` should be
3761
- equal to the shape of `input_x[indices]`. For more details, see use cases.
3082
+ equal to the shape of `input_x[indices]`. For more details, see Examples.
3762
3083
 
3763
3084
  .. math::
3764
3085
  output[indices] = input\_x - update
@@ -3943,14 +3264,12 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
3943
3264
  nondeterministic.
3944
3265
  - On Ascend, the reduction only support set to "none" for now.
3945
3266
  - On Ascend, the data type of `input_x` must be float16 or float32.
3267
+ - This is an experimental API that is subject to change or deletion.
3946
3268
 
3947
3269
  Note:
3948
3270
  If some values of the `indices` exceed the upper or lower bounds of the index of `input_x`, instead of raising
3949
3271
  an index error, the corresponding `updates` will not be updated to `input_x`.
3950
3272
 
3951
- .. warning::
3952
- This is an experimental API that is subject to change or deletion.
3953
-
3954
3273
  Args:
3955
3274
  input_x (Tensor): The target tensor. The rank must be at least 1.
3956
3275
  indices (Tensor): The index of `input_x` to do scatter operation whose data type must be mindspore.int32 or
@@ -4065,6 +3384,79 @@ def scatter(input, axis, index, src):
4065
3384
  return ops.tensor_scatter_elements(input_x=input, indices=index, updates=src, axis=axis)
4066
3385
 
4067
3386
 
3387
+ def scatter_add_ext(input, dim, index, src):
3388
+ """
3389
+ Add all elements in `src` to the index specified by `index` to `input` along dimension specified by `dim`.
3390
+ It takes three inputs `input`, `src` and `index` of the same rank r >= 1.
3391
+
3392
+ For a 3-D tensor, the operation updates input as follows:
3393
+
3394
+ .. code-block::
3395
+
3396
+ input[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
3397
+
3398
+ input[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
3399
+
3400
+ input[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
3401
+
3402
+ Args:
3403
+ input (Tensor): The target tensor. The rank must be at least 1.
3404
+ dim (int): Which dim to scatter. Accepted range is [-r, r) where r = rank(`input`).
3405
+ index (Tensor): The index of `input` to do scatter operation whose data type must be mindspore.int32 or
3406
+ mindspore.int64. Same rank as `input`. Except for the dimension specified by `dim`,
3407
+ the size of each dimension of `index` must be less than or equal to the size of
3408
+ the corresponding dimension of `input`.
3409
+ src (Tensor): The tensor doing the scatter operation with `input`, has the same type as `input` and
3410
+ the size of each dimension must be greater than or equal to that of `index`.
3411
+
3412
+ Returns:
3413
+ Tensor, has the same shape and type as `input`.
3414
+
3415
+ Raises:
3416
+ TypeError: If the dtype of `index` is neither int32 nor int64.
+ ValueError: If the rank of any one of `input`, `index` and `src` is less than 1.
3418
+ ValueError: If the rank of `input`, `index` and `src` is not the same.
3419
+ ValueError: If, outside dimension `dim`, the size of any dimension of `index` is greater than the size of
3420
+ the corresponding dimension of `input` .
3421
+ ValueError: If the size of any dimension of `src` is less than that of `index`.
3422
+
3423
+ Supported Platforms:
3424
+ ``Ascend``
3425
+
3426
+ Examples:
3427
+ >>> import numpy as np
3428
+ >>> import mindspore as ms
3429
+ >>> from mindspore import Tensor, ops
3430
+ >>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
3431
+ >>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
3432
+ >>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
3433
+ >>> out = ops.function.array_func.scatter_add_ext(input=input, dim=1, index=index, src=src)
3434
+ >>> print(out)
3435
+ [[1. 2. 11. 4. 13.]]
3436
+ >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
3437
+ >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
3438
+ >>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64)
3439
+ >>> out = ops.function.array_func.scatter_add_ext(input=input, dim=0, index=index, src=src)
3440
+ >>> print(out)
3441
+ [[1. 2. 3. 0. 0.]
3442
+ [0. 0. 0. 0. 0.]
3443
+ [4. 5. 6. 0. 0.]
3444
+ [0. 0. 0. 0. 0.]
3445
+ [7. 8. 9. 0. 0.]]
3446
+ >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
3447
+ >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
3448
+ >>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64)
3449
+ >>> out = ops.function.array_func.scatter_add_ext(input=input, dim=1, index=index, src=src)
3450
+ >>> print(out)
3451
+ [[1. 0. 2. 0. 3.]
3452
+ [4. 0. 5. 0. 6.]
3453
+ [7. 0. 8. 0. 9.]
3454
+ [0. 0. 0. 0. 0.]
3455
+ [0. 0. 0. 0. 0.]]
3456
+ """
3457
+ return scatter_add_ext_op(input, dim, index, src)
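The first docstring example, replayed as an explicit NumPy loop to make the `dim == 1` update rule concrete (NumPy used purely for illustration):

import numpy as np

out = np.array([[1., 2., 3., 4., 5.]])
src = np.array([[8., 8.]])
index = np.array([[2, 4]])
for i in range(index.shape[0]):
    for j in range(index.shape[1]):
        out[i][index[i][j]] += src[i][j]   # input[i][index[i][j]] += src[i][j] when dim == 1
print(out)  # [[ 1.  2. 11.  4. 13.]]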
3458
+
3459
+
4068
3460
  def _get_slice_scatter_const(x_shape, axis, start, end, step):
4069
3461
  r"""
4070
3462
  Calculate the rank of input, embedded dimensions and index.
@@ -4074,7 +3466,7 @@ def _get_slice_scatter_const(x_shape, axis, start, end, step):
4074
3466
  start = start if start is not None else 0
4075
3467
  start = start if start >= 0 else start + x_rank
4076
3468
  end = end if end is not None else x_shape[axis]
4077
- end = end if end >= 0 else end + x_rank
3469
+ end = end if end >= 0 else end + x_shape[axis]
4078
3470
  end = end if end < x_shape[axis] else x_shape[axis]
4079
3471
  index = list(builtins.range(start, end, step))
4080
3472
  return x_rank, index, axis
@@ -4121,6 +3513,8 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
4121
3513
  [1. 0. 1. 0. 1. 0.]
4122
3514
  [1. 0. 1. 0. 1. 0.]]
4123
3515
  """
3516
+ _check_is_tensor("input", input, "slice_scatter")
3517
+ _check_is_tensor("src", src, "slice_scatter")
4124
3518
  input_shape = input.shape
4125
3519
  input_rank, index, axis = _get_slice_scatter_const(input_shape, axis, start, end, step)
4126
3520
 
@@ -4136,6 +3530,8 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
4136
3530
  for _ in builtins.range(input_rank - axis - 1):
4137
3531
  index_tensor = index_tensor.expand_dims(-1)
4138
3532
  index_tensor = index_tensor.broadcast_to(src.shape)
3533
+ if index_tensor.dtype not in mstype.int_type:
3534
+ index_tensor = index_tensor.astype(mstype.int64)
4139
3535
  return tensor_scatter_elements(input, axis=axis, indices=index_tensor, updates=src)
4140
3536
 
4141
3537
 
@@ -4174,10 +3570,12 @@ def select_scatter(input, src, axis, index):
4174
3570
  [1. 1. 1.]
4175
3571
  [0. 0. 0.]]]
4176
3572
  """
3573
+ _check_is_tensor("input", input, "select_scatter")
3574
+ _check_is_tensor("src", src, "select_scatter")
4177
3575
  src = src.expand_dims(axis=axis)
4178
3576
  x_rank = input.ndim
4179
3577
  axis = axis if axis >= 0 else axis + x_rank
4180
- index = index if index >= 0 else index + x_rank
3578
+ index = index if index >= 0 else index + input.shape[axis]
4181
3579
  return slice_scatter(input, src, axis, start=index, end=index + 1)
4182
3580
 
4183
3581
 
@@ -4228,6 +3626,7 @@ def space_to_batch_nd(input_x, block_size, paddings):
4228
3626
 
4229
3627
  Examples:
4230
3628
  >>> import numpy as np
3629
+ >>> import mindspore
4231
3630
  >>> from mindspore import Tensor, ops
4232
3631
  >>> block_size = [2, 2]
4233
3632
  >>> paddings = [[0, 0], [0, 0]]
@@ -4302,49 +3701,11 @@ def batch_to_space_nd(input_x, block_shape, crops):
4302
3701
  [3. 4.]]]]
4303
3702
  """
4304
3703
  if isinstance(block_shape, Tensor):
4305
- _batch_to_space_ndv2 = _get_cache_prim(P.BatchToSpaceNDV2)()
4306
- return _batch_to_space_ndv2(input_x, block_shape, crops)
3704
+ return batch_to_space_nd_v2_(input_x, block_shape, crops)
4307
3705
  _batch_to_space_nd = _get_cache_prim(P.BatchToSpaceND)(block_shape, crops)
4308
3706
  return _batch_to_space_nd(input_x)
4309
3707
 
4310
3708
 
4311
- def nonzero(input):
4312
- """
4313
- Return a Tensor of the positions of all non-zero values.
4314
-
4315
- Args:
4316
- input (Tensor): The input Tensor, its rank should be greater than or equal to 1.
4317
-
4318
- Returns:
4319
- Tensor, a 2-D Tensor whose data type is int64, containing the positions of all non-zero values of the input.
4320
-
4321
- Raises:
4322
- TypeError: If `input` is not Tensor.
4323
- ValueError: If dim of `x` equals to 0.
4324
-
4325
- Supported Platforms:
4326
- ``Ascend`` ``GPU`` ``CPU``
4327
-
4328
- Examples:
4329
- >>> import mindspore
4330
- >>> import numpy as np
4331
- >>> from mindspore import Tensor
4332
- >>> import mindspore.ops as ops
4333
- >>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
4334
- >>> output = ops.nonzero(x)
4335
- >>> print(output)
4336
- [[0 0 0]
4337
- [0 1 0]]
4338
- >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
4339
- >>> output = ops.nonzero(x)
4340
- >>> print(output)
4341
- [[0]
4342
- [2]
4343
- [4]]
4344
- """
4345
- return nonzero_(input)
4346
-
4347
-
4348
3709
  def matrix_diag(x, k=0, num_rows=-1, num_cols=-1, padding_value=0, align="RIGHT_LEFT"):
4349
3710
  r"""
4350
3711
  Returns a Tensor with the contents in `x` as k[0]-th to k[1]-th diagonals of a matrix, with everything else padded
@@ -4604,18 +3965,19 @@ def meshgrid(*inputs, indexing='xy'):
4604
3965
 
4605
3966
  Keyword Args:
4606
3967
  indexing (str, optional): Cartesian ('xy', default) or
4607
- matrix ('ij') indexing of output. Valid options: xy' or 'ij'. In the 2-D case with
3968
+ matrix ('ij') indexing of output. Valid options: ``'xy'`` or ``'ij'``. In the 2-D case with
4608
3969
  inputs of length `M` and `N`, the outputs are of shape :math:`(N, M)`
4609
- for 'xy' indexing and :math:`(M, N)` for 'ij' indexing. In the 3-D
3970
+ for ``'xy'`` indexing and :math:`(M, N)` for ``'ij'`` indexing. In the 3-D
4610
3971
  case with inputs of length `M`, `N` and `P`, outputs are of shape
4611
- :math:`(N, M, P)` for 'xy' indexing and :math:`(M, N, P)` for 'ij' indexing. Default: ``'xy'`` .
3972
+ :math:`(N, M, P)` for ``'xy'`` indexing and :math:`(M, N, P)` for ``'ij'`` indexing.
3973
+ Default: ``'xy'`` .
4612
3974
 
4613
3975
  Returns:
4614
3976
  Tensors, a Tuple of N N-D Tensor objects. The data type is the same with the Inputs.
4615
3977
 
4616
3978
  Raises:
4617
3979
  TypeError: If `indexing` is not a str or `inputs` is not a tuple.
4618
- ValueError: If `indexing` is neither 'xy' nor 'ij'.
3980
+ ValueError: If `indexing` is neither ``'xy'`` nor ``'ij'``.
4619
3981
 
4620
3982
  Supported Platforms:
4621
3983
  ``Ascend`` ``GPU`` ``CPU``
@@ -4623,7 +3985,7 @@ def meshgrid(*inputs, indexing='xy'):
4623
3985
  Examples:
4624
3986
  >>> import numpy as np
4625
3987
  >>> from mindspore import Tensor
4626
- >>> import mindspore.ops as ops
3988
+ >>> from mindspore import ops
4627
3989
  >>> x = Tensor(np.array([1, 2, 3, 4]).astype(np.int32))
4628
3990
  >>> y = Tensor(np.array([5, 6, 7]).astype(np.int32))
4629
3991
  >>> z = Tensor(np.array([8, 9, 0, 1, 2]).astype(np.int32))
@@ -4706,7 +4068,7 @@ def affine_grid(theta, size, align_corners=False):
4706
4068
  Examples:
4707
4069
  >>> import mindspore
4708
4070
  >>> from mindspore import Tensor
4709
- >>> import mindspore.ops as ops
4071
+ >>> from mindspore import ops
4710
4072
  >>> theta = Tensor([[[0.8, 0.5, 0],[-0.5, 0.8, 0]]], mindspore.float32)
4711
4073
  >>> out_size = (1, 3, 2, 3)
4712
4074
  >>> output = ops.affine_grid(theta, out_size, False)
@@ -4722,87 +4084,6 @@ def affine_grid(theta, size, align_corners=False):
  return affine_grid_op(theta, size)


- def broadcast_to(input, shape): # pylint: disable=redefined-outer-name
- """
- Broadcasts input tensor to a given shape. The dim of input shape must be smaller
- than or equal to that of target shape. Suppose input shape is :math:`(x_1, x_2, ..., x_m)`,
- target shape is :math:`(*, y_1, y_2, ..., y_m)`, where :math:`*` means any additional dimension.
- The broadcast rules are as follows:
-
- Compare the value of :math:`x_m` and :math:`y_m`, :math:`x_{m-1}` and :math:`y_{m-1}`, ...,
- :math:`x_1` and :math:`y_1` consecutively and
- decide whether these shapes are broadcastable and what the broadcast result is.
-
- If the value pairs at a specific dim are equal, then that value goes right into that dim of the output shape.
- With an input shape :math:`(2, 3)` and target shape :math:`(2, 3)`, the inferred output shape is :math:`(2, 3)`.
-
- If the value pairs are unequal, there are three cases:
-
- Case 1: If the value of the target shape in the dimension is -1, the value of the
- output shape in the dimension is the value of the corresponding input shape in the dimension.
- With an input shape :math:`(3, 3)` and target
- shape :math:`(-1, 3)`, the output shape is :math:`(3, 3)`.
-
- Case 2: If the value of the target shape in the dimension is not -1, but the corresponding
- value in the input shape is 1, then the corresponding value of the output shape
- is that of the target shape. With an input shape :math:`(1, 3)` and target
- shape :math:`(8, 3)`, the output shape is :math:`(8, 3)`.
-
- Case 3: If the corresponding values of the two shapes do not satisfy the above cases,
- it means that broadcasting from the input shape to the target shape is not supported.
-
- So far we have the last m dims of the output shape; now focus on the first :math:`*` dims, where there are
- two cases:
-
- If the first :math:`*` dims of the output shape do not have -1 in them, then fill the input
- shape with ones until their lengths are the same, and then refer to
- Case 2 mentioned above to calculate the output shape. With target shape :math:`(3, 1, 4, 1, 5, 9)` and
- input shape :math:`(1, 5, 9)`, the filled input shape will be :math:`(1, 1, 1, 1, 5, 9)` and thus the
- output shape is :math:`(3, 1, 4, 1, 5, 9)`.
-
- If the first :math:`*` dims of the output shape have -1 in them, this -1 corresponds to
- a non-existing dim, so they are not broadcastable. With target shape :math:`(3, -1, 4, 1, 5, 9)` and
- input shape :math:`(1, 5, 9)`, the dim-filling process is not applied; an error is raised directly.
-
- Args:
- input (Tensor): The input Tensor.
- shape (tuple): The target shape to broadcast. Can be fully specified, or have -1 in one position
- where it will be substituted by the input tensor's shape in that position, see example.
-
- Returns:
- Tensor, with the given `shape` and the same data type as `input`.
-
- Raises:
- TypeError: If `shape` is not a tuple.
- ValueError: If the target and input shapes are incompatible, or if a -1 in the target shape is in an invalid
- location.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> shape = (2, 3)
- >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
- >>> output = ops.broadcast_to(x, shape)
- >>> print(output)
- [[1. 2. 3.]
- [1. 2. 3.]]
- >>> shape = (-1, 2)
- >>> x = Tensor(np.array([[1], [2]]).astype(np.float32))
- >>> output = ops.broadcast_to(x, shape)
- >>> print(output)
- [[1. 1.]
- [2. 2.]]
- """
- if isinstance(shape, Tensor) or ops.is_sequence_value_unknown(shape):
- _dyn_broadcast_to = _get_cache_prim(DynamicBroadcastTo)()
- return _dyn_broadcast_to(input, shape)
- _broadcast_to = _get_cache_prim(P.BroadcastTo)(shape)
- return _broadcast_to(input)
-
-
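The removed docstring above pins the broadcast shape rule down precisely. The following pure-Python sketch restates that rule for reference; the function name and asserts are illustrative only and are not part of the package:

    def infer_broadcast_shape(input_shape, target_shape):
        """Apply the broadcast rule described above; raise on incompatible dims."""
        extra = len(target_shape) - len(input_shape)
        if extra < 0:
            raise ValueError("input rank must not exceed target rank")
        if any(t == -1 for t in target_shape[:extra]):
            raise ValueError("-1 may not map to a non-existing input dim")
        padded = (1,) * extra + tuple(input_shape)  # fill with ones on the left
        out = []
        for x, y in zip(padded, target_shape):
            if y == -1:
                out.append(x)          # Case 1: -1 copies the input dim
            elif x == y or x == 1:
                out.append(y)          # equal dims, or Case 2: broadcast 1 -> y
            else:
                raise ValueError(f"cannot broadcast {x} to {y}")
        return tuple(out)

    assert infer_broadcast_shape((1, 5, 9), (3, 1, 4, 1, 5, 9)) == (3, 1, 4, 1, 5, 9)
    assert infer_broadcast_shape((3, 3), (-1, 3)) == (3, 3)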
  def unsorted_segment_min(x, segment_ids, num_segments):
  r"""
  Computes the minimum of a tensor along segments.
@@ -4826,14 +4107,13 @@ def unsorted_segment_min(x, segment_ids, num_segments):
  x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
  segment_ids (Tensor): The label that indicates the segment to which each element belongs.
  Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
- num_segments (int): The value specifies the number of distinct `segment_ids`.
+ num_segments (Union[int, Tensor], optional): The number of segments :math:`z`, which can be an int or a 0-D Tensor.

  Returns:
- Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
+ Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.

  Raises:
  TypeError: If `num_segments` is not an int or 0-D Tensor.
- ValueError: If length of shape of `segment_ids` is not equal to 1.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -4850,7 +4130,6 @@ def unsorted_segment_min(x, segment_ids, num_segments):
  [[1. 2. 3.]
  [4. 2. 1.]]
  """
- unsorted_segment_min_ = P.UnsortedSegmentMin()
  return unsorted_segment_min_(x, segment_ids, num_segments)


@@ -4877,14 +4156,13 @@ def unsorted_segment_max(x, segment_ids, num_segments):
  x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
  segment_ids (Tensor): The label that indicates the segment to which each element belongs.
  Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
- num_segments (int): The value specifies the number of distinct `segment_ids`.
+ num_segments (Union[int, Tensor], optional): The number of segments :math:`z`, which can be an int or a 0-D Tensor.

  Returns:
- Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
+ Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.

  Raises:
  TypeError: If `num_segments` is not an int or 0-D Tensor.
- ValueError: If length of shape of `segment_ids` is not equal to 1.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -4901,7 +4179,6 @@ def unsorted_segment_max(x, segment_ids, num_segments):
  [[1. 2. 3.]
  [4. 5. 6.]]
  """
- unsorted_segment_max_ = P.UnsortedSegmentMax()
  return unsorted_segment_max_(x, segment_ids, num_segments)


@@ -4919,16 +4196,15 @@ def unsorted_segment_prod(x, segment_ids, num_segments):

  Args:
  x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
- segment_ids (Tensor): A `1-D` tensor whose shape is :math:`(x_1)`,
- the value must be non-negative tensor. The data type must be int32.
- num_segments (int): The value specifies the number of distinct `segment_ids`.
+ segment_ids (Tensor): The label that indicates the segment to which each element belongs.
+ Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R. The data type must be int32.
+ num_segments (Union[int, Tensor], optional): The number of segments :math:`z`, which can be an int or a 0-D Tensor.

  Returns:
- Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
+ Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.

  Raises:
  TypeError: If `num_segments` is not an int or 0-D Tensor.
- ValueError: If length of shape of `segment_ids` is not equal to 1.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -4945,7 +4221,6 @@ def unsorted_segment_prod(x, segment_ids, num_segments):
  [[4. 4. 3.]
  [4. 5. 6.]]
  """
- unsorted_segment_prod_ = P.UnsortedSegmentProd()
  return unsorted_segment_prod_(x, segment_ids, num_segments)


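All three hunks above widen `num_segments` from int to Union[int, Tensor]. A short usage sketch under the new signature, reusing the docstring's example data; passing a 0-D Tensor is what the updated docs describe, though the exact accepted integer dtypes are an assumption here:

    import mindspore
    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
    segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
    # num_segments may now be a plain int ...
    out_int = ops.unsorted_segment_min(x, segment_ids, 2)
    # ... or an equivalent 0-D Tensor, useful when the segment count is dynamic.
    out_tensor = ops.unsorted_segment_min(x, segment_ids, Tensor(2, mindspore.int64))
    print(out_int)   # [[1. 2. 3.]
                     #  [4. 2. 1.]]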
@@ -4986,7 +4261,7 @@ def index_fill(x, axis, index, value):
  Examples:
  >>> import mindspore
  >>> import numpy as np
- >>> import mindspore.ops as ops
+ >>> from mindspore import ops
  >>> from mindspore import Tensor
  >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32))
  >>> index = Tensor([0, 2], mindspore.int32)
@@ -5157,33 +4432,6 @@ def is_nonzero(input):
  return bool(out)


- def scalar_cast(input_x, input_y):
- """
- Casts the input scalar to another type.
-
- Args:
- input_x (scalar): The input scalar. Only constant value is allowed.
- input_y (mindspore.dtype): The type to be cast. Only constant value is allowed.
-
- Returns:
- Scalar. The type is the same as the python type corresponding to `input_y`.
-
- Raises:
- TypeError: If neither `input_x` nor `input_y` is a constant value.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> from mindspore import ops
- >>> output = ops.scalar_cast(255.0, mindspore.int32)
- >>> print(output)
- 255
- """
- return scalar_cast_(input_x, input_y)
-
-
  def tensor_scatter_mul(input_x, indices, updates):
  r"""
  Creates a new tensor by multiplying the values from the positions in `input_x` indicated by
@@ -5193,10 +4441,10 @@ def tensor_scatter_mul(input_x, indices, updates):

  The last axis of `indices` is the depth of each index vector. For each index vector,
  there must be a corresponding value in `updates`. The shape of `updates` should be
- equal to the shape of `input_x[indices]`. For more details, see use cases.
+ equal to the shape of `input_x[indices]`. For more details, see Examples.

  .. math::
- output[indices] = input\_x \times update
+ output\left [indices \right ] = input\_x \times update

  Note:
  - If some values of the `indices` are out of bound, instead of raising an index error,
@@ -5253,7 +4501,7 @@ def tensor_scatter_div(input_x, indices, updates):

  The last axis of `indices` is the depth of each index vector. For each index vector,
  there must be a corresponding value in `updates`. The shape of `updates` should be
- equal to the shape of `input_x[indices]`. For more details, see use cases.
+ equal to the shape of `input_x[indices]`. For more details, see Examples.

  .. math::
  output\left [indices \right ] = input\_x \div update
@@ -5374,113 +4622,36 @@ def tuple_to_array(input_x):
  return tuple_to_tensor_(input_x, dtype)


- def masked_select(input, mask):
- """
- Returns a new 1-D Tensor which indexes the `x` tensor according to the boolean `mask`.
- The shapes of the `mask` tensor and the `x` tensor don't need to match, but they must be broadcastable.
-
- Args:
- input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
- mask (Tensor[bool]): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-
- Returns:
- A 1-D Tensor, with the same type as `input`.
-
- Raises:
- TypeError: If `input` or `mask` is not a Tensor.
- TypeError: If dtype of `mask` is not bool.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import numpy as np
- >>> import mindspore.ops as ops
- >>> from mindspore import Tensor
- >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
- >>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
- >>> output = ops.masked_select(x, mask)
- >>> print(output)
- [1 3]
- """
- return masked_select_(input, mask)
-
-
- def masked_fill(input_x, mask, value):
- """
- Fills elements of Tensor with value where mask is True.
- The shapes of `input_x` and `mask` need to be the same or broadcastable.
-
- Args:
- input_x (Tensor): The source Tensor whose data type is one of bool, uint8, int8, int16, int32,
- int64, float16, float32, float64, complex64, complex128.
- mask (Tensor[bool]): The boolean mask.
- value (Union[float, Tensor]): The value to fill in with, which dtype is the same as `input_x`.
-
- Returns:
- Tensor, has the same type and shape as `input_x`.
-
- Raises:
- TypeError: If dtype of `mask` is not bool.
- TypeError: If `input_x` or `mask` is not a Tensor.
- ValueError: If the shapes of `input_x` and `mask` could not be broadcast.
- TypeError: If dtype of `input_x` or `value` is not one of bool, uint8, int8, int16, int32,
- int64, float16, float32, float64, complex64, complex128.
- TypeError: If dtype of `value` is different from that of `input_x`.
- TypeError: If `value` is neither float number nor Tensor.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> input_x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
- >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
- >>> output = ops.masked_fill(input_x, mask, 0.5)
- >>> print(output)
- [0.5 0.5 3. 0.5]
- """
- if isinstance(value, (float, int)) and isinstance(input_x, Tensor):
- value = scalar_to_tensor_(value, input_x.dtype)
- masked_fill_ = _get_cache_prim(P.MaskedFill)()
- return masked_fill_(input_x, mask, value)
-
-
- def diag(input):
- r"""
- Constructs a diagonal tensor with a given diagonal values.
-
- Assume `input` has dimensions :math:`(D_1,... D_k)` , the output is a tensor of
- rank 2k with dimensions :math:`(D_1,..., D_k, D_1,..., D_k)` where:
- :math:`output[i_1,..., i_k, i_1,..., i_k] = input[i_1,..., i_k]` and 0 everywhere else.
+ def masked_select(input, mask):
+ """
+ Returns a new 1-D Tensor which indexes the `input` tensor according to the boolean `mask`.
+ The shapes of the `mask` tensor and the `input` tensor don't need to match, but they must be broadcastable.

  Args:
- input (Tensor): The input tensor.
+ input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
+ mask (Tensor[bool]): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

  Returns:
- Tensor, has the same dtype as the `input`.
+ A 1-D Tensor, with the same type as `input`.

  Raises:
- TypeError: If `input` is not a Tensor.
- ValueError: If rank of `input` is less than 1.
+ TypeError: If `input` or `mask` is not a Tensor.
+ TypeError: If dtype of `mask` is not bool.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>> from mindspore import Tensor
- >>> import mindspore.ops as ops
- >>> input_x = Tensor([1, 2, 3, 4]).astype('int32')
- >>> output = ops.diag(input_x)
+ >>> import numpy as np
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
+ >>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
+ >>> output = ops.masked_select(x, mask)
  >>> print(output)
- [[1 0 0 0]
- [0 2 0 0]
- [0 0 3 0]
- [0 0 0 4]]
+ [1 3]
  """
- return diag_(input)
+ return masked_select_(input, mask)


  def diagflat(input, offset=0):
@@ -5541,7 +4712,7 @@ def col2im(input_x, output_size, kernel_size, dilation, padding_value, stride):
  Combines an array of sliding local blocks into a large containing tensor.

  Args:
- input_x (Tensor): 4D tensor with data type float16 or float.
+ input_x (Tensor): 4D tensor with data type float16 or float32.
  output_size (Tensor): 1D tensor with 2 elements of data type int.
  kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two int
  for height and width. If type is int, it means that height equals width. Must be specified.
@@ -5597,7 +4768,7 @@ def _split_int(x, split_size_or_sections, axis):
  num_sections = length_along_dim // split_size_or_sections
  length1 = num_sections * split_size_or_sections
  length2 = length_along_dim - length1
- start1 = _list_comprehensions(rank(x), 0, True)
+ start1 = _list_comprehensions(rank_(x), 0, True)
  size1 = _tuple_setitem(arr_shape, axis, length1)
  start2 = _tuple_setitem(start1, axis, length1)
  size2 = _tuple_setitem(arr_shape, axis, length2)
@@ -5627,7 +4798,6 @@ def _split_sub_tensors(x, split_size_or_sections, axis):
  sub_tensors.append(sliced_tensor)
  return sub_tensors

-
  def split(tensor, split_size_or_sections, axis=0):
  """
  Splits the Tensor into chunks along the given axis.
@@ -5649,9 +4819,9 @@ def split(tensor, split_size_or_sections, axis=0):
  TypeError: If argument `tensor` is not Tensor.
  TypeError: If argument `axis` is not int.
  ValueError: If argument `axis` is out of range of :math:`[-tensor.ndim, tensor.ndim)` .
- TypeError: If each element in 'split_size_or_sections' is not integer.
- TypeError: If argument `indices_or_sections` is not int, tuple(int) or list(int).
- ValueError: The sum of 'split_size_or_sections' is not equal to x.shape[axis].
+ TypeError: If each element in `split_size_or_sections` is not integer.
+ TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
+ ValueError: The sum of `split_size_or_sections` is not equal to x.shape[axis].

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -5695,6 +4865,53 @@ def split(tensor, split_size_or_sections, axis=0):
  f"but got {type(split_size_or_sections)}")
  return tuple(res)

+ def split_ext(tensor, split_size_or_sections, axis=0):
+ """
+ Splits the Tensor into chunks along the given axis.
+
+ Args:
+ tensor (Tensor): A Tensor to be divided.
+ split_size_or_sections (Union[int, tuple(int), list(int)]):
+ If `split_size_or_sections` is an int type, `tensor` will be split into equally sized chunks,
+ each chunk with size `split_size_or_sections`. Last chunk will be smaller than `split_size_or_sections`
+ if `tensor.shape[axis]` is not divisible by `split_size_or_sections`.
+ If `split_size_or_sections` is a list type, then `tensor` will be split into len(split_size_or_sections)
+ chunks with sizes `split_size_or_sections` along the given `axis`.
+ axis (int): The axis along which to split. Default: ``0`` .
+
+ Returns:
+ A tuple of sub-tensors.
+
+ Raises:
+ TypeError: If argument `tensor` is not Tensor.
+ TypeError: If argument `axis` is not int.
+ ValueError: If argument `axis` is out of range of :math:`[-tensor.ndim, tensor.ndim)` .
+ TypeError: If each element in `split_size_or_sections` is not integer.
+ TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
+ ValueError: The sum of `split_size_or_sections` is not equal to x.shape[axis].
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import ops, Tensor
+ >>> input_x = np.arange(9).astype("float32")
+ >>> output = ops.split_ext(Tensor(input_x), 3)
+ >>> print(output)
+ (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
+ Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
+ Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
+ """
+ if isinstance(split_size_or_sections, int):
+ res = split_tensor(tensor, split_size_or_sections, axis)
+ elif isinstance(split_size_or_sections, (list, tuple)):
+ res = split_with_size(tensor, split_size_or_sections, axis)
+ else:
+ raise TypeError(f"Type of argument `split_size_or_sections` should be integer, tuple(int) or list(int), " \
+ f"but got {type(split_size_or_sections)}")
+ return res
+

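The docstring above demonstrates only the int branch of `split_ext`. A usage sketch for the list branch, assuming the function is exposed as `ops.split_ext` just as in that docstring example; the sizes must sum to shape[axis]:

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.arange(9).astype("float32"))
    # Sizes are consumed in order along axis 0: 2 + 3 + 4 == 9.
    parts = ops.split_ext(x, [2, 3, 4])
    print([p.shape for p in parts])  # [(2,), (3,), (4,)]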
  def tril(input, diagonal=0): # pylint: disable=redefined-outer-name
  """
@@ -5757,67 +4974,6 @@ def tril(input, diagonal=0): # pylint: disable=redefined-outer-name
  return tril_(input)


- def triu(input, diagonal=0): # pylint: disable=redefined-outer-name
- r"""
- Returns the upper triangle part of 'input' (elements that contain the diagonal and above),
- and sets the other elements to zeros.
-
- .. warning::
- This is an experimental API that is subject to change or deletion.
-
- Args:
- input (Tensor): The input tensor with shape :math:`(M, N, *)` where * means any number of additional dimensions.
- diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: 0,
- indicating the main diagonal.
-
- Returns:
- Tensor, a tensor has the same shape and data type as input.
-
- Raises:
- TypeError: If `diagonal` is not an int.
- TypeError: If `input` is not a Tensor.
- ValueError: If the dimension of `input` is less than 2.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
- ... [ 5, 6, 7, 8],
- ... [10, 11, 12, 13],
- ... [14, 15, 16, 17]]))
- >>> result = ops.triu(x)
- >>> print(result)
- [[ 1 2 3 4]
- [ 0 6 7 8]
- [ 0 0 12 13]
- [ 0 0 0 17]]
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
- ... [ 5, 6, 7, 8],
- ... [10, 11, 12, 13],
- ... [14, 15, 16, 17]]))
- >>> result = ops.triu(x, diagonal=1)
- >>> print(result)
- [[ 0 2 3 4]
- [ 0 0 7 8]
- [ 0 0 0 13]
- [ 0 0 0 0]]
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
- ... [ 5, 6, 7, 8],
- ... [10, 11, 12, 13],
- ... [14, 15, 16, 17]]))
- >>> result = ops.triu(x, diagonal=-1)
- >>> print(result)
- [[ 1 2 3 4]
- [ 5 6 7 8]
- [ 0 11 12 13]
- [ 0 0 16 17]]
- """
- return _get_cache_prim(P.Triu)(diagonal)(input)
-
-
  @_primexpr
  def _canonicalize_axis(axis, ndim):
  """
@@ -5917,24 +5073,24 @@ def _tensor_split_sub_int(x, indices_or_sections, axis):
  arr_shape = x.shape
  length_along_dim = arr_shape[axis]
  if indices_or_sections > length_along_dim:
- res = P.Split(axis, length_along_dim)(x)
+ res = _get_cache_prim(P.Split)(axis, length_along_dim)(x)
  indices_or_sections_n = [length_along_dim, length_along_dim + 1]
  res2 = _tensor_split_sub_tensors(x, indices_or_sections_n, axis)
  for _ in np.arange(length_along_dim, indices_or_sections):
  res += tuple(res2)[1:]
  elif length_along_dim % indices_or_sections == 0:
- res = P.Split(axis, indices_or_sections)(x)
+ res = _get_cache_prim(P.Split)(axis, indices_or_sections)(x)
  else:
  num_long_tensor = length_along_dim % indices_or_sections
  num_short_tensor = indices_or_sections - num_long_tensor
  length1 = num_long_tensor * (length_along_dim // indices_or_sections + 1)
  length2 = length_along_dim - length1
- start1 = _list_comprehensions(rank(x), 0, True)
+ start1 = _list_comprehensions(rank_(x), 0, True)
  size1 = _tuple_setitem(arr_shape, axis, length1)
  start2 = _tuple_setitem(start1, axis, length1)
  size2 = _tuple_setitem(arr_shape, axis, length2)
- res = P.Split(axis, num_long_tensor)(tensor_slice(x, start1, size1)) + \
- P.Split(axis, num_short_tensor)(tensor_slice(x, start2, size2))
+ res = _get_cache_prim(P.Split)(axis, num_long_tensor)(tensor_slice(x, start1, size1)) + \
+ _get_cache_prim(P.Split)(axis, num_short_tensor)(tensor_slice(x, start2, size2))
  return res


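Several hunks in this file replace ad-hoc primitive construction such as `P.Split(...)` with `_get_cache_prim(P.Split)(...)`. A minimal sketch of what such a primitive cache does; the helper below is illustrative only, while the real one lives elsewhere in the package (mindspore.ops._primitive_cache):

    _PRIM_CACHE = {}

    def get_cache_prim_sketch(prim_cls):
        """Return a constructor that reuses one primitive per (class, args) key."""
        def ctor(*args):
            key = (prim_cls, args)
            if key not in _PRIM_CACHE:
                _PRIM_CACHE[key] = prim_cls(*args)  # build once, reuse afterwards
            return _PRIM_CACHE[key]
        return ctor

    # e.g. get_cache_prim_sketch(P.Split)(axis, num) avoids re-creating a Split
    # primitive on every call, which matters inside frequently traced functions.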
@@ -5948,11 +5104,11 @@ def tensor_split(input, indices_or_sections, axis=0):

  - If `indices_or_sections` is an integer n, input tensor will be split into n sections.

- - If :math:`input.shape(axis)` can be divisible by n, sub-sections will have equal size
- :math:`input.shape(axis) / n` .
- - If :math:`input.shape(axis)` is not divisible by n, the first :math:`input.shape(axis) % n` sections
- will have size :math:`input.shape(axis) // n + 1` , and the rest will have
- size :math:`input.shape(axis) // n` .
+ - If :math:`input.shape[axis]` is divisible by n, sub-sections will have equal size
+ :math:`input.shape[axis] / n` .
+ - If :math:`input.shape[axis]` is not divisible by n, the first :math:`input.shape[axis] \bmod n` sections
+ will have size :math:`input.shape[axis] // n + 1` , and the rest will have
+ size :math:`input.shape[axis] // n` .
  - If `indices_or_sections` is of type tuple(int) or list(int), the input tensor will be split at the
  indices in the list or tuple. For example, given parameters :math:`indices\_or\_sections=[1, 4]`
  and :math:`axis=0` , the input tensor will be split into sections :math:`input[:1]` ,
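A worked instance of the corrected formula in the hunk above: with input.shape[axis] = 7 and n = 3, 7 mod 3 = 1, so the first section has 7 // 3 + 1 = 3 elements and the remaining two have 7 // 3 = 2:

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.arange(7))
    parts = ops.tensor_split(x, 3)
    print([p.shape for p in parts])  # [(3,), (2,), (2,)]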
@@ -6165,7 +5321,7 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylint: disable=redefined-outer-name
  tensor.

  - values (Tensor) - The maximum value of input tensor, with the same shape as index, and same dtype as x.
- - index (Tensor) - The index for the maximum value of the input tensor, with dtype int32. If `keepdims`
+ - index (Tensor) - The index for the maximum value of the input tensor, with dtype int64. If `keepdims`
  is true, the shape of output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1, input_{axis+1},
  ..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1},
  ..., input_N)` .
@@ -6194,16 +5350,15 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylint: disable=redefined-outer-name
  [[3.2 0.4 0.4 2.9 4. ]] [[1 1 0 1 1]]
  """
  if not input.shape:
- return (input, Tensor(0, dtype=mstype.int32))
+ return (input, Tensor(0, dtype=mstype.int64))
  if axis is None:
- reduce_max_op = _get_cache_prim(P.ReduceMax)()
- return (reduce_max_op(input), Tensor(0, dtype=mstype.int32))
+ return (max_(input), Tensor(0, dtype=mstype.int64))
  if initial is not None and not isinstance(initial, numbers.Number):
  raise TypeError(f"For 'max', 'initial' must be a scalar, but got {type(initial)}")
  if axis is not None and not isinstance(axis, int):
  raise TypeError(f"For 'max', 'axis' must be int, but got {type(axis)}")
  input = _init_and_select_elem(input, initial, where, ops.maximum)
- argmax_with_value_op = ArgMaxWithValue(axis, keepdims)
+ argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(axis, keepdims)
  indices, values = argmax_with_value_op(input)
  return values, indices

@@ -6249,10 +5404,11 @@ def argmax(input, dim=None, keepdim=False):
  is_dim_none = True
  out = _get_cache_prim(Argmax)(dim, mstype.int64)(input)
  if keepdim and not is_dim_none:
- out = expand_dims_(out, dim)
+ out = expand_dims(out, dim)
  return out


+
  def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pylint: disable=redefined-outer-name
  """
  Calculates the minimum value along with the given axis for the input tensor. It returns the minimum values and
@@ -6311,16 +5467,16 @@ def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pylint: disable=redefined-outer-name
  0.0 0
  """
  if not input.shape:
- return (input, Tensor(0, dtype=mstype.int32))
+ return (input, Tensor(0, dtype=mstype.int64))
  if axis is None:
- return (reduce_min(input), Tensor(0, dtype=mstype.int32))
+ return (min_(input), Tensor(0, dtype=mstype.int64))
  if initial is not None and not isinstance(initial, numbers.Number):
  raise TypeError(f"For 'min', 'initial' must be a scalar, but got {type(initial)}")
  if axis is not None and not isinstance(axis, int):
  raise TypeError(f"For 'min', 'axis' must be int, but got {type(axis)}")
  input = _init_and_select_elem(input, initial, where, ops.minimum)
- argmin_with_value_ = ArgMinWithValue(axis=axis, keep_dims=keepdims)
- indices, values = argmin_with_value_(input)
+ argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(axis, keepdims)
+ indices, values = argmin_with_value_op(input)
  return values, indices

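Both `max` and `min` above now return int64 indices where the 2.2 code returned int32. A short check of the new behavior shown in these hunks:

    import mindspore
    from mindspore import Tensor, ops

    x = Tensor([0.0, 0.4, 0.6, 0.7, 0.1], mindspore.float32)
    values, index = ops.max(x)
    print(index.dtype)  # Int64 under the new code path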
@@ -6378,8 +5534,8 @@ def aminmax(input, *, axis=0, keepdims=False):
  output0 = ops.reshape(output0, [1] * input.ndim)
  output1 = ops.reshape(output1, [1] * input.ndim)
  return output0, output1
- argmin_with_value_op = P.ArgMinWithValue(axis, keepdims)
- argmax_with_value_op = P.ArgMaxWithValue(axis, keepdims)
+ argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(axis, keepdims)
+ argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(axis, keepdims)
  _, output0 = argmin_with_value_op(input)
  _, output1 = argmax_with_value_op(input)
  if keepdims is True and input.ndim == 0:
@@ -6434,66 +5590,48 @@ def narrow(input, axis, start, length):
  begins[axis] = start
  sizes = list(input.shape)
  sizes[axis] = length
- return P.Slice()(input, begins, sizes)
-
-
- def unsorted_segment_sum(input_x, segment_ids, num_segments):
- r"""
- Computes the sum of a tensor along segments.
+ return tensor_slice(input, begins, sizes)

- Calculates a tensor such that :math:`\text{output}[i] = \sum_{segment\_ids[j] == i} \text{data}[j, \ldots]`, where
- :math:`j,...` is a tuple describing the index of element in data.
- `segment_ids` selects which elements in data to sum
- up. Segment_ids does not need to be sorted, and it does not need to cover all values in the entire valid value
- range.

- The following figure shows the calculation process of unsorted_segment_sum:
-
- .. image:: UnsortedSegmentSum.png
-
- Note:
- - If the segment_id i is absent in the segment_ids, then output[i] will be filled with 0.
- - On Ascend, if the value of segment_id is less than 0 or greater than the length of the input data shape, an
- execution error will occur.
-
- If the sum of the given segment_ids :math:`i` is empty, then :math:`\text{output}[i] = 0`. If the given segment_ids
- is negative, the value will be ignored. 'num_segments' must be equal to the number of different segment_ids.
+ def narrow_ext(input, dim, start, length):
+ """
+ Returns a narrowed tensor from the input tensor along dimension `dim`,
+ covering the elements from `start` to `start + length`.

  Args:
- input_x (Tensor): Input Tensor contains the data to be summed.
- The shape is :math:`(x_1, x_2, ..., x_R)`.
- segment_ids (Tensor): The label that indicates the segment to which each element belongs.
- Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
- num_segments (Union[int, Tensor], optional): The number of segments :math:`z`, which can be an int or a 0-D Tensor.
+ input (Tensor): the tensor to narrow.
+ dim (int): dimension along which to narrow.
+ start (int): the index where narrowing begins.
+ length (int): the number of elements to keep, counted from `start`.

  Returns:
- Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
+ Tensor.

  Raises:
- TypeError: If `num_segments` is not an int or 0-D Tensor.
- ValueError: If length of shape of `segment_ids` is less than 1.
+ ValueError: If `dim` is out of range [-input.ndim, input.ndim).
+ ValueError: If `start` is out of range [-input.shape[dim], input.shape[dim]].
+ ValueError: If `length` is out of range [0, input.shape[dim]-start].

  Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ ``Ascend``

  Examples:
- >>> from mindspore import Tensor
- >>> from mindspore import ops
  >>> import mindspore
- >>> input_x = Tensor([1, 2, 3, 4], mindspore.float32)
- >>> segment_ids = Tensor([0, 0, 1, 2], mindspore.int32)
- >>> num_segments = 4
- >>> output = ops.unsorted_segment_sum(input_x, segment_ids, num_segments)
+ >>> from mindspore import ops
+ >>> from mindspore import Tensor
+ >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mindspore.int32)
+ >>> output = ops.narrow(x, 0, 0, 2)
  >>> print(output)
- [3. 3. 4. 0.]
- >>> input_x = Tensor([1, 2, 3, 4, 2, 5], mindspore.float32)
- >>> segment_ids = Tensor([0, 0, 1, 2, 3, 4], mindspore.int32)
- >>> num_segments = 6
- >>> output = ops.unsorted_segment_sum(input_x, segment_ids, num_segments)
+ [[ 1 2 3]
+ [ 4 5 6]]
+ >>> output = ops.narrow(x, 1, 1, 2)
  >>> print(output)
- [3. 3. 4. 2. 5. 0.]
+ [[ 2 3]
+ [ 5 6]
+ [ 8 9]]
  """
- return unsorted_segment_sum_(input_x, segment_ids, num_segments)
+ validator.check_value_type("input", input, Tensor, "narrow")
+ return slice_ext_op(input, dim, start, start+length, 1)


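The rewritten `narrow` and the new `narrow_ext` are both equivalent to a plain slice along one dimension, as the docstring examples above suggest:

    import mindspore
    from mindspore import Tensor, ops

    x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mindspore.int32)
    a = ops.narrow(x, 1, 1, 2)  # dim 1, start 1, length 2
    b = x[:, 1:3]               # the same elements via basic slicing
    print(a.shape, b.shape)     # (3, 2) (3, 2)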
  def topk(input, k, dim=None, largest=True, sorted=True):
@@ -6520,7 +5658,7 @@ def topk(input, k, dim=None, largest=True, sorted=True):

  Args:
  input (Tensor): Input to be computed, data type must be float16, float32 or int32.
- k (int): The number of top or bottom elements to be computed along the last dimension, constant input is needed.
+ k (int): The number of top or bottom elements to be computed along the last dimension.
  dim (int, optional): The dimension to sort along. Default: ``None`` .
  largest (bool, optional): If largest is ``False`` then the k smallest elements are returned.
  Default: ``True`` .
@@ -6650,8 +5788,8 @@ def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
  A Tensor, with same type as `input` . And its shape is as described above.

  Raises:
- TypeError: If `kernel_size`, `dilation`, `padding`, `stride` data type is not int, tuple or list.
- ValueError: If `kernel_size`, `dilation`, `stride` value is not
+ TypeError: If `output_size`, `kernel_size`, `stride`, `dilation`, `padding` data type is not int, tuple or list.
+ ValueError: If `output_size`, `kernel_size`, `dilation`, `stride` value is not
  greater than zero or elements number more than `2`.
  ValueError: If `padding` value is less than zero or elements number more than `2`.
  ValueError: If `input.shape[1] != kernel_size[0] * kernel_size[1]`
@@ -6727,9 +5865,7 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
  .. warning::
  - The output is a 3-dimensional Tensor whose shape is
  :math:`(N, C \times \prod(\text{kernel_size}), L)` .
-
- .. warning::
- This is an experimental API that is subject to change or deletion.
+ - This is an experimental API that is subject to change or deletion.

  Args:
  input (Tensor): 4-D Tensor, supported dtypes: float16, float32, float64, complex64 and complex128.
@@ -6738,10 +5874,11 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
  dilation (Union[int, tuple[int], list[int]], optional): The dilation of the window, should be two int
  for height and width. If type is int, it means that height equals width. Default: ``1`` .
  padding (Union[int, tuple[int], list[int]], optional): The pad of the window, that must be
- a tuple/list of one or two `int` for height and width.
- If one int, pad_height = pad_width.
- If two int, pad_height = padding[0], pad_width = padding[1].
- Default: ``0`` .
+ a tuple/list of one or two `int` for height and width. Default: ``0`` .
+
+ - If one int, pad_height = pad_width.
+ - If two int, pad_height = padding[0], pad_width = padding[1].
+
  stride (Union[int, tuple[int], list[int]], optional): The stride of the window, should be two int
  for height and width. If type is int, it means that height equals width. Default: ``1`` .

@@ -6788,98 +5925,6 @@ def _check_diagonal_axes(dim1, dim2, x_ndim):
  return axes


- def diagonal(input, offset=0, dim1=0, dim2=1):
- """
- Returns specified diagonals of `input`.
-
- If `input` is 2-D, returns the diagonal of `input` with the given offset.
- If `input` has more than two
- dimensions, then the axes specified by `dim1` and `dim2` are used to determine
- the 2-D sub-array whose diagonal is returned. In this case, remove the `dim1` and `dim2` dimensions of `input`
- and insert the last dimension of `input` by the diagonal elements determined by `dim1` and `dim2`.
-
- Args:
- input (Tensor): Array from which the diagonals are taken.
- offset (int, optional): Offset of the diagonal from the main diagonal.
- Can be positive or negative. Default: ``0`` .
- dim1 (int, optional): Axis to be used as the first axis of the 2-D
- sub-arrays from which the diagonals should be taken. Defaults to
- first axis (0). Default: ``0`` .
- dim2 (int, optional): Axis to be used as the second axis of the 2-D
- sub-arrays from which the diagonals should be taken. Defaults to
- second axis (1). Default: ``1`` .
-
- Returns:
- Tensor, if `input` is 2-D, then `input` 1-D array containing the diagonal. If
- ``input.ndim > 2``, then the dimensions specified by `dim1` and `dim2` are removed,
- and a new axis inserted at the end corresponding to the diagonal.
-
- Raises:
- TypeError: if `dim1` or `dim2` are not an int.
- ValueError: if the input tensor has less than two dimensions.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> from mindspore import Tensor, ops
- >>> from mindspore import dtype as mstype
- >>> x = Tensor([[0, 1], [2, 3]], mstype.float32)
- >>> output = ops.diagonal(x)
- >>> print(output)
- [0 3]
- """
- x_ndim = input.ndim
- if x_ndim < 2:
- raise ValueError(f"For 'ops.diagonal', the original tensor requires at least two dimensions, but got {x_ndim}")
- _check_attr_dtype("dim1", dim1, [int], "diagonal")
- _check_attr_dtype("dim2", dim2, [int], "diagonal")
- dtype = input.dtype
-
- axes = _check_diagonal_axes(dim1, dim2, x_ndim)
- perm = ()
- for i in ms_arrange(x_ndim):
- if i not in axes:
- perm += (i,)
- perm += axes
- input = input.transpose(perm)
-
- x_shape = input.shape
- n, m = x_shape[-2:]
-
- e = ops.eye(n, m, dtype)
- if offset >= m or offset <= -n:
- zero_shape = x_shape[:-2] + (0,)
- return ops.zeros(zero_shape, dtype)
- if offset != 0:
- e = e.astype(mstype.float32)
- if offset > 0:
- e_left = ops.fill(mstype.float32, (n, offset), 0)
- e_right = e[..., 0:m - offset:1]
- e = ops.cat((e_left, e_right), 1).astype(dtype)
- elif offset < 0:
- e_upper = ops.fill(mstype.float32, (-offset, m), 0)
- e_lower = e[0:n + offset:1, ...]
- e = ops.cat((e_upper, e_lower), 0).astype(dtype)
- e = ops.broadcast_to(e, x_shape)
-
- prod_val = ops.mul(input, e)
- res = ops.ReduceSum()(prod_val.astype(mstype.float32), -1)
-
- begin = ()
- for _ in ms_arrange(x_ndim - 2):
- begin += (0,)
- last_dim_begin = builtins.max(0, -offset)
- begin += (last_dim_begin,)
- res_size = res.shape[:-1]
- last_dim_end = builtins.min(x_shape[-2], builtins.max(0, x_shape[-1] - offset)) - last_dim_begin
- if last_dim_end <= 0:
- return Tensor([])
- res_size += (last_dim_end,)
- res = ops.slice(res, begin, res_size)
- return res.astype(dtype)
-
-
  def _check_is_tensor(param_name, input, cls_name):
  """Returns True if input is Tensor."""
  if not isinstance(input, Tensor):
@@ -6899,6 +5944,9 @@ def diagonal_scatter(input, src, offset=0, dim1=0, dim2=1):
  the elements in these two dimensions will be treated as elements of a matrix,
  and `src` is embedded on the diagonal of the matrix.

+ Note:
+ Currently, ``inf`` value of elements in `input` or `src` is not supported.
+
  Args:
  input (Tensor): Input Tensor, whose dimension is larger than 1.
  src (Tensor): The source Tensor to embed.
@@ -6935,16 +5983,39 @@ def diagonal_scatter(input, src, offset=0, dim1=0, dim2=1):
  """
  _check_is_tensor("input", input, "diagonal_scatter")
  _check_is_tensor("src", src, "diagonal_scatter")
- _check_is_int(offset, "offset", "diagonal_scatter")
- _check_is_int(dim1, "dim1", "diagonal_scatter")
- _check_is_int(dim2, "dim2", "diagonal_scatter")
  input_diag = input.diagonal(offset, dim1, dim2)
  _check_diagonal_scatter_shape(input_diag.shape, src.shape)
- embed = ones_like(src)
- embed = ops.diag_embed(embed, offset, dim1, dim2)
- embed = input * embed
+ input_shape = input.shape
+ zeros_shape = list(input_shape)
+ m, n = input_shape[dim1], input_shape[dim2]
+ if m == n:
+ src = src - input_diag
+ src = ops.diag_embed(src, offset, dim1, dim2)
+ return input + src
+ if m > n:
+ axis = dim2
+ zeros_shape[axis] = m - n
+ else:
+ axis = dim1
+ zeros_shape[axis] = n - m
+ zeros_tensor = zeros(zeros_shape, dtype=input.dtype)
+ input = concat((input, zeros_tensor), axis)
+ input_diag = input.diagonal(offset, dim1, dim2)
+ if src.shape != input_diag.shape:
+ zeros_shape = []
+ for i, ax in enumerate(src.shape):
+ if ax == input_diag.shape[i]:
+ zeros_shape.append(ax)
+ else:
+ axis = i
+ zeros_shape.append(input_diag.shape[i] - ax)
+ zeros_tensor = zeros(zeros_shape, dtype=src.dtype)
+ src = concat((src, zeros_tensor), axis)
+ src = src - input_diag
  src = ops.diag_embed(src, offset, dim1, dim2)
- return input + src - embed
+ input = input + src
+ begin = (0,) * input.ndim
+ return slice(input, begin, input_shape)


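The new `diagonal_scatter` path above zero-pads the shorter of the (dim1, dim2) dimensions until the sub-matrix is square, embeds `src` via diag_embed, and then slices back to the original shape. A usage sketch of that non-square path; the input values here are illustrative, not from the source:

    import mindspore
    from mindspore import Tensor, ops

    x = ops.zeros((4, 3), mindspore.float32)       # non-square case: m=4 > n=3
    src = Tensor([1., 2., 3.], mindspore.float32)  # diagonal of a (4, 3) matrix has length 3
    out = ops.diagonal_scatter(x, src, 0)
    print(out)
    # [[1. 0. 0.]
    #  [0. 2. 0.]
    #  [0. 0. 3.]
    #  [0. 0. 0.]]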
  def lstsq(input, A):
@@ -7003,8 +6074,7 @@ def lstsq(input, A):
  [-6.5000005 -4.500001 ]
  [-3.500002 -2.5000017]]
  """
- lstsq_op = _get_cache_prim(Lstsq)()
- return lstsq_op(input, A)
+ return lstsq_(input, A)


  def mvlgamma(input, p):
@@ -7052,6 +6122,64 @@ def mvlgamma(input, p):
  return mvlgamma_op(input)


+ def nonzero(input, as_tuple=False):
+ r"""
+ Return the positions of all non-zero values.
+
+ Args:
+ input (Tensor): The input Tensor, its rank should be greater than or equal to 1.
+ as_tuple (bool, optional): Whether the output is tuple.
+ If ``False`` , return Tensor. Default: ``False`` .
+ If ``True`` , return Tuple of Tensor, only supported on ``Ascend`` .
+
+ Returns:
+ - If `as_tuple` is ``False``, return the Tensor, a 2-D Tensor whose data type is int64,
+ containing the positions of all non-zero values of the input.
+ - If `as_tuple` is ``True``, return the Tuple of Tensor and data type is int64.
+ The Tuple length is the dimension of the input tensor,
+ and each element is the 1D tensor of the subscript of all non-zero elements of
+ the input tensor in that dimension.
+
+ Raises:
+ TypeError: If `input` is not Tensor.
+ TypeError: If `as_tuple` is not bool.
+ ValueError: If dim of `input` is 0.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
+ >>> output = ops.nonzero(x)
+ >>> print(output)
+ [[0 0 0]
+ [0 1 0]]
+ >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
+ >>> output = ops.nonzero(x, False)
+ >>> print(output)
+ [[0]
+ [2]
+ [4]]
+ >>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
+ >>> output = ops.nonzero(x, True)
+ >>> print(output)
+ (Tensor(shape=[2], dtype=Int64, value=[0, 0]),
+ Tensor(shape=[2], dtype=Int64, value=[0, 1]),
+ Tensor(shape=[2], dtype=Int64, value=[0, 0]))
+ >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
+ >>> output = ops.nonzero(x, True)
+ >>> print(output)
+ (Tensor(shape=[3], dtype=Int64, value=[0, 2, 4]), )
+ """
+ if as_tuple:
+ return non_zero_ext_(input)
+ return non_zero_(input)
+
+
  def argwhere(input):
  """
  Return a Tensor of the positions of all non-zero values.
@@ -7079,7 +6207,7 @@ def argwhere(input):
  [[0 0 0]
  [0 1 0]]
  """
- return nonzero_(input)
+ return nonzero(input)


  def column_stack(tensors):
@@ -7116,14 +6244,13 @@ def column_stack(tensors):
  raise TypeError(f"For column_stack, the input must be list or tuple of tensors, but got {type(tensors)}.")

  trans_x = ()
- _expand_dims = _get_cache_prim(P.ExpandDims)()
  for tensor in tensors:
  if not isinstance(tensor, Tensor):
  raise TypeError(f"For column_stack, the input element must be tensor, but got {type(tensor)}.")
  if tensor.ndim < 1:
- tensor = _expand_dims(tensor, 0)
+ tensor = expand_dims(tensor, 0)
  if tensor.ndim == 1:
- tensor = _expand_dims(tensor, 1)
+ tensor = expand_dims(tensor, 1)
  trans_x += (tensor,)
  if not trans_x:
  raise ValueError(f"For column_stack, the input must have at least 1 tensor, but got 0.")
@@ -7169,7 +6296,7 @@ def hstack(tensors):
  if not isinstance(tensor, Tensor):
  raise TypeError(f"For hstack, the input element must be tensor, but got {type(tensor)}.")
  if tensor.ndim < 1:
- tensor = expand_dims_(tensor, 0)
+ tensor = expand_dims(tensor, 0)
  tuple_of_tensor += (tensor,)
  if not tuple_of_tensor:
  raise ValueError("For hstack, the input must have at least 1 tensor, but got 0.")
@@ -7269,7 +6396,7 @@ def movedim(x, source, destination):
  f"For `source` and `destination` arguments, the number of elements must be the same, but got 'source':"
  f" {len(source)} and 'destination': {len(destination)}.")
  perm = _get_moved_perm(ndim, source, destination)
- return _get_cache_prim(P.Transpose)()(x, perm)
+ return transpose_(x, perm)


  def moveaxis(x, source, destination):
@@ -7320,7 +6447,7 @@ def swapaxes(input, axis0, axis1):

  Examples:
  >>> import numpy as np
- >>> import mindspore.ops as ops
+ >>> from mindspore import ops
  >>> from mindspore import Tensor
  >>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
  >>> output = ops.swapaxes(input, 0, 2)
@@ -7344,7 +6471,7 @@ def swapaxes(input, axis0, axis1):
  new_perm = perm[0:axis0] + perm[axis1:axis1 + 1] + \
  perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1]

- return _get_cache_prim(P.Transpose)()(input, new_perm)
+ return transpose_(input, new_perm)


  def swapdims(input, dim0, dim1):
@@ -7370,7 +6497,7 @@ def swapdims(input, dim0, dim1):

  Examples:
  >>> import numpy as np
- >>> import mindspore.ops as ops
+ >>> from mindspore import ops
  >>> from mindspore import Tensor
  >>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
  >>> output = ops.swapdims(input, 0, 2)
@@ -7452,9 +6579,47 @@ def repeat_interleave(input, repeats, axis=None):
  return output


+ def repeat_interleave_ext(input, repeats, dim=None, output_size=None):
+ r"""
+ Repeat elements of a tensor along an axis, like `numpy.repeat`.
+
+ Args:
+ input (Tensor): The tensor to repeat values for. Must be of type: float16,
+ float32, int8, uint8, int16, int32, or int64.
+ repeats (Union[int, tuple, list, Tensor]): The number of times to repeat, must be positive.
+ dim (int, optional): The dim along which to repeat. Default: ``None``. If `dim` is None,
+ the input Tensor will be flattened and the output will also be flattened.
+ output_size (int, optional): Total output size for the given axis (e.g. sum of repeats),
+ Default: ``None``.
+
+ Returns:
+ One tensor with values repeated along the specified dim. If input has shape
+ :math:`(s1, s2, ..., sn)` and dim is i, the output will have shape :math:`(s1, s2, ...,
+ si * repeats, ..., sn)`. The output type will be the same as the type of `input`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
+ >>> output = ops.function.array_func.repeat_interleave_ext(input, repeats=2, dim=0)
+ >>> print(output)
+ [[0 1 2]
+ [0 1 2]
+ [3 4 5]
+ [3 4 5]]
+ """
+ if isinstance(repeats, int):
+ return repeat_interleave_int_(input, repeats, dim, output_size)
+ return repeat_interleave_tensor_(input, repeats, dim, output_size)
+
+
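A sketch of the second branch above, where `repeats` is a per-element Tensor rather than an int, following the numpy.repeat semantics the docstring references; the int64 repeats dtype is an assumption here:

    import mindspore
    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
    repeats = Tensor(np.array([1, 2]), mindspore.int64)  # row 0 once, row 1 twice
    out = ops.function.array_func.repeat_interleave_ext(x, repeats, dim=0)
    print(out)
    # [[0 1 2]
    #  [3 4 5]
    #  [3 4 5]]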
  def repeat_elements(x, rep, axis=0):
  """
- Repeat elements of a tensor along an axis, like `np.repeat` .
+ Repeat elements of a tensor along an axis, like `numpy.repeat` .

  Args:
  x (Tensor): The tensor to repeat values for. Must be of type: float16,
@@ -7492,34 +6657,19 @@ def repeat_elements(x, rep, axis=0):
  const_utils.check_type_valid(ops.dtype(x), mstype.number_type, 'input x')
  rep = _check_positive_int(rep, "rep", "repeat_elements")
  axis = _check_is_int(axis, "axis", "repeat_elements")
- shape_op = P.Shape()
- rank_op = P.Rank()
- tile_op = P.Tile()
- expand_dims_op = P.ExpandDims()
- reshape_op = P.Reshape()
- x_rank = rank_op(x)
+ x_rank = rank_(x)
  axis = _check_axis_range(axis, x_rank, "axis", "repeat_elements")
+ axis = axis + x.ndim if axis < 0 else axis
  expand_axis = axis + 1
- x_expand = expand_dims_op(x, expand_axis)
+ x_expand = expand_dims(x, expand_axis)
  rep_dims = _cal_repeat_dims(x_rank, rep, expand_axis)
- x_expand = tile_op(x_expand, rep_dims)
- x_shape = shape_op(x)
+ x_expand = tile_(x_expand, rep_dims)
+ x_shape = shape_(x)
  x_reshape = _cal_reshape(x_shape, rep, axis)
- x_rep = reshape_op(x_expand, x_reshape)
+ x_rep = reshape_(x_expand, x_reshape)
  return x_rep


- @_primexpr
- def _check_sequence_mask_input_len(input_shape, prim_name=None):
- msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
- if not input_shape:
- raise ValueError(f"{msg_prefix} input_shape must be greater than 0, but got {input_shape}.")
- # broadcast only supports 7d shape
- shape_size = len(input_shape)
- if shape_size >= 7:
- raise ValueError(f"{msg_prefix} dimension of input_shape must be less than 7, but got {shape_size}d.")
-
-
  def sequence_mask(lengths, maxlen=None):
  """
  Returns a mask tensor representing the first N positions of each cell.
@@ -7572,29 +6722,19 @@ def sequence_mask(lengths, maxlen=None):
  [[ True True False False ]
  [ True True True True ]]]
  """
-
- argmax_op = P.ArgMaxWithValue()
- reshape_op = P.Reshape()
- range_op = P.Range()
- expand_op = P.ExpandDims()
- cast_op = P.Cast()
- to_tensor_op = P.ScalarToTensor()
- shape_op = P.Shape()
-
  const_utils.check_type_valid(ops.dtype(lengths), [mstype.int64, mstype.int32], 'lengths')
- _check_sequence_mask_input_len(shape_op(lengths), "sequence_mask")

  if maxlen is None:
- flatten_data = reshape_op(lengths, (-1,))
- flatten_data = cast_op(flatten_data, mstype.float32)
- _, value = argmax_op(flatten_data)
- maxlen = cast_op(value, mstype.int32)
+ flatten_data = reshape_(lengths, (-1,))
+ flatten_data = cast_(flatten_data, mstype.float32)
+ _, value = arg_max_with_value_(flatten_data)
+ maxlen = cast_(value, mstype.int32)
  else:
  maxlen = _check_positive_int(maxlen, "maxlen", "sequence_mask")
- maxlen = to_tensor_op(maxlen, mstype.int32)
+ maxlen = scalar_to_tensor_(maxlen, mstype.int32)

- range_vector = range_op(to_tensor_op(0, mstype.int32), maxlen, to_tensor_op(1, mstype.int32))
- mask = expand_op(lengths, -1)
+ range_vector = range_(scalar_to_tensor_(0, mstype.int32), maxlen, scalar_to_tensor_(1, mstype.int32))
+ mask = expand_dims(lengths, -1)
  result = range_vector < mask
  return result

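The refactor above keeps sequence_mask's core computation: build a [0, maxlen) range vector, unsqueeze `lengths`, and compare with broadcasting. The same result in a few lines of NumPy, for reference:

    import numpy as np

    lengths = np.array([1, 3, 2])
    maxlen = 4
    range_vector = np.arange(maxlen)        # shape (4,)
    mask = range_vector < lengths[:, None]  # broadcast compare, shape (3, 4)
    print(mask)
    # [[ True False False False]
    #  [ True  True  True False]
    #  [ True  True False False]]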
@@ -7607,35 +6747,6 @@ def top_k(input_x, k, sorted=True):
  return top_k_(input_x, k)


- def deepcopy(input_x):
- """
- Returns a deepcopy of input tensor.
-
- Args:
- input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-
- Returns:
- Tensor, a deepcopy of `input_x`.
-
- Raises:
- TypeError: If `input_x` is not a Tensor.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> from mindspore import Tensor, ops
- >>> input = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
- >>> output = ops.deepcopy(input)
- >>> print(output)
- [[0 1]
- [2 1]]
- """
- _deepcopy = _get_cache_prim(P.Identity)()
- return _deepcopy(input_x)
-
-
  __all__ = [
  'unique',
  'unique_with_pad',
@@ -7662,8 +6773,8 @@ __all__ = [
  'full_like',
  'dyn_shape',
  'rank',
- 'range',
  'arange',
+ 'range',
  'reshape',
  'reshape_',
  'flatten',
@@ -7772,6 +6883,7 @@ __all__ = [
  'aminmax',
  'sort',
  'top_k',
- 'deepcopy'
+ 'deepcopy',
+ 'flip',
  ]
  __all__.sort()