mindspore-2.2.14-cp39-cp39-win_amd64.whl → mindspore-2.3.0-cp39-cp39-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mindspore might be problematic.

Files changed (1166)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +6 -5
  5. mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +76 -18
  9. mindspore/_extends/builtin_operations.py +2 -1
  10. mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
  11. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
  12. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
  13. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
  14. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  15. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
  16. mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
  17. mindspore/_extends/parse/__init__.py +18 -14
  18. mindspore/_extends/parse/compile_config.py +258 -0
  19. mindspore/_extends/parse/namespace.py +2 -2
  20. mindspore/_extends/parse/parser.py +174 -62
  21. mindspore/_extends/parse/resources.py +45 -14
  22. mindspore/_extends/parse/standard_method.py +142 -240
  23. mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _extends/pijit/__init__.py} +6 -16
  24. mindspore/_extends/pijit/pijit_func_white_list.py +343 -0
  25. mindspore/_extends/remote/kernel_build_server.py +2 -0
  26. mindspore/_profiler.py +30 -0
  27. mindspore/amp.py +51 -24
  28. mindspore/atlprov.dll +0 -0
  29. mindspore/avcodec-59.dll +0 -0
  30. mindspore/avdevice-59.dll +0 -0
  31. mindspore/avfilter-8.dll +0 -0
  32. mindspore/avformat-59.dll +0 -0
  33. mindspore/avutil-57.dll +0 -0
  34. mindspore/boost/adasum.py +1 -1
  35. mindspore/boost/base.py +1 -1
  36. mindspore/boost/boost_cell_wrapper.py +2 -2
  37. mindspore/boost/grad_freeze.py +2 -2
  38. mindspore/boost/group_loss_scale_manager.py +1 -1
  39. mindspore/boost/less_batch_normalization.py +9 -6
  40. mindspore/c1.dll +0 -0
  41. mindspore/c1xx.dll +0 -0
  42. mindspore/c2.dll +0 -0
  43. mindspore/common/__init__.py +15 -4
  44. mindspore/common/_jit_fallback_utils.py +2 -3
  45. mindspore/common/_register_for_adapter.py +7 -0
  46. mindspore/common/_register_for_recompute.py +48 -0
  47. mindspore/common/_register_for_tensor.py +8 -9
  48. mindspore/common/_stub_tensor.py +7 -1
  49. mindspore/common/_utils.py +5 -17
  50. mindspore/common/api.py +411 -106
  51. mindspore/common/auto_dynamic_shape.py +27 -14
  52. mindspore/common/dtype.py +17 -10
  53. mindspore/common/dump.py +6 -8
  54. mindspore/common/file_system.py +48 -0
  55. mindspore/common/generator.py +260 -0
  56. mindspore/common/hook_handle.py +51 -4
  57. mindspore/common/initializer.py +1 -1
  58. mindspore/common/jit_config.py +34 -14
  59. mindspore/common/lazy_inline.py +72 -19
  60. mindspore/common/mindir_util.py +12 -2
  61. mindspore/common/mutable.py +79 -14
  62. mindspore/common/no_inline.py +54 -0
  63. mindspore/common/np_dtype.py +25 -0
  64. mindspore/common/parameter.py +30 -11
  65. mindspore/common/recompute.py +262 -0
  66. mindspore/common/seed.py +9 -9
  67. mindspore/common/sparse_tensor.py +272 -24
  68. mindspore/common/symbol.py +122 -0
  69. mindspore/common/tensor.py +468 -494
  70. mindspore/communication/__init__.py +6 -11
  71. mindspore/communication/_comm_helper.py +5 -0
  72. mindspore/communication/comm_func.py +1140 -0
  73. mindspore/communication/management.py +115 -102
  74. mindspore/config/op_info.config +22 -54
  75. mindspore/context.py +346 -63
  76. mindspore/dataset/__init__.py +5 -5
  77. mindspore/dataset/audio/__init__.py +6 -6
  78. mindspore/dataset/audio/transforms.py +711 -158
  79. mindspore/dataset/callback/ds_callback.py +2 -2
  80. mindspore/dataset/engine/cache_client.py +2 -2
  81. mindspore/dataset/engine/datasets.py +140 -83
  82. mindspore/dataset/engine/datasets_audio.py +14 -14
  83. mindspore/dataset/engine/datasets_standard_format.py +33 -3
  84. mindspore/dataset/engine/datasets_text.py +38 -38
  85. mindspore/dataset/engine/datasets_user_defined.py +78 -59
  86. mindspore/dataset/engine/datasets_vision.py +77 -73
  87. mindspore/dataset/engine/offload.py +5 -7
  88. mindspore/dataset/engine/queue.py +56 -38
  89. mindspore/dataset/engine/validators.py +11 -5
  90. mindspore/dataset/text/__init__.py +3 -3
  91. mindspore/dataset/text/transforms.py +408 -121
  92. mindspore/dataset/text/utils.py +9 -9
  93. mindspore/dataset/transforms/__init__.py +1 -1
  94. mindspore/dataset/transforms/transforms.py +261 -76
  95. mindspore/dataset/utils/browse_dataset.py +9 -9
  96. mindspore/dataset/vision/__init__.py +8 -8
  97. mindspore/dataset/vision/c_transforms.py +10 -10
  98. mindspore/dataset/vision/py_transforms_util.py +1 -1
  99. mindspore/dataset/vision/transforms.py +2844 -549
  100. mindspore/dataset/vision/utils.py +161 -10
  101. mindspore/dataset/vision/validators.py +14 -2
  102. mindspore/dnnl.dll +0 -0
  103. mindspore/dpcmi.dll +0 -0
  104. mindspore/experimental/optim/__init__.py +12 -2
  105. mindspore/experimental/optim/adadelta.py +161 -0
  106. mindspore/experimental/optim/adagrad.py +168 -0
  107. mindspore/experimental/optim/adam.py +35 -34
  108. mindspore/experimental/optim/adamax.py +170 -0
  109. mindspore/experimental/optim/adamw.py +40 -16
  110. mindspore/experimental/optim/asgd.py +153 -0
  111. mindspore/experimental/optim/lr_scheduler.py +66 -121
  112. mindspore/experimental/optim/nadam.py +157 -0
  113. mindspore/experimental/optim/optimizer.py +15 -8
  114. mindspore/experimental/optim/radam.py +194 -0
  115. mindspore/experimental/optim/rmsprop.py +154 -0
  116. mindspore/experimental/optim/rprop.py +164 -0
  117. mindspore/experimental/optim/sgd.py +28 -19
  118. mindspore/hal/__init__.py +40 -0
  119. mindspore/hal/_ascend.py +57 -0
  120. mindspore/hal/_base.py +57 -0
  121. mindspore/hal/_cpu.py +56 -0
  122. mindspore/hal/_gpu.py +57 -0
  123. mindspore/hal/device.py +356 -0
  124. mindspore/hal/event.py +179 -0
  125. mindspore/hal/memory.py +326 -0
  126. mindspore/hal/stream.py +339 -0
  127. mindspore/include/api/data_type.h +2 -2
  128. mindspore/include/api/dual_abi_helper.h +16 -3
  129. mindspore/include/api/model.h +4 -3
  130. mindspore/include/api/status.h +14 -0
  131. mindspore/include/c_api/model_c.h +173 -0
  132. mindspore/include/c_api/ms/base/types.h +1 -0
  133. mindspore/include/c_api/types_c.h +19 -0
  134. mindspore/include/dataset/execute.h +1 -3
  135. mindspore/include/dataset/vision.h +54 -2
  136. mindspore/jpeg62.dll +0 -0
  137. mindspore/log.py +2 -2
  138. mindspore/mindrecord/__init__.py +5 -1
  139. mindspore/mindrecord/config.py +809 -0
  140. mindspore/mindrecord/filereader.py +25 -0
  141. mindspore/mindrecord/filewriter.py +76 -58
  142. mindspore/mindrecord/mindpage.py +40 -6
  143. mindspore/mindrecord/shardutils.py +3 -2
  144. mindspore/mindrecord/shardwriter.py +7 -0
  145. mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
  146. mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
  147. mindspore/mindrecord/tools/csv_to_mr.py +4 -9
  148. mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
  149. mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
  150. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
  151. mindspore/mindspore_backend.dll +0 -0
  152. mindspore/mindspore_common.dll +0 -0
  153. mindspore/mindspore_core.dll +0 -0
  154. mindspore/mindspore_glog.dll +0 -0
  155. mindspore/mindspore_np_dtype.dll +0 -0
  156. mindspore/mindspore_shared_lib.dll +0 -0
  157. mindspore/mint/__init__.py +1137 -0
  158. mindspore/{rewrite/ast_transformers → mint/linalg}/__init__.py +9 -4
  159. mindspore/mint/nn/__init__.py +512 -0
  160. mindspore/mint/nn/functional.py +573 -0
  161. mindspore/mint/optim/__init__.py +24 -0
  162. mindspore/mint/optim/adamw.py +185 -0
  163. mindspore/msobj140.dll +0 -0
  164. mindspore/mspdb140.dll +0 -0
  165. mindspore/mspdbcore.dll +0 -0
  166. mindspore/mspdbst.dll +0 -0
  167. mindspore/mspft140.dll +0 -0
  168. mindspore/msvcdis140.dll +0 -0
  169. mindspore/msvcp140_1.dll +0 -0
  170. mindspore/msvcp140_2.dll +0 -0
  171. mindspore/msvcp140_atomic_wait.dll +0 -0
  172. mindspore/msvcp140_codecvt_ids.dll +0 -0
  173. mindspore/multiprocessing/__init__.py +72 -0
  174. mindspore/nn/__init__.py +1 -0
  175. mindspore/nn/cell.py +213 -257
  176. mindspore/nn/dynamic_lr.py +2 -2
  177. mindspore/nn/extend/__init__.py +29 -0
  178. mindspore/nn/extend/basic.py +140 -0
  179. mindspore/nn/extend/embedding.py +143 -0
  180. mindspore/{rewrite/ast_creator_register.py → nn/extend/layer/__init__.py} +9 -19
  181. mindspore/nn/extend/layer/normalization.py +109 -0
  182. mindspore/nn/extend/pooling.py +117 -0
  183. mindspore/nn/layer/activation.py +83 -93
  184. mindspore/nn/layer/basic.py +177 -82
  185. mindspore/nn/layer/channel_shuffle.py +3 -16
  186. mindspore/nn/layer/container.py +3 -3
  187. mindspore/nn/layer/conv.py +75 -66
  188. mindspore/nn/layer/embedding.py +101 -43
  189. mindspore/nn/layer/embedding_service.py +531 -0
  190. mindspore/nn/layer/embedding_service_layer.py +393 -0
  191. mindspore/nn/layer/image.py +4 -7
  192. mindspore/nn/layer/math.py +1 -1
  193. mindspore/nn/layer/normalization.py +52 -66
  194. mindspore/nn/layer/padding.py +30 -39
  195. mindspore/nn/layer/pooling.py +18 -9
  196. mindspore/nn/layer/rnn_cells.py +6 -16
  197. mindspore/nn/layer/rnns.py +6 -5
  198. mindspore/nn/layer/thor_layer.py +1 -2
  199. mindspore/nn/layer/timedistributed.py +1 -1
  200. mindspore/nn/layer/transformer.py +52 -50
  201. mindspore/nn/learning_rate_schedule.py +6 -5
  202. mindspore/nn/loss/loss.py +62 -83
  203. mindspore/nn/optim/ada_grad.py +4 -2
  204. mindspore/nn/optim/adadelta.py +3 -1
  205. mindspore/nn/optim/adafactor.py +1 -1
  206. mindspore/nn/optim/adam.py +102 -181
  207. mindspore/nn/optim/adamax.py +4 -2
  208. mindspore/nn/optim/adasum.py +3 -3
  209. mindspore/nn/optim/asgd.py +4 -2
  210. mindspore/nn/optim/ftrl.py +31 -61
  211. mindspore/nn/optim/lamb.py +5 -3
  212. mindspore/nn/optim/lars.py +2 -2
  213. mindspore/nn/optim/lazyadam.py +6 -4
  214. mindspore/nn/optim/momentum.py +13 -25
  215. mindspore/nn/optim/optimizer.py +6 -3
  216. mindspore/nn/optim/proximal_ada_grad.py +4 -2
  217. mindspore/nn/optim/rmsprop.py +9 -3
  218. mindspore/nn/optim/rprop.py +4 -2
  219. mindspore/nn/optim/sgd.py +5 -3
  220. mindspore/nn/optim/thor.py +2 -2
  221. mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
  222. mindspore/nn/probability/distribution/beta.py +2 -2
  223. mindspore/nn/probability/distribution/categorical.py +4 -6
  224. mindspore/nn/probability/distribution/cauchy.py +2 -2
  225. mindspore/nn/probability/distribution/exponential.py +2 -2
  226. mindspore/nn/probability/distribution/geometric.py +1 -1
  227. mindspore/nn/probability/distribution/gumbel.py +2 -2
  228. mindspore/nn/probability/distribution/logistic.py +1 -1
  229. mindspore/nn/probability/distribution/poisson.py +2 -2
  230. mindspore/nn/probability/distribution/uniform.py +2 -2
  231. mindspore/nn/reinforcement/_tensors_queue.py +13 -1
  232. mindspore/nn/wrap/__init__.py +2 -1
  233. mindspore/nn/wrap/cell_wrapper.py +58 -13
  234. mindspore/nn/wrap/grad_reducer.py +148 -8
  235. mindspore/nn/wrap/loss_scale.py +32 -9
  236. mindspore/numpy/__init__.py +2 -0
  237. mindspore/numpy/array_creations.py +2 -0
  238. mindspore/numpy/array_ops.py +6 -6
  239. mindspore/numpy/dtypes.py +3 -3
  240. mindspore/numpy/fft.py +431 -0
  241. mindspore/numpy/math_ops.py +62 -68
  242. mindspore/numpy/utils.py +3 -0
  243. mindspore/opencv_core452.dll +0 -0
  244. mindspore/opencv_imgcodecs452.dll +0 -0
  245. mindspore/opencv_imgproc452.dll +0 -0
  246. mindspore/ops/__init__.py +6 -5
  247. mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
  248. mindspore/ops/_grad_experimental/grad_comm_ops.py +89 -34
  249. mindspore/ops/_grad_experimental/grad_math_ops.py +68 -283
  250. mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
  251. mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
  252. mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
  253. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
  254. mindspore/ops/_op_impl/__init__.py +0 -1
  255. mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
  256. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
  257. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
  258. mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
  259. mindspore/ops/_op_impl/cpu/__init__.py +1 -3
  260. mindspore/ops/_op_impl/cpu/adam.py +2 -2
  261. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
  262. mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
  263. mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
  264. mindspore/ops/_vmap/vmap_array_ops.py +164 -101
  265. mindspore/ops/_vmap/vmap_base.py +8 -1
  266. mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
  267. mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
  268. mindspore/ops/_vmap/vmap_image_ops.py +70 -13
  269. mindspore/ops/_vmap/vmap_math_ops.py +130 -58
  270. mindspore/ops/_vmap/vmap_nn_ops.py +249 -115
  271. mindspore/ops/_vmap/vmap_other_ops.py +1 -1
  272. mindspore/ops/auto_generate/__init__.py +31 -0
  273. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +231 -0
  274. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +250 -0
  275. mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
  276. mindspore/ops/auto_generate/gen_extend_func.py +980 -0
  277. mindspore/ops/auto_generate/gen_ops_def.py +6443 -0
  278. mindspore/ops/auto_generate/gen_ops_prim.py +13167 -0
  279. mindspore/ops/auto_generate/pyboost_inner_prim.py +429 -0
  280. mindspore/ops/composite/__init__.py +5 -2
  281. mindspore/ops/composite/base.py +121 -23
  282. mindspore/ops/composite/math_ops.py +10 -49
  283. mindspore/ops/composite/multitype_ops/_compile_utils.py +191 -618
  284. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +25 -134
  285. mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
  286. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
  287. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
  288. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
  289. mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
  290. mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
  291. mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
  292. mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
  293. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
  294. mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
  295. mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
  296. mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
  297. mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
  298. mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
  299. mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
  300. mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
  301. mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
  302. mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
  303. mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
  304. mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
  305. mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
  306. mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
  307. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
  308. mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
  309. mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
  310. mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
  311. mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
  312. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
  313. mindspore/ops/deprecated.py +14 -3
  314. mindspore/ops/extend/__init__.py +53 -0
  315. mindspore/ops/extend/array_func.py +218 -0
  316. mindspore/ops/extend/math_func.py +76 -0
  317. mindspore/ops/extend/nn_func.py +308 -0
  318. mindspore/ops/function/__init__.py +31 -11
  319. mindspore/ops/function/array_func.py +846 -1735
  320. mindspore/ops/function/clip_func.py +19 -31
  321. mindspore/ops/function/debug_func.py +1 -4
  322. mindspore/ops/function/fft_func.py +31 -0
  323. mindspore/ops/function/grad/grad_func.py +27 -20
  324. mindspore/ops/function/image_func.py +27 -21
  325. mindspore/ops/function/linalg_func.py +35 -68
  326. mindspore/ops/function/math_func.py +913 -2791
  327. mindspore/ops/function/nn_func.py +1439 -885
  328. mindspore/ops/function/other_func.py +6 -7
  329. mindspore/ops/function/parameter_func.py +5 -93
  330. mindspore/ops/function/random_func.py +254 -108
  331. mindspore/ops/function/reshard_func.py +102 -0
  332. mindspore/ops/function/sparse_func.py +4 -4
  333. mindspore/ops/function/sparse_unary_func.py +9 -16
  334. mindspore/ops/function/spectral_func.py +1 -1
  335. mindspore/ops/function/vmap_func.py +14 -14
  336. mindspore/ops/functional.py +342 -343
  337. mindspore/ops/op_info_register.py +16 -43
  338. mindspore/ops/operations/__init__.py +32 -23
  339. mindspore/ops/operations/_grad_ops.py +21 -853
  340. mindspore/ops/operations/_infer_ops.py +19 -0
  341. mindspore/ops/operations/_inner_ops.py +107 -518
  342. mindspore/ops/operations/_rl_inner_ops.py +2 -2
  343. mindspore/ops/operations/_scalar_ops.py +5 -480
  344. mindspore/ops/operations/_sequence_ops.py +6 -36
  345. mindspore/ops/operations/_tensor_array.py +8 -8
  346. mindspore/ops/operations/array_ops.py +108 -2705
  347. mindspore/ops/operations/comm_ops.py +801 -118
  348. mindspore/ops/operations/custom_ops.py +61 -120
  349. mindspore/ops/operations/debug_ops.py +104 -35
  350. mindspore/ops/operations/image_ops.py +1 -217
  351. mindspore/ops/operations/inner_ops.py +5 -40
  352. mindspore/ops/operations/linalg_ops.py +1 -49
  353. mindspore/ops/operations/manually_defined/__init__.py +24 -0
  354. mindspore/ops/operations/manually_defined/_inner.py +61 -0
  355. mindspore/ops/operations/manually_defined/ops_def.py +2016 -0
  356. mindspore/ops/operations/math_ops.py +572 -4667
  357. mindspore/ops/operations/nn_ops.py +248 -2162
  358. mindspore/ops/operations/other_ops.py +53 -45
  359. mindspore/ops/operations/random_ops.py +4 -53
  360. mindspore/ops/operations/reshard_ops.py +53 -0
  361. mindspore/ops/operations/sparse_ops.py +4 -4
  362. mindspore/ops/primitive.py +204 -103
  363. mindspore/ops/silent_check.py +5 -5
  364. mindspore/ops_generate/__init__.py +27 -0
  365. mindspore/ops_generate/arg_dtype_cast.py +250 -0
  366. mindspore/ops_generate/arg_handler.py +197 -0
  367. mindspore/ops_generate/gen_aclnn_implement.py +263 -0
  368. mindspore/ops_generate/gen_ops.py +1084 -0
  369. mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
  370. mindspore/ops_generate/gen_pyboost_func.py +968 -0
  371. mindspore/ops_generate/gen_utils.py +209 -0
  372. mindspore/ops_generate/op_proto.py +138 -0
  373. mindspore/ops_generate/pyboost_utils.py +354 -0
  374. mindspore/ops_generate/template.py +239 -0
  375. mindspore/parallel/__init__.py +6 -4
  376. mindspore/parallel/_auto_parallel_context.py +73 -3
  377. mindspore/parallel/_cell_wrapper.py +16 -9
  378. mindspore/parallel/_cost_model_context.py +1 -1
  379. mindspore/parallel/_dp_allreduce_fusion.py +159 -159
  380. mindspore/parallel/_parallel_serialization.py +29 -13
  381. mindspore/parallel/_ps_context.py +1 -1
  382. mindspore/parallel/_recovery_context.py +1 -1
  383. mindspore/parallel/_tensor.py +18 -11
  384. mindspore/parallel/_transformer/__init__.py +1 -1
  385. mindspore/parallel/_transformer/layers.py +1 -1
  386. mindspore/parallel/_transformer/loss.py +1 -1
  387. mindspore/parallel/_transformer/moe.py +1 -1
  388. mindspore/parallel/_transformer/op_parallel_config.py +1 -1
  389. mindspore/parallel/_transformer/transformer.py +2 -2
  390. mindspore/parallel/_utils.py +161 -6
  391. mindspore/parallel/algo_parameter_config.py +6 -8
  392. mindspore/parallel/checkpoint_transform.py +191 -32
  393. mindspore/parallel/cluster/__init__.py +15 -0
  394. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  395. mindspore/parallel/cluster/process_entity/_api.py +344 -0
  396. mindspore/parallel/cluster/process_entity/_utils.py +126 -0
  397. mindspore/parallel/cluster/run.py +136 -0
  398. mindspore/parallel/mpi/__init__.py +1 -1
  399. mindspore/parallel/mpi/_mpi_config.py +1 -1
  400. mindspore/parallel/parameter_broadcast.py +152 -0
  401. mindspore/parallel/shard.py +128 -17
  402. mindspore/pgodb140.dll +0 -0
  403. mindspore/pgort140.dll +0 -0
  404. mindspore/profiler/__init__.py +3 -2
  405. mindspore/profiler/common/process_pool.py +41 -0
  406. mindspore/profiler/common/singleton.py +28 -0
  407. mindspore/profiler/common/util.py +125 -0
  408. mindspore/profiler/envprofiling.py +2 -2
  409. mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
  410. mindspore/profiler/parser/ascend_analysis/constant.py +53 -0
  411. mindspore/profiler/parser/ascend_analysis/file_manager.py +159 -0
  412. mindspore/profiler/parser/ascend_analysis/function_event.py +161 -0
  413. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +131 -0
  414. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +85 -0
  415. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +57 -0
  416. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +116 -0
  417. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  418. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +68 -0
  419. mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
  420. mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
  421. mindspore/profiler/parser/ascend_flops_generator.py +20 -4
  422. mindspore/profiler/parser/ascend_hccl_generator.py +29 -278
  423. mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
  424. mindspore/profiler/parser/ascend_memory_generator.py +185 -0
  425. mindspore/profiler/parser/ascend_msprof_exporter.py +147 -146
  426. mindspore/profiler/parser/ascend_msprof_generator.py +73 -283
  427. mindspore/profiler/parser/ascend_op_generator.py +92 -42
  428. mindspore/profiler/parser/ascend_timeline_generator.py +296 -133
  429. mindspore/profiler/parser/base_timeline_generator.py +6 -0
  430. mindspore/profiler/parser/framework_parser.py +3 -2
  431. mindspore/profiler/parser/integrator.py +3 -1
  432. mindspore/profiler/parser/minddata_parser.py +72 -3
  433. mindspore/profiler/parser/msadvisor_analyzer.py +1 -1
  434. mindspore/profiler/parser/msadvisor_parser.py +1 -1
  435. mindspore/profiler/parser/profiler_info.py +16 -1
  436. mindspore/profiler/profiling.py +445 -190
  437. mindspore/rewrite/__init__.py +2 -13
  438. mindspore/rewrite/api/node.py +122 -36
  439. mindspore/rewrite/api/pattern_engine.py +2 -3
  440. mindspore/rewrite/api/scoped_value.py +16 -15
  441. mindspore/rewrite/api/symbol_tree.py +45 -29
  442. mindspore/rewrite/ast_helpers/__init__.py +3 -6
  443. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  444. mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
  445. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  446. mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
  447. mindspore/rewrite/common/__init__.py +1 -2
  448. mindspore/rewrite/common/config.py +24 -0
  449. mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
  450. mindspore/rewrite/{namer.py → common/namer.py} +63 -18
  451. mindspore/rewrite/common/namespace.py +118 -0
  452. mindspore/rewrite/node/__init__.py +5 -5
  453. mindspore/rewrite/node/call_function.py +23 -7
  454. mindspore/rewrite/node/cell_container.py +7 -3
  455. mindspore/rewrite/node/control_flow.py +53 -28
  456. mindspore/rewrite/node/node.py +212 -196
  457. mindspore/rewrite/node/node_manager.py +51 -22
  458. mindspore/rewrite/node/node_topological_manager.py +3 -23
  459. mindspore/rewrite/parsers/__init__.py +12 -0
  460. mindspore/rewrite/parsers/arguments_parser.py +8 -9
  461. mindspore/rewrite/parsers/assign_parser.py +637 -413
  462. mindspore/rewrite/parsers/attribute_parser.py +3 -4
  463. mindspore/rewrite/parsers/class_def_parser.py +115 -148
  464. mindspore/rewrite/parsers/constant_parser.py +5 -5
  465. mindspore/rewrite/parsers/container_parser.py +4 -6
  466. mindspore/rewrite/parsers/expr_parser.py +55 -0
  467. mindspore/rewrite/parsers/for_parser.py +31 -98
  468. mindspore/rewrite/parsers/function_def_parser.py +13 -5
  469. mindspore/rewrite/parsers/if_parser.py +28 -10
  470. mindspore/rewrite/parsers/module_parser.py +8 -182
  471. mindspore/rewrite/parsers/parser.py +1 -5
  472. mindspore/rewrite/parsers/parser_register.py +1 -1
  473. mindspore/rewrite/parsers/return_parser.py +5 -10
  474. mindspore/rewrite/parsers/while_parser.py +59 -0
  475. mindspore/rewrite/sparsify/utils.py +1 -1
  476. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  477. mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
  478. mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
  479. mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
  480. mindspore/run_check/_check_version.py +6 -14
  481. mindspore/run_check/run_check.py +1 -1
  482. mindspore/safeguard/rewrite_obfuscation.py +9 -19
  483. mindspore/swresample-4.dll +0 -0
  484. mindspore/swscale-6.dll +0 -0
  485. mindspore/tbbmalloc.dll +0 -0
  486. mindspore/tinyxml2.dll +0 -0
  487. mindspore/train/__init__.py +6 -5
  488. mindspore/train/_utils.py +178 -4
  489. mindspore/train/amp.py +167 -245
  490. mindspore/train/anf_ir_pb2.py +14 -2
  491. mindspore/train/callback/__init__.py +5 -2
  492. mindspore/train/callback/_backup_and_restore.py +5 -5
  493. mindspore/train/callback/_callback.py +4 -4
  494. mindspore/train/callback/_checkpoint.py +143 -29
  495. mindspore/train/callback/_cluster_monitor.py +201 -0
  496. mindspore/train/callback/_early_stop.py +2 -2
  497. mindspore/train/callback/_flops_collector.py +238 -0
  498. mindspore/train/callback/_landscape.py +15 -9
  499. mindspore/train/callback/_loss_monitor.py +2 -2
  500. mindspore/train/callback/_mindio_ttp.py +443 -0
  501. mindspore/train/callback/_on_request_exit.py +2 -2
  502. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  503. mindspore/train/callback/_summary_collector.py +7 -7
  504. mindspore/train/callback/_time_monitor.py +3 -3
  505. mindspore/train/data_sink.py +6 -5
  506. mindspore/train/dataset_helper.py +60 -21
  507. mindspore/train/loss_scale_manager.py +2 -2
  508. mindspore/train/metrics/accuracy.py +7 -7
  509. mindspore/train/metrics/confusion_matrix.py +8 -6
  510. mindspore/train/metrics/cosine_similarity.py +6 -4
  511. mindspore/train/metrics/error.py +2 -2
  512. mindspore/train/metrics/metric.py +3 -3
  513. mindspore/train/metrics/perplexity.py +2 -1
  514. mindspore/train/metrics/topk.py +2 -2
  515. mindspore/train/mind_ir_pb2.py +89 -15
  516. mindspore/train/model.py +290 -60
  517. mindspore/train/serialization.py +495 -220
  518. mindspore/train/summary/_summary_adapter.py +1 -1
  519. mindspore/train/summary/summary_record.py +51 -28
  520. mindspore/train/train_thor/convert_utils.py +3 -3
  521. mindspore/turbojpeg.dll +0 -0
  522. mindspore/vcmeta.dll +0 -0
  523. mindspore/vcruntime140.dll +0 -0
  524. mindspore/vcruntime140_1.dll +0 -0
  525. mindspore/version.py +1 -1
  526. {mindspore-2.2.14.dist-info → mindspore-2.3.0.dist-info}/METADATA +3 -3
  527. mindspore-2.3.0.dist-info/RECORD +1400 -0
  528. {mindspore-2.2.14.dist-info → mindspore-2.3.0.dist-info}/entry_points.txt +1 -0
  529. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
  530. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
  531. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
  532. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
  533. mindspore/gen_ops.py +0 -273
  534. mindspore/nn/layer/flash_attention.py +0 -189
  535. mindspore/ops/_op_impl/cpu/concat.py +0 -39
  536. mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
  537. mindspore/ops/_op_impl/tbe/__init__.py +0 -47
  538. mindspore/ops/_op_impl/tbe/abs.py +0 -38
  539. mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
  540. mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
  541. mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
  542. mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
  543. mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
  544. mindspore/ops/_op_impl/tbe/acos.py +0 -37
  545. mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
  546. mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
  547. mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
  548. mindspore/ops/_op_impl/tbe/acosh.py +0 -37
  549. mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
  550. mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
  551. mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
  552. mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
  553. mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
  554. mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
  555. mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
  556. mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
  557. mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
  558. mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
  559. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
  560. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
  561. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
  562. mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
  563. mindspore/ops/_op_impl/tbe/add.py +0 -42
  564. mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
  565. mindspore/ops/_op_impl/tbe/add_n.py +0 -39
  566. mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
  567. mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
  568. mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
  569. mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
  570. mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
  571. mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
  572. mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
  573. mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
  574. mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
  575. mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
  576. mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
  577. mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
  578. mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
  579. mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
  580. mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
  581. mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
  582. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
  583. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
  584. mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
  585. mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
  586. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
  587. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
  588. mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
  589. mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
  590. mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
  591. mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
  592. mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
  593. mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
  594. mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
  595. mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
  596. mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
  597. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
  598. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
  599. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
  600. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
  601. mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
  602. mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
  603. mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
  604. mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
  605. mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
  606. mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
  607. mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
  608. mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
  609. mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
  610. mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
  611. mindspore/ops/_op_impl/tbe/asin.py +0 -37
  612. mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
  613. mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
  614. mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
  615. mindspore/ops/_op_impl/tbe/asinh.py +0 -37
  616. mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
  617. mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
  618. mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
  619. mindspore/ops/_op_impl/tbe/assign.py +0 -79
  620. mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
  621. mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
  622. mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
  623. mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
  624. mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
  625. mindspore/ops/_op_impl/tbe/atan.py +0 -37
  626. mindspore/ops/_op_impl/tbe/atan2.py +0 -38
  627. mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
  628. mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
  629. mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
  630. mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
  631. mindspore/ops/_op_impl/tbe/atanh.py +0 -37
  632. mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
  633. mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
  634. mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
  635. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
  636. mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
  637. mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
  638. mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
  639. mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
  640. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
  641. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
  642. mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
  643. mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
  644. mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
  645. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
  646. mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
  647. mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
  648. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
  649. mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
  650. mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
  651. mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
  652. mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
  653. mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
  654. mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
  655. mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
  656. mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
  657. mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
  658. mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
  659. mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
  660. mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
  661. mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
  662. mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
  663. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
  664. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
  665. mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
  666. mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
  667. mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
  668. mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
  669. mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
  670. mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
  671. mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
  672. mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
  673. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
  674. mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
  675. mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
  676. mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
  677. mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
  678. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
  679. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
  680. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
  681. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
  682. mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
  683. mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
  684. mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
  685. mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
  686. mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
  687. mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
  688. mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
  689. mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
  690. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
  691. mindspore/ops/_op_impl/tbe/cast.py +0 -55
  692. mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
  693. mindspore/ops/_op_impl/tbe/cdist.py +0 -38
  694. mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
  695. mindspore/ops/_op_impl/tbe/ceil.py +0 -37
  696. mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
  697. mindspore/ops/_op_impl/tbe/celu.py +0 -39
  698. mindspore/ops/_op_impl/tbe/centralization.py +0 -39
  699. mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
  700. mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
  701. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
  702. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
  703. mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
  704. mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
  705. mindspore/ops/_op_impl/tbe/concat.py +0 -40
  706. mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
  707. mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
  708. mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
  709. mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
  710. mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
  711. mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
  712. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
  713. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
  714. mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
  715. mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
  716. mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
  717. mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
  718. mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
  719. mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
  720. mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
  721. mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
  722. mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
  723. mindspore/ops/_op_impl/tbe/cos.py +0 -37
  724. mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
  725. mindspore/ops/_op_impl/tbe/cosh.py +0 -37
  726. mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
  727. mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
  728. mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
  729. mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
  730. mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
  731. mindspore/ops/_op_impl/tbe/cummin.py +0 -41
  732. mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
  733. mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
  734. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
  735. mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
  736. mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
  737. mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
  738. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
  739. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
  740. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
  741. mindspore/ops/_op_impl/tbe/diag.py +0 -38
  742. mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
  743. mindspore/ops/_op_impl/tbe/dilation.py +0 -40
  744. mindspore/ops/_op_impl/tbe/div.py +0 -41
  745. mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
  746. mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
  747. mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
  748. mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
  749. mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
  750. mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
  751. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
  752. mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
  753. mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
  754. mindspore/ops/_op_impl/tbe/elu.py +0 -38
  755. mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
  756. mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
  757. mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
  758. mindspore/ops/_op_impl/tbe/equal.py +0 -42
  759. mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
  760. mindspore/ops/_op_impl/tbe/erf.py +0 -37
  761. mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
  762. mindspore/ops/_op_impl/tbe/erfc.py +0 -37
  763. mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
  764. mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
  765. mindspore/ops/_op_impl/tbe/exp.py +0 -40
  766. mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
  767. mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
  768. mindspore/ops/_op_impl/tbe/expm1.py +0 -37
  769. mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
  770. mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
  771. mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
  772. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
  773. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
  774. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
  775. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
  776. mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
  777. mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
  778. mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
  779. mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
  780. mindspore/ops/_op_impl/tbe/fill.py +0 -56
  781. mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
  782. mindspore/ops/_op_impl/tbe/flatten.py +0 -48
  783. mindspore/ops/_op_impl/tbe/floor.py +0 -37
  784. mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
  785. mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
  786. mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
  787. mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
  788. mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
  789. mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
  790. mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
  791. mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
  792. mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
  793. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
  794. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
  795. mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
  796. mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
  797. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  798. mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
  799. mindspore/ops/_op_impl/tbe/gelu.py +0 -37
  800. mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
  801. mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
  802. mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
  803. mindspore/ops/_op_impl/tbe/ger.py +0 -43
  804. mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
  805. mindspore/ops/_op_impl/tbe/greater.py +0 -43
  806. mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
  807. mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
  808. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
  809. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
  810. mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
  811. mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
  812. mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
  813. mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
  814. mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
  815. mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
  816. mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
  817. mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
  818. mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
  819. mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
  820. mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
  821. mindspore/ops/_op_impl/tbe/im2col.py +0 -42
  822. mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
  823. mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
  824. mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
  825. mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
  826. mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
  827. mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
  828. mindspore/ops/_op_impl/tbe/inv.py +0 -38
  829. mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
  830. mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
  831. mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
  832. mindspore/ops/_op_impl/tbe/invert.py +0 -37
  833. mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
  834. mindspore/ops/_op_impl/tbe/iou.py +0 -38
  835. mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
  836. mindspore/ops/_op_impl/tbe/is_close.py +0 -40
  837. mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
  838. mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
  839. mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
  840. mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
  841. mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
  842. mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
  843. mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
  844. mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
  845. mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
  846. mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
  847. mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
  848. mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
  849. mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
  850. mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
  851. mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
  852. mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
  853. mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
  854. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
  855. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
  856. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
  857. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
  858. mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
  859. mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
  860. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
  861. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
  862. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
  863. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
  864. mindspore/ops/_op_impl/tbe/lerp.py +0 -38
  865. mindspore/ops/_op_impl/tbe/less.py +0 -41
  866. mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
  867. mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
  868. mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
  869. mindspore/ops/_op_impl/tbe/log.py +0 -40
  870. mindspore/ops/_op_impl/tbe/log1p.py +0 -37
  871. mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
  872. mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
  873. mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
  874. mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
  875. mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
  876. mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
  877. mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
  878. mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
  879. mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
  880. mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
  881. mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
  882. mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
  883. mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
  884. mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
  885. mindspore/ops/_op_impl/tbe/lrn.py +0 -41
  886. mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
  887. mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
  888. mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
  889. mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
  890. mindspore/ops/_op_impl/tbe/matmul.py +0 -53
  891. mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
  892. mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
  893. mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
  894. mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
  895. mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
  896. mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
  897. mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
  898. mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
  899. mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
  900. mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
  901. mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
  902. mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
  903. mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
  904. mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
  905. mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
  906. mindspore/ops/_op_impl/tbe/maximum.py +0 -39
  907. mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
  908. mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
  909. mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
  910. mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
  911. mindspore/ops/_op_impl/tbe/minimum.py +0 -40
  912. mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
  913. mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
  914. mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
  915. mindspore/ops/_op_impl/tbe/mish.py +0 -37
  916. mindspore/ops/_op_impl/tbe/mod.py +0 -41
  917. mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
  918. mindspore/ops/_op_impl/tbe/mul.py +0 -37
  919. mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
  920. mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
  921. mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
  922. mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
  923. mindspore/ops/_op_impl/tbe/neg.py +0 -39
  924. mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
  925. mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
  926. mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
  927. mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
  928. mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
  929. mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
  930. mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
  931. mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
  932. mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
  933. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
  934. mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
  935. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
  936. mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
  937. mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
  938. mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
  939. mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
  940. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
  941. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
  942. mindspore/ops/_op_impl/tbe/pack.py +0 -58
  943. mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
  944. mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
  945. mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
  946. mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
  947. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
  948. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
  949. mindspore/ops/_op_impl/tbe/pdist.py +0 -36
  950. mindspore/ops/_op_impl/tbe/pooling.py +0 -46
  951. mindspore/ops/_op_impl/tbe/population_count.py +0 -38
  952. mindspore/ops/_op_impl/tbe/pow.py +0 -41
  953. mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
  954. mindspore/ops/_op_impl/tbe/prelu.py +0 -37
  955. mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
  956. mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
  957. mindspore/ops/_op_impl/tbe/range.py +0 -39
  958. mindspore/ops/_op_impl/tbe/real_div.py +0 -38
  959. mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
  960. mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
  961. mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
  962. mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
  963. mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
  964. mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
  965. mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
  966. mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
  967. mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
  968. mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
  969. mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
  970. mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
  971. mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
  972. mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
  973. mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
  974. mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
  975. mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
  976. mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
  977. mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
  978. mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
  979. mindspore/ops/_op_impl/tbe/relu.py +0 -39
  980. mindspore/ops/_op_impl/tbe/relu6.py +0 -38
  981. mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
  982. mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
  983. mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
  984. mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
  985. mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
  986. mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
  987. mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
  988. mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
  989. mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
  990. mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
  991. mindspore/ops/_op_impl/tbe/renorm.py +0 -39
  992. mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
  993. mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
  994. mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
  995. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
  996. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
  997. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
  998. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
  999. mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
  1000. mindspore/ops/_op_impl/tbe/rint.py +0 -37
  1001. mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
  1002. mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
  1003. mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
  1004. mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
  1005. mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
  1006. mindspore/ops/_op_impl/tbe/roll.py +0 -42
  1007. mindspore/ops/_op_impl/tbe/round.py +0 -38
  1008. mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
  1009. mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
  1010. mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
  1011. mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
  1012. mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
  1013. mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
  1014. mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
  1015. mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
  1016. mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
  1017. mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
  1018. mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
  1019. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
  1020. mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
  1021. mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
  1022. mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
  1023. mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
  1024. mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
  1025. mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
  1026. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
  1027. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
  1028. mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
  1029. mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
  1030. mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
  1031. mindspore/ops/_op_impl/tbe/select.py +0 -38
  1032. mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
  1033. mindspore/ops/_op_impl/tbe/selu.py +0 -39
  1034. mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
  1035. mindspore/ops/_op_impl/tbe/sgd.py +0 -62
  1036. mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
  1037. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
  1038. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
  1039. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
  1040. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
  1041. mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
  1042. mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
  1043. mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
  1044. mindspore/ops/_op_impl/tbe/sign.py +0 -38
  1045. mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
  1046. mindspore/ops/_op_impl/tbe/sin.py +0 -37
  1047. mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
  1048. mindspore/ops/_op_impl/tbe/sinh.py +0 -37
  1049. mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
  1050. mindspore/ops/_op_impl/tbe/slice.py +0 -58
  1051. mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
  1052. mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
  1053. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
  1054. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
  1055. mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
  1056. mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
  1057. mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
  1058. mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
  1059. mindspore/ops/_op_impl/tbe/softmax.py +0 -37
  1060. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
  1061. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
  1062. mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
  1063. mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
  1064. mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
  1065. mindspore/ops/_op_impl/tbe/softplus.py +0 -37
  1066. mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
  1067. mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
  1068. mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
  1069. mindspore/ops/_op_impl/tbe/softsign.py +0 -37
  1070. mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
  1071. mindspore/ops/_op_impl/tbe/sort.py +0 -38
  1072. mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
  1073. mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
  1074. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
  1075. mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
  1076. mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
  1077. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
  1078. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
  1079. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
  1080. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
  1081. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
  1082. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
  1083. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
  1084. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
  1085. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
  1086. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
  1087. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
  1088. mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
  1089. mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
  1090. mindspore/ops/_op_impl/tbe/split_d.py +0 -38
  1091. mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
  1092. mindspore/ops/_op_impl/tbe/split_v.py +0 -39
  1093. mindspore/ops/_op_impl/tbe/splitv.py +0 -39
  1094. mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
  1095. mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
  1096. mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
  1097. mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
  1098. mindspore/ops/_op_impl/tbe/square.py +0 -38
  1099. mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
  1100. mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
  1101. mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
  1102. mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
  1103. mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
  1104. mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
  1105. mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
  1106. mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
  1107. mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
  1108. mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
  1109. mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
  1110. mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
  1111. mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
  1112. mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
  1113. mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
  1114. mindspore/ops/_op_impl/tbe/sub.py +0 -39
  1115. mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
  1116. mindspore/ops/_op_impl/tbe/tan.py +0 -38
  1117. mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
  1118. mindspore/ops/_op_impl/tbe/tanh.py +0 -37
  1119. mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
  1120. mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
  1121. mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
  1122. mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
  1123. mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
  1124. mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
  1125. mindspore/ops/_op_impl/tbe/tile.py +0 -37
  1126. mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
  1127. mindspore/ops/_op_impl/tbe/top_k.py +0 -42
  1128. mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
  1129. mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
  1130. mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
  1131. mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
  1132. mindspore/ops/_op_impl/tbe/transpose.py +0 -60
  1133. mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
  1134. mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
  1135. mindspore/ops/_op_impl/tbe/trunc.py +0 -39
  1136. mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
  1137. mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
  1138. mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
  1139. mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
  1140. mindspore/ops/_op_impl/tbe/unpack.py +0 -38
  1141. mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
  1142. mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
  1143. mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
  1144. mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
  1145. mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
  1146. mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
  1147. mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
  1148. mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
  1149. mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
  1150. mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
  1151. mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
  1152. mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
  1153. mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
  1154. mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
  1155. mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
  1156. mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
  1157. mindspore/ops/_tracefunc.py +0 -241
  1158. mindspore/ops/arg_dtype_cast.py +0 -54
  1159. mindspore/rewrite/api/tree_node_helper.py +0 -60
  1160. mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
  1161. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
  1162. mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
  1163. mindspore/rewrite/namespace.py +0 -53
  1164. mindspore-2.2.14.dist-info/RECORD +0 -1924
  1165. {mindspore-2.2.14.dist-info → mindspore-2.3.0.dist-info}/WHEEL +0 -0
  1166. {mindspore-2.2.14.dist-info → mindspore-2.3.0.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
- # Copyright 2022 Huawei Technologies Co., Ltd
+ # Copyright 2022-2023 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@ import numpy as np
  import mindspore as ms
  import mindspore.common.dtype as mstype
  from mindspore.ops import operations as P
+ from mindspore.ops import functional as F
  from mindspore.ops.primitive import constexpr
  from mindspore.ops.primitive import _primexpr
  import mindspore.ops as ops
@@ -31,18 +32,19 @@ from mindspore.ops.operations._inner_ops import DynamicBroadcastTo
  from mindspore.ops.operations._sequence_ops import TupleToTensor
  from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
  from mindspore.ops.operations._sequence_ops import TensorToList
+ from mindspore.ops.auto_generate import OnesLikeExt, ZerosLikeExt, FillScalar, FillTensor, Arange, Chunk, UniqueDim,\
+ Unique2, SortExt, NonZero, NonZeroExt
+ from mindspore.ops.auto_generate.gen_ops_prim import SplitTensor
+ from mindspore.ops.auto_generate.gen_ops_prim import SplitWithSize, RepeatInterleaveInt, RepeatInterleaveTensor

  from mindspore.ops.operations.array_ops import (
  UniqueConsecutive,
  SearchSorted,
- NonZero,
  MatrixDiagV3,
  MatrixDiagPartV3,
  MatrixSetDiagV3,
  Fills,
  Col2Im,
- ArgMaxWithValue,
- ArgMinWithValue,
  ScatterNdMax,
  ScatterNdMul,
  IndexFill,
@@ -52,7 +54,9 @@ from mindspore.ops.operations.array_ops import (
  Lstsq,
  Mvlgamma,
  Tril,
- Argmax
+ Argmax,
+ ArgMaxWithValue,
+ ArgMinWithValue
  )
  from mindspore.ops.operations.array_ops import TensorScatterElements
  from mindspore.common import Tensor
@@ -61,53 +65,83 @@ from mindspore import _checkparam as validator
  from mindspore._c_expression import Tensor as Tensor_
  from mindspore.ops._utils.utils import ms_arrange

- tuple_to_tensor_ = TupleToTensor()
+ from mindspore.ops.auto_generate import cat, range, scatter_nd, deepcopy, masked_fill, diagonal, expand_dims, \
+ flip, transpose, triu, unsorted_segment_sum, diag, gather, gather_d, gather_nd, reshape, \
+ broadcast_to, strided_slice, ones, zeros, max_, min_, select
+ from mindspore.ops.auto_generate.gen_ops_prim import scatter_add_ext_op, slice_ext_op
+ from mindspore.ops.operations.manually_defined import tile, rank, scalar_cast
+
+ arg_max_with_value_ = ArgMaxWithValue()
+ arg_min_with_value_ = ArgMinWithValue()
+ batch_to_space_nd_v2_ = P.BatchToSpaceNDV2()
+ cast_ = P.Cast()
+ diag_ = P.Diag()
+ dynamic_broadcast_to_ = DynamicBroadcastTo()
  eye_ = P.Eye()
  fills_ = Fills()
+ fillv2_ = P.FillV2()
+ flatten_ = P.Flatten()
+ gather_ = P.Gather()
+ gather_d_ = P.GatherD()
+ gather_nd_ = P.GatherNd()
+ ger_ = P.Ger()
+ index_fill_ = IndexFill()
+ lstsq_ = Lstsq()
+ masked_select_ = P.MaskedSelect()
+ matrix_band_part_ = P.array_ops.MatrixBandPart()
  ones_ = P.Ones()
- ones_like_ = P.OnesLike()
- tile_ = P.Tile()
- unique_with_pad_ = P.UniqueWithPad()
- size_ = P.Size()
- shape_ = P.Shape()
+ population_count_ = P.PopulationCount()
+ range_ = P.Range()
  rank_ = P.Rank()
- tensor_shape_ = P.TensorShape()
+ reduce_max_ = P.ReduceMax()
+ reduce_min_ = P.ReduceMin()
  reshape_ = P.Reshape()
- tensor_slice = P.Slice()
- expand_dims_ = P.ExpandDims()
- transpose_ = P.Transpose()
+ scalar_to_tensor_ = P.ScalarToTensor()
  scatter_add_ = P.ScatterAdd()
+ scatter_div_ = P.ScatterDiv()
  scatter_max_ = P.ScatterMax()
  scatter_min_ = P.ScatterMin()
  scatter_mul_ = P.ScatterMul()
- scatter_div_ = P.ScatterDiv()
  scatter_nd_ = P.ScatterNd()
- gather_ = P.Gather()
- gather_d_ = P.GatherD()
- gather_nd_ = P.GatherNd()
- nonzero_ = NonZero()
- scalar_cast_ = P.ScalarCast()
+ scatter_update_ = P.ScatterUpdate()
+ shape_ = P.Shape()
+ split_tensor = SplitTensor()
+ split_with_size = SplitWithSize()
+ size_ = P.Size()
  tensor_scatter_add_ = P.TensorScatterAdd()
- tensor_scatter_sub_ = P.TensorScatterSub()
- tensor_scatter_mul_ = P.TensorScatterMul()
  tensor_scatter_div_ = P.TensorScatterDiv()
- tensor_scatter_min_ = P.TensorScatterMin()
  tensor_scatter_max_ = P.TensorScatterMax()
- scalar_to_tensor_ = P.ScalarToTensor()
- tuple_to_array_ = P.TupleToArray()
- masked_select_ = P.MaskedSelect()
- matrix_band_part_ = P.array_ops.MatrixBandPart()
- ger_ = P.Ger()
- diag_ = P.Diag()
- range_ = P.Range()
- zeros_like_ = P.ZerosLike()
- cast_ = P.Cast()
+ tensor_scatter_min_ = P.TensorScatterMin()
+ tensor_scatter_mul_ = P.TensorScatterMul()
+ tensor_scatter_sub_ = P.TensorScatterSub()
  tensor_select_ = P.Select()
- index_fill_ = IndexFill()
+ tensor_shape_ = P.TensorShape()
+ tensor_slice = P.Slice()
+ tile_ = P.Tile()
+ transpose_ = P.Transpose()
+ tuple_to_array_ = P.TupleToArray()
+ tuple_to_tensor_ = TupleToTensor()
+ unique_ = P.Unique()
+ unique_with_pad_ = P.UniqueWithPad()
+ unsorted_segment_max_ = P.UnsortedSegmentMax()
+ unsorted_segment_min_ = P.UnsortedSegmentMin()
+ unsorted_segment_prod_ = P.UnsortedSegmentProd()
  unsorted_segment_sum_ = P.UnsortedSegmentSum()
- population_count_ = P.PopulationCount()
- reduce_max = P.ReduceMax()
- reduce_min = P.ReduceMin()
+ ones_like_ = P.OnesLike()
+ zeros_like_ = P.ZerosLike()
+ ones_like_ext_ = OnesLikeExt()
+ zeros_like_ext_ = ZerosLikeExt()
+ fill_scalar_ = FillScalar()
+ fill_tensor_ = FillTensor()
+ sort_ext_ = SortExt()
+ arange_ = Arange()
+ chunk_ = Chunk()
+ repeat_interleave_int_ = RepeatInterleaveInt()
+ repeat_interleave_tensor_ = RepeatInterleaveTensor()
+ unique_dim_ = UniqueDim()
+ unique2_ = Unique2()
+ non_zero_ = NonZero()
+ non_zero_ext_ = NonZeroExt()


  @_primexpr
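The hunk above swaps per-call `_get_cache_prim` lookups for primitives instantiated once at import time. A minimal sketch of the pattern (the `to_int32` helper is illustrative, not part of the file):

>>> # instantiate the primitive once at module import, then reuse it on every call
>>> cast_ = P.Cast()
>>> def to_int32(x):
...     return cast_(x, mstype.int32)   # no cache lookup on the hot path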
@@ -187,8 +221,11 @@ def arange(start=0, end=None, step=1, *, dtype=None):

  Keyword Args:
  dtype (mindspore.dtype, optional): The required data type of returned Tensor. Default: ``None`` .
- If the value is not specified or is ``None`` , the type with the highest precision in the
- `start`, `end`, and `step` parameters is inferred.
+ When `dtype` is not specified or ``None``:
+
+ If `start`, `end`, and `step` are all integers, the dtype of output is int64,
+
+ If `start`, `end`, and `step` contain at least one floating-point number, the dtype of output is float32.

  Returns:
  A 1-D Tensor, with the same type as the inputs.
@@ -225,7 +262,7 @@ def arange(start=0, end=None, step=1, *, dtype=None):
  >>> print(output)
  [12. 11. 10. 9. 8. 7. 6. 5. 4. 3.]
  >>> print(output.dtype)
- Float64
+ Float32
  """
  if end is None:
  start, end = 0, start
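The new dtype-inference rule is concrete enough to check directly; a short sketch, assuming the 2.3.0 behaviour documented above (all-integer inputs infer int64, any float infers float32):

>>> from mindspore import ops
>>> print(ops.arange(0, 5).dtype)       # all integers -> Int64
Int64
>>> print(ops.arange(0, 5, 0.5).dtype)  # one float -> Float32
Float32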
@@ -237,67 +274,84 @@ def arange(start=0, end=None, step=1, *, dtype=None):
  if start.shape != () or end.shape != () or step.shape != ():
  raise ValueError(f"For arange, the input args must be a TensorScalar,"
  f" but got start shape:{start.shape}, end shape:{end.shape}, step shape:{step.shape}")
- range_op = _get_cache_prim(P.Range)()
- data = range_op(start, end, step)
+ data = range_(start, end, step)
  if dtype is not None:
  data = cast_(data, dtype)
  return data


- def cat(tensors, axis=0):
+ def arange_ext(start=0, end=None, step=1, *, dtype=None):
  r"""
- Connect input tensors along with the given axis.
+ Creates a sequence of numbers that begins at `start` and extends by increments of
+ `step` up to but not including `end`.

- The input data is a tuple or a list of tensors. These tensors have the same rank :math:`R`.
- Set the given axis as :math:`m`, and :math:`0 \le m < R`. Set the number of input tensors as :math:`N`.
- For the :math:`i`-th tensor :math:`t_i`, it has the shape of :math:`(x_1, x_2, ..., x_{mi}, ..., x_R)`.
- :math:`x_{mi}` is the :math:`m`-th dimension of the :math:`t_i`. Then, the shape of the output tensor is
+ Args:
+ start (Union[float, int], optional): The start of the interval. Default: ``0`` .
+ end (Union[float, int], optional): The end of the interval, exclusive.
+ Default: ``None`` . If ``None`` , it defaults to the value of `start`, and 0 is used as the starting value.
+ step (Union[float, int], optional): The step size with which the array element increments. Default: ``1`` .

- .. math::
+ Keyword Args:
+ dtype (mindspore.dtype, optional): The required data type of returned Tensor. Default: ``None`` .
+ When `dtype` is not specified or ``None``:

- (x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)
+ If `start`, `end`, and `step` are all integers, the dtype of output is int64,

- Args:
- tensors (Union[tuple, list]): A tuple or a list of input tensors.
- Suppose there are two tensors in this tuple or list, namely t1 and t2.
- To perform `concat` in the axis 0 direction, except for the :math:`0`-th axis,
- all other dimensions should be equal, that is,
- :math:`t1.shape[1] = t2.shape[1], t1.shape[2] = t2.shape[2], ..., t1.shape[R-1] = t2.shape[R-1]`,
- where :math:`R` represents the rank of tensor.
- axis (int): The specified axis, whose value is in range :math:`[-R, R)`. Default: ``0`` .
+ If `start`, `end`, and `step` contain at least one floating-point number, the dtype of output is float32.

  Returns:
- Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
- The data type is the same with `tensors`.
+ A 1-D Tensor, cast to `dtype` if provided; the cast may lose precision.

  Raises:
- TypeError: If `axis` is not an int.
- ValueError: If `tensors` have different dimension of tensor.
- ValueError: If `axis` not in range :math:`[-R, R)`.
- RuntimeError: If tensor's shape in `tensors` except for `axis` are different.
+ TypeError: If `start`, `end` or `step` are not of type int or float.
+ ValueError: If `step` = 0.
+ ValueError: If `start` >= `end` when `step` > 0.
+ ValueError: If `start` <= `end` when `step` < 0.

  Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ ``Ascend``

  Examples:
- >>> import mindspore
- >>> import numpy as np
+ >>> import mindspore as ms
  >>> from mindspore import Tensor, ops
- >>> input_x1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
- >>> input_x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
- >>> output = ops.cat((input_x1, input_x2))
+ >>> output = ops.arange_ext(1, 6)
  >>> print(output)
- [[0. 1.]
- [2. 1.]
- [0. 1.]
- [2. 1.]]
- >>> output = ops.cat((input_x1, input_x2), 1)
+ [1 2 3 4 5]
+ >>> print(output.dtype)
+ Int64
+ >>> output = ops.arange_ext(0, 3, 1.2)
+ >>> print(output)
+ [0. 1.2 2.4]
+ >>> print(output.dtype)
+ Float32
+ >>> output = ops.arange_ext(7, 1, -2)
  >>> print(output)
- [[0. 1. 0. 1.]
- [2. 1. 2. 1.]]
+ [7 5 3]
+ >>> print(output.dtype)
+ Int64
+ >>> output = ops.arange_ext(12, 2, -1, dtype=ms.bfloat16)
+ >>> print(output)
+ [12. 11. 10. 9. 8. 7. 6. 5. 4. 3.]
+ >>> print(output.dtype)
+ BFloat16
+ """
+ if end is None:
+ start, end = 0, start
+ return arange_(start, end, step, dtype)
+
+
+ def concat(tensors, axis=0):
+ """
+ Alias for :func:`mindspore.ops.cat()`.
+
+ Tutorial Examples:
+ - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/master/beginner/tensor.html#tensor-operation>`_
+ - `Vision Transformer Image Classification - Building ViT as a whole
+ <https://mindspore.cn/tutorials/application/en/master/cv/vit.html#building-vit-as-a-whole>`_
+ - `Sentiment Classification Implemented by RNN - Dense
+ <https://mindspore.cn/tutorials/application/en/master/nlp/sentiment_analysis.html#dense>`_
  """
- _concat = _get_cache_prim(P.Concat)(axis)
- return _concat(tensors)
+ return cat(tensors, axis)


  def eye(n, m=None, dtype=None):
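Because `concat` now simply forwards to the auto-generated `cat`, the two names are interchangeable; a sketch, assuming both stay exported from `mindspore.ops`:

>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> a = Tensor(np.array([[0., 1.]], np.float32))
>>> b = Tensor(np.array([[2., 3.]], np.float32))
>>> print(ops.concat((a, b), 0))   # identical to ops.cat((a, b), 0)
[[0. 1.]
 [2. 3.]]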
@@ -305,14 +359,14 @@ def eye(n, m=None, dtype=None):
  Creates a tensor with ones on the diagonal and zeros in the rest.

  Note:
- Combines ReverseV2 operator to get an anti-diagonal Tensor,
- but ReverseV2 only supports Ascend and GPU platforms currently.
+ The data type of returned tensor can be float16, float32, int8, int16, int32, int64, uint8
+ or bool on Ascend platforms.

  Args:
  n (int): The number of rows of returned tensor. Constant value only.
- m (int): The number of columns of returned tensor. Constant value only.
+ m (int, optional): The number of columns of returned tensor. Constant value only.
  Default: ``None`` , if ``None`` , the number of columns is as the same as n.
- dtype (mindspore.dtype): MindSpore's dtype, the data type of the returned tensor.
+ dtype (mindspore.dtype, optional): MindSpore's dtype, the data type of the returned tensor.
  The data type can be bool or Number.
  Default: ``None`` , the data type of the returned tensor is mindspore.float32.

@@ -336,11 +390,11 @@ def eye(n, m=None, dtype=None):
  [0 1]]
  >>> print(output.dtype)
  Int32
- >>> output = ops.eye(1, 2, mindspore.float64)
+ >>> output = ops.eye(1, 2, mindspore.float32)
  >>> print(output)
  [[1. 0.]]
  >>> print(output.dtype)
- Float64
+ Float32
  >>> output = ops.eye(2, dtype=mindspore.int32)
  >>> print(output)
  [[1 0]
@@ -419,25 +473,25 @@ def hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype
  return out


- def where(condition, x, y):
+ def where(condition, input, other):
  r"""
- Selects elements from `x` or `y` based on `condition` and returns a tensor.
+ Selects elements from `input` or `other` based on `condition` and returns a tensor.

  .. math::
- output_i = \begin{cases} x_i,\quad &if\ condition_i \\ y_i,\quad &otherwise \end{cases}
+ output_i = \begin{cases} input_i,\quad &if\ condition_i \\ other_i,\quad &otherwise \end{cases}

  Args:
- condition (Tensor[bool]): If True, yield `x`, otherwise yield `y`.
- x (Union[Tensor, Scalar]): When `condition` is True, values to select from.
- y (Union[Tensor, Scalar]): When `condition` is False, values to select from.
+ condition (Tensor[bool]): If True, yield `input`, otherwise yield `other`.
+ input (Union[Tensor, Scalar]): When `condition` is True, values to select from.
+ other (Union[Tensor, Scalar]): When `condition` is False, values to select from.

  Returns:
- Tensor, elements are selected from `x` and `y`.
+ Tensor, elements are selected from `input` and `other`.

  Raises:
  TypeError: If `condition` is not a Tensor.
- TypeError: If both `x` and `y` are scalars.
- ValueError: If `condition`, `x` and `y` can not broadcast to each other.
+ TypeError: If both `input` and `other` are scalars.
+ ValueError: If `condition`, `input` and `other` can not broadcast to each other.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -454,66 +508,15 @@ def where(condition, x, y):
  [[0. 1.]
  [2. 1.]]
  """
- if not isinstance(condition, Tensor):
- raise TypeError(f"For 'where', 'condition' must be a Tensor, but got {type(condition)}.")
- if isinstance(x, (int, float)):
- if not isinstance(y, Tensor):
- raise TypeError(
- f"For 'where', at least one of 'x' and 'y' should be Tensor, but got x:{type(x)}, y:{type(y)}."
- )
- x = cast_(x, y.dtype)
- elif isinstance(y, (int, float)):
- if not isinstance(x, Tensor):
- raise TypeError(
- f"For 'where', at least one of 'x' and 'y' should be Tensor, but got x:{type(x)}, y:{type(y)}."
- )
- y = cast_(y, x.dtype)
- output_shape = _calc_broadcast_shape(x.shape, y.shape, condition.shape)
- condition = broadcast_to(condition, output_shape)
- x = broadcast_to(x, output_shape)
- y = broadcast_to(y, output_shape)
- _select = P.Select()
- return _select(condition, x, y)
+ return tensor_select_(condition, input, other)


  def reverse(x, axis):
  """
- Reverses specific dimensions of a tensor.
-
- .. warning::
- The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "input_x".
-
- Args:
- x (Tensor): The target tensor.
- The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- axis (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.
-
- Outputs:
- Tensor, has the same shape and type as `x`.
-
- Raises:
- TypeError: If `axis` is neither list nor tuple.
- TypeError: If element of `axis` is not an int.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
- >>> output = ops.reverse(input_x, axis=[1])
- >>> print(output)
- [[4 3 2 1]
- [8 7 6 5]]
- >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
- >>> output = ops.reverse(input_x, axis=[1, 0])
- >>> print(output)
- [[8 7 6 5]
- [4 3 2 1]]
+ :func:`mindspore.ops.reverse` will be deprecated in the future.
+ Please use :func:`mindspore.ops.flip` instead.
  """
- return P.ReverseV2(axis)(x)
+ return flip(x, axis)


  def ravel(input):
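The rewritten `where` delegates scalar casting and broadcasting entirely to the `Select` primitive instead of doing them in Python. Basic usage is unchanged; a sketch, assuming the kernel broadcasts as the docstring promises:

>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> cond = Tensor(np.array([True, False]))
>>> x = Tensor(np.array([1., 2.], np.float32))
>>> y = Tensor(np.array([3., 4.], np.float32))
>>> print(ops.where(cond, x, y))
[1. 4.]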
@@ -659,8 +662,9 @@ def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
  other locations take value `off_value`.

  Note:
- If the input indices is rank `N`, the output will have rank `N+1`. The new axis is created at dimension `axis`.
- On Ascend, if `on_value` is int64 dtype, `indices` must be int64 dtype.
+ If the input `indices` has rank `N`, the output will have rank `N+1`.
+ The new axis is created at dimension `axis`. On Ascend, if `on_value` is int64 dtype, `indices` must be
+ int64 dtype, and the value for `on_value` and `off_value` can only be 1 and 0.

  Args:
  indices(Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
@@ -682,6 +686,7 @@ def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
  Raises:
  TypeError: If `axis` or `depth` is not an int.
  TypeError: If dtype of `indices` is not int32 or int64.
+ TypeError: If dtype of `on_value` is not int32, int64, float16 or float32.
  TypeError: If `indices`, `on_value` or `off_value` is not a Tensor.
  ValueError: If `axis` is not in range [-1, ndim].
  ValueError: If `depth` is less than 0.
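To stay within the tightened Ascend note above, the portable usage keeps the default `on_value=1` and `off_value=0`; a sketch (only the shape is asserted, to avoid assuming the printed rendering):

>>> import mindspore
>>> from mindspore import Tensor, ops
>>> indices = Tensor([0, 2], mindspore.int32)
>>> output = ops.one_hot(indices, 3)   # on_value=1, off_value=0 by default
>>> print(output.shape)                # rank N+1: (2,) -> (2, 3)
(2, 3)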
@@ -715,8 +720,8 @@ def fill(type, shape, value): # pylint: disable=redefined-outer-name

  Args:
  type (mindspore.dtype): The specified type of output tensor. The data type only supports
- `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ and
- `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ and
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ .
  shape (Union(Tensor, tuple[int])): The specified shape of output tensor.
  value (Union(Tensor, number.Number, bool)): Value to fill the returned tensor.

@@ -743,7 +748,7 @@ def fill(type, shape, value): # pylint: disable=redefined-outer-name
  [0. 0. 0.]]
  """
  value = cast_(value, type)
- return _get_cache_prim(P.FillV2)()(shape, value)
+ return fillv2_(shape, value)


  def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-name
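`fill` still casts `value` to `type` before dispatching, now through the module-level `fillv2_` instance; a quick sketch:

>>> import mindspore
>>> from mindspore import ops
>>> print(ops.fill(mindspore.float32, (2,), 3))   # the int 3 is cast to float32 first
[3. 3.]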
@@ -791,6 +796,45 @@ def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-na
  return ops.fill(dtype, size, fill_value)


+ def full_ext(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-name
+ """
+ Create a Tensor of the specified shape and fill it with the specified value.
+
+ Args:
+ size (Union(tuple[int], list[int])): The specified shape of output tensor.
+ fill_value (number.Number): Value to fill the returned tensor. Complex numbers are not supported for now.
+
+ Keyword Args:
+ dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for details,
+ please refer to :class:`mindspore.dtype` . Default: ``None`` .
+
+ Returns:
+ Tensor.
+
+ Raises:
+ TypeError: If `size` is not a tuple or list.
+ ValueError: If an element in `size` is less than 0.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> from mindspore import ops
+ >>> output = ops.full((2, 2), 1)
+ >>> print(output)
+ [[1. 1.]
+ [1. 1.]]
+ >>> output = ops.full((3, 3), 0)
+ >>> print(output)
+ [[0. 0. 0.]
+ [0. 0. 0.]
+ [0. 0. 0.]]
+ """
+ if isinstance(fill_value, Tensor):
+ return fill_tensor_(size, fill_value, dtype)
+ return fill_scalar_(size, fill_value, dtype)
+
+
  def full_like(input, fill_value, *, dtype=None):
  """
  Return a Tensor of the same shape as `input` and filled with `fill_value`.
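The `full_ext` body added above dispatches on the type of `fill_value`: a Tensor routes to `FillTensor`, any scalar to `FillScalar`. A sketch of both paths, assuming `full_ext` is reachable under `ops.function.array_func` like the other `_ext` helpers in this file:

>>> from mindspore import Tensor
>>> from mindspore.ops.function import array_func
>>> out = array_func.full_ext((2, 2), 7)          # scalar -> fill_scalar_
>>> out = array_func.full_ext((2, 2), Tensor(7))  # Tensor -> fill_tensor_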
@@ -883,24 +927,63 @@ def chunk(input, chunks, axis=0):
  length_along_dim = arr_shape[arr_axis]

  if chunks > length_along_dim:
- res = P.Split(arr_axis, length_along_dim)(input)
+ res = _get_cache_prim(P.Split)(arr_axis, length_along_dim)(input)
  elif length_along_dim % chunks == 0:
- res = P.Split(arr_axis, chunks)(input)
+ res = _get_cache_prim(P.Split)(arr_axis, chunks)(input)
  else:
  block_size = int(np.ceil(length_along_dim / chunks))
  true_chunks = int(length_along_dim // block_size)
  length1 = true_chunks * block_size
  length2 = length_along_dim - length1
- start1 = _list_comprehensions(rank(input), 0, True)
+ start1 = _list_comprehensions(rank_(input), 0, True)
  size1 = _tuple_setitem(arr_shape, arr_axis, length1)
  start2 = _tuple_setitem(start1, arr_axis, length1)
  size2 = _tuple_setitem(arr_shape, arr_axis, length2)
- res = P.Split(arr_axis, true_chunks)(tensor_slice(input, start1, size1))
+ res = _get_cache_prim(P.Split)(arr_axis, true_chunks)(tensor_slice(input, start1, size1))
  if length2:
- res += P.Split(arr_axis, 1)(tensor_slice(input, start2, size2))
+ res += _get_cache_prim(P.Split)(arr_axis, 1)(tensor_slice(input, start2, size2))
  return res


+ def chunk_ext(input, chunks, dim=0):
+ """
+ Cut the input Tensor into `chunks` sub-tensors along the specified axis.
+
+ Note:
+ This function may return fewer than the specified number of chunks!
+
+ Args:
+ input (Tensor): A Tensor to be cut.
+ chunks (int): Number of sub-tensors to cut.
+ dim (int, optional): The dimension along which to split. Default: ``0`` .
+
+ Returns:
+ A tuple of sub-tensors.
+
+ Raises:
+ TypeError: If argument `input` is not Tensor.
+ TypeError: If argument `chunks` is not int.
+ TypeError: If argument `dim` is not int.
+ ValueError: If argument `dim` is out of range of :math:`[-input.ndim, input.ndim)` .
+ ValueError: If argument `chunks` is not a positive number.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> input_x = np.arange(9).astype("float32")
+ >>> output = mindspore.mint.chunk(Tensor(input_x), 3)
+ >>> print(output)
+ (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
+ Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
+ Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
+ """
+ return chunk_(input, chunks, dim)
+
+
  def fills(x, value):
  """
  `fills` is deprecated, please use `ops.fill` instead.
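The fallback branch of `chunk` above explains why fewer chunks can come back: the block size is ceil(length/chunks), so 9 elements split into 4 requested chunks yields blocks of 3 and only 3 pieces. A sketch:

>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.arange(9).astype(np.float32))
>>> print(len(ops.chunk(x, 4)))   # ceil(9/4) = 3 per block -> 3 chunks, not 4
3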
@@ -920,50 +1003,6 @@ def fills(x, value):
  return fills_(x, value_)


- def ones(shape, dtype=None): # pylint: disable=redefined-outer-name
- r"""
- Creates a tensor filled with value ones.
-
- Creates a tensor with shape described by the first argument and fills it with value ones in type of the second
- argument.
-
- Args:
- shape (Union[tuple[int], int, Tensor]): The specified shape of output tensor. Only positive integer or
- tuple or Tensor containing positive integers are allowed. If it is a Tensor,
- it must be a 0-D or 1-D Tensor with int32 or int64 dtypes.
- dtype (:class:`mindspore.dtype`): The specified type of output tensor. If `dtype` is ``None`` ,
- `mindspore.float32` will be used. Default: ``None`` .
-
- Returns:
- Tensor, has the same type and shape as input shape value.
-
- Raises:
- TypeError: If `shape` is not tuple, int or Tensor.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> from mindspore import ops
- >>> output = ops.ones((2, 2), mindspore.float32)
- >>> print(output)
- [[1. 1.]
- [1. 1.]]
- """
- _dtype = mstype.float32 if dtype is None else dtype
- ones_op = _get_cache_prim(P.FillV2)()
- value = Tensor(1, _dtype)
- if isinstance(shape, int):
- shape = tuple([shape])
- elif isinstance(shape, list):
- shape = Tensor(shape, dtype=mstype.int64)
- elif isinstance(shape, Tensor) and shape.ndim == 0 and shape.size == 1:
- shape = shape.reshape(1)
- output = ones_op(shape, value)
- return output
-
-
  def ones_like(input, *, dtype=None):
  """
  Returns a Tensor with a value of 1 and its shape is the same as the input.
@@ -993,57 +1032,15 @@ def ones_like(input, *, dtype=None):
  [[1 1]
  [1 1]]
  """
- ones_like_op = _get_cache_prim(P.OnesLike)()
- output = ones_like_op(input)
+ output = ones_like_(input)
  _dtype = input.dtype if dtype is None else dtype
  output = cast_(output, _dtype)
  return output


- def zeros(size, dtype=None): # pylint: disable=redefined-outer-name
- r"""
- Creates a tensor filled with 0 with shape described by `shape` and fills it with value 0 in type of `dtype`.
-
- Args:
- size (Union[tuple[int], int, Tensor]): The specified shape of output tensor. Only positive integer or
- tuple or Tensor containing positive integers are allowed. If it is a Tensor,
- it must be a 0-D or 1-D Tensor with int32 or int64 dtypes.
- dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor. If `dtype` is ``None`` ,
- mindspore.float32 will be used. Default: ``None`` .
-
- Returns:
- Tensor, has the same dtype and size as input.
-
- Raises:
- TypeError: If `size` is not tuple, int or Tensor.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> from mindspore import ops
- >>> output = ops.zeros((2, 2), mindspore.float32)
- >>> print(output)
- [[0. 0.]
- [0. 0.]]
- """
- zero_op = _get_cache_prim(P.FillV2)()
- _dtype = mstype.float32 if dtype is None else dtype
- value = Tensor(0, _dtype)
- if isinstance(size, int):
- size = tuple([size])
- elif isinstance(size, list):
- size = Tensor(size, dtype=mstype.int64)
- elif isinstance(size, Tensor) and size.ndim == 0 and size.size == 1:
- size = size.reshape(1)
- output = zero_op(size, value)
- return output
-
-
  def zeros_like(input, *, dtype=None):
  r"""
- Creates a tensor filled with 0, with the same size as x, and the given dtype.
+ Creates a tensor filled with 0, with the same size as input, and the given dtype.

  If `dtype = None`, the tensor will have the same dtype as input `input`.
@@ -1074,125 +1071,78 @@ def zeros_like(input, *, dtype=None):
  [0. 0.]]
  """
  _dtype = input.dtype if dtype is None else dtype
- _zeros_like = _get_cache_prim(P.ZerosLike)()
- _cast = _get_cache_prim(P.Cast)()
- output = _zeros_like(input)
- output = _cast(output, _dtype)
+ output = zeros_like_(input)
+ output = cast_(output, _dtype)
  return output


- def tile(input, multiples):
- r"""
- Replicates an input tensor with given multiples times.
-
- Creates a new tensor by replicating `input` `multiples` times. The i'th dimension of
- output tensor has `input.shape[i] * multiples[i]` elements, and the values of `input`
- are replicated `multiples[i]` times along the i'th dimension.
+ def ones_like_ext(input, *, dtype=None):
+ """
+ Creates a tensor filled with 1, with the same shape as input, and its data type is determined by the given dtype.

- Note:
- The length of `multiples` must be greater or equal to the length of dimension in `input`.
+ If `dtype = None`, the tensor will have the same dtype as input `input`.

  Args:
- input (Tensor): 1-D or higher dimensional Tensor. Set the shape of input tensor as
- :math:`(x_1, x_2, ..., x_S)` .
+ input (Tensor): Tensor of any dimension.

- multiples (tuple[int]): The parameter that specifies the number of replications,
- the parameter type is tuple, and the data type is int, i.e., :math:`(y_1, y_2, ..., y_S)`.
- The length of `multiples` cannot be smaller than the length of the shape of `input`.
- Only constant value is allowed.
+ Keyword Args:
+ dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is ``None`` ,
+ the dtype of the input tensor will be used. Default: ``None`` .

  Returns:
- Tensor, has the same data type as the `input`. Suppose the length of `multiples` is `d`,
- the dimension of `input` is `input.dim`, and the shape of `input` is :math:`(x_1, x_2, ..., x_S)`.
-
- - If `input.dim = d`, then the shape of their corresponding positions can be multiplied, and
- the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_S)`.
- - If `input.dim < d`, fill in multiple 1 in the length of the shape of `input` until their
- lengths are consistent. Such as set the shape of `input` as :math:`(1, ..., x_1, x_2, ..., x_S)`,
- then the shape of their corresponding positions can be multiplied, and the shape of Outputs is
- :math:`(1*y_1, ..., x_R*y_R, x_S*y_S)`.
+ Tensor, has the same shape as `input` but filled with ones.

  Raises:
- TypeError: If `multiples` is not a tuple or its elements are not all int.
- ValueError: If the elements of `multiples` are not all greater than 0.
- ValueError: If the length of `multiples` are smaller than the length of dimension in `input`.
+ TypeError: If `input` is not a Tensor.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>> import mindspore
  >>> import numpy as np
  >>> from mindspore import Tensor, ops
- >>> input = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
- >>> multiples = (2, 3)
- >>> output = ops.tile(input, multiples)
- >>> print(output)
- [[1. 2. 1. 2. 1. 2.]
- [3. 4. 3. 4. 3. 4.]
- [1. 2. 1. 2. 1. 2.]
- [3. 4. 3. 4. 3. 4.]]
- >>> multiples = (2, 3, 2)
- >>> output = ops.tile(input, multiples)
- >>> print(output)
- [[[1. 2. 1. 2.]
- [3. 4. 3. 4.]
- [1. 2. 1. 2.]
- [3. 4. 3. 4.]
- [1. 2. 1. 2.]
- [3. 4. 3. 4.]]
- [[1. 2. 1. 2.]
- [3. 4. 3. 4.]
- [1. 2. 1. 2.]
- [3. 4. 3. 4.]
- [1. 2. 1. 2.]
- [3. 4. 3. 4.]]]
- """
- tile_op = _get_cache_prim(P.Tile)()
- return tile_op(input, multiples)
-
-
- def range(start, end, step):
+ >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
+ >>> output = ops.function.array_func.ones_like_ext(x)
+ >>> print(output)
+ [[1 1]
+ [1 1]]
+ """
+ return ones_like_ext_(input, dtype)
+
+
+ def zeros_like_ext(input, *, dtype=None):
  r"""
- Creates a sequence of numbers that begins at `start` and extends by increments of
- `limit` up to but not including `end`.
+ Creates a tensor filled with 0, with the same size as input. Its data type is determined by the given dtype.

- The types of all 3 inputs must be the same. The type of the resulting tensor is
- the same as the type of the inputs.
+ If `dtype = None`, the tensor will have the same dtype as input `input`.

  Args:
- start (Tensor): A scalar Tensor. The first number in the sequence. Must have
- type: int32 ,int64, float32 or float64.
- end (Tensor): A scalar Tensor. Upper limit of the sequence, exclusive. Must
- have type: int32 ,int64, float32 or float64.
- step (Tensor): A scalar Tensor. Number that increments `start`. Must have
- type: int32 ,int64, float32 or float64.
+ input (Tensor): Tensor of any dimension.
+
+ Keyword Args:
+ dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is ``None`` ,
+ the dtype of the input tensor will be used. Default: ``None`` .

  Returns:
- A 1-D Tensor, with the same type as the inputs.
+ Tensor, filled with 0.

  Raises:
- TypeError: If `start`, `end` or `step` is not scalar Tensor.
- TypeError: If datatype of `start`, `end` or `step` is not same.
- TypeError: If datatype of `start`, `end` or `step` is not supported.
- ValueError: If `step` = 0.
- ValueError: If `start` >= `end` when `step` > 0.
- ValueError: If `start` <= `end` when `step` < 0.
+ TypeError: If dtype is not a MindSpore dtype.

  Supported Platforms:
- ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> import numpy as np
  >>> from mindspore import Tensor, ops
- >>> from mindspore import dtype as mstype
- >>> start = Tensor(0, mstype.int32)
- >>> end = Tensor(10, mstype.int32)
- >>> step = Tensor(4, mstype.int32)
- >>> output = ops.range(start, end, step)
+ >>> x = Tensor(np.arange(4).reshape(2, 2))
+ >>> output = ops.function.array_func.zeros_like_ext(x, dtype=mindspore.float32)
  >>> print(output)
- [0 4 8]
+ [[0. 0.]
+ [0. 0.]]
  """
- return range_(start, end, step)
+ return zeros_like_ext_(input, dtype)


  ##############################
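Both `_ext` variants accept a keyword-only dtype override on top of the input's dtype; a quick sketch combining the two docstring examples above:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.arange(4).reshape(2, 2))
>>> ones = ops.function.array_func.ones_like_ext(x, dtype=mindspore.float32)
>>> print(ones.dtype)
Float32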
@@ -1228,7 +1178,70 @@ def unique(input):
  TypeError: If `input` is not a Tensor.

  Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, nn
+ >>> from mindspore import ops
+ >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
+ >>> output = ops.unique(x)
+ >>> print(output)
+ (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))
+ >>> y = output[0]
+ >>> print(y)
+ [1 2 5]
+ >>> idx = output[1]
+ >>> print(idx)
+ [0 1 2 1]
+ """
+ shape_x = input.shape
+ length_x = get_x_shape(shape_x)
+ input = reshape_(input, length_x)
+ y, idx = unique_(input)
+ idx = reshape_(idx, shape_x)
+ return y, idx
+
+
+ def unique_ext(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
+ """
+ Returns the unique elements of input tensor.
+
+ When `return_inverse=True`, also returns a tensor containing the index of each value of the input
+ tensor corresponding to the output unique tensor.
+ When `return_counts=True`, also returns a tensor containing the number of occurrences for each
+ unique value or tensor.
+
+ Args:
+ input (Tensor): The input tensor.
+ sorted (bool): Whether to sort the unique elements in ascending order before returning as output.
+ Default: ``True`` .
+ return_inverse (bool): Whether to also return the indices for where elements in the original input ended up in
+ the returned unique list. Default: ``False`` .
+ return_counts (bool): Whether to also return the counts for each unique element. Default: ``False`` .
+ dim (int): The dimension to operate upon. If ``None``, the unique of the flattened input is returned.
+ Otherwise, each of the tensors indexed by the given dimension is treated as one of the elements to apply the
+ unique operation upon. Default: ``None`` .
+
+ Returns:
+ A tensor or a tuple of tensors containing some of the tensor objects (`output`, `inverse_indices`, `counts`).
+
+ - output (Tensor) - The output tensor including the unique elements of input tensor; it has the same dtype as input.
+ - inverse_indices (Tensor) - Returned when ``return_inverse`` is True. It represents the indices for where
+ elements in the original input map to in the output. When ``dim`` is ``None``, it has the same shape as input;
+ otherwise, the shape is input.shape[dim].
+ - counts (Tensor) - Returned when ``return_counts`` is True. It represents the number of occurrences for each
+ unique value or tensor. When ``dim`` is ``None``, it has the same shape as output; otherwise, the shape is
+ output.shape[dim].
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+
+ Supported Platforms:
+ ``Ascend``

  Examples:
  >>> import mindspore
@@ -1236,9 +1249,9 @@ def unique(input):
  >>> from mindspore import Tensor, nn
  >>> from mindspore import ops
  >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
- >>> output = ops.unique(x)
+ >>> output = ops.unique_ext(x, return_inverse=True)
  >>> print(output)
- (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))
+ (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int64, value= [0, 1, 2, 1]))
  >>> y = output[0]
  >>> print(y)
  [1 2 5]
@@ -1246,16 +1259,20 @@ def unique(input):
  >>> print(idx)
  [0 1 2 1]
  """
-
- unique_op = _get_cache_prim(P.Unique)()
- reshape_op = _get_cache_prim(P.Reshape)()
-
- shape_x = input.shape
- length_x = get_x_shape(shape_x)
- input = reshape_op(input, length_x)
- y, idx = unique_op(input)
- idx = reshape_op(idx, shape_x)
- return y, idx
+ if not F.isconstant(return_inverse) or not F.isconstant(return_counts):
+ raise ValueError(f"For 'unique_ext', 'return_inverse' and 'return_counts' cannot be mutable")
+ if dim is None:
+ y, inverse, counts = unique2_(input, sorted, return_inverse, return_counts)
+ else:
+ validator.check_value_type("return_counts", return_counts, [bool], "unique_ext")
+ y, inverse, counts = unique_dim_(input, sorted, return_inverse, dim)
+ if return_inverse and return_counts:
+ return y, inverse, counts
+ if return_inverse:
+ return y, inverse
+ if return_counts:
+ return y, counts
+ return y


  def unique_with_pad(x, pad_num):
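The tail of `unique_ext` above maps the two boolean flags onto four return shapes; a sketch of each combination, reusing the docstring's `x`:

>>> y = ops.unique_ext(x)                                      # Tensor only
>>> y, inverse = ops.unique_ext(x, return_inverse=True)        # 2-tuple
>>> y, counts = ops.unique_ext(x, return_counts=True)          # 2-tuple
>>> y, inverse, counts = ops.unique_ext(x, return_inverse=True, return_counts=True)  # 3-tuple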
@@ -1363,7 +1380,7 @@ def unique_consecutive(input, return_idx=False, return_counts=False, axis=None):
  return output


- def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):
+ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, sorter=None):
  """
  Return the position indices such that after inserting the values into the `sorted_sequence`, the order of innermost
  dimension of the `sorted_sequence` remains unchanged.
@@ -1378,16 +1395,24 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):
  if ``False`` , the output datatype will be int64. Default: ``False`` .
  right (bool, optional): Search Strategy. If ``True`` , return the last suitable index found;
  if ``False`` , return the first such index. Default: ``False`` .
+ side (str, optional): The same as `right` but preferred. ``"left"`` corresponds to `right` being ``False``,
+ and ``"right"`` to `right` being ``True``. An error will be reported if this parameter is
+ set to ``"left"`` while `right` is ``True``. Default: ``None`` .
+ sorter (Tensor, optional): If provided, a tensor matching the shape of the unsorted `sorted_sequence`,
+ containing int64 indices that sort it in ascending order along the innermost
+ dimension. Default: ``None`` .

  Returns:
  Tensor containing the indices from the innermost dimension of `sorted_sequence` such that,
- if insert the corresponding value in the `values` tensor, the order of `sorted_sequence` would be preserved,
+ if the corresponding value in the `values` Tensor were inserted, the order of `sorted_sequence` would be preserved,
  whose datatype is int32 if out_int32 is ``True`` , otherwise int64, and shape is the same as the shape of
  `values`.

  Raises:
  ValueError: If the dimension of `sorted_sequence` isn't 1 and all dimensions except the last dimension of
  `sorted_sequence` and `values` are different.
+ ValueError: If `sorted_sequence` value is a scalar.
+ ValueError: If `values` is a scalar when `sorted_sequence` dimension is not 1.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -1404,10 +1429,16 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):
  [1 2 4]]
  """

- _check_attr_dtype("out_int32", out_int32, [bool], "search_sorted")
- dtype = mstype.int64 if not out_int32 else mstype.int32
+ validator.check_value_type("out_int32", out_int32, [bool], "search_sorted")
+ validator.check_value_type("right", right, [bool], "search_sorted")
+ dtype = mstype.int32 if bool(out_int32) else mstype.int64
+ if side == "left" and right is True:
+ raise ValueError(f"For 'searchsorted', 'side' and 'right' can't be set to opposites, "
+ f"got side of left while right was True.")
+ if side == "right":
+ right = True
  search_sorted_ = SearchSorted(dtype, right)
- return search_sorted_(sorted_sequence, values)
+ return search_sorted_(sorted_sequence, values, sorter)


  def ger(input, vec2):
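With the new keywords, `side="right"` simply flips `right` before the primitive is built, and `sorter` lets an unsorted sequence be searched through its argsort indices. A sketch, assuming NumPy-style `sorter` semantics:

>>> import mindspore
>>> from mindspore import Tensor, ops
>>> seq = Tensor([9., 3., 1.], mindspore.float32)    # unsorted
>>> sorter = Tensor([2, 1, 0], mindspore.int64)      # indices that sort seq ascending
>>> print(ops.searchsorted(seq, Tensor([4.], mindspore.float32), sorter=sorter))
[2]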
@@ -1457,7 +1488,7 @@ def size(input_x):

  Args:
  input_x (Tensor): Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
- `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.

  Returns:
  int. A scalar representing the elements' size of `input_x`, tensor is the number of elements
@@ -1538,76 +1569,6 @@ def dyn_shape(input_x):
  return tensor_shape_(input_x)


- def rank(input_x):
- """
- Returns the rank of a tensor.
-
- Returns a 0-D int32 Tensor representing the rank of input; the rank of a tensor
- is the number of indices required to uniquely select each element of the tensor.
-
- Args:
- input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is Number.
-
- Returns:
- Tensor. 0-D int32 Tensor representing the rank of input, i.e., :math:`R`. The data type is an int.
-
- Raises:
- TypeError: If `input_x` is not a Tensor.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
- >>> output = ops.rank(input_tensor)
- >>> print(output)
- 2
- >>> print(type(output))
- <class 'int'>
- """
- return rank_(input_x)
-
-
- def reshape(input, shape):
- """
- Rearranges the input Tensor based on the given shape.
-
- The 'shape' can only have one -1 at most, in which case it's inferred from the remaining dimensions and
- the number of elements in the input.
-
- Args:
- input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
- shape (Union[tuple[int], Tensor[int]]): Constructed by multiple
- integers, i.e., :math:`(y_1, y_2, ..., y_S)`. Only constant value is allowed.
-
- Returns:
- Tensor, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
-
- Raises:
- ValueError: Given a shape tuple, if it has several -1; or if the product
- of its elements is less than or equal to 0 or cannot be divided by the product
- of the input tensor shape; or if it does not match the input's array size.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
- >>> output = ops.reshape(input, (3, 2))
- >>> print(output)
- [[-0.1 0.3]
- [ 3.6 0.4]
- [ 0.5 -3.2]]
- """
- return reshape_(input, shape)
-
-
  def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
  r"""
  Reverses variable length slices.
@@ -1672,7 +1633,7 @@ def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
1672
1633
  [[4. 3. 2. 1.]
1673
1634
  [8. 7. 6. 5.]]
1674
1635
  """
1675
- return P.ReverseSequence(seq_dim=seq_dim, batch_dim=batch_dim)(x, seq_lengths)
1636
+ return _get_cache_prim(P.ReverseSequence)(seq_dim=seq_dim, batch_dim=batch_dim)(x, seq_lengths)
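Wrapping `P.ReverseSequence` in `_get_cache_prim` reuses one primitive instance per attribute combination instead of constructing a new one on every call; the same theme recurs below, where attribute-free primitives are replaced by module-level singletons such as `transpose_` and `flatten_`. A minimal sketch of the caching idea, not MindSpore's actual implementation:

from functools import lru_cache

@lru_cache(maxsize=None)
def get_cached_prim(prim_cls, attrs):
    # One instance per (class, attributes) pair; attrs must be hashable.
    return prim_cls(*attrs)

# e.g. get_cached_prim(SomePrim, (seq_dim, batch_dim)) returns the same
# object on repeated calls with the same attributes.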
1676
1637
 
1677
1638
 
1678
1639
  def flatten(input, order='C', *, start_dim=1, end_dim=-1):
@@ -1696,7 +1657,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
1696
1657
  Raises:
1697
1658
  TypeError: If `input` is not a Tensor.
1698
1659
  TypeError: If `order` is not string type.
1699
- ValueError: If `order` is string type, but not 'C' or 'F'.
1660
+ ValueError: If `order` is string type, but not ``'C'`` or ``'F'``.
1700
1661
  TypeError: If `start_dim` or `end_dim` is not int.
1701
1662
  ValueError: If `start_dim` is greater than `end_dim` after canonicalized.
1702
1663
  ValueError: If `start_dim` or `end_dim` is not in range of [-input.dim, input.dim-1].
@@ -1741,7 +1702,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
1741
1702
  return reshape_(input, (-1,))
1742
1703
  perm = ops.make_range(0, x_rank)
1743
1704
  new_order = ops.tuple_reversed(perm)
1744
- input = _get_cache_prim(P.Transpose)()(input, new_order)
1705
+ input = transpose_(input, new_order)
1745
1706
 
1746
1707
  # Handle the default case.
1747
1708
  x_shape = shape_(input)
@@ -1749,7 +1710,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
1749
1710
  if start_dim == 1 and end_dim == -1:
1750
1711
  if x_rank in (0, 1):
1751
1712
  return reshape_(input, (-1,))
1752
- return _get_cache_prim(P.Flatten)()(input)
1713
+ return flatten_(input)
1753
1714
 
1754
1715
  # Check axis.
1755
1716
  start_dim = canonicalize_axis(start_dim, x_rank)
@@ -1771,341 +1732,6 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
1771
1732
  return reshape_(input, new_shape)
1772
1733
 
1773
1734
 
1774
- @constexpr
1775
- def _check_select_type_match(scalar, tensor_type, scalar_name, tensor_name):
1776
- if isinstance(scalar, int) and tensor_type != mstype.int32:
1777
- raise TypeError(f"For functional operator[select], the input[{scalar_name}] is int, "
1778
- f"then the input[{tensor_name}] must be a Tensor of int32.")
1779
- if isinstance(scalar, float) and tensor_type != mstype.float32:
1780
- raise TypeError(f"For functional operator[select], the input[{scalar_name}] is float, "
1781
- f"then the input[{tensor_name}] must be a Tensor of float32.")
1782
-
1783
-
1784
- @_primexpr
1785
- def _check_select_shape_match(input_shape, cond_shape, tensor_name):
1786
- if input_shape != cond_shape:
1787
- raise ValueError(f"For functional operator[select], the cond shape must be same as {tensor_name} shape.")
1788
-
1789
-
1790
- @constexpr
1791
- def _check_select_type(is_cond_tensor, is_x_scalar, is_y_scalar, is_x_tensor, is_y_tensor):
1792
- if not is_cond_tensor:
1793
- raise TypeError(f"For functional operator[select], the input[cond] must be a Tensor.")
1794
- if is_x_scalar and not is_y_tensor:
1795
- raise TypeError(f"For functional operator[select], the input[x] is int or float, "
1796
- f"then the input[y] must be a Tensor.")
1797
- if is_y_scalar and not is_x_tensor:
1798
- raise TypeError(f"For functional operator[select], the input[y] is int or float, "
1799
- f"then the input[x] must be a Tensor.")
1800
-
1801
-
1802
- @constexpr
1803
- def _check_select_shape_same(cond_shape, x_shape, y_shape):
1804
- """Check if input of select has same shape."""
1805
- return cond_shape == x_shape and x_shape == y_shape and cond_shape == y_shape
1806
-
1807
-
1808
- @constexpr
1809
- def get_max_value(x, y, z):
1810
- """Get the maximum value of x, y and z."""
1811
- if x >= y and x >= z:
1812
- return x
1813
- if y >= x and y >= z:
1814
- return y
1815
- return z
1816
-
1817
-
1818
- @constexpr
1819
- def _calc_broadcast_shape(cond_shape, x_shape, y_shape):
1820
- """Calculate broadcast shape for select"""
1821
- converted_shape = []
1822
- cond_reverse = cond_shape[::-1]
1823
- x_reverse = x_shape[::-1]
1824
- y_reverse = y_shape[::-1]
1825
- max_len = get_max_value(len(cond_reverse), len(x_reverse), len(y_reverse))
1826
- i = 0
1827
- while i < max_len:
1828
- cond_element = 1 if i >= len(cond_reverse) else cond_reverse[i]
1829
- x_element = 1 if i >= len(x_reverse) else x_reverse[i]
1830
- y_element = 1 if i >= len(y_reverse) else y_reverse[i]
1831
- broadcast_element = get_max_value(cond_element, x_element, y_element)
1832
- if cond_element not in (1, broadcast_element):
1833
- raise ValueError(f"For select, condition input can not broadcast at index {i}")
1834
- if x_element not in (1, broadcast_element):
1835
- raise ValueError(f"For select, x input can not broadcast at index {i}")
1836
- if y_element not in (1, broadcast_element):
1837
- raise ValueError(f"For select, y input can not broadcast at index {i}")
1838
- converted_shape.append(broadcast_element)
1839
- i = i + 1
1840
- converted_shape.reverse()
1841
- return tuple(converted_shape)
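The helper implements standard right-aligned broadcasting over three shapes: each aligned dimension must be 1 or equal to the running maximum. A quick NumPy cross-check of the same rule (illustrative only, not part of the removed code):

import numpy as np

# Align (2, 1, 3), (1, 4, 3) and (4, 1) from the right:
#   2 1 3
#   1 4 3
#     4 1   ->  (2, 4, 3)
assert np.broadcast_shapes((2, 1, 3), (1, 4, 3), (4, 1)) == (2, 4, 3)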
1842
-
1843
-
1844
- def select(cond, x, y):
1845
- r"""
1846
- The conditional tensor determines whether the corresponding element in the output must be
1847
- selected from `x` (if true) or `y` (if false) based on the value of each element.
1848
-
1849
- It can be defined as:
1850
-
1851
- .. math::
1852
- out_i = \begin{cases}
1853
- x_i, & \text{if } cond_i \\
1854
- y_i, & \text{otherwise}
1855
- \end{cases}
1856
-
1857
- Args:
1858
- cond (Tensor[bool]): The condition tensor, decides which element is chosen.
1859
- The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
1860
- x (Union[Tensor, int, float]): The first Tensor or number to be selected.
1861
- If x is a Tensor, the shape is or can be broadcast to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
1862
- If x is an int or a float, it will be cast to the type of int32 or float32,
1863
- and broadcast to the same shape as y. One of x and y must be a Tensor.
1864
- y (Union[Tensor, int, float]): The second Tensor or number to be selected.
1865
- If y is a Tensor, the shape is or can be broadcast to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
1866
- If y is an int or a float, it will be cast to the type of int32 or float32,
1867
- and broadcast to the same shape as x. One of x and y must be a Tensor.
1868
-
1869
- Returns:
1870
- Tensor, has the same shape as `cond`.
1871
-
1872
- Raises:
1873
- TypeError: If `x` or `y` is not a Tensor, int or float.
1874
- ValueError: The shapes of inputs can not be broadcast.
1875
-
1876
- Supported Platforms:
1877
- ``Ascend`` ``GPU`` ``CPU``
1878
-
1879
- Examples:
1880
- >>> import mindspore
1881
- >>> from mindspore import Tensor, ops
1882
- >>> # 1) Both inputs are Tensor
1883
- >>>
1884
- >>> cond = Tensor([True, False])
1885
- >>> x = Tensor([2,3], mindspore.float32)
1886
- >>> y = Tensor([1,2], mindspore.float32)
1887
- >>> output = ops.select(cond, x, y)
1888
- >>> print(output)
1889
- [2. 2.]
1890
- >>> # 2) y is a float
1891
- >>> cond = Tensor([True, False])
1892
- >>> x = Tensor([2,3], mindspore.float32)
1893
- >>> y = 2.0
1894
- >>> output = ops.select(cond, x, y)
1895
- >>> print(output)
1896
- [2. 2.]
1897
- """
1898
- is_x_scalar = isinstance(x, (int, float))
1899
- is_y_scalar = isinstance(y, (int, float))
1900
- is_x_tensor = isinstance(x, Tensor)
1901
- is_y_tensor = isinstance(y, Tensor)
1902
- is_cond_tensor = isinstance(cond, Tensor)
1903
- _check_select_type(is_cond_tensor, is_x_scalar, is_y_scalar, is_x_tensor, is_y_tensor)
1904
- input_x = x
1905
- input_y = y
1906
- if is_x_scalar:
1907
- _check_select_shape_match(y.shape, cond.shape, "y")
1908
- _check_select_type_match(x, y.dtype, "x", "y")
1909
- input_x = zeros_like_(y) + x
1910
- if isinstance(x, int):
1911
- input_x = cast_(input_x, mstype.int32)
1912
- else:
1913
- input_x = cast_(input_x, mstype.float32)
1914
-
1915
- if is_y_scalar:
1916
- _check_select_shape_match(x.shape, cond.shape, "x")
1917
- _check_select_type_match(y, x.dtype, "y", "x")
1918
- input_y = zeros_like_(x) + y
1919
- if isinstance(y, int):
1920
- input_y = cast_(input_y, mstype.int32)
1921
- else:
1922
- input_y = cast_(input_y, mstype.float32)
1923
-
1924
- if is_x_tensor and is_y_tensor and is_cond_tensor:
1925
- x_shape = ops.shape(x)
1926
- y_shape = ops.shape(y)
1927
- cond_shape = ops.shape(cond)
1928
- all_constant = ops.isconstant(cond_shape) and ops.isconstant(x_shape) and ops.isconstant(y_shape)
1929
- if all_constant and not _check_select_shape_same(cond_shape, x_shape, y_shape):
1930
- broadcast_shape = _calc_broadcast_shape(cond_shape, x_shape, y_shape)
1931
- new_cond = ops.broadcast_to(cond, broadcast_shape)
1932
- new_x = ops.broadcast_to(x, broadcast_shape)
1933
- new_y = ops.broadcast_to(y, broadcast_shape)
1934
- return tensor_select_(new_cond, new_x, new_y)
1935
-
1936
- return tensor_select_(cond, input_x, input_y)
1937
-
1938
-
1939
- def strided_slice(input_x,
1940
- begin,
1941
- end,
1942
- strides,
1943
- begin_mask=0,
1944
- end_mask=0,
1945
- ellipsis_mask=0,
1946
- new_axis_mask=0,
1947
- shrink_axis_mask=0):
1948
- r"""
1949
- Extracts a strided slice of a Tensor based on `begin/end` index and `strides`.
1950
-
1951
- This operation extracts a fragment of size (end-begin)/strides from the given `input_x`.
1952
- Starting from the position `begin`, the index is repeatedly advanced by `strides` until
1953
- every dimension reaches its ending position.
1954
-
1955
- Note:
1956
- - `begin` , `end` and `strides` must have the same shape.
1957
- - `begin` , `end` and `strides` are all 1-D Tensor, and their shape size
1958
- must not be greater than the dim of `input_x`.
1959
-
1960
- During the slicing process, the fragment (end-begin)/strides are extracted from each dimension.
1961
-
1962
- Example: For Tensor `input_x` with shape :math:`(5, 6, 7)`,
1963
- set `begin`, `end` and `strides` to (1, 3, 2), (3, 5, 6),
1964
- (1, 1, 2) respectively, then elements from index 1 to 3 are extracted for dim 0, index 3 to 5
1965
- are extracted for dim 1, and index 2 to 6 with a stride of 2 are extracted for dim 2; this
1966
- process is equivalent to the pythonic slice `input_x[1:3, 3:5, 2:6:2]`.
1967
-
1968
- If the length of `begin`, `end` and `strides` is smaller than the dim of `input_x`,
1969
- then all elements are extracted from the missing dims; it behaves as if the missing entries
1970
- were filled with zeros (`begin`), the size of that dim (`end`), and ones (`strides`).
1971
-
1972
- Example: For Tensor `input_x` with shape :math:`(5, 6, 7)`,
1973
- set `begin`, `end` and `strides` to (1, 3),
1974
- (3, 5), (1, 1) respectively, then elements from index 1 to 3 are extracted
1975
- for dim 0, index 3 to 5 are extracted for dim 1, and all elements are extracted
1976
- for dim 2; this process is equivalent to the pythonic slice `input_x[1:3, 3:5, 0:7]`.
1977
-
1978
- Here's how a mask works:
1979
- For each specific mask, it will be converted to a binary representation internally, and then
1980
- the result is reversed to start the calculation. For a Tensor `input_x` with
1981
- shape :math:`(5, 6, 7)`, given a mask value of 3, which
1982
- can be represented as 0b011, reversing it gives 0b110, which implies the first and second dims of the
1983
- original Tensor will be affected by this mask. See the examples below; for simplicity, all masks mentioned
1984
- below are given in their reversed binary form:
1985
-
1986
- - `begin_mask` and `end_mask`
1987
-
1988
- If the ith bit of `begin_mask` is 1, `begin[i]` is ignored and the fullest
1989
- possible range in that dimension is used instead. `end_mask` is analogous,
1990
- except with the end range. For Tensor `input_x` with shape :math:`(5, 6, 7, 8)`, if `begin_mask`
1991
- is 0b110, `end_mask` is 0b011, the slice `input_x[0:3, 0:6, 2:7:2]` is produced.
1992
-
1993
- - `ellipsis_mask`
1994
-
1995
- If the ith bit of `ellipsis_mask` is 1, as many unspecified dimensions as needed
1996
- will be inserted between other dimensions. Only one non-zero bit is allowed
1997
- in `ellipsis_mask`. For Tensor `input_x` with shape :math:`(5, 6, 7, 8)`, `input_x[2:,...,:6]`
1998
- is equivalent to `input_x[2:5,:,:,0:6]` , `input_x[2:,...]` is equivalent
1999
- to `input_x[2:5,:,:,:]`.
2000
-
2001
- - `new_axis_mask`
2002
-
2003
- If the ith bit of `new_axis_mask` is 1, `begin`, `end` and `strides` are
2004
- ignored and a new length 1 dimension is added at the specified position
2005
- in the output Tensor. For Tensor `input_x` with shape :math:`(5, 6, 7)`, if `new_axis_mask`
2006
- is 0b110, a new dim is added to the second dim, which will produce
2007
- a Tensor with shape :math:`(5, 1, 6, 7)`.
2008
-
2009
- - `shrink_axis_mask`
2010
-
2011
- If the ith bit of `shrink_axis_mask` is 1, `begin`, `end` and `strides`
2012
- are ignored and dimension i will be shrunk to 0.
2013
- For Tensor `input_x` with shape :math:`(5, 6, 7)`,
2014
- if `shrink_axis_mask` is 0b010, it is equivalent to slice `x[:, 5, :]`
2015
- and results in an output shape of :math:`(5, 7)`.
2016
-
2017
- Note:
2018
- `new_axis_mask` and `shrink_axis_mask` are not recommended to
2019
- use at the same time, it might incur unexpected result.
2020
-
2021
- Args:
2022
- input_x (Tensor): The input Tensor to be extracted from.
2023
- begin (tuple[int]): A tuple which represents the location where to start.
2024
- end (tuple[int]): A tuple which represents the maximum location where to end.
2025
- strides (tuple[int]): A tuple which represents the stride that is continuously added
2026
- before reaching the maximum location. Only int is allowed, it can be negative
2027
- which results in reversed slicing.
2028
- begin_mask (int, optional): Starting index of the slice. Default: ``0`` .
2029
- end_mask (int, optional): Ending index of the slice. Default: ``0`` .
2030
- ellipsis_mask (int, optional): An int mask, ignore slicing operation when set to 1. Default: ``0`` .
2031
- new_axis_mask (int, optional): An int mask for adding new dims. Default: ``0`` .
2032
- shrink_axis_mask (int, optional): An int mask for shrinking dims. Default: ``0`` .
2033
-
2034
- Returns:
2035
- Tensor, the strided slice extracted from `input_x` based on `begin/end` index and `strides`.
2036
-
2037
- Raises:
2038
- TypeError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or
2039
- `shrink_axis_mask` is not an int.
2040
- TypeError: If `begin`, `end` or `strides` is not tuple[int].
2041
- ValueError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or
2042
- `shrink_axis_mask` is less than 0.
2043
- ValueError: If `begin`, `end` and `strides` have different shapes.
2044
-
2045
- Supported Platforms:
2046
- ``Ascend`` ``GPU`` ``CPU``
2047
-
2048
- Examples:
2049
- >>> import mindspore
2050
- >>> from mindspore import Tensor, ops
2051
- >>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
2052
- ... [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
2053
- >>> output = ops.strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1))
2054
- >>> # Take this " output = strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1)) " as an example,
2055
- >>> # start = [1, 0, 2] , end = [3, 1, 3], strides = [1, 1, 1], Find a segment of (start, end),
2056
- >>> # note that end is an open interval
2057
- >>> # To facilitate understanding, this operator can be divided into three steps:
2058
- >>> # Step 1: Calculation of the first dimension:
2059
- >>> # start = 1, end = 3, strides = 1, so the 1st and 2nd rows can be taken, which gives the output at this step.
2060
- >>> # output_1st =
2061
- >>> # [
2062
- >>> # [
2063
- >>> # [3,3,3]
2064
- >>> # [4,4,4]
2065
- >>> # ]
2066
- >>> # [
2067
- >>> # [5,5,5]
2068
- >>> # [6,6,6]
2069
- >>> # ]
2070
- >>> # ]
2071
- >>> # Step 2: Calculation of the second dimension
2072
- >>> # 2nd dimension, start = 0, end = 1, strides = 1. So only the 0th row
2073
- >>> # can be taken, which gives the output at this step.
2074
- >>> # output_2nd =
2075
- >>> # [
2076
- >>> # [
2077
- >>> # [3,3,3]
2078
- >>> # ]
2079
- >>> # [
2080
- >>> # [5,5,5]
2081
- >>> # ]
2082
- >>> # ]
2083
- >>> # Step 3: Calculation of the third dimension
2084
- >>> # 3rd dimension, start = 2, end = 3, strides = 1, so only the 2nd col can be taken,
2085
- >>> # which gives the final output at this step.
2086
- >>> # output_3rd =
2087
- >>> # [
2088
- >>> # [
2089
- >>> # [3]
2090
- >>> # ]
2091
- >>> # [
2092
- >>> # [5]
2093
- >>> # ]
2094
- >>> # ]
2095
- >>> # The final output after finishing is:
2096
- >>> print(output)
2097
- [[[3.]]
2098
- [[5.]]]
2099
- >>> # Another example:
2100
- >>> output = ops.strided_slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
2101
- >>> print(output)
2102
- [[[3. 3. 3.]]]
2103
- """
2104
- strided_slice_ = _get_cache_prim(P.StridedSlice)(
2105
- begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask)
2106
- return strided_slice_(input_x, begin, end, strides)
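The bit convention described in the docstring is easy to check in plain Python. A small illustrative sketch (not MindSpore code) of which dims a mask value touches:

def dims_affected(mask, rank):
    # Bit i of the mask (least significant bit first) selects dim i,
    # which is what the docstring's "reversed binary form" describes.
    return [i for i in range(rank) if (mask >> i) & 1]

assert dims_affected(0b011, 4) == [0, 1]  # mask value 3 touches dims 0 and 1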
2107
-
2108
-
2109
1735
  def slice(input_x, begin, size):
2110
1736
  r"""
2111
1737
  Slices a tensor in the specified shape.
@@ -2160,20 +1786,6 @@ def slice(input_x, begin, size):
2160
1786
  return tensor_slice(input_x, begin, size)
2161
1787
 
2162
1788
 
2163
- def concat(tensors, axis=0):
2164
- """
2165
- Alias for :func:`mindspore.ops.cat()`.
2166
-
2167
- Tutorial Examples:
2168
- - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/r2.2/beginner/tensor.html#tensor-operation>`_
2169
- - `Vision Transformer Image Classification - Building ViT as a whole
2170
- <https://mindspore.cn/tutorials/application/en/r2.2/cv/vit.html#building-vit-as-a-whole>`_
2171
- - `Sentiment Classification Implemented by RNN - Dense
2172
- <https://mindspore.cn/tutorials/application/en/r2.2/nlp/sentiment_analysis.html#dense>`_
2173
- """
2174
- return cat(tensors, axis)
2175
-
2176
-
2177
1789
  def stack(tensors, axis=0):
2178
1790
  r"""
2179
1791
  Stacks a list of tensors in specified axis.
@@ -2284,45 +1896,6 @@ def unbind(input, dim=0):
2284
1896
  return _unstack(input)
2285
1897
 
2286
1898
 
2287
- def expand_dims(input_x, axis):
2288
- """
2289
- Adds an additional dimension to `input_x` at the given axis, the dimension
2290
- of `input_x` should be greater than or equal to 1.
2291
-
2292
- Note:
2293
- If the specified axis is a negative number, the index is counted
2294
- backward from the end and starts at 1.
2295
-
2296
- Args:
2297
- input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
2298
- axis (int): Specifies the dimension index at which to expand
2299
- the shape of `input_x`. The value of axis must be in the range
2300
- `[-input_x.ndim-1, input_x.ndim]`. Only constant value is allowed.
2301
-
2302
- Returns:
2303
- Tensor, the shape of tensor is :math:`(1, x_1, x_2, ..., x_R)` if the
2304
- value of `axis` is 0. It has the same data type as `input_x`.
2305
-
2306
- Raises:
2307
- TypeError: If `axis` is not an int.
2308
- ValueError: If `axis` is not in the valid range :math:`[-a.ndim-1, a.ndim]`.
2309
-
2310
- Supported Platforms:
2311
- ``Ascend`` ``GPU`` ``CPU``
2312
-
2313
- Examples:
2314
- >>> import mindspore
2315
- >>> import numpy as np
2316
- >>> from mindspore import Tensor, ops
2317
- >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
2318
- >>> output = ops.expand_dims(input_tensor, 0)
2319
- >>> print(output)
2320
- [[[2. 2.]
2321
- [2. 2.]]]
2322
- """
2323
- return expand_dims_(input_x, axis)
2324
-
2325
-
2326
1899
  def unsqueeze(input, dim):
2327
1900
  """
2328
1901
  Adds an additional dimension to `input` at the given dim.
@@ -2354,7 +1927,7 @@ def unsqueeze(input, dim):
2354
1927
  [[[2. 2.]
2355
1928
  [2. 2.]]]
2356
1929
  """
2357
- return expand_dims_(input, dim)
1930
+ return expand_dims(input, dim)
2358
1931
 
2359
1932
 
2360
1933
  def squeeze(input, axis=None):
@@ -2411,57 +1984,6 @@ def squeeze(input, axis=None):
2411
1984
  return squeeze_(input)
2412
1985
 
2413
1986
 
2414
- def transpose(input, input_perm):
2415
- """
2416
- Permutes the dimensions of the input tensor according to input permutation.
2417
-
2418
- For a 1-D array this has no effect, as a transposed vector is simply the same vector.
2419
- To convert a 1-D array into a 2-D column vector, please refer to the class mindspore.ops.ExpandDims.
2420
- For a 2-D array, this is a standard matrix transpose. For an n-D array, if axes are given,
2421
- their order indicates how the axes are permuted (see Examples).
2422
- If axes are not provided and a.shape is :math:`(i[0], i[1], ... i[n-2], i[n-1])`,
2423
- then a.transpose().shape is :math:`(i[n-1], i[n-2], ... i[1], i[0])`.
2424
-
2425
- Note:
2426
- On GPU and CPU, if the value of `input_perm` is negative, its actual value is `input_perm[i] + rank(input)`.
2427
- Negative value of `input_perm` is not supported on Ascend.
2428
-
2429
- Args:
2430
- input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
2431
- input_perm (tuple[int]): The permutation to be converted. The elements in `input_perm` are composed of
2432
- the indexes of each dimension of `input`. The length of `input_perm` and the shape of `input` must be
2433
- the same. Only constant value is allowed. Must be in the range [-rank(input), rank(input)).
2434
-
2435
- Returns:
2436
- Tensor, the type of output tensor is the same as `input` and the shape of output tensor is decided by the
2437
- shape of `input` and the value of `input_perm`.
2438
-
2439
- Raises:
2440
- TypeError: If `input_perm` is not a tuple.
2441
- ValueError: If length of shape of `input` is not equal to length of shape of `input_perm`.
2442
- ValueError: If the same element exists in `input_perm`.
2443
-
2444
- Supported Platforms:
2445
- ``Ascend`` ``GPU`` ``CPU``
2446
-
2447
- Examples:
2448
- >>> import mindspore
2449
- >>> import numpy as np
2450
- >>> from mindspore import Tensor, ops
2451
- >>> input = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
2452
- >>> input_perm = (0, 2, 1)
2453
- >>> output = ops.transpose(input, input_perm)
2454
- >>> print(output)
2455
- [[[ 1. 4.]
2456
- [ 2. 5.]
2457
- [ 3. 6.]]
2458
- [[ 7. 10.]
2459
- [ 8. 11.]
2460
- [ 9. 12.]]]
2461
- """
2462
- return transpose_(input, input_perm)
2463
-
2464
-
2465
1987
  def scatter_mul(input_x, indices, updates):
2466
1988
  r"""
2467
1989
  Using given values to update tensor value through the mul operation, along with the input indices.
@@ -2792,111 +2314,6 @@ def scatter_div(input_x, indices, updates):
2792
2314
  return scatter_div_(input_x, indices, updates)
2793
2315
 
2794
2316
 
2795
- def scatter_nd(indices, updates, shape):
2796
- r"""
2797
- Scatters a tensor into a new tensor depending on the specified indices.
2798
-
2799
- Creates an empty tensor with the given `shape`, and set values by scattering the update tensor
2800
- depending on indices. The empty tensor has rank :math:`P` and `indices` has rank :math:`Q`.
2801
-
2802
- The `shape` is :math:`(s_0, s_1, ..., s_{P-1})`, where :math:`P \ge 1`.
2803
-
2804
- `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)`, where :math:`Q \ge 2` and :math:`N \le P`.
2805
-
2806
- The last dimension of `indices` (with length :math:`N` ) indicates slices along the :math:`N` th dimension of the
2807
- empty tensor.
2808
-
2809
- `updates` is a tensor of rank :math:`Q-1+P-N`, and
2810
- its shape is :math:`(i_0, i_1, ..., i_{Q-2}, s_N, s_{N+1}, ..., s_{P-1})`.
2811
-
2812
- If `indices` contains duplicates, the duplicate `updates` are summed.
2813
-
2814
- The following figure shows the calculation process of inserting two new value matrices into the first dimension
2815
- with rank-3:
2816
-
2817
- .. image:: ScatterNd.png
2818
-
2819
- Args:
2820
- indices (Tensor): Define the index of scattering in the new tensor with int32 or int64 data type.
2821
- The rank of `indices` must be at least 2 and `indices.shape[-1] <= len(shape)`.
2822
- updates (Tensor): Define the source Tensor to be updated.
2823
- It has shape `indices.shape[:-1] + shape[indices.shape[-1]:]`.
2824
- shape (tuple[int]): Define the shape of the output tensor, has the same data type as indices.
2825
- `shape` can not be empty, and the elements in `shape` must be greater than or equal to 1.
2826
-
2827
- Returns:
2828
- Tensor, the new tensor, has the same type as `updates` and the same shape as `shape`.
2829
-
2830
- Raises:
2831
- TypeError: If `shape` is not a tuple.
2832
- ValueError: If any element of `shape` is less than 1.
2833
-
2834
- Supported Platforms:
2835
- ``Ascend`` ``GPU`` ``CPU``
2836
-
2837
- Examples:
2838
- >>> import mindspore
2839
- >>> import numpy as np
2840
- >>> from mindspore import Tensor, ops
2841
- >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
2842
- >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2],
2843
- ... [3, 3, 3, 3], [4, 4, 4, 4]],
2844
- ... [[1, 1, 1, 1], [2, 2, 2, 2],
2845
- ... [3, 3, 3, 3], [4, 4, 4, 4]]]), mindspore.float32)
2846
- >>> shape = (4, 4, 4)
2847
- >>> output = ops.scatter_nd(indices, updates, shape)
2848
- >>> print(output)
2849
- [[[1. 1. 1. 1.]
2850
- [2. 2. 2. 2.]
2851
- [3. 3. 3. 3.]
2852
- [4. 4. 4. 4.]]
2853
- [[0. 0. 0. 0.]
2854
- [0. 0. 0. 0.]
2855
- [0. 0. 0. 0.]
2856
- [0. 0. 0. 0.]]
2857
- [[1. 1. 1. 1.]
2858
- [2. 2. 2. 2.]
2859
- [3. 3. 3. 3.]
2860
- [4. 4. 4. 4.]]
2861
- [[0. 0. 0. 0.]
2862
- [0. 0. 0. 0.]
2863
- [0. 0. 0. 0.]
2864
- [0. 0. 0. 0.]]]
2865
- >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
2866
- >>> updates = Tensor(np.array([3.2, 1.1]), mindspore.float32)
2867
- >>> shape = (3, 3)
2868
- >>> output = ops.scatter_nd(indices, updates, shape)
2869
- >>> # In order to facilitate understanding, explain the operator pseudo-operation process step by step:
2870
- >>> # Step 1: Generate an empty Tensor of the specified shape according to the shape
2871
- >>> # [
2872
- >>> # [0. 0. 0.]
2873
- >>> # [0. 0. 0.]
2874
- >>> # [0. 0. 0.]
2875
- >>> # ]
2876
- >>> # Step 2: Modify the data at the specified locations according to the indices
2877
- >>> # 0th row of indices is [0, 1], 0th row of updates is 3.2,
2878
- >>> # which means position [0, 1] of the empty tensor is set to 3.2
2879
- >>> # [
2880
- >>> # [0. 3.2 0.]
2881
- >>> # [0. 0. 0.]
2882
- >>> # [0. 0. 0.]
2883
- >>> # ]
2884
- >>> # 1st row of indices is [1, 1], 1st row of updates is 1.1,
2885
- >>> # which means position [1, 1] of the empty tensor is set to 1.1
2886
- >>> # [
2887
- >>> # [0. 3.2 0.]
2888
- >>> # [0. 1.1 0.]
2889
- >>> # [0. 0. 0.]
2890
- >>> # ]
2891
- >>> # The final result is as follows:
2892
- >>> print(output)
2893
- [[0. 3.2 0.]
2894
- [0. 1.1 0.]
2895
- [0. 0. 0.]]
2896
- """
2897
- return scatter_nd_(indices, updates, shape)
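The duplicate-summing rule stated above matches NumPy's unbuffered `np.add.at`; an illustrative equivalence (NumPy only, not the removed code):

import numpy as np

out = np.zeros(4, dtype=np.float32)
np.add.at(out, [1, 1, 3], [1.0, 2.0, 5.0])   # index 1 appears twice
assert out.tolist() == [0.0, 3.0, 0.0, 5.0]  # duplicate updates are summed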
2898
-
2899
-
2900
2317
  def scatter_update(input_x, indices, updates):
2901
2318
  r"""
2902
2319
  Updates tensor values by using input indices and value.
@@ -2946,8 +2363,7 @@ def scatter_update(input_x, indices, updates):
2946
2363
  [[2. 1.2 1.]
2947
2364
  [3. 1.2 1.]]
2948
2365
  """
2949
- scatter_update_inner = _get_cache_prim(P.ScatterUpdate)()
2950
- return scatter_update_inner(input_x, indices, updates)
2366
+ return scatter_update_(input_x, indices, updates)
2951
2367
 
2952
2368
 
2953
2369
  def scatter_nd_add(input_x, indices, updates, use_locking=False):
@@ -3414,8 +2830,8 @@ def sort(input_x, axis=-1, descending=False):
3414
2830
  are sorted in descending order, or else sorted in ascending order. Default: ``False`` .
3415
2831
 
3416
2832
  .. warning::
3417
- Currently, the data types of Float16, UInt8, Int8, Int16, Int32, Int64 are well supported.
3418
- If use Float32, it may cause loss of accuracy.
2833
+ Currently, the data types of float16, uint8, int8, int16, int32, int64 are well supported.
2834
+ If float32 is used, it may cause loss of accuracy.
3419
2835
 
3420
2836
  Returns:
3421
2837
 
@@ -3452,129 +2868,72 @@ def sort(input_x, axis=-1, descending=False):
3452
2868
  return _sort(input_x)
3453
2869
 
3454
2870
 
3455
- def argsort(input, axis=-1, descending=False):
2871
+ def sort_ext(input, *, dim=-1, descending=False, stable=False):
3456
2872
  r"""
3457
- Sorts the input tensor along the given dimension in specified order and return the sorted indices.
2873
+ Sorts the elements of the input tensor along the given dimension in the specified order.
2874
+
2875
+ .. warning::
2876
+ Currently, the data types of float16, uint8, int8, int16, int32, int64 are well supported.
2877
+ If float32 is used, it may cause loss of accuracy.
3458
2878
 
3459
2879
  Args:
3460
2880
  input(Tensor): The input tensor to sort.
3461
- axis (int): The axis to sort along. Default: ``-1`` , means the last dimension.
3462
- The Ascend backend only supports sorting the last dimension.
3463
- descending (bool): The sort order. If `descending` is True then the elements
3464
- are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
3465
-
3466
- Returns:
3467
- Tensor, the indices of sorted input tensor. Data type is int32.
3468
-
3469
- Supported Platforms:
3470
- ``Ascend`` ``GPU`` ``CPU``
3471
-
3472
- Examples:
3473
- >>> import mindspore
3474
- >>> import numpy as np
3475
- >>> from mindspore import Tensor, ops
3476
- >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
3477
- >>> sort = ops.argsort(x)
3478
- >>> print(sort)
3479
- [[2 1 0]
3480
- [2 0 1]
3481
- [0 1 2]]
3482
- """
3483
- _sort = _get_cache_prim(P.Sort)(axis, descending)
3484
- _, arg_sort = _sort(input)
3485
- return arg_sort
3486
-
3487
-
3488
- def gather(input_params, input_indices, axis, batch_dims=0):
3489
- r"""
3490
- Returns the slice of the input tensor corresponding to the elements of `input_indices` on the specified `axis`.
3491
-
3492
- The following figure shows the calculation process of Gather commonly:
3493
-
3494
- .. image:: Gather.png
3495
-
3496
- where params represents the input `input_params`, and indices represents the index to be sliced `input_indices`.
3497
-
3498
- .. note::
3499
- 1. The value of input_indices must be in the range of `[0, input_param.shape[axis])`.
3500
- On CPU and GPU, an error is raised if an out-of-bound index is found. On Ascend, the results may be
3501
- undefined.
3502
-
3503
- 2. The data type of input_params cannot be
3504
- `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ on Ascend
3505
- platform currently.
2881
+ The shape is :math:`(N, *)`, where :math:`*` means any number of additional dimensions.
3506
2882
 
3507
- Args:
3508
- input_params (Tensor): The original Tensor. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
3509
- input_indices (Tensor): Index tensor to be sliced, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
3510
- Specifies the indices of elements of the original Tensor. The data type can be int32 or int64.
3511
- axis (Union(int, Tensor[int])): Specifies the dimension index to gather indices.
3512
- It must be greater than or equal to `batch_dims`.
3513
- When `axis` is a Tensor, the size must be 1.
3514
- batch_dims (int): Specifies the number of batch dimensions. It must be less than or equal to the rank
3515
- of `input_indices`. Default: ``0`` .
2883
+ Keyword Args:
2884
+ dim (int, optional): The dimension to sort along. Default: ``-1``, means the last dimension.
2885
+ descending (bool, optional): Controls the sort order. If `descending` is True, the elements
2886
+ are sorted in descending order, or else sorted in ascending order. Default: ``False`` .
2887
+ stable (bool, optional): Controls the stability of the sort. If `stable` is True, the sorting routine
2888
+ becomes stable, preserving the order of equivalent elements. Default: ``False`` .
3516
2889
 
3517
2890
  Returns:
3518
- Tensor, the shape of tensor is
3519
- :math:`input\_params.shape[:axis] + input\_indices.shape[batch\_dims:] + input\_params.shape[axis + 1:]`.
2891
+ - y1, a tensor whose values are the sorted values, with the same shape and data type as input.
2892
+ - y2, a tensor that consists of the indices of the elements in the original input tensor.
2893
+ Data type is int64.
3520
2894
 
3521
2895
  Raises:
3522
- TypeError: If `axis` is not an int or Tensor.
3523
- ValueError: If `axis` is a Tensor and its size is not 1.
3524
- TypeError: If `input_params` is not a tensor.
3525
- TypeError: If `input_indices` is not a tensor of type int.
3526
- RuntimeError: If `input_indices` is out of range `[0, input_param.shape[axis])` on CPU or GPU.
2896
+ TypeError: If `dim` is not an int.
2897
+ TypeError: If `descending` is not a bool.
2898
+ TypeError: If the dtype of `input` is not one of float16, float32, uint8, int8, int16, int32, int64, bfloat16.
2899
+ TypeError: If `stable` is not a bool.
2900
+ ValueError: If `dim` is not in range of [-len(input_x.shape), len(input_x.shape)).
3527
2901
 
3528
2902
  Supported Platforms:
3529
- ``Ascend`` ``GPU`` ``CPU``
2903
+ ``Ascend``
3530
2904
 
3531
- Examples:
3532
- >>> import mindspore
3533
- >>> import numpy as np
3534
- >>> from mindspore import Tensor, ops
3535
- >>> # case1: input_indices is a Tensor with shape (5, ).
3536
- >>> input_params = Tensor(np.array([1, 2, 3, 4, 5, 6, 7]), mindspore.float32)
3537
- >>> input_indices = Tensor(np.array([0, 2, 4, 2, 6]), mindspore.int32)
3538
- >>> axis = 0
3539
- >>> output = ops.gather(input_params, input_indices, axis)
3540
- >>> print(output)
3541
- [1. 3. 5. 3. 7.]
3542
- >>> # case2: input_indices is a Tensor with shape (2, 2). When the input_params has one dimension,
3543
- >>> # the output shape is equal to the input_indices shape.
3544
- >>> input_indices = Tensor(np.array([[0, 2], [2, 6]]), mindspore.int32)
3545
- >>> axis = 0
3546
- >>> output = ops.gather(input_params, input_indices, axis)
3547
- >>> print(output)
3548
- [[1. 3.]
3549
- [3. 7.]]
3550
- >>> # case3: input_indices is a Tensor with shape (2, ) and
3551
- >>> # input_params is a Tensor with shape (3, 4) and axis is 0.
3552
- >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
3553
- >>> input_indices = Tensor(np.array([0, 2]), mindspore.int32)
3554
- >>> axis = 0
3555
- >>> output = ops.gather(input_params, input_indices, axis)
3556
- >>> print(output)
3557
- [[ 1. 2. 3. 4.]
3558
- [ 9. 10. 11. 12.]]
3559
- >>> # case4: input_indices is a Tensor with shape (2, ) and
3560
- >>> # input_params is a Tensor with shape (3, 4) and axis is 1, batch_dims is 1.
3561
- >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
3562
- >>> input_indices = Tensor(np.array([0, 2, 1]), mindspore.int32)
3563
- >>> axis = 1
3564
- >>> batch_dims = 1
3565
- >>> output = ops.gather(input_params, input_indices, axis, batch_dims)
2905
+ Examples:
2906
+ >>> import mindspore
2907
+ >>> import numpy as np
2908
+ >>> from mindspore import Tensor, ops
2909
+ >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
2910
+ >>> output = ops.function.array_func.sort_ext(x)
2911
+ >>> # The output below is based on the Ascend platform.
3566
2912
  >>> print(output)
3567
- [ 1. 7. 10.]
2913
+ (Tensor(shape=[3, 3], dtype=Float16, value=
2914
+ [[ 1.0000e+00, 2.0000e+00, 8.0000e+00],
2915
+ [ 3.0000e+00, 5.0000e+00, 9.0000e+00],
2916
+ [ 4.0000e+00, 6.0000e+00, 7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int64, value=
2917
+ [[2, 1, 0],
2918
+ [2, 0, 1],
2919
+ [0, 1, 2]]))
3568
2920
  """
3569
- _gather = _get_cache_prim(P.Gather)(batch_dims)
3570
- return _gather(input_params, input_indices, axis)
2921
+ return sort_ext_(input, dim, descending, stable)
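What the new `stable` flag promises can be shown with Python's built-in `sorted`, which is always stable (illustrative only; `sort_ext` itself is stable only when `stable=True`):

vals = [3, 1, 3, 1]
order = sorted(range(len(vals)), key=vals.__getitem__)
assert order == [1, 3, 0, 2]  # among equal values, original positions are kept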
3571
2922
 
3572
2923
 
3573
- def gather_d(x, dim, index):
3574
- """
3575
- Gathers elements along an axis specified by dim.
2924
+ def argsort(input, axis=-1, descending=False):
2925
+ r"""
2926
+ Sorts the input tensor along the given dimension in specified order and return the sorted indices.
2927
+
2928
+ Args:
2929
+ input(Tensor): The input tensor to sort.
2930
+ axis (int): The axis to sort along. Default: ``-1`` , means the last dimension.
2931
+ The Ascend backend only supports sorting the last dimension.
2932
+ descending (bool): The sort order. If `descending` is True then the elements
2933
+ are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
3576
2934
 
3577
- Refer to :func:`mindspore.ops.gather_elements` for more detail.
2935
+ Returns:
2936
+ Tensor, the indices of sorted input tensor. Data type is int32.
3578
2937
 
3579
2938
  Supported Platforms:
3580
2939
  ``Ascend`` ``GPU`` ``CPU``
@@ -3583,15 +2942,16 @@ def gather_d(x, dim, index):
3583
2942
  >>> import mindspore
3584
2943
  >>> import numpy as np
3585
2944
  >>> from mindspore import Tensor, ops
3586
- >>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
3587
- >>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
3588
- >>> dim = 1
3589
- >>> output = ops.gather_d(x, dim, index)
3590
- >>> print(output)
3591
- [[1 1]
3592
- [4 3]]
2945
+ >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
2946
+ >>> sort = ops.argsort(x)
2947
+ >>> print(sort)
2948
+ [[2 1 0]
2949
+ [2 0 1]
2950
+ [0 1 2]]
3593
2951
  """
3594
- return gather_d_(x, dim, index)
2952
+ _sort = _get_cache_prim(P.Sort)(axis, descending)
2953
+ _, arg_sort = _sort(input)
2954
+ return arg_sort
3595
2955
 
3596
2956
 
3597
2957
  def gather_elements(input, dim, index):
@@ -3608,26 +2968,29 @@ def gather_elements(input, dim, index):
3608
2968
 
3609
2969
  output[i][j][k] = x[i][j][index[i][j][k]] # if dim == 2
3610
2970
 
3611
- `input` and `index` have the same length of dimensions, and all dimensions except `dim` have the same size.
3612
- If `dim` = i, `input` is an n-D tensor with shape :math:`(z_0, z_1, ..., z_i, ..., z_{n-1})`,
3613
- the `index` must be an n-D tensor with shape :math:`(z_0, z_1, ..., y, ..., z_{n-1})`
3614
- where `y`>=1 and the output will have the same shape with `index`.
2971
+ `input` and `index` have the same number of dimensions, and `index.shape[axis] <= input.shape[axis]`
2972
+ where `axis` goes through all dimensions of `input` except `dim`.
2973
+
2974
+ .. warning::
2975
+ On Ascend, the behavior is unpredictable in the following cases:
2976
+
2977
+ - the value of `index` is not in the range `[-input.shape[dim], input.shape[dim])` in forward;
2978
+ - the value of `index` is not in the range `[0, input.shape[dim])` in backward.
3615
2979
 
3616
2980
  Args:
3617
2981
  input (Tensor): The input tensor.
3618
- dim (int): The axis along which to index. It must be int32 or int64. The value range is [-input.ndim,
3619
- input.ndim).
2982
+ dim (int): The axis along which to index. It must be int32 or int64. The value range is `[-input.ndim,
2983
+ input.ndim)`.
3620
2984
  index (Tensor): The indices of elements to gather. It can be one of the following data types:
3621
- int32, int64. The value range of each index element is [-input.shape(dim), input.shape(dim)).
2985
+ int32, int64. The value range of each index element is `[-input.shape(dim), input.shape(dim))`.
3622
2986
 
3623
2987
  Returns:
3624
- Tensor, has the same shape as index tensor, the shape of tensor is :math:`(z_0, z_1, ..., y, ..., z_{n-1})`,
3625
- and has the same data type with `input`.
2988
+ Tensor, has the same shape as `index` and the same data type as `input`.
3626
2989
 
3627
2990
  Raises:
3628
2991
  TypeError: If dtype of `dim` or `index` is neither int32 nor int64.
3629
2992
  ValueError: If length of shape of `input` is not equal to length of shape of `index`.
3630
- ValueError: If the size of the dimension except `dim` is not equal between `input` and `index`.
2993
+ ValueError: If, for any dimension other than `dim`, the size in `input` is less than the size in `index`.
3631
2994
  ValueError: If the value of `dim` is not in the expected range.
3632
2995
 
3633
2996
  Supported Platforms:
@@ -3648,48 +3011,6 @@ def gather_elements(input, dim, index):
3648
3011
  return gather_d_(input, dim, index)
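A loop-based reference of the indexing formula in the docstring, useful for checking small cases (illustrative NumPy sketch, not the MindSpore kernel):

import numpy as np

def gather_elements_ref(x, dim, index):
    out = np.empty(index.shape, dtype=x.dtype)
    for pos in np.ndindex(*index.shape):
        src = list(pos)
        src[dim] = index[pos]       # out[pos] takes x at index[pos] along dim
        out[pos] = x[tuple(src)]
    return out

x = np.array([[1, 2], [3, 4]])
idx = np.array([[0, 0], [1, 0]])
assert gather_elements_ref(x, 1, idx).tolist() == [[1, 1], [4, 3]]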
3649
3012
 
3650
3013
 
3651
- def gather_nd(input_x, indices):
3652
- r"""
3653
- Gathers slices from a tensor by indices.
3654
-
3655
- Using given indices to gather slices from a tensor with a specified shape.
3656
-
3657
- `indices` is an K-dimensional integer tensor. Supposes it as a (K-1)-dimensional tensor and each element of it
3658
- defines a slice of `input_x`:
3659
-
3660
- .. math::
3661
- output[(i_0, ..., i_{K-2})] = input\_x[indices[(i_0, ..., i_{K-2})]]
3662
-
3663
- The last dimension of `indices` can not more than the rank of `input_x`:
3664
- :math:`indices.shape[-1] <= input\_x.rank`.
3665
-
3666
- Args:
3667
- input_x (Tensor): The target tensor to gather values.
3668
- indices (Tensor): The index tensor, with int32 or int64 data type.
3669
-
3670
- Returns:
3671
- Tensor, has the same type as `input_x` and the shape is
3672
- :math:`indices\_shape[:-1] + input\_x\_shape[indices\_shape[-1]:]`.
3673
-
3674
- Raises:
3675
- ValueError: If length of shape of `input_x` is less than the last dimension of `indices`.
3676
-
3677
- Supported Platforms:
3678
- ``Ascend`` ``GPU`` ``CPU``
3679
-
3680
- Examples:
3681
- >>> import mindspore
3682
- >>> import numpy as np
3683
- >>> from mindspore import Tensor, ops
3684
- >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
3685
- >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
3686
- >>> output = ops.gather_nd(input_x, indices)
3687
- >>> print(output)
3688
- [-0.1 0.5]
3689
- """
3690
- return gather_nd_(input_x, indices)
3691
-
3692
-
3693
3014
  def tensor_scatter_add(input_x, indices, updates):
3694
3015
  r"""
3695
3016
  Creates a new tensor by adding the values from the positions in `input_x` indicated by
@@ -3700,7 +3021,7 @@ def tensor_scatter_add(input_x, indices, updates):
3700
3021
 
3701
3022
  The last axis of `indices` is the depth of each index vectors. For each index vector,
3702
3023
  there must be a corresponding value in `updates`. The shape of `updates` should be
3703
- equal to the shape of `input_x[indices]`. For more details, see use cases.
3024
+ equal to the shape of `input_x[indices]`. For more details, see Examples.
3704
3025
 
3705
3026
  .. math::
3706
3027
  output\left [indices \right ] = input\_x + update
@@ -3758,7 +3079,7 @@ def tensor_scatter_sub(input_x, indices, updates):
3758
3079
 
3759
3080
  The last axis of `indices` is the depth of each index vectors. For each index vector,
3760
3081
  there must be a corresponding value in `updates`. The shape of `updates` should be
3761
- equal to the shape of `input_x[indices]`. For more details, see use cases.
3082
+ equal to the shape of `input_x[indices]`. For more details, see Examples.
3762
3083
 
3763
3084
  .. math::
3764
3085
  output[indices] = input\_x - update
@@ -3943,14 +3264,12 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
3943
3264
  nondeterministic.
3944
3265
  - On Ascend, the reduction only support set to "none" for now.
3945
3266
  - On Ascend, the data type of `input_x` must be float16 or float32.
3267
+ - This is an experimental API that is subject to change or deletion.
3946
3268
 
3947
3269
  Note:
3948
3270
  If some values of the `indices` exceed the upper or lower bounds of the index of `input_x`, instead of raising
3949
3271
  an index error, the corresponding `updates` will not be updated to `input_x`.
3950
3272
 
3951
- .. warning::
3952
- This is an experimental API that is subject to change or deletion.
3953
-
3954
3273
  Args:
3955
3274
  input_x (Tensor): The target tensor. The rank must be at least 1.
3956
3275
  indices (Tensor): The index of `input_x` to do scatter operation whose data type must be mindspore.int32 or
@@ -4065,6 +3384,79 @@ def scatter(input, axis, index, src):
4065
3384
  return ops.tensor_scatter_elements(input_x=input, indices=index, updates=src, axis=axis)
4066
3385
 
4067
3386
 
3387
+ def scatter_add_ext(input, dim, index, src):
3388
+ """
3389
+ Adds each element of `src` to `input` at the position specified by `index`, along the dimension specified by `dim`.
3390
+ It takes three inputs `input`, `src` and `index` of the same rank r >= 1.
3391
+
3392
+ For a 3-D tensor, the operation updates input as follows:
3393
+
3394
+ .. code-block::
3395
+
3396
+ input[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
3397
+
3398
+ input[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
3399
+
3400
+ input[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
3401
+
3402
+ Args:
3403
+ input (Tensor): The target tensor. The rank must be at least 1.
3404
+ dim (int): Which dim to scatter. Accepted range is [-r, r) where r = rank(`input`).
3405
+ index (Tensor): The index of `input` to do scatter operation whose data type must be mindspore.int32 or
3406
+ mindspore.int64. Same rank as `input`. Except for the dimension specified by `dim`,
3407
+ the size of each dimension of `index` must be less than or equal to the size of
3408
+ the corresponding dimension of `input`.
3409
+ src (Tensor): The tensor doing the scatter operation with `input`, has the same type as `input` and
3410
+ the size of each dimension must be greater than or equal to that of `index`.
3411
+
3412
+ Returns:
3413
+ Tensor, has the same shape and type as `input`.
3414
+
3415
+ Raises:
3416
+ TypeError: If `index` is neither int32 nor int64.
3417
+ ValueError: If the rank of any one of `input`, `index` and `src` is less than 1.
3418
+ ValueError: If the rank of `input`, `index` and `src` is not the same.
3419
+ ValueError: If, outside dimension `dim`, the size of any dimension of `index` is greater than the size of
3420
+ the corresponding dimension of `input` .
3421
+ ValueError: If the size of any dimension of `src` is less than that of `index`.
3422
+
3423
+ Supported Platforms:
3424
+ ``Ascend``
3425
+
3426
+ Examples:
3427
+ >>> import numpy as np
3428
+ >>> import mindspore as ms
3429
+ >>> from mindspore import Tensor, ops
3430
+ >>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
3431
+ >>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
3432
+ >>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
3433
+ >>> out = ops.function.array_func.scatter_add_ext(input=input, dim=1, index=index, src=src)
3434
+ >>> print(out)
3435
+ [[1. 2. 11. 4. 13.]]
3436
+ >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
3437
+ >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
3438
+ >>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64)
3439
+ >>> out = ops.function.array_func.scatter_add_ext(input=input, dim=0, index=index, src=src)
3440
+ >>> print(out)
3441
+ [[1. 2. 3. 0. 0.]
3442
+ [0. 0. 0. 0. 0.]
3443
+ [4. 5. 6. 0. 0.]
3444
+ [0. 0. 0. 0. 0.]
3445
+ [7. 8. 9. 0. 0.]]
3446
+ >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
3447
+ >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
3448
+ >>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64)
3449
+ >>> out = ops.function.array_func.scatter_add_ext(input=input, dim=1, index=index, src=src)
3450
+ >>> print(out)
3451
+ [[1. 0. 2. 0. 3.]
3452
+ [4. 0. 5. 0. 6.]
3453
+ [7. 0. 8. 0. 9.]
3454
+ [0. 0. 0. 0. 0.]
3455
+ [0. 0. 0. 0. 0.]]
3456
+ """
3457
+ return scatter_add_ext_op(input, dim, index, src)
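The update rule in the docstring amounts to unbuffered index accumulation; a loop-based NumPy reference for small checks (illustrative, not the Ascend kernel):

import numpy as np

def scatter_add_ref(x, dim, index, src):
    out = x.copy()
    for pos in np.ndindex(*index.shape):
        tgt = list(pos)
        tgt[dim] = index[pos]   # e.g. dim == 1: out[i][index[i][j]] += src[i][j]
        out[tuple(tgt)] += src[pos]
    return out

x = np.array([[1., 2., 3., 4., 5.]])
assert scatter_add_ref(x, 1, np.array([[2, 4]]), np.array([[8., 8.]])).tolist() \
    == [[1., 2., 11., 4., 13.]]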
3458
+
3459
+
4068
3460
  def _get_slice_scatter_const(x_shape, axis, start, end, step):
4069
3461
  r"""
4070
3462
  Calculate the rank of input, embedded dimensions and index.
@@ -4074,7 +3466,7 @@ def _get_slice_scatter_const(x_shape, axis, start, end, step):
4074
3466
  start = start if start is not None else 0
4075
3467
  start = start if start >= 0 else start + x_rank
4076
3468
  end = end if end is not None else x_shape[axis]
4077
- end = end if end >= 0 else end + x_rank
3469
+ end = end if end >= 0 else end + x_shape[axis]
4078
3470
  end = end if end < x_shape[axis] else x_shape[axis]
4079
3471
  index = list(builtins.range(start, end, step))
4080
3472
  return x_rank, index, axis
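The fix above (and the matching one in `select_scatter` further down) normalizes a negative `end` by the length of the sliced axis rather than by the rank. A two-line illustration of why that matters, with hypothetical values:

x_shape, axis, end = (2, 8), 1, -1
assert end + x_shape[axis] == 7   # correct: -1 on an axis of length 8
assert end + len(x_shape) == 1    # the old `end + x_rank` silently truncated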
@@ -4121,6 +3513,8 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
4121
3513
  [1. 0. 1. 0. 1. 0.]
4122
3514
  [1. 0. 1. 0. 1. 0.]]
4123
3515
  """
3516
+ _check_is_tensor("input", input, "slice_scatter")
3517
+ _check_is_tensor("src", src, "slice_scatter")
4124
3518
  input_shape = input.shape
4125
3519
  input_rank, index, axis = _get_slice_scatter_const(input_shape, axis, start, end, step)
4126
3520
 
@@ -4136,6 +3530,8 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
4136
3530
  for _ in builtins.range(input_rank - axis - 1):
4137
3531
  index_tensor = index_tensor.expand_dims(-1)
4138
3532
  index_tensor = index_tensor.broadcast_to(src.shape)
3533
+ if index_tensor.dtype not in mstype.int_type:
3534
+ index_tensor = index_tensor.astype(mstype.int64)
4139
3535
  return tensor_scatter_elements(input, axis=axis, indices=index_tensor, updates=src)
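The body above builds an index grid over the target slice and scatters `src` into it; in NumPy terms the end result is simply strided-slice assignment (illustrative sketch only):

import numpy as np

x = np.zeros((3, 4))
src = np.ones((3, 2))
# What slice_scatter(input, src, axis=1, start=0, end=4, step=2) computes:
x[:, 0:4:2] = src
assert x.tolist() == [[1., 0., 1., 0.]] * 3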
4140
3536
 
4141
3537
 
@@ -4174,10 +3570,12 @@ def select_scatter(input, src, axis, index):
4174
3570
  [1. 1. 1.]
4175
3571
  [0. 0. 0.]]]
4176
3572
  """
3573
+ _check_is_tensor("input", input, "select_scatter")
3574
+ _check_is_tensor("src", src, "select_scatter")
4177
3575
  src = src.expand_dims(axis=axis)
4178
3576
  x_rank = input.ndim
4179
3577
  axis = axis if axis >= 0 else axis + x_rank
4180
- index = index if index >= 0 else index + x_rank
3578
+ index = index if index >= 0 else index + input.shape[axis]
4181
3579
  return slice_scatter(input, src, axis, start=index, end=index + 1)
4182
3580
 
4183
3581
 
@@ -4303,49 +3701,11 @@ def batch_to_space_nd(input_x, block_shape, crops):
4303
3701
  [3. 4.]]]]
4304
3702
  """
4305
3703
  if isinstance(block_shape, Tensor):
4306
- _batch_to_space_ndv2 = _get_cache_prim(P.BatchToSpaceNDV2)()
4307
- return _batch_to_space_ndv2(input_x, block_shape, crops)
3704
+ return batch_to_space_nd_v2_(input_x, block_shape, crops)
4308
3705
  _batch_to_space_nd = _get_cache_prim(P.BatchToSpaceND)(block_shape, crops)
4309
3706
  return _batch_to_space_nd(input_x)
4310
3707
 
4311
3708
 
4312
- def nonzero(input):
4313
- """
4314
- Return a Tensor of the positions of all non-zero values.
4315
-
4316
- Args:
4317
- input (Tensor): The input Tensor, its rank should be greater than or equal to 1.
4318
-
4319
- Returns:
4320
- Tensor, a 2-D Tensor whose data type is int64, containing the positions of all non-zero values of the input.
4321
-
4322
- Raises:
4323
- TypeError: If `input` is not Tensor.
4324
- ValueError: If dim of `input` equals 0.
4325
-
4326
- Supported Platforms:
4327
- ``Ascend`` ``GPU`` ``CPU``
4328
-
4329
- Examples:
4330
- >>> import mindspore
4331
- >>> import numpy as np
4332
- >>> from mindspore import Tensor
4333
- >>> import mindspore.ops as ops
4334
- >>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
4335
- >>> output = ops.nonzero(x)
4336
- >>> print(output)
4337
- [[0 0 0]
4338
- [0 1 0]]
4339
- >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
4340
- >>> output = ops.nonzero(x)
4341
- >>> print(output)
4342
- [[0]
4343
- [2]
4344
- [4]]
4345
- """
4346
- return nonzero_(input)
4347
-
4348
-
4349
3709
  def matrix_diag(x, k=0, num_rows=-1, num_cols=-1, padding_value=0, align="RIGHT_LEFT"):
4350
3710
  r"""
4351
3711
  Returns a Tensor with the contents in `x` as k[0]-th to k[1]-th diagonals of a matrix, with everything else padded
@@ -4605,18 +3965,19 @@ def meshgrid(*inputs, indexing='xy'):
4605
3965
 
4606
3966
  Keyword Args:
4607
3967
  indexing (str, optional): Cartesian ('xy', default) or
4608
- matrix ('ij') indexing of output. Valid options: xy' or 'ij'. In the 2-D case with
3968
+ matrix ('ij') indexing of output. Valid options: ``'xy'`` or ``'ij'``. In the 2-D case with
4609
3969
  inputs of length `M` and `N`, the outputs are of shape :math:`(N, M)`
4610
- for 'xy' indexing and :math:`(M, N)` for 'ij' indexing. In the 3-D
3970
+ for ``'xy'`` indexing and :math:`(M, N)` for ``'ij'`` indexing. In the 3-D
4611
3971
  case with inputs of length `M`, `N` and `P`, outputs are of shape
4612
- :math:`(N, M, P)` for 'xy' indexing and :math:`(M, N, P)` for 'ij' indexing. Default: ``'xy'`` .
3972
+ :math:`(N, M, P)` for ``'xy'`` indexing and :math:`(M, N, P)` for ``'ij'`` indexing.
3973
+ Default: ``'xy'`` .
4613
3974
 
4614
3975
  Returns:
4615
3976
  Tensors, a Tuple of N N-D Tensor objects. The data type is the same as the inputs.
4616
3977
 
4617
3978
  Raises:
4618
3979
  TypeError: If `indexing` is not a str or `inputs` is not a tuple.
4619
- ValueError: If `indexing` is neither 'xy' nor 'ij'.
3980
+ ValueError: If `indexing` is neither ``'xy'`` nor ``'ij'``.
4620
3981
 
4621
3982
  Supported Platforms:
4622
3983
  ``Ascend`` ``GPU`` ``CPU``
@@ -4624,7 +3985,7 @@ def meshgrid(*inputs, indexing='xy'):
4624
3985
  Examples:
4625
3986
  >>> import numpy as np
4626
3987
  >>> from mindspore import Tensor
4627
- >>> import mindspore.ops as ops
3988
+ >>> from mindspore import ops
4628
3989
  >>> x = Tensor(np.array([1, 2, 3, 4]).astype(np.int32))
4629
3990
  >>> y = Tensor(np.array([5, 6, 7]).astype(np.int32))
4630
3991
  >>> z = Tensor(np.array([8, 9, 0, 1, 2]).astype(np.int32))
@@ -4707,7 +4068,7 @@ def affine_grid(theta, size, align_corners=False):
4707
4068
  Examples:
4708
4069
  >>> import mindspore
4709
4070
  >>> from mindspore import Tensor
4710
- >>> import mindspore.ops as ops
4071
+ >>> from mindspore import ops
4711
4072
  >>> theta = Tensor([[[0.8, 0.5, 0],[-0.5, 0.8, 0]]], mindspore.float32)
4712
4073
  >>> out_size = (1, 3, 2, 3)
4713
4074
  >>> output = ops.affine_grid(theta, out_size, False)
@@ -4723,87 +4084,6 @@ def affine_grid(theta, size, align_corners=False):
  return affine_grid_op(theta, size)


- def broadcast_to(input, shape): # pylint: disable=redefined-outer-name
- """
- Broadcasts input tensor to a given shape. The dim of input shape must be smaller
- than or equal to that of target shape. Suppose input shape is :math:`(x_1, x_2, ..., x_m)`,
- target shape is :math:`(*, y_1, y_2, ..., y_m)`, where :math:`*` means any additional dimension.
- The broadcast rules are as follows:
-
- Compare the value of :math:`x_m` and :math:`y_m`, :math:`x_{m-1}` and :math:`y_{m-1}`, ...,
- :math:`x_1` and :math:`y_1` consecutively and
- decide whether these shapes are broadcastable and what the broadcast result is.
-
- If the value pairs at a specific dim are equal, then that value goes right into that dim of output shape.
- With an input shape :math:`(2, 3)`, target shape :math:`(2, 3)` , the inferred output shape is :math:`(2, 3)`.
-
- If the value pairs are unequal, there are three cases:
-
- Case 1: If the value of the target shape in the dimension is -1, the value of the
- output shape in the dimension is the value of the corresponding input shape in the dimension.
- With an input shape :math:`(3, 3)`, target
- shape :math:`(-1, 3)`, the output shape is :math:`(3, 3)`.
-
- Case 2: If the value of target shape in the dimension is not -1, but the corresponding
- value in the input shape is 1, then the corresponding value of the output shape
- is that of the target shape. With an input shape :math:`(1, 3)`, target
- shape :math:`(8, 3)`, the output shape is :math:`(8, 3)`.
-
- Case 3: If the corresponding values of the two shapes do not satisfy the above cases,
- it means that broadcasting from the input shape to the target shape is not supported.
-
- So far we got the last m dims of the outshape, now focus on the first :math:`*` dims, there are
- two cases:
-
- If the first :math:`*` dims of output shape does not have -1 in it, then fill the input
- shape with ones until their length are the same, and then refer to
- Case 2 mentioned above to calculate the output shape. With target shape :math:`(3, 1, 4, 1, 5, 9)`,
- input shape :math:`(1, 5, 9)`, the filled input shape will be :math:`(1, 1, 1, 1, 5, 9)` and thus the
- output shape is :math:`(3, 1, 4, 1, 5, 9)`.
-
- If the first :math:`*` dims of output shape have -1 in it, it implies this -1 is corresponding to
- a non-existing dim so they're not broadcastable. With target shape :math:`(3, -1, 4, 1, 5, 9)`,
- input shape :math:`(1, 5, 9)`, instead of operating the dim-filling process first, it raises errors directly.
-
- Args:
- input (Tensor): The input Tensor.
- shape (tuple): The target shape to broadcast. Can be fully specified, or have -1 in one position
- where it will be substituted by the input tensor's shape in that position, see example.
-
- Returns:
- Tensor, with the given `shape` and the same data type as `input`.
-
- Raises:
- TypeError: If `shape` is not a tuple.
- ValueError: If the target and input shapes are incompatible, or if a - 1 in the target shape is in an invalid
- location.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> shape = (2, 3)
- >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
- >>> output = ops.broadcast_to(x, shape)
- >>> print(output)
- [[1. 2. 3.]
- [1. 2. 3.]]
- >>> shape = (-1, 2)
- >>> x = Tensor(np.array([[1], [2]]).astype(np.float32))
- >>> output = ops.broadcast_to(x, shape)
- >>> print(output)
- [[1. 1.]
- [2. 2.]]
- """
- if isinstance(shape, Tensor) or ops.is_sequence_value_unknown(shape):
- _dyn_broadcast_to = _get_cache_prim(DynamicBroadcastTo)()
- return _dyn_broadcast_to(input, shape)
- _broadcast_to = _get_cache_prim(P.BroadcastTo)(shape)
- return _broadcast_to(input)
-
-
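[Editor's note: the functional `broadcast_to` wrapper above is deleted from this module in 2.3.0 (the operator is presumably still reachable through other bindings). A minimal sketch of the broadcast rules the removed docstring describes, shown with NumPy rather than MindSpore purely for illustration:]

    import numpy as np

    # Case 2 from the removed text: a dim of size 1 stretches to the target size.
    x = np.array([[1.0], [2.0]])              # shape (2, 1)
    print(np.broadcast_to(x, (2, 3)))         # [[1. 1. 1.] [2. 2. 2.]]

    # Missing leading dims are filled with ones, then stretched:
    # (1, 5, 9) -> (1, 1, 1, 1, 5, 9) -> (3, 1, 4, 1, 5, 9).
    print(np.broadcast_to(np.ones((1, 5, 9)), (3, 1, 4, 1, 5, 9)).shape)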
  def unsorted_segment_min(x, segment_ids, num_segments):
  r"""
  Computes the minimum of a tensor along segments.
@@ -4827,14 +4107,13 @@ def unsorted_segment_min(x, segment_ids, num_segments):
  x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
  segment_ids (Tensor): The label that indicates the segment to which each element belongs.
  Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
- num_segments (int): The value specifies the number of distinct `segment_ids`.
+ num_segments (Union[int, Tensor], optional): The number of segments, denoted as :math:`z`; it can be an int or a 0-D Tensor.

  Returns:
- Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
+ Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.

  Raises:
  TypeError: If `num_segments` is not an int.
- ValueError: If length of shape of `segment_ids` is not equal to 1.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -4851,7 +4130,6 @@ def unsorted_segment_min(x, segment_ids, num_segments):
  [[1. 2. 3.]
  [4. 2. 1.]]
  """
- unsorted_segment_min_ = P.UnsortedSegmentMin()
  return unsorted_segment_min_(x, segment_ids, num_segments)


@@ -4878,14 +4156,13 @@ def unsorted_segment_max(x, segment_ids, num_segments):
  x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
  segment_ids (Tensor): The label that indicates the segment to which each element belongs.
  Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
- num_segments (int): The value specifies the number of distinct `segment_ids`.
+ num_segments (Union[int, Tensor], optional): The number of segments, denoted as :math:`z`; it can be an int or a 0-D Tensor.

  Returns:
- Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
+ Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.

  Raises:
  TypeError: If `num_segments` is not an int.
- ValueError: If length of shape of `segment_ids` is not equal to 1.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -4902,7 +4179,6 @@ def unsorted_segment_max(x, segment_ids, num_segments):
  [[1. 2. 3.]
  [4. 5. 6.]]
  """
- unsorted_segment_max_ = P.UnsortedSegmentMax()
  return unsorted_segment_max_(x, segment_ids, num_segments)


@@ -4920,16 +4196,15 @@ def unsorted_segment_prod(x, segment_ids, num_segments):

  Args:
  x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
- segment_ids (Tensor): A `1-D` tensor whose shape is :math:`(x_1)`,
- the value must be non-negative tensor. The data type must be int32.
- num_segments (int): The value specifies the number of distinct `segment_ids`.
+ segment_ids (Tensor): The label that indicates the segment to which each element belongs.
+ Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R. The data type must be int32.
+ num_segments (Union[int, Tensor], optional): The number of segments, denoted as :math:`z`; it can be an int or a 0-D Tensor.

  Returns:
- Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
+ Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.

  Raises:
  TypeError: If `num_segments` is not an int.
- ValueError: If length of shape of `segment_ids` is not equal to 1.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -4946,7 +4221,6 @@ def unsorted_segment_prod(x, segment_ids, num_segments):
  [[4. 4. 3.]
  [4. 5. 6.]]
  """
- unsorted_segment_prod_ = P.UnsortedSegmentProd()
  return unsorted_segment_prod_(x, segment_ids, num_segments)

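[Editor's note: under the widened signature shared by the three `unsorted_segment_*` hunks above, `num_segments` may also be a 0-D Tensor and the output shape becomes :math:`(z, x_{N+1}, ..., x_R)`. A hedged sketch with `unsorted_segment_min` (here N = 1, so z = 2 and the output shape is (2, 3)); treat the 0-D Tensor form as an assumption read off the updated docstring:]

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
    segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
    num_segments = Tensor(2, mindspore.int32)  # 0-D Tensor, per the new Union[int, Tensor] type
    print(ops.unsorted_segment_min(x, segment_ids, num_segments))
    # [[1. 2. 3.]
    #  [4. 2. 1.]]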
 
@@ -4987,7 +4261,7 @@ def index_fill(x, axis, index, value):
  Examples:
  >>> import mindspore
  >>> import numpy as np
- >>> import mindspore.ops as ops
+ >>> from mindspore import ops
  >>> from mindspore import Tensor
  >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32))
  >>> index = Tensor([0, 2], mindspore.int32)
@@ -5158,33 +4432,6 @@ def is_nonzero(input):
  return bool(out)


- def scalar_cast(input_x, input_y):
- """
- Casts the input scalar to another type.
-
- Args:
- input_x (scalar): The input scalar. Only constant value is allowed.
- input_y (mindspore.dtype): The type to be cast. Only constant value is allowed.
-
- Returns:
- Scalar. The type is the same as the python type corresponding to `input_y`.
-
- Raises:
- TypeError: If neither `input_x` nor `input_y` is a constant value.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> from mindspore import ops
- >>> output = ops.scalar_cast(255.0, mindspore.int32)
- >>> print(output)
- 255
- """
- return scalar_cast_(input_x, input_y)
-
-
  def tensor_scatter_mul(input_x, indices, updates):
  r"""
  Creates a new tensor by multiplying the values from the positions in `input_x` indicated by
@@ -5194,10 +4441,10 @@ def tensor_scatter_mul(input_x, indices, updates):

  The last axis of `indices` is the depth of each index vector. For each index vector,
  there must be a corresponding value in `updates`. The shape of `updates` should be
- equal to the shape of `input_x[indices]`. For more details, see use cases.
+ equal to the shape of `input_x[indices]`. For more details, see Examples.

  .. math::
- output[indices] = input\_x \times update
+ output\left [indices \right ] = input\_x\times update

  Note:
  - If some values of the `indices` are out of bound, instead of raising an index error,
@@ -5254,7 +4501,7 @@ def tensor_scatter_div(input_x, indices, updates):

  The last axis of `indices` is the depth of each index vector. For each index vector,
  there must be a corresponding value in `updates`. The shape of `updates` should be
- equal to the shape of `input_x[indices]`. For more details, see use cases.
+ equal to the shape of `input_x[indices]`. For more details, see Examples.

  .. math::
  output\left [indices \right ] = input\_x \div update
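[Editor's note: a short usage sketch of `tensor_scatter_mul`, consistent with the formula above; the values follow the standard MindSpore example and are illustrative only:]

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
    indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)  # both updates hit position (0, 0)
    updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
    print(ops.tensor_scatter_mul(input_x, indices, updates))
    # [[-0.22  0.3   3.6 ]
    #  [ 0.4   0.5  -3.2 ]]     # -0.1 * 1.0 * 2.2 = -0.22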
@@ -5375,113 +4622,36 @@ def tuple_to_array(input_x):
  return tuple_to_tensor_(input_x, dtype)


- def masked_select(input, mask):
- """
- Returns a new 1-D Tensor which indexes the `x` tensor according to the boolean `mask`.
- The shapes of the `mask` tensor and the `x` tensor don't need to match, but they must be broadcastable.
-
- Args:
- input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
- mask (Tensor[bool]): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-
- Returns:
- A 1-D Tensor, with the same type as `input`.
-
- Raises:
- TypeError: If `input` or `mask` is not a Tensor.
- TypeError: If dtype of `mask` is not bool.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import numpy as np
- >>> import mindspore
- >>> from mindspore import Tensor, ops
- >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
- >>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
- >>> output = ops.masked_select(x, mask)
- >>> print(output)
- [1 3]
- """
- return masked_select_(input, mask)
-
-
- def masked_fill(input_x, mask, value):
- """
- Fills elements of Tensor with value where mask is True.
- The shapes of `input_x` and `mask` need to be the same or broadcastable.
-
- Args:
- input_x (Tensor): The source Tensor whose data type is one of bool, uint8, int8, int16, int32,
- int64, float16, float32, float64, complex64, complex128.
- mask (Tensor[bool]): The boolean mask.
- value (Union[float, Tensor]): The value to fill in with, which dtype is the same as `input_x`.
-
- Returns:
- Tensor, has the same type and shape as `input_x`.
-
- Raises:
- TypeError: If dtype of `mask` is not bool.
- TypeError: If `input_x` or `mask` is not a Tensor.
- ValueError: If the shapes of `input_x` and `mask` could not be broadcast.
- TypeError: If dtype of `input_x` or `value` is not one of bool, uint8, int8, int16, int32,
- int64, float16, float32, float64, complex64, complex128.
- TypeError: If dtype of `value` is different from that of `input_x`.
- TypeError: If `value` is neither float number nor Tensor.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> input_x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
- >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
- >>> output = ops.masked_fill(input_x, mask, 0.5)
- >>> print(output)
- [0.5 0.5 3. 0.5]
- """
- if isinstance(value, (float, int)) and isinstance(input_x, Tensor):
- value = scalar_to_tensor_(value, input_x.dtype)
- masked_fill_ = _get_cache_prim(P.MaskedFill)()
- return masked_fill_(input_x, mask, value)
-
-
- def diag(input):
- r"""
- Constructs a diagonal tensor with a given diagonal values.
-
- Assume `input` has dimensions :math:`(D_1,... D_k)` , the output is a tensor of
- rank 2k with dimensions :math:`(D_1,..., D_k, D_1,..., D_k)` where:
- :math:`output[i_1,..., i_k, i_1,..., i_k] = input[i_1,..., i_k]` and 0 everywhere else.
+ def masked_select(input, mask):
+ """
+ Returns a new 1-D Tensor which indexes the `input` tensor according to the boolean `mask`.
+ The shapes of the `mask` tensor and the `input` tensor don't need to match, but they must be broadcastable.

  Args:
- input (Tensor): The input tensor.
+ input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
+ mask (Tensor[bool]): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

  Returns:
- Tensor, has the same dtype as the `input`.
+ A 1-D Tensor, with the same type as `input`.

  Raises:
- TypeError: If `input` is not a Tensor.
- ValueError: If rank of `input` is less than 1.
+ TypeError: If `input` or `mask` is not a Tensor.
+ TypeError: If dtype of `mask` is not bool.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>> from mindspore import Tensor
- >>> import mindspore.ops as ops
- >>> input_x = Tensor([1, 2, 3, 4]).astype('int32')
- >>> output = ops.diag(input_x)
+ >>> import numpy as np
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
+ >>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
+ >>> output = ops.masked_select(x, mask)
  >>> print(output)
- [[1 0 0 0]
- [0 2 0 0]
- [0 0 3 0]
- [0 0 0 4]]
+ [1 3]
  """
- return diag_(input)
+ return masked_select_(input, mask)


  def diagflat(input, offset=0):
@@ -5542,7 +4712,7 @@ def col2im(input_x, output_size, kernel_size, dilation, padding_value, stride):
  Combines an array of sliding local blocks into a large containing tensor.

  Args:
- input_x (Tensor): 4D tensor with data type float16 or float.
+ input_x (Tensor): 4D tensor with data type float16 or float32.
  output_size (Tensor): 1D tensor with 2 elements of data type int.
  kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two int
  for height and width. If type is int, it means that height equals width. Must be specified.
@@ -5598,7 +4768,7 @@ def _split_int(x, split_size_or_sections, axis):
  num_sections = length_along_dim // split_size_or_sections
  length1 = num_sections * split_size_or_sections
  length2 = length_along_dim - length1
- start1 = _list_comprehensions(rank(x), 0, True)
+ start1 = _list_comprehensions(rank_(x), 0, True)
  size1 = _tuple_setitem(arr_shape, axis, length1)
  start2 = _tuple_setitem(start1, axis, length1)
  size2 = _tuple_setitem(arr_shape, axis, length2)
@@ -5628,7 +4798,6 @@ def _split_sub_tensors(x, split_size_or_sections, axis):
  sub_tensors.append(sliced_tensor)
  return sub_tensors

-
  def split(tensor, split_size_or_sections, axis=0):
  """
  Splits the Tensor into chunks along the given axis.
@@ -5650,9 +4819,9 @@ def split(tensor, split_size_or_sections, axis=0):
  TypeError: If argument `tensor` is not Tensor.
  TypeError: If argument `axis` is not int.
  ValueError: If argument `axis` is out of range of :math:`[-tensor.ndim, tensor.ndim)` .
- TypeError: If each element in 'split_size_or_sections' is not integer.
- TypeError: If argument `indices_or_sections` is not int, tuple(int) or list(int).
- ValueError: The sum of 'split_size_or_sections' is not equal to x.shape[axis].
+ TypeError: If each element in `split_size_or_sections` is not an integer.
+ TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
+ ValueError: If the sum of `split_size_or_sections` is not equal to `tensor.shape[axis]`.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -5696,6 +4865,53 @@ def split(tensor, split_size_or_sections, axis=0):
  f"but got {type(split_size_or_sections)}")
  return tuple(res)

+ def split_ext(tensor, split_size_or_sections, axis=0):
+ """
+ Splits the Tensor into chunks along the given axis.
+
+ Args:
+ tensor (Tensor): A Tensor to be divided.
+ split_size_or_sections (Union[int, tuple(int), list(int)]):
+ If `split_size_or_sections` is an int type, `tensor` will be split into equally sized chunks,
+ each chunk with size `split_size_or_sections`. Last chunk will be smaller than `split_size_or_sections`
+ if `tensor.shape[axis]` is not divisible by `split_size_or_sections`.
+ If `split_size_or_sections` is a list type, then `tensor` will be split into len(split_size_or_sections)
+ chunks with sizes `split_size_or_sections` along the given `axis`.
+ axis (int): The axis along which to split. Default: ``0`` .
+
+ Returns:
+ A tuple of sub-tensors.
+
+ Raises:
+ TypeError: If argument `tensor` is not Tensor.
+ TypeError: If argument `axis` is not int.
+ ValueError: If argument `axis` is out of range of :math:`[-tensor.ndim, tensor.ndim)` .
+ TypeError: If each element in `split_size_or_sections` is not an integer.
+ TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
+ ValueError: If the sum of `split_size_or_sections` is not equal to `tensor.shape[axis]`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import ops, Tensor
+ >>> input_x = np.arange(9).astype("float32")
+ >>> output = ops.split_ext(Tensor(input_x), 3)
+ >>> print(output)
+ (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
+ Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
+ Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
+ """
+ if isinstance(split_size_or_sections, int):
+ res = split_tensor(tensor, split_size_or_sections, axis)
+ elif isinstance(split_size_or_sections, (list, tuple)):
+ res = split_with_size(tensor, split_size_or_sections, axis)
+ else:
+ raise TypeError(f"Type of Argument `split_size_or_sections` should be integer, tuple(int) or list(int), " \
+ f"but got {type(split_size_or_sections)}")
+ return res
+

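[Editor's note: besides the int form shown in the docstring, `split_size_or_sections` may be a list/tuple of chunk sizes that must sum to `tensor.shape[axis]`; a hedged sketch:]

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.arange(9).astype("float32"))
    out = ops.split_ext(x, [4, 5])     # explicit chunk sizes: 4 + 5 == 9
    print([o.shape for o in out])      # [(4,), (5,)]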
  def tril(input, diagonal=0): # pylint: disable=redefined-outer-name
  """
@@ -5758,67 +4974,6 @@ def tril(input, diagonal=0): # pylint: disable=redefined-outer-name
  return tril_(input)


- def triu(input, diagonal=0): # pylint: disable=redefined-outer-name
- r"""
- Returns the upper triangle part of 'input' (elements that contain the diagonal and below),
- and set the other elements to zeros.
-
- .. warning::
- This is an experimental API that is subject to change or deletion.
-
- Args:
- input (Tensor): The input tensor with shape :math:`(M, N, *)` where * means any number of additional dimensions.
- diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: 0,
- indicating the main diagonal.
-
- Returns:
- Tensor, a tensor has the same shape and data type as input.
-
- Raises:
- TypeError: If `diagonal` is not an int.
- TypeError: If `input` is not a Tensor.
- ValueError: If the dimension of `input` is less than 2.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
- ... [ 5, 6, 7, 8],
- ... [10, 11, 12, 13],
- ... [14, 15, 16, 17]]))
- >>> result = ops.triu(x)
- >>> print(result)
- [[ 1 2 3 4]
- [ 0 6 7 8]
- [ 0 0 12 13]
- [ 0 0 0 17]]
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
- ... [ 5, 6, 7, 8],
- ... [10, 11, 12, 13],
- ... [14, 15, 16, 17]]))
- >>> result = ops.triu(x, diagonal=1)
- >>> print(result)
- [[ 0 2 3 4]
- [ 0 0 7 8]
- [ 0 0 0 13]
- [ 0 0 0 0]]
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
- ... [ 5, 6, 7, 8],
- ... [10, 11, 12, 13],
- ... [14, 15, 16, 17]]))
- >>> result = ops.triu(x, diagonal=-1)
- >>> print(result)
- [[ 1 2 3 4]
- [ 5 6 7 8]
- [ 0 11 12 13]
- [ 0 0 16 17]]
- """
- return _get_cache_prim(P.Triu)(diagonal)(input)
-
-
  @_primexpr
  def _canonicalize_axis(axis, ndim):
  """
@@ -5918,24 +5073,24 @@ def _tensor_split_sub_int(x, indices_or_sections, axis):
  arr_shape = x.shape
  length_along_dim = arr_shape[axis]
  if indices_or_sections > length_along_dim:
- res = P.Split(axis, length_along_dim)(x)
+ res = _get_cache_prim(P.Split)(axis, length_along_dim)(x)
  indices_or_sections_n = [length_along_dim, length_along_dim + 1]
  res2 = _tensor_split_sub_tensors(x, indices_or_sections_n, axis)
  for _ in np.arange(length_along_dim, indices_or_sections):
  res += tuple(res2)[1:]
  elif length_along_dim % indices_or_sections == 0:
- res = P.Split(axis, indices_or_sections)(x)
+ res = _get_cache_prim(P.Split)(axis, indices_or_sections)(x)
  else:
  num_long_tensor = length_along_dim % indices_or_sections
  num_short_tensor = indices_or_sections - num_long_tensor
  length1 = num_long_tensor * (length_along_dim // indices_or_sections + 1)
  length2 = length_along_dim - length1
- start1 = _list_comprehensions(rank(x), 0, True)
+ start1 = _list_comprehensions(rank_(x), 0, True)
  size1 = _tuple_setitem(arr_shape, axis, length1)
  start2 = _tuple_setitem(start1, axis, length1)
  size2 = _tuple_setitem(arr_shape, axis, length2)
- res = P.Split(axis, num_long_tensor)(tensor_slice(x, start1, size1)) + \
- P.Split(axis, num_short_tensor)(tensor_slice(x, start2, size2))
+ res = _get_cache_prim(P.Split)(axis, num_long_tensor)(tensor_slice(x, start1, size1)) + \
+ _get_cache_prim(P.Split)(axis, num_short_tensor)(tensor_slice(x, start2, size2))
  return res

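[Editor's note: the recurring change in this hunk — `P.Split(...)` becoming `_get_cache_prim(P.Split)(...)` — reuses a cached Primitive instance instead of constructing a new operator on every call. A minimal sketch, assuming the cache keys on the primitive class and its init arguments:]

    from mindspore.ops import operations as P
    from mindspore.ops._primitive_cache import _get_cache_prim

    split_a = _get_cache_prim(P.Split)(0, 3)
    split_b = _get_cache_prim(P.Split)(0, 3)
    assert split_a is split_b  # same cached instance; P.Split(0, 3) would build a new one each call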
 
@@ -5949,11 +5104,11 @@ def tensor_split(input, indices_or_sections, axis=0):

  - If `indices_or_sections` is an integer n, input tensor will be split into n sections.

- - If :math:`input.shape(axis)` can be divisible by n, sub-sections will have equal size
- :math:`input.shape(axis) / n` .
- - If :math:`input.shape(axis)` is not divisible by n, the first :math:`input.shape(axis) % n` sections
- will have size :math:`input.shape(axis) // n + 1` , and the rest will have
- size :math:`input.shape(axis) // n` .
+ - If :math:`input.shape[axis]` is divisible by n, sub-sections will have equal size
+ :math:`input.shape[axis] / n` .
+ - If :math:`input.shape[axis]` is not divisible by n, the first :math:`input.shape[axis] \bmod n` sections
+ will have size :math:`input.shape[axis] // n + 1` , and the rest will have
+ size :math:`input.shape[axis] // n` .
  - If `indices_or_sections` is of type tuple(int) or list(int), the input tensor will be split at the
  indices in the list or tuple. For example, given parameters :math:`indices\_or\_sections=[1, 4]`
  and :math:`axis=0` , the input tensor will be split into sections :math:`input[:1]` ,
@@ -6166,7 +5321,7 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylin
  tensor.

  - values (Tensor) - The maximum value of input tensor, with the same shape as index, and same dtype as `input`.
- - index (Tensor) - The index for the maximum value of the input tensor, with dtype int32. If `keepdims`
+ - index (Tensor) - The index for the maximum value of the input tensor, with dtype int64. If `keepdims`
  is true, the shape of output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1, input_{axis+1},
  ..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1},
  ..., input_N)` .
@@ -6195,16 +5350,15 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylin
  [[3.2 0.4 0.4 2.9 4. ]] [[1 1 0 1 1]]
  """
  if not input.shape:
- return (input, Tensor(0, dtype=mstype.int32))
+ return (input, Tensor(0, dtype=mstype.int64))
  if axis is None:
- reduce_max_op = _get_cache_prim(P.ReduceMax)()
- return (reduce_max_op(input), Tensor(0, dtype=mstype.int32))
+ return (max_(input), Tensor(0, dtype=mstype.int64))
  if initial is not None and not isinstance(initial, numbers.Number):
  raise TypeError(f"For 'max', 'initial' must be a scalar, but got {type(initial)}")
  if axis is not None and not isinstance(axis, int):
  raise TypeError(f"For 'max', 'axis' must be int, but got {type(axis)}")
  input = _init_and_select_elem(input, initial, where, ops.maximum)
- argmax_with_value_op = ArgMaxWithValue(axis, keepdims)
+ argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(axis, keepdims)
  indices, values = argmax_with_value_op(input)
  return values, indices
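[Editor's note: the int32-to-int64 change above is observable in the returned index; a hedged sketch:]

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
    values, index = ops.max(x, axis=0)
    print(values, index)   # 0.7 3
    print(index.dtype)     # Int64 in 2.3.0 (was Int32 in 2.2.x), per the change above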
 
@@ -6250,10 +5404,11 @@ def argmax(input, dim=None, keepdim=False):
  is_dim_none = True
  out = _get_cache_prim(Argmax)(dim, mstype.int64)(input)
  if keepdim and not is_dim_none:
- out = expand_dims_(out, dim)
+ out = expand_dims(out, dim)
  return out


+
  def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pylint: disable=redefined-outer-name
  """
  Calculates the minimum value along with the given axis for the input tensor. It returns the minimum values and
@@ -6312,16 +5467,16 @@ def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pylin
  0.0 0
  """
  if not input.shape:
- return (input, Tensor(0, dtype=mstype.int32))
+ return (input, Tensor(0, dtype=mstype.int64))
  if axis is None:
- return (reduce_min(input), Tensor(0, dtype=mstype.int32))
+ return (min_(input), Tensor(0, dtype=mstype.int64))
  if initial is not None and not isinstance(initial, numbers.Number):
  raise TypeError(f"For 'min', 'initial' must be a scalar, but got {type(initial)}")
  if axis is not None and not isinstance(axis, int):
  raise TypeError(f"For 'min', 'axis' must be int, but got {type(axis)}")
  input = _init_and_select_elem(input, initial, where, ops.minimum)
- argmin_with_value_ = ArgMinWithValue(axis=axis, keep_dims=keepdims)
- indices, values = argmin_with_value_(input)
+ argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(axis, keepdims)
+ indices, values = argmin_with_value_op(input)
  return values, indices

@@ -6379,8 +5534,8 @@ def aminmax(input, *, axis=0, keepdims=False):
  output0 = ops.reshape(output0, [1] * input.ndim)
  output1 = ops.reshape(output1, [1] * input.ndim)
  return output0, output1
- argmin_with_value_op = P.ArgMinWithValue(axis, keepdims)
- argmax_with_value_op = P.ArgMaxWithValue(axis, keepdims)
+ argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(axis, keepdims)
+ argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(axis, keepdims)
  _, output0 = argmin_with_value_op(input)
  _, output1 = argmax_with_value_op(input)
  if keepdims is True and input.ndim == 0:
@@ -6435,66 +5590,48 @@ def narrow(input, axis, start, length):
  begins[axis] = start
  sizes = list(input.shape)
  sizes[axis] = length
- return P.Slice()(input, begins, sizes)
-
-
- def unsorted_segment_sum(input_x, segment_ids, num_segments):
- r"""
- Computes the sum of a tensor along segments.
+ return tensor_slice(input, begins, sizes)

- Calculates a tensor such that :math:`\text{output}[i] = \sum_{segment\_ids[j] == i} \text{data}[j, \ldots]`, where
- :math:`j,...` is a tuple describing the index of element in data.
- `segment_ids` selects which elements in data to sum
- up. Segment_ids does not need to be sorted, and it does not need to cover all values in the entire valid value
- range.

- The following figure shows the calculation process of unsorted_segment_sum:
-
- .. image:: UnsortedSegmentSum.png
-
- Note:
- - If the segment_id i is absent in the segment_ids, then output[i] will be filled with 0.
- - On Ascend, if the value of segment_id is less than 0 or greater than the length of the input data shape, an
- execution error will occur.
-
- If the sum of the given segment_ids :math:`i` is empty, then :math:`\text{output}[i] = 0`. If the given segment_ids
- is negative, the value will be ignored. 'num_segments' must be equal to the number of different segment_ids.
+ def narrow_ext(input, dim, start, length):
+ """
+ Returns a narrowed tensor from the input tensor along the dimension `dim`,
+ covering the index range from `start` to `start + length`.

  Args:
- input_x (Tensor): Input Tensor contains the data to be summed.
- The shape is :math:`(x_1, x_2, ..., x_R)`.
- segment_ids (Tensor): TThe label indicates the segment to which each element belongs.
- Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
- num_segments (Union[int, Tensor], optional): Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
+ input (Tensor): the tensor to narrow.
+ dim (int): the dimension along which to narrow.
+ start (int): the starting index along `dim`.
+ length (int): the number of elements to take from `start`.

  Returns:
- Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
+ Tensor.

  Raises:
- TypeError: If `num_segments` is not an int or 0-D Tensor.
- ValueError: If length of shape of `segment_ids` is less than 1.
+ ValueError: If `dim` is out of range [-input.ndim, input.ndim).
+ ValueError: If `start` is out of range [-input.shape[dim], input.shape[dim]].
+ ValueError: If `length` is out of range [0, input.shape[dim]-start].

  Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ ``Ascend``

  Examples:
- >>> from mindspore import Tensor
- >>> from mindspore import ops
  >>> import mindspore
- >>> input_x = Tensor([1, 2, 3, 4], mindspore.float32)
- >>> segment_ids = Tensor([0, 0, 1, 2], mindspore.int32)
- >>> num_segments = 4
- >>> output = ops.unsorted_segment_sum(input_x, segment_ids, num_segments)
+ >>> from mindspore import ops
+ >>> from mindspore import Tensor
+ >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mindspore.int32)
+ >>> output = ops.narrow(x, 0, 0, 2)
  >>> print(output)
- [3. 3. 4. 0.]
- >>> input_x = Tensor([1, 2, 3, 4, 2, 5], mindspore.float32)
- >>> segment_ids = Tensor([0, 0, 1, 2, 3, 4], mindspore.int32)
- >>> num_segments = 6
- >>> output = ops.unsorted_segment_sum(input_x, segment_ids, num_segments)
+ [[ 1 2 3]
+ [ 4 5 6]]
+ >>> output = ops.narrow(x, 1, 1, 2)
  >>> print(output)
- [3. 3. 4. 2. 5. 0.]
+ [[ 2 3]
+ [ 5 6]
+ [ 8 9]]
  """
- return unsorted_segment_sum_(input_x, segment_ids, num_segments)
+ validator.check_value_type("input", input, Tensor, "narrow")
+ return slice_ext_op(input, dim, start, start+length, 1)


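[Editor's note: the narrow semantics in the examples above correspond to basic slicing; shown with NumPy purely for illustration:]

    import numpy as np

    x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    print(x[0:2, :])   # same result as ops.narrow(x, 0, 0, 2) above
    print(x[:, 1:3])   # same result as ops.narrow(x, 1, 1, 2) above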
  def topk(input, k, dim=None, largest=True, sorted=True):
@@ -6651,8 +5788,8 @@ def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
  A Tensor, with same type as `input` . And its shape is as described above.

  Raises:
- TypeError: If `kernel_size`, `dilation`, `padding`, `stride` data type is not int, tuple or list.
- ValueError: If `kernel_size`, `dilation`, `stride` value is not
+ TypeError: If `output_size`, `kernel_size`, `stride`, `dilation`, `padding` data type is not int, tuple or list.
+ ValueError: If `output_size`, `kernel_size`, `dilation`, `stride` value is not
  greater than zero or elements number more than `2`.
  ValueError: If `padding` value is less than zero or elements number more than `2`.
  ValueError: If `input.shape[1] != kernel_size[0] * kernel_size[1]`
@@ -6728,9 +5865,7 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
  .. warning::
  - The output is a 3-dimensional Tensor whose shape is
  :math:`(N, C \times \prod(\text{kernel_size}), L)` .
-
- .. warning::
- This is an experimental API that is subject to change or deletion.
+ - This is an experimental API that is subject to change or deletion.

  Args:
  input (Tensor): 4-D Tensor, supported dtypes: float16, float32, float64, complex64 and complex128.
@@ -6739,10 +5874,11 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
  dilation (Union[int, tuple[int], list[int]], optional): The dilation of the window, should be two int
  for height and width. If type is int, it means that height equals width. Default: ``1`` .
  padding (Union[int, tuple[int], list[int]], optional): The pad of the window, which must be
- a tuple/list of one or two `int` for height and width.
- If one int, pad_height = pad_width.
- If two int, pad_height = padding[0], pad_width = padding[1].
- Default: ``0`` .
+ a tuple/list of one or two `int` for height and width. Default: ``0`` .
+
+ - If one int, pad_height = pad_width.
+ - If two int, pad_height = padding[0], pad_width = padding[1].
+
  stride (Union[int, tuple[int], list[int]], optional): The stride of the window, should be two int
  for height and width. If type is int, it means that height equals width. Default: ``1`` .

@@ -6789,98 +5925,6 @@ def _check_diagonal_axes(dim1, dim2, x_ndim):
  return axes


- def diagonal(input, offset=0, dim1=0, dim2=1):
- """
- Returns specified diagonals of `input`.
-
- If `input` is 2-D, returns the diagonal of `input` with the given offset.
- If `input` has more than two
- dimensions, then the axes specified by `dim1` and `dim2` are used to determine
- the 2-D sub-array whose diagonal is returned. In this case, remove the `dim1` and `dim2` dimensions of `input`
- and insert the last dimension of `input` by the diagonal elements determined by `dim1` and `dim2`.
-
- Args:
- input (Tensor): Array from which the diagonals are taken.
- offset (int, optional): Offset of the diagonal from the main diagonal.
- Can be positive or negative. Default: ``0`` .
- dim1 (int, optional): Axis to be used as the first axis of the 2-D
- sub-arrays from which the diagonals should be taken. Defaults to
- first axis (0). Default: ``0`` .
- dim2 (int, optional): Axis to be used as the second axis of the 2-D
- sub-arrays from which the diagonals should be taken. Defaults to
- second axis (1). Default: ``1`` .
-
- Returns:
- Tensor, if `input` is 2-D, then `input` 1-D array containing the diagonal. If
- ``input.ndim > 2``, then the dimensions specified by `dim1` and `dim2` are removed,
- and a new axis inserted at the end corresponding to the diagonal.
-
- Raises:
- TypeError: if `dim1` or `dim2` are not an int.
- ValueError: if the input tensor has less than two dimensions.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> from mindspore import Tensor, ops
- >>> from mindspore import dtype as mstype
- >>> x = Tensor([[0, 1], [2, 3]], mstype.float32)
- >>> output = ops.diagonal(x)
- >>> print(output)
- [0 3]
- """
- x_ndim = input.ndim
- if x_ndim < 2:
- raise ValueError(f"For 'ops.diagonal', the original tensor requires at least two dimensions, but got {x_ndim}")
- _check_attr_dtype("dim1", dim1, [int], "diagonal")
- _check_attr_dtype("dim2", dim2, [int], "diagonal")
- dtype = input.dtype
-
- axes = _check_diagonal_axes(dim1, dim2, x_ndim)
- perm = ()
- for i in ms_arrange(x_ndim):
- if i not in axes:
- perm += (i,)
- perm += axes
- input = input.transpose(perm)
-
- x_shape = input.shape
- n, m = x_shape[-2:]
-
- e = ops.eye(n, m, dtype)
- if offset >= m or offset <= -n:
- zero_shape = x_shape[:-2] + (0,)
- return ops.zeros(zero_shape, dtype)
- if offset != 0:
- e = e.astype(mstype.float32)
- if offset > 0:
- e_left = ops.fill(mstype.float32, (n, offset), 0)
- e_right = e[..., 0:m - offset:1]
- e = ops.cat((e_left, e_right), 1).astype(dtype)
- elif offset < 0:
- e_upper = ops.fill(mstype.float32, (-offset, m), 0)
- e_lower = e[0:n + offset:1, ...]
- e = ops.cat((e_upper, e_lower), 0).astype(dtype)
- e = ops.broadcast_to(e, x_shape)
-
- prod_val = ops.mul(input, e)
- res = ops.ReduceSum()(prod_val.astype(mstype.float32), -1)
-
- begin = ()
- for _ in ms_arrange(x_ndim - 2):
- begin += (0,)
- last_dim_begin = builtins.max(0, -offset)
- begin += (last_dim_begin,)
- res_size = res.shape[:-1]
- last_dim_end = builtins.min(x_shape[-2], builtins.max(0, x_shape[-1] - offset)) - last_dim_begin
- if last_dim_end <= 0:
- return Tensor([])
- res_size += (last_dim_end,)
- res = ops.slice(res, begin, res_size)
- return res.astype(dtype)
-
-
  def _check_is_tensor(param_name, input, cls_name):
  """Returns True if input is Tensor."""
  if not isinstance(input, Tensor):
@@ -6900,6 +5944,9 @@ def diagonal_scatter(input, src, offset=0, dim1=0, dim2=1):
  the elements in these two dimensions will be treated as elements of a matrix,
  and `src` is embedded on the diagonal of the matrix.

+ Note:
+ Currently, ``inf`` values in `input` or `src` are not supported.
+
  Args:
  input (Tensor): Input Tensor, whose dimension is larger than 1.
  src (Tensor): The source Tensor to embed.
@@ -6936,16 +5983,39 @@ def diagonal_scatter(input, src, offset=0, dim1=0, dim2=1):
  """
  _check_is_tensor("input", input, "diagonal_scatter")
  _check_is_tensor("src", src, "diagonal_scatter")
- _check_is_int(offset, "offset", "diagonal_scatter")
- _check_is_int(dim1, "dim1", "diagonal_scatter")
- _check_is_int(dim2, "dim2", "diagonal_scatter")
  input_diag = input.diagonal(offset, dim1, dim2)
  _check_diagonal_scatter_shape(input_diag.shape, src.shape)
- embed = ones_like(src)
- embed = ops.diag_embed(embed, offset, dim1, dim2)
- embed = input * embed
+ input_shape = input.shape
+ zeros_shape = list(input_shape)
+ m, n = input_shape[dim1], input_shape[dim2]
+ if m == n:
+ src = src - input_diag
+ src = ops.diag_embed(src, offset, dim1, dim2)
+ return input + src
+ if m > n:
+ axis = dim2
+ zeros_shape[axis] = m - n
+ else:
+ axis = dim1
+ zeros_shape[axis] = n - m
+ zeros_tensor = zeros(zeros_shape, dtype=input.dtype)
+ input = concat((input, zeros_tensor), axis)
+ input_diag = input.diagonal(offset, dim1, dim2)
+ if src.shape != input_diag.shape:
+ zeros_shape = []
+ for i, ax in enumerate(src.shape):
+ if ax == input_diag.shape[i]:
+ zeros_shape.append(ax)
+ else:
+ axis = i
+ zeros_shape.append(input_diag.shape[i] - ax)
+ zeros_tensor = zeros(zeros_shape, dtype=src.dtype)
+ src = concat((src, zeros_tensor), axis)
+ src = src - input_diag
  src = ops.diag_embed(src, offset, dim1, dim2)
- return input + src - embed
+ input = input + src
+ begin = (0,) * input.ndim
+ return slice(input, begin, input_shape)


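[Editor's note: a short usage sketch of `diagonal_scatter`, consistent with the square-matrix fast path above (`m == n`); illustrative only:]

    import mindspore
    from mindspore import Tensor, ops

    x = ops.zeros((3, 3), mindspore.float32)
    src = Tensor([1.0, 2.0, 3.0], mindspore.float32)
    print(ops.diagonal_scatter(x, src, offset=0))
    # [[1. 0. 0.]
    #  [0. 2. 0.]
    #  [0. 0. 3.]]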
  def lstsq(input, A):
@@ -7004,8 +6074,7 @@ def lstsq(input, A):
  [-6.5000005 -4.500001 ]
  [-3.500002 -2.5000017]]
  """
- lstsq_op = _get_cache_prim(Lstsq)()
- return lstsq_op(input, A)
+ return lstsq_(input, A)


  def mvlgamma(input, p):
@@ -7053,6 +6122,64 @@ def mvlgamma(input, p):
  return mvlgamma_op(input)


+ def nonzero(input, as_tuple=False):
+ r"""
+ Return the positions of all non-zero values.
+
+ Args:
+ input (Tensor): The input Tensor, its rank should be greater than or equal to 1.
+ as_tuple (bool, optional): Whether the output is a tuple.
+ If ``False`` , return Tensor. Default: ``False`` .
+ If ``True`` , return Tuple of Tensor, only supported on ``Ascend`` .
+
+ Returns:
+ - If `as_tuple` is ``False``, return the Tensor, a 2-D Tensor whose data type is int64,
+ containing the positions of all non-zero values of the input.
+ - If `as_tuple` is ``True``, return the Tuple of Tensor and data type is int64.
+ The Tuple length is the dimension of the input tensor,
+ and each element is the 1D tensor of the subscript of all non-zero elements of
+ the input tensor in that dimension.
+
+ Raises:
+ TypeError: If `input` is not Tensor.
+ TypeError: If `as_tuple` is not bool.
+ ValueError: If dim of `input` equals 0.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
+ >>> output = ops.nonzero(x)
+ >>> print(output)
+ [[0 0 0]
+ [0 1 0]]
+ >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
+ >>> output = ops.nonzero(x, False)
+ >>> print(output)
+ [[0]
+ [2]
+ [4]]
+ >>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
+ >>> output = ops.nonzero(x, True)
+ >>> print(output)
+ (Tensor(shape=[2], dtype=Int64, value=[0, 0]),
+ Tensor(shape=[2], dtype=Int64, value=[0, 1]),
+ Tensor(shape=[2], dtype=Int64, value=[0, 0]))
+ >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
+ >>> output = ops.nonzero(x, True)
+ >>> print(output)
+ (Tensor(shape=[3], dtype=Int64, value=[0, 2, 4]), )
+ """
+ if as_tuple:
+ return non_zero_ext_(input)
+ return non_zero_(input)
+
+
  def argwhere(input):
  """
  Return a Tensor of the positions of all non-zero values.
@@ -7080,7 +6207,7 @@ def argwhere(input):
  [[0 0 0]
  [0 1 0]]
  """
- return nonzero_(input)
+ return nonzero(input)


  def column_stack(tensors):
@@ -7117,14 +6244,13 @@ def column_stack(tensors):
  raise TypeError(f"For column_stack, the input must be list or tuple of tensors, but got {type(tensors)}.")

  trans_x = ()
- _expand_dims = _get_cache_prim(P.ExpandDims)()
  for tensor in tensors:
  if not isinstance(tensor, Tensor):
  raise TypeError(f"For column_stack, the input element must be tensor, but got {type(tensor)}.")
  if tensor.ndim < 1:
- tensor = _expand_dims(tensor, 0)
+ tensor = expand_dims(tensor, 0)
  if tensor.ndim == 1:
- tensor = _expand_dims(tensor, 1)
+ tensor = expand_dims(tensor, 1)
  trans_x += (tensor,)
  if not trans_x:
  raise ValueError(f"For column_stack, the input must have at least 1 tensor, but got 0.")
@@ -7170,7 +6296,7 @@ def hstack(tensors):
  if not isinstance(tensor, Tensor):
  raise TypeError(f"For hstack, the input element must be tensor, but got {type(tensor)}.")
  if tensor.ndim < 1:
- tensor = expand_dims_(tensor, 0)
+ tensor = expand_dims(tensor, 0)
  tuple_of_tensor += (tensor,)
  if not tuple_of_tensor:
  raise ValueError("For hstack, the input must have at least 1 tensor, but got 0.")
@@ -7270,7 +6396,7 @@ def movedim(x, source, destination):
  f"For `source` and `destination` arguments, the number of elements must be the same, but got 'source':"
  f" {len(source)} and 'destination': {len(destination)}.")
  perm = _get_moved_perm(ndim, source, destination)
- return _get_cache_prim(P.Transpose)()(x, perm)
+ return transpose_(x, perm)


  def moveaxis(x, source, destination):
@@ -7321,7 +6447,7 @@ def swapaxes(input, axis0, axis1):

  Examples:
  >>> import numpy as np
- >>> import mindspore.ops as ops
+ >>> from mindspore import ops
  >>> from mindspore import Tensor
  >>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
  >>> output = ops.swapaxes(input, 0, 2)
@@ -7345,7 +6471,7 @@ def swapaxes(input, axis0, axis1):
  new_perm = perm[0:axis0] + perm[axis1:axis1 + 1] + \
  perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1]

- return _get_cache_prim(P.Transpose)()(input, new_perm)
+ return transpose_(input, new_perm)


  def swapdims(input, dim0, dim1):
@@ -7371,7 +6497,7 @@ def swapdims(input, dim0, dim1):

  Examples:
  >>> import numpy as np
- >>> import mindspore.ops as ops
+ >>> from mindspore import ops
  >>> from mindspore import Tensor
  >>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
  >>> output = ops.swapdims(input, 0, 2)
@@ -7453,9 +6579,47 @@ def repeat_interleave(input, repeats, axis=None):
  return output


+ def repeat_interleave_ext(input, repeats, dim=None, output_size=None):
+ r"""
+ Repeat elements of a tensor along an axis, like `numpy.repeat`.
+
+ Args:
+ input (Tensor): The tensor to repeat values for. Must be of type: float16,
+ float32, int8, uint8, int16, int32, or int64.
+ repeats (Union[int, tuple, list, Tensor]): The number of times to repeat, must be positive.
+ dim (int, optional): The dim along which to repeat. Default: ``None``. If `dim` is None,
+ the input Tensor will be flattened and the output will also be flattened.
+ output_size (int, optional): Total output size for the given axis (e.g. sum of repeats),
+ Default: ``None``.
+
+ Returns:
+ One tensor with values repeated along the specified dim. If input has shape
+ :math:`(s1, s2, ..., sn)` and dim is i, the output will have shape :math:`(s1, s2, ...,
+ si * repeats, ..., sn)`. The output type will be the same as the type of `input`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
+ >>> output = ops.function.array_func.repeat_interleave_ext(input, repeats=2, dim=0)
+ >>> print(output)
+ [[0 1 2]
+ [0 1 2]
+ [3 4 5]
+ [3 4 5]]
+ """
+ if isinstance(repeats, int):
+ return repeat_interleave_int_(input, repeats, dim, output_size)
+ return repeat_interleave_tensor_(input, repeats, dim, output_size)
+
+
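[Editor's note: `numpy.repeat`, which the docstring cites as the model, shows both forms of `repeats` — a scalar, or per-element counts as in the list/Tensor form:]

    import numpy as np

    x = np.array([[0, 1, 2], [3, 4, 5]])
    print(np.repeat(x, 2, axis=0))        # matches the docstring example above
    print(np.repeat(x, [1, 2], axis=0))   # per-row counts: row 0 once, row 1 twice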
  def repeat_elements(x, rep, axis=0):
  """
- Repeat elements of a tensor along an axis, like `np.repeat` .
+ Repeat elements of a tensor along an axis, like `numpy.repeat` .

  Args:
  x (Tensor): The tensor to repeat values for. Must be of type: float16,
@@ -7493,34 +6657,19 @@ def repeat_elements(x, rep, axis=0):
  const_utils.check_type_valid(ops.dtype(x), mstype.number_type, 'input x')
  rep = _check_positive_int(rep, "rep", "repeat_elements")
  axis = _check_is_int(axis, "axis", "repeat_elements")
- shape_op = P.Shape()
- rank_op = P.Rank()
- tile_op = P.Tile()
- expand_dims_op = P.ExpandDims()
- reshape_op = P.Reshape()
- x_rank = rank_op(x)
+ x_rank = rank_(x)
  axis = _check_axis_range(axis, x_rank, "axis", "repeat_elements")
+ axis = axis + x.ndim if axis < 0 else axis
  expand_axis = axis + 1
- x_expand = expand_dims_op(x, expand_axis)
+ x_expand = expand_dims(x, expand_axis)
  rep_dims = _cal_repeat_dims(x_rank, rep, expand_axis)
- x_expand = tile_op(x_expand, rep_dims)
- x_shape = shape_op(x)
+ x_expand = tile_(x_expand, rep_dims)
+ x_shape = shape_(x)
  x_reshape = _cal_reshape(x_shape, rep, axis)
- x_rep = reshape_op(x_expand, x_reshape)
+ x_rep = reshape_(x_expand, x_reshape)
  return x_rep

- @_primexpr
- def _check_sequence_mask_input_len(input_shape, prim_name=None):
- msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
- if not input_shape:
- raise ValueError(f"{msg_prefix} input_shape must be greater than 0, but got {input_shape}.")
- # broadcast only supports 7d shape
- shape_size = len(input_shape)
- if shape_size >= 7:
- raise ValueError(f"{msg_prefix} dimension of input_shape must be less than 7, but got {shape_size}d.")
-
-
  def sequence_mask(lengths, maxlen=None):
  """
  Returns a mask tensor representing the first N positions of each cell.
@@ -7573,29 +6722,19 @@ def sequence_mask(lengths, maxlen=None):
  [[ True True False False ]
  [ True True True True ]]]
  """
-
- argmax_op = P.ArgMaxWithValue()
- reshape_op = P.Reshape()
- range_op = P.Range()
- expand_op = P.ExpandDims()
- cast_op = P.Cast()
- to_tensor_op = P.ScalarToTensor()
- shape_op = P.Shape()
-
  const_utils.check_type_valid(ops.dtype(lengths), [mstype.int64, mstype.int32], 'lengths')
- _check_sequence_mask_input_len(shape_op(lengths), "sequence_mask")

  if maxlen is None:
- flatten_data = reshape_op(lengths, (-1,))
- flatten_data = cast_op(flatten_data, mstype.float32)
- _, value = argmax_op(flatten_data)
- maxlen = cast_op(value, mstype.int32)
+ flatten_data = reshape_(lengths, (-1,))
+ flatten_data = cast_(flatten_data, mstype.float32)
+ _, value = arg_max_with_value_(flatten_data)
+ maxlen = cast_(value, mstype.int32)
  else:
  maxlen = _check_positive_int(maxlen, "maxlen", "sequence_mask")
- maxlen = to_tensor_op(maxlen, mstype.int32)
+ maxlen = scalar_to_tensor_(maxlen, mstype.int32)

- range_vector = range_op(to_tensor_op(0, mstype.int32), maxlen, to_tensor_op(1, mstype.int32))
- mask = expand_op(lengths, -1)
+ range_vector = range_(scalar_to_tensor_(0, mstype.int32), maxlen, scalar_to_tensor_(1, mstype.int32))
+ mask = expand_dims(lengths, -1)
  result = range_vector < mask
  return result
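[Editor's note: a hedged usage sketch of `sequence_mask` matching the rewritten body — positions less than each length become True:]

    import mindspore
    from mindspore import Tensor, ops

    lengths = Tensor([1, 3], mindspore.int32)
    print(ops.sequence_mask(lengths, maxlen=4))
    # [[ True False False False]
    #  [ True  True  True False]]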
 
@@ -7608,35 +6747,6 @@ def top_k(input_x, k, sorted=True):
  return top_k_(input_x, k)


- def deepcopy(input_x):
- """
- Returns a deepcopy of input tensor.
-
- Args:
- input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-
- Returns:
- Tensor, a deepcopy of `input_x`.
-
- Raises:
- TypeError: If `input_x` is not a Tensor.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> from mindspore import Tensor, ops
- >>> input = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
- >>> output = ops.deepcopy(input)
- >>> print(output)
- [[0 1]
- [2 1]]
- """
- _deepcopy = _get_cache_prim(P.Identity)()
- return _deepcopy(input_x)
-
-
  __all__ = [
  'unique',
  'unique_with_pad',
@@ -7663,8 +6773,8 @@ __all__ = [
  'full_like',
  'dyn_shape',
  'rank',
- 'range',
  'arange',
+ 'range',
  'reshape',
  'reshape_',
  'flatten',
@@ -7773,6 +6883,7 @@ __all__ = [
  'aminmax',
  'sort',
  'top_k',
- 'deepcopy'
+ 'deepcopy',
+ 'flip',
  ]
  __all__.sort()