mindspore-2.2.14-cp38-cp38-manylinux1_x86_64.whl → mindspore-2.3.0rc1-cp38-cp38-manylinux1_x86_64.whl

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
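Wheels are ordinary zip archives, so a file-level summary like the listing below can be reproduced locally. A minimal sketch, assuming both wheels have already been downloaded from the registry (the local filenames are placeholders):

```python
# Compare the member lists of two wheel archives (wheels are zip files).
# Assumes both files exist locally; the paths are placeholders for this diff.
import zipfile

OLD = "mindspore-2.2.14-cp38-cp38-manylinux1_x86_64.whl"
NEW = "mindspore-2.3.0rc1-cp38-cp38-manylinux1_x86_64.whl"

def members(path):
    """Return the set of file names contained in a wheel archive."""
    with zipfile.ZipFile(path) as zf:
        return {info.filename for info in zf.infolist() if not info.is_dir()}

old, new = members(OLD), members(NEW)
print("files added:  ", len(new - old))
print("files removed:", len(old - new))
print("files common: ", len(old & new))
```

The per-file "+added -removed" counts in the listing can then be derived by line-diffing each common member, for example with difflib.unified_diff on the extracted text files.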

Potentially problematic release.

Files changed (1153)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +4 -4
  3. mindspore/_akg/akg/composite/build_module.py +155 -11
  4. mindspore/_akg/akg/config/repository.json +38 -0
  5. mindspore/_akg/akg/ms/info_version_adapt.py +29 -0
  6. mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -1
  7. mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +2 -1
  8. mindspore/_akg/akg/utils/composite_op_helper.py +4 -2
  9. mindspore/_akg/akg/utils/dump_ascend_meta.py +2 -2
  10. mindspore/_akg/akg/utils/gen_random.py +14 -8
  11. mindspore/_akg/akg/utils/op_dsl.py +11 -0
  12. mindspore/_akg/akg/utils/tbe_codegen_utils.py +5 -5
  13. mindspore/_c_dataengine.cpython-38-x86_64-linux-gnu.so +0 -0
  14. mindspore/_c_expression.cpython-38-x86_64-linux-gnu.so +0 -0
  15. mindspore/_c_mindrecord.cpython-38-x86_64-linux-gnu.so +0 -0
  16. mindspore/_checkparam.py +58 -0
  17. mindspore/_extends/builtin_operations.py +2 -1
  18. mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
  19. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
  20. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
  21. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
  22. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  23. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
  24. mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
  25. mindspore/_extends/parse/__init__.py +18 -14
  26. mindspore/_extends/parse/compile_config.py +229 -0
  27. mindspore/_extends/parse/parser.py +155 -59
  28. mindspore/_extends/parse/resources.py +40 -7
  29. mindspore/_extends/parse/standard_method.py +124 -204
  30. mindspore/_extends/remote/kernel_build_server.py +2 -0
  31. mindspore/_mindspore_offline_debug.cpython-38-x86_64-linux-gnu.so +0 -0
  32. mindspore/_profiler.py +30 -0
  33. mindspore/amp.py +24 -18
  34. mindspore/bin/cache_admin +0 -0
  35. mindspore/bin/cache_server +0 -0
  36. mindspore/boost/boost_cell_wrapper.py +1 -1
  37. mindspore/boost/group_loss_scale_manager.py +1 -1
  38. mindspore/common/__init__.py +3 -1
  39. mindspore/common/_jit_fallback_utils.py +2 -3
  40. mindspore/common/_register_for_adapter.py +7 -0
  41. mindspore/common/_stub_tensor.py +6 -1
  42. mindspore/common/_utils.py +5 -17
  43. mindspore/common/api.py +91 -48
  44. mindspore/common/auto_dynamic_shape.py +27 -14
  45. mindspore/common/dtype.py +5 -4
  46. mindspore/common/dump.py +5 -4
  47. mindspore/common/initializer.py +1 -1
  48. mindspore/common/jit_config.py +20 -11
  49. mindspore/common/lazy_inline.py +58 -17
  50. mindspore/common/mindir_util.py +12 -2
  51. mindspore/common/mutable.py +79 -14
  52. mindspore/common/parameter.py +19 -4
  53. mindspore/common/seed.py +9 -9
  54. mindspore/common/sparse_tensor.py +251 -18
  55. mindspore/common/symbol.py +122 -0
  56. mindspore/common/tensor.py +321 -433
  57. mindspore/communication/__init__.py +3 -3
  58. mindspore/communication/_comm_helper.py +5 -0
  59. mindspore/communication/management.py +53 -38
  60. mindspore/config/op_info.config +22 -54
  61. mindspore/context.py +167 -59
  62. mindspore/dataset/__init__.py +5 -5
  63. mindspore/dataset/audio/__init__.py +6 -6
  64. mindspore/dataset/audio/transforms.py +711 -158
  65. mindspore/dataset/callback/ds_callback.py +2 -2
  66. mindspore/dataset/engine/cache_client.py +2 -2
  67. mindspore/dataset/engine/datasets.py +72 -38
  68. mindspore/dataset/engine/datasets_audio.py +14 -14
  69. mindspore/dataset/engine/datasets_standard_format.py +33 -3
  70. mindspore/dataset/engine/datasets_text.py +38 -38
  71. mindspore/dataset/engine/datasets_user_defined.py +7 -7
  72. mindspore/dataset/engine/datasets_vision.py +75 -71
  73. mindspore/dataset/engine/offload.py +5 -7
  74. mindspore/dataset/text/__init__.py +3 -3
  75. mindspore/dataset/text/transforms.py +408 -121
  76. mindspore/dataset/text/utils.py +9 -9
  77. mindspore/dataset/transforms/__init__.py +1 -1
  78. mindspore/dataset/transforms/transforms.py +261 -76
  79. mindspore/dataset/utils/browse_dataset.py +9 -9
  80. mindspore/dataset/vision/__init__.py +3 -3
  81. mindspore/dataset/vision/c_transforms.py +5 -5
  82. mindspore/dataset/vision/transforms.py +2264 -514
  83. mindspore/dataset/vision/utils.py +40 -9
  84. mindspore/dataset/vision/validators.py +7 -1
  85. mindspore/experimental/optim/__init__.py +12 -2
  86. mindspore/experimental/optim/adadelta.py +161 -0
  87. mindspore/experimental/optim/adagrad.py +168 -0
  88. mindspore/experimental/optim/adam.py +35 -34
  89. mindspore/experimental/optim/adamax.py +170 -0
  90. mindspore/experimental/optim/adamw.py +40 -16
  91. mindspore/experimental/optim/asgd.py +153 -0
  92. mindspore/experimental/optim/lr_scheduler.py +60 -119
  93. mindspore/experimental/optim/nadam.py +157 -0
  94. mindspore/experimental/optim/optimizer.py +15 -8
  95. mindspore/experimental/optim/radam.py +194 -0
  96. mindspore/experimental/optim/rmsprop.py +154 -0
  97. mindspore/experimental/optim/rprop.py +164 -0
  98. mindspore/experimental/optim/sgd.py +28 -19
  99. mindspore/hal/__init__.py +34 -0
  100. mindspore/hal/_ascend.py +57 -0
  101. mindspore/hal/_base.py +57 -0
  102. mindspore/hal/_cpu.py +56 -0
  103. mindspore/hal/_gpu.py +57 -0
  104. mindspore/hal/device.py +356 -0
  105. mindspore/hal/event.py +179 -0
  106. mindspore/hal/stream.py +337 -0
  107. mindspore/include/api/data_type.h +2 -2
  108. mindspore/include/api/dual_abi_helper.h +16 -3
  109. mindspore/include/api/model.h +1 -3
  110. mindspore/include/api/status.h +14 -0
  111. mindspore/include/c_api/model_c.h +173 -0
  112. mindspore/include/c_api/ms/base/types.h +1 -0
  113. mindspore/include/c_api/types_c.h +19 -0
  114. mindspore/include/dataset/execute.h +1 -3
  115. mindspore/include/mindapi/base/format.h +125 -23
  116. mindspore/include/mindapi/base/types.h +7 -0
  117. mindspore/lib/libdnnl.so.2 +0 -0
  118. mindspore/lib/libmindspore.so +0 -0
  119. mindspore/lib/libmindspore_backend.so +0 -0
  120. mindspore/lib/libmindspore_common.so +0 -0
  121. mindspore/lib/libmindspore_core.so +0 -0
  122. mindspore/lib/libmindspore_glog.so.0 +0 -0
  123. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  124. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  125. mindspore/lib/libmindspore_shared_lib.so +0 -0
  126. mindspore/lib/libmpi_adapter.so +0 -0
  127. mindspore/lib/libmpi_collective.so +0 -0
  128. mindspore/lib/libnnacl.so +0 -0
  129. mindspore/lib/libopencv_core.so.4.5 +0 -0
  130. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  131. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  132. mindspore/lib/libps_cache.so +0 -0
  133. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +2044 -154
  134. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +2044 -33
  135. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/build_tbe_kernel.py +529 -0
  136. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/compiler.py +56 -0
  137. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/custom.py +1109 -0
  138. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/get_file_path.py +36 -0
  139. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  140. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/tbe_topi.py +556 -0
  141. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  142. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  143. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6325 -1767
  144. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  145. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_add_custom.h +49 -0
  146. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +59 -0
  147. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +59 -0
  148. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
  149. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +52 -0
  150. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +232 -0
  151. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +232 -0
  152. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
  153. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
  154. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.cpp +192 -0
  155. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +134 -0
  156. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.cpp +274 -0
  157. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +134 -0
  158. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
  159. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  160. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +39 -0
  161. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
  162. mindspore/lib/plugin/ascend/libakg.so +0 -0
  163. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  164. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  165. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  166. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  167. mindspore/lib/plugin/cpu/libakg.so +0 -0
  168. mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
  169. mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
  170. mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
  171. mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
  172. mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
  173. mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
  174. mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
  175. mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
  176. mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
  177. mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
  178. mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
  179. mindspore/lib/plugin/{libmindspore_ascend.so.1 → libmindspore_ascend.so.2} +0 -0
  180. mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
  181. mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
  182. mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
  183. mindspore/mindrecord/__init__.py +5 -1
  184. mindspore/mindrecord/config.py +809 -0
  185. mindspore/mindrecord/filereader.py +25 -0
  186. mindspore/mindrecord/filewriter.py +74 -56
  187. mindspore/mindrecord/mindpage.py +40 -6
  188. mindspore/mindrecord/shardutils.py +3 -2
  189. mindspore/mindrecord/shardwriter.py +7 -0
  190. mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
  191. mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
  192. mindspore/mindrecord/tools/csv_to_mr.py +4 -9
  193. mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
  194. mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
  195. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
  196. mindspore/multiprocessing/__init__.py +68 -0
  197. mindspore/nn/cell.py +86 -133
  198. mindspore/nn/dynamic_lr.py +2 -2
  199. mindspore/nn/layer/activation.py +79 -90
  200. mindspore/nn/layer/basic.py +4 -80
  201. mindspore/nn/layer/channel_shuffle.py +3 -16
  202. mindspore/nn/layer/container.py +3 -3
  203. mindspore/nn/layer/conv.py +71 -71
  204. mindspore/nn/layer/embedding.py +105 -44
  205. mindspore/nn/layer/image.py +4 -7
  206. mindspore/nn/layer/normalization.py +46 -38
  207. mindspore/nn/layer/padding.py +26 -39
  208. mindspore/nn/layer/pooling.py +13 -9
  209. mindspore/nn/layer/rnn_cells.py +5 -15
  210. mindspore/nn/layer/rnns.py +6 -5
  211. mindspore/nn/layer/thor_layer.py +1 -2
  212. mindspore/nn/layer/timedistributed.py +1 -1
  213. mindspore/nn/layer/transformer.py +52 -50
  214. mindspore/nn/learning_rate_schedule.py +6 -5
  215. mindspore/nn/loss/loss.py +43 -64
  216. mindspore/nn/optim/ada_grad.py +4 -2
  217. mindspore/nn/optim/adadelta.py +3 -1
  218. mindspore/nn/optim/adafactor.py +1 -1
  219. mindspore/nn/optim/adam.py +102 -181
  220. mindspore/nn/optim/adamax.py +4 -2
  221. mindspore/nn/optim/adasum.py +2 -2
  222. mindspore/nn/optim/asgd.py +4 -2
  223. mindspore/nn/optim/ftrl.py +31 -61
  224. mindspore/nn/optim/lamb.py +5 -3
  225. mindspore/nn/optim/lars.py +2 -2
  226. mindspore/nn/optim/lazyadam.py +6 -4
  227. mindspore/nn/optim/momentum.py +13 -25
  228. mindspore/nn/optim/optimizer.py +6 -3
  229. mindspore/nn/optim/proximal_ada_grad.py +4 -2
  230. mindspore/nn/optim/rmsprop.py +9 -3
  231. mindspore/nn/optim/rprop.py +4 -2
  232. mindspore/nn/optim/sgd.py +6 -5
  233. mindspore/nn/optim/thor.py +2 -2
  234. mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
  235. mindspore/nn/probability/distribution/beta.py +2 -2
  236. mindspore/nn/probability/distribution/categorical.py +4 -6
  237. mindspore/nn/probability/distribution/cauchy.py +2 -2
  238. mindspore/nn/probability/distribution/exponential.py +1 -1
  239. mindspore/nn/probability/distribution/gumbel.py +2 -2
  240. mindspore/nn/probability/distribution/poisson.py +2 -2
  241. mindspore/nn/probability/distribution/uniform.py +2 -2
  242. mindspore/nn/reinforcement/_tensors_queue.py +13 -1
  243. mindspore/nn/wrap/__init__.py +2 -1
  244. mindspore/nn/wrap/cell_wrapper.py +33 -12
  245. mindspore/nn/wrap/grad_reducer.py +148 -8
  246. mindspore/nn/wrap/loss_scale.py +7 -7
  247. mindspore/numpy/__init__.py +2 -0
  248. mindspore/numpy/array_creations.py +2 -0
  249. mindspore/numpy/array_ops.py +1 -5
  250. mindspore/numpy/fft.py +431 -0
  251. mindspore/numpy/math_ops.py +54 -60
  252. mindspore/numpy/utils.py +3 -0
  253. mindspore/ops/__init__.py +5 -4
  254. mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
  255. mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -22
  256. mindspore/ops/_grad_experimental/grad_math_ops.py +68 -283
  257. mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
  258. mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
  259. mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
  260. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
  261. mindspore/ops/_op_impl/__init__.py +0 -1
  262. mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
  263. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
  264. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
  265. mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
  266. mindspore/ops/_op_impl/cpu/__init__.py +1 -3
  267. mindspore/ops/_op_impl/cpu/adam.py +2 -2
  268. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
  269. mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
  270. mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
  271. mindspore/ops/_vmap/vmap_array_ops.py +137 -101
  272. mindspore/ops/_vmap/vmap_base.py +8 -1
  273. mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
  274. mindspore/ops/_vmap/vmap_grad_nn_ops.py +102 -56
  275. mindspore/ops/_vmap/vmap_image_ops.py +70 -13
  276. mindspore/ops/_vmap/vmap_math_ops.py +74 -49
  277. mindspore/ops/_vmap/vmap_nn_ops.py +164 -89
  278. mindspore/ops/_vmap/vmap_other_ops.py +1 -1
  279. mindspore/ops/auto_generate/__init__.py +31 -0
  280. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +133 -0
  281. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +248 -0
  282. mindspore/ops/auto_generate/gen_arg_handler.py +147 -0
  283. mindspore/ops/auto_generate/gen_extend_func.py +130 -0
  284. mindspore/ops/auto_generate/gen_ops_def.py +4786 -0
  285. mindspore/ops/auto_generate/gen_ops_prim.py +8335 -0
  286. mindspore/ops/auto_generate/pyboost_inner_prim.py +77 -0
  287. mindspore/ops/composite/__init__.py +5 -2
  288. mindspore/ops/composite/base.py +118 -17
  289. mindspore/ops/composite/math_ops.py +9 -48
  290. mindspore/ops/composite/multitype_ops/_compile_utils.py +166 -601
  291. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +15 -133
  292. mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
  293. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
  294. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
  295. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
  296. mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
  297. mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
  298. mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
  299. mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
  300. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
  301. mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
  302. mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
  303. mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
  304. mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
  305. mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
  306. mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
  307. mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
  308. mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
  309. mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
  310. mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
  311. mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
  312. mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
  313. mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
  314. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
  315. mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
  316. mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
  317. mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
  318. mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
  319. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
  320. mindspore/ops/deprecated.py +14 -3
  321. mindspore/ops/extend/__init__.py +46 -0
  322. mindspore/ops/extend/array_func.py +152 -0
  323. mindspore/ops/extend/math_func.py +76 -0
  324. mindspore/ops/{_op_impl/tbe/atomic_addr_clean.py → extend/nn_func.py} +5 -15
  325. mindspore/ops/function/__init__.py +19 -11
  326. mindspore/ops/function/array_func.py +251 -1440
  327. mindspore/ops/function/clip_func.py +12 -13
  328. mindspore/ops/function/debug_func.py +1 -4
  329. mindspore/ops/function/fft_func.py +31 -0
  330. mindspore/ops/function/grad/grad_func.py +24 -17
  331. mindspore/ops/function/image_func.py +27 -21
  332. mindspore/ops/function/linalg_func.py +35 -68
  333. mindspore/ops/function/math_func.py +451 -2360
  334. mindspore/ops/function/nn_func.py +459 -780
  335. mindspore/ops/function/other_func.py +4 -5
  336. mindspore/ops/function/parameter_func.py +5 -93
  337. mindspore/ops/function/random_func.py +24 -80
  338. mindspore/ops/function/sparse_unary_func.py +9 -16
  339. mindspore/ops/function/spectral_func.py +1 -1
  340. mindspore/ops/function/vmap_func.py +14 -14
  341. mindspore/ops/functional.py +56 -62
  342. mindspore/ops/op_info_register.py +22 -19
  343. mindspore/ops/operations/__init__.py +19 -19
  344. mindspore/ops/operations/_grad_ops.py +20 -723
  345. mindspore/ops/operations/_inner_ops.py +178 -286
  346. mindspore/ops/operations/_scalar_ops.py +5 -480
  347. mindspore/ops/operations/_sequence_ops.py +4 -34
  348. mindspore/ops/operations/array_ops.py +99 -2491
  349. mindspore/ops/operations/comm_ops.py +38 -46
  350. mindspore/ops/operations/custom_ops.py +8 -8
  351. mindspore/ops/operations/debug_ops.py +100 -31
  352. mindspore/ops/operations/image_ops.py +1 -217
  353. mindspore/ops/operations/inner_ops.py +3 -38
  354. mindspore/ops/operations/linalg_ops.py +1 -49
  355. mindspore/{rewrite/ast_transformers → ops/operations/manually_defined}/__init__.py +11 -4
  356. mindspore/ops/operations/manually_defined/_inner.py +61 -0
  357. mindspore/ops/operations/manually_defined/ops_def.py +1391 -0
  358. mindspore/ops/operations/math_ops.py +703 -4601
  359. mindspore/ops/operations/nn_ops.py +374 -1748
  360. mindspore/ops/operations/other_ops.py +50 -42
  361. mindspore/ops/operations/random_ops.py +3 -52
  362. mindspore/ops/primitive.py +196 -96
  363. mindspore/ops_generate/__init__.py +27 -0
  364. mindspore/ops_generate/arg_dtype_cast.py +248 -0
  365. mindspore/ops_generate/arg_handler.py +147 -0
  366. mindspore/ops_generate/gen_aclnn_implement.py +266 -0
  367. mindspore/ops_generate/gen_ops.py +1062 -0
  368. mindspore/ops_generate/gen_ops_inner_prim.py +129 -0
  369. mindspore/ops_generate/gen_pyboost_func.py +932 -0
  370. mindspore/ops_generate/gen_utils.py +188 -0
  371. mindspore/ops_generate/op_proto.py +138 -0
  372. mindspore/ops_generate/pyboost_utils.py +364 -0
  373. mindspore/ops_generate/template.py +238 -0
  374. mindspore/parallel/__init__.py +5 -4
  375. mindspore/parallel/_auto_parallel_context.py +21 -76
  376. mindspore/parallel/_cell_wrapper.py +16 -9
  377. mindspore/parallel/_cost_model_context.py +1 -1
  378. mindspore/parallel/_dp_allreduce_fusion.py +159 -159
  379. mindspore/parallel/_parallel_serialization.py +30 -46
  380. mindspore/parallel/_ps_context.py +1 -1
  381. mindspore/parallel/_recovery_context.py +1 -1
  382. mindspore/parallel/_tensor.py +19 -7
  383. mindspore/parallel/_transformer/__init__.py +1 -1
  384. mindspore/parallel/_transformer/layers.py +1 -1
  385. mindspore/parallel/_transformer/loss.py +1 -1
  386. mindspore/parallel/_transformer/moe.py +1 -1
  387. mindspore/parallel/_transformer/op_parallel_config.py +1 -1
  388. mindspore/parallel/_transformer/transformer.py +1 -1
  389. mindspore/parallel/_utils.py +131 -6
  390. mindspore/parallel/algo_parameter_config.py +6 -6
  391. mindspore/parallel/checkpoint_transform.py +180 -196
  392. mindspore/parallel/cluster/__init__.py +15 -0
  393. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  394. mindspore/parallel/cluster/process_entity/_api.py +345 -0
  395. mindspore/parallel/cluster/process_entity/_utils.py +116 -0
  396. mindspore/parallel/cluster/run.py +139 -0
  397. mindspore/parallel/mpi/__init__.py +1 -1
  398. mindspore/parallel/mpi/_mpi_config.py +1 -1
  399. mindspore/parallel/parameter_broadcast.py +152 -0
  400. mindspore/parallel/shard.py +99 -2
  401. mindspore/profiler/common/util.py +20 -0
  402. mindspore/profiler/envprofiling.py +1 -1
  403. mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
  404. mindspore/profiler/parser/ascend_analysis/constant.py +66 -0
  405. mindspore/profiler/parser/ascend_analysis/file_manager.py +77 -0
  406. mindspore/profiler/parser/ascend_analysis/function_event.py +146 -0
  407. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +108 -0
  408. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +80 -0
  409. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +52 -0
  410. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +104 -0
  411. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  412. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +59 -0
  413. mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
  414. mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
  415. mindspore/profiler/parser/ascend_flops_generator.py +20 -4
  416. mindspore/profiler/parser/ascend_hccl_generator.py +25 -277
  417. mindspore/profiler/parser/ascend_msprof_exporter.py +112 -132
  418. mindspore/profiler/parser/ascend_msprof_generator.py +68 -285
  419. mindspore/profiler/parser/ascend_op_generator.py +75 -42
  420. mindspore/profiler/parser/ascend_timeline_generator.py +293 -135
  421. mindspore/profiler/parser/base_timeline_generator.py +6 -0
  422. mindspore/profiler/parser/framework_parser.py +3 -2
  423. mindspore/profiler/parser/integrator.py +3 -1
  424. mindspore/profiler/parser/msadvisor_analyzer.py +1 -1
  425. mindspore/profiler/parser/msadvisor_parser.py +1 -1
  426. mindspore/profiler/parser/profiler_info.py +5 -0
  427. mindspore/profiler/profiling.py +296 -166
  428. mindspore/rewrite/__init__.py +2 -13
  429. mindspore/rewrite/api/node.py +121 -35
  430. mindspore/rewrite/api/pattern_engine.py +2 -3
  431. mindspore/rewrite/api/scoped_value.py +16 -15
  432. mindspore/rewrite/api/symbol_tree.py +45 -29
  433. mindspore/rewrite/ast_helpers/__init__.py +3 -6
  434. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  435. mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
  436. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  437. mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
  438. mindspore/rewrite/common/__init__.py +1 -2
  439. mindspore/rewrite/common/config.py +24 -0
  440. mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
  441. mindspore/rewrite/{namer.py → common/namer.py} +63 -18
  442. mindspore/rewrite/common/namespace.py +118 -0
  443. mindspore/rewrite/node/__init__.py +5 -5
  444. mindspore/rewrite/node/call_function.py +23 -7
  445. mindspore/rewrite/node/cell_container.py +7 -3
  446. mindspore/rewrite/node/control_flow.py +53 -28
  447. mindspore/rewrite/node/node.py +212 -196
  448. mindspore/rewrite/node/node_manager.py +51 -22
  449. mindspore/rewrite/node/node_topological_manager.py +3 -23
  450. mindspore/rewrite/parsers/__init__.py +12 -0
  451. mindspore/rewrite/parsers/arguments_parser.py +8 -9
  452. mindspore/rewrite/parsers/assign_parser.py +635 -413
  453. mindspore/rewrite/parsers/attribute_parser.py +3 -4
  454. mindspore/rewrite/parsers/class_def_parser.py +107 -144
  455. mindspore/rewrite/parsers/constant_parser.py +5 -5
  456. mindspore/rewrite/parsers/container_parser.py +4 -6
  457. mindspore/rewrite/parsers/expr_parser.py +55 -0
  458. mindspore/rewrite/parsers/for_parser.py +31 -98
  459. mindspore/rewrite/parsers/function_def_parser.py +13 -5
  460. mindspore/rewrite/parsers/if_parser.py +28 -10
  461. mindspore/rewrite/parsers/module_parser.py +8 -182
  462. mindspore/rewrite/parsers/parser.py +1 -5
  463. mindspore/rewrite/parsers/parser_register.py +1 -1
  464. mindspore/rewrite/parsers/return_parser.py +5 -10
  465. mindspore/rewrite/parsers/while_parser.py +59 -0
  466. mindspore/rewrite/sparsify/utils.py +1 -1
  467. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  468. mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
  469. mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
  470. mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
  471. mindspore/run_check/_check_version.py +6 -14
  472. mindspore/run_check/run_check.py +1 -1
  473. mindspore/safeguard/rewrite_obfuscation.py +9 -19
  474. mindspore/scipy/__init__.py +2 -1
  475. mindspore/scipy/fft.py +133 -0
  476. mindspore/scipy/linalg.py +140 -55
  477. mindspore/scipy/ops.py +15 -71
  478. mindspore/scipy/ops_grad.py +5 -34
  479. mindspore/scipy/optimize/line_search.py +2 -2
  480. mindspore/scipy/optimize/minimize.py +1 -1
  481. mindspore/train/__init__.py +3 -2
  482. mindspore/train/_utils.py +178 -4
  483. mindspore/train/amp.py +167 -245
  484. mindspore/train/callback/_backup_and_restore.py +4 -4
  485. mindspore/train/callback/_callback.py +4 -4
  486. mindspore/train/callback/_checkpoint.py +39 -13
  487. mindspore/train/callback/_early_stop.py +2 -2
  488. mindspore/train/callback/_landscape.py +14 -8
  489. mindspore/train/callback/_loss_monitor.py +2 -2
  490. mindspore/train/callback/_on_request_exit.py +2 -2
  491. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  492. mindspore/train/callback/_summary_collector.py +7 -7
  493. mindspore/train/callback/_time_monitor.py +2 -2
  494. mindspore/train/data_sink.py +1 -1
  495. mindspore/train/dataset_helper.py +13 -4
  496. mindspore/train/loss_scale_manager.py +2 -2
  497. mindspore/train/metrics/accuracy.py +7 -7
  498. mindspore/train/metrics/confusion_matrix.py +8 -6
  499. mindspore/train/metrics/cosine_similarity.py +6 -4
  500. mindspore/train/metrics/error.py +2 -2
  501. mindspore/train/metrics/metric.py +3 -3
  502. mindspore/train/metrics/perplexity.py +2 -1
  503. mindspore/train/metrics/topk.py +2 -2
  504. mindspore/train/mind_ir_pb2.py +75 -6
  505. mindspore/train/model.py +24 -22
  506. mindspore/train/serialization.py +256 -132
  507. mindspore/train/summary/summary_record.py +51 -28
  508. mindspore/train/train_thor/convert_utils.py +3 -3
  509. mindspore/version.py +1 -1
  510. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/METADATA +2 -2
  511. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/RECORD +514 -1060
  512. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/entry_points.txt +1 -0
  513. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
  514. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
  515. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
  516. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
  517. mindspore/config/super_bar_config.json +0 -544
  518. mindspore/gen_ops.py +0 -273
  519. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
  520. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  521. mindspore/nn/layer/flash_attention.py +0 -189
  522. mindspore/ops/_op_impl/cpu/concat.py +0 -39
  523. mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
  524. mindspore/ops/_op_impl/tbe/__init__.py +0 -47
  525. mindspore/ops/_op_impl/tbe/abs.py +0 -38
  526. mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
  527. mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
  528. mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
  529. mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
  530. mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
  531. mindspore/ops/_op_impl/tbe/acos.py +0 -37
  532. mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
  533. mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
  534. mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
  535. mindspore/ops/_op_impl/tbe/acosh.py +0 -37
  536. mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
  537. mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
  538. mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
  539. mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
  540. mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
  541. mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
  542. mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
  543. mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
  544. mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
  545. mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
  546. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
  547. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
  548. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
  549. mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
  550. mindspore/ops/_op_impl/tbe/add.py +0 -42
  551. mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
  552. mindspore/ops/_op_impl/tbe/add_n.py +0 -39
  553. mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
  554. mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
  555. mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
  556. mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
  557. mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
  558. mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
  559. mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
  560. mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
  561. mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
  562. mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
  563. mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
  564. mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
  565. mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
  566. mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
  567. mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
  568. mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
  569. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
  570. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
  571. mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
  572. mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
  573. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
  574. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
  575. mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
  576. mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
  577. mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
  578. mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
  579. mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
  580. mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
  581. mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
  582. mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
  583. mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
  584. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
  585. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
  586. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
  587. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
  588. mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
  589. mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
  590. mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
  591. mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
  592. mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
  593. mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
  594. mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
  595. mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
  596. mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
  597. mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
  598. mindspore/ops/_op_impl/tbe/asin.py +0 -37
  599. mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
  600. mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
  601. mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
  602. mindspore/ops/_op_impl/tbe/asinh.py +0 -37
  603. mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
  604. mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
  605. mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
  606. mindspore/ops/_op_impl/tbe/assign.py +0 -79
  607. mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
  608. mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
  609. mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
  610. mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
  611. mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
  612. mindspore/ops/_op_impl/tbe/atan.py +0 -37
  613. mindspore/ops/_op_impl/tbe/atan2.py +0 -38
  614. mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
  615. mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
  616. mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
  617. mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
  618. mindspore/ops/_op_impl/tbe/atanh.py +0 -37
  619. mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
  620. mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
  621. mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
  622. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
  623. mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
  624. mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
  625. mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
  626. mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
  627. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
  628. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
  629. mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
  630. mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
  631. mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
  632. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
  633. mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
  634. mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
  635. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
  636. mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
  637. mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
  638. mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
  639. mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
  640. mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
  641. mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
  642. mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
  643. mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
  644. mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
  645. mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
  646. mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
  647. mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
  648. mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
  649. mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
  650. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
  651. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
  652. mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
  653. mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
  654. mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
  655. mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
  656. mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
  657. mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
  658. mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
  659. mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
  660. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
  661. mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
  662. mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
  663. mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
  664. mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
  665. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
  666. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
  667. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
  668. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
  669. mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
  670. mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
  671. mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
  672. mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
  673. mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
  674. mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
  675. mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
  676. mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
  677. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
  678. mindspore/ops/_op_impl/tbe/cast.py +0 -55
  679. mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
  680. mindspore/ops/_op_impl/tbe/cdist.py +0 -38
  681. mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
  682. mindspore/ops/_op_impl/tbe/ceil.py +0 -37
  683. mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
  684. mindspore/ops/_op_impl/tbe/celu.py +0 -39
  685. mindspore/ops/_op_impl/tbe/centralization.py +0 -39
  686. mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
  687. mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
  688. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
  689. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
  690. mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
  691. mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
  692. mindspore/ops/_op_impl/tbe/concat.py +0 -40
  693. mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
  694. mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
  695. mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
  696. mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
  697. mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
  698. mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
  699. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
  700. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
  701. mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
  702. mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
  703. mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
  704. mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
  705. mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
  706. mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
  707. mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
  708. mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
  709. mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
  710. mindspore/ops/_op_impl/tbe/cos.py +0 -37
  711. mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
  712. mindspore/ops/_op_impl/tbe/cosh.py +0 -37
  713. mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
  714. mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
  715. mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
  716. mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
  717. mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
  718. mindspore/ops/_op_impl/tbe/cummin.py +0 -41
  719. mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
  720. mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
  721. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
  722. mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
  723. mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
  724. mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
  725. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
  726. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
  727. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
  728. mindspore/ops/_op_impl/tbe/diag.py +0 -38
  729. mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
  730. mindspore/ops/_op_impl/tbe/dilation.py +0 -40
  731. mindspore/ops/_op_impl/tbe/div.py +0 -41
  732. mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
  733. mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
  734. mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
  735. mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
  736. mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
  737. mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
  738. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
  739. mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
  740. mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
  741. mindspore/ops/_op_impl/tbe/elu.py +0 -38
  742. mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
  743. mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
  744. mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
  745. mindspore/ops/_op_impl/tbe/equal.py +0 -42
  746. mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
  747. mindspore/ops/_op_impl/tbe/erf.py +0 -37
  748. mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
  749. mindspore/ops/_op_impl/tbe/erfc.py +0 -37
  750. mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
  751. mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
  752. mindspore/ops/_op_impl/tbe/exp.py +0 -40
  753. mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
  754. mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
  755. mindspore/ops/_op_impl/tbe/expm1.py +0 -37
  756. mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
  757. mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
  758. mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
  759. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
  760. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
  761. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
  762. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
  763. mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
  764. mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
  765. mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
  766. mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
  767. mindspore/ops/_op_impl/tbe/fill.py +0 -56
  768. mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
  769. mindspore/ops/_op_impl/tbe/flatten.py +0 -48
  770. mindspore/ops/_op_impl/tbe/floor.py +0 -37
  771. mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
  772. mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
  773. mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
  774. mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
  775. mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
  776. mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
  777. mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
  778. mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
  779. mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
  780. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
  781. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
  782. mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
  783. mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
  784. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  785. mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
  786. mindspore/ops/_op_impl/tbe/gelu.py +0 -37
  787. mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
  788. mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
  789. mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
  790. mindspore/ops/_op_impl/tbe/ger.py +0 -43
  791. mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
  792. mindspore/ops/_op_impl/tbe/greater.py +0 -43
  793. mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
  794. mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
  795. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
  796. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
  797. mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
  798. mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
  799. mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
  800. mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
  801. mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
  802. mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
  803. mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
  804. mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
  805. mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
  806. mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
  807. mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
  808. mindspore/ops/_op_impl/tbe/im2col.py +0 -42
  809. mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
  810. mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
  811. mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
  812. mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
  813. mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
  814. mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
  815. mindspore/ops/_op_impl/tbe/inv.py +0 -38
  816. mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
  817. mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
  818. mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
  819. mindspore/ops/_op_impl/tbe/invert.py +0 -37
  820. mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
  821. mindspore/ops/_op_impl/tbe/iou.py +0 -38
  822. mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
  823. mindspore/ops/_op_impl/tbe/is_close.py +0 -40
  824. mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
  825. mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
  826. mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
  827. mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
  828. mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
  829. mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
  830. mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
  831. mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
  832. mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
  833. mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
  834. mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
  835. mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
  836. mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
  837. mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
  838. mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
  839. mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
  840. mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
  841. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
  842. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
  843. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
  844. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
  845. mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
  846. mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
  847. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
  848. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
  849. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
  850. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
  851. mindspore/ops/_op_impl/tbe/lerp.py +0 -38
  852. mindspore/ops/_op_impl/tbe/less.py +0 -41
  853. mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
  854. mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
  855. mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
  856. mindspore/ops/_op_impl/tbe/log.py +0 -40
  857. mindspore/ops/_op_impl/tbe/log1p.py +0 -37
  858. mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
  859. mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
  860. mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
  861. mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
  862. mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
  863. mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
  864. mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
  865. mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
  866. mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
  867. mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
  868. mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
  869. mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
  870. mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
  871. mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
  872. mindspore/ops/_op_impl/tbe/lrn.py +0 -41
  873. mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
  874. mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
  875. mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
  876. mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
  877. mindspore/ops/_op_impl/tbe/matmul.py +0 -53
  878. mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
  879. mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
  880. mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
  881. mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
  882. mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
  883. mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
  884. mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
  885. mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
  886. mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
  887. mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
  888. mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
  889. mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
  890. mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
  891. mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
  892. mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
  893. mindspore/ops/_op_impl/tbe/maximum.py +0 -39
  894. mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
  895. mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
  896. mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
  897. mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
  898. mindspore/ops/_op_impl/tbe/minimum.py +0 -40
  899. mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
  900. mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
  901. mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
  902. mindspore/ops/_op_impl/tbe/mish.py +0 -37
  903. mindspore/ops/_op_impl/tbe/mod.py +0 -41
  904. mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
  905. mindspore/ops/_op_impl/tbe/mul.py +0 -37
  906. mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
  907. mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
  908. mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
  909. mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
  910. mindspore/ops/_op_impl/tbe/neg.py +0 -39
  911. mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
  912. mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
  913. mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
  914. mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
  915. mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
  916. mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
  917. mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
  918. mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
  919. mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
  920. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
  921. mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
  922. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
  923. mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
  924. mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
  925. mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
  926. mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
  927. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
  928. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
  929. mindspore/ops/_op_impl/tbe/pack.py +0 -58
  930. mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
  931. mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
  932. mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
  933. mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
  934. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
  935. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
  936. mindspore/ops/_op_impl/tbe/pdist.py +0 -36
  937. mindspore/ops/_op_impl/tbe/pooling.py +0 -46
  938. mindspore/ops/_op_impl/tbe/population_count.py +0 -38
  939. mindspore/ops/_op_impl/tbe/pow.py +0 -41
  940. mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
  941. mindspore/ops/_op_impl/tbe/prelu.py +0 -37
  942. mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
  943. mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
  944. mindspore/ops/_op_impl/tbe/range.py +0 -39
  945. mindspore/ops/_op_impl/tbe/real_div.py +0 -38
  946. mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
  947. mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
  948. mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
  949. mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
  950. mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
  951. mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
  952. mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
  953. mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
  954. mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
  955. mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
  956. mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
  957. mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
  958. mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
  959. mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
  960. mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
  961. mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
  962. mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
  963. mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
  964. mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
  965. mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
  966. mindspore/ops/_op_impl/tbe/relu.py +0 -39
  967. mindspore/ops/_op_impl/tbe/relu6.py +0 -38
  968. mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
  969. mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
  970. mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
  971. mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
  972. mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
  973. mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
  974. mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
  975. mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
  976. mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
  977. mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
  978. mindspore/ops/_op_impl/tbe/renorm.py +0 -39
  979. mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
  980. mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
  981. mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
  982. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
  983. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
  984. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
  985. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
  986. mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
  987. mindspore/ops/_op_impl/tbe/rint.py +0 -37
  988. mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
  989. mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
  990. mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
  991. mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
  992. mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
  993. mindspore/ops/_op_impl/tbe/roll.py +0 -42
  994. mindspore/ops/_op_impl/tbe/round.py +0 -38
  995. mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
  996. mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
  997. mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
  998. mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
  999. mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
  1000. mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
  1001. mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
  1002. mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
  1003. mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
  1004. mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
  1005. mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
  1006. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
  1007. mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
  1008. mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
  1009. mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
  1010. mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
  1011. mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
  1012. mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
  1013. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
  1014. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
  1015. mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
  1016. mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
  1017. mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
  1018. mindspore/ops/_op_impl/tbe/select.py +0 -38
  1019. mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
  1020. mindspore/ops/_op_impl/tbe/selu.py +0 -39
  1021. mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
  1022. mindspore/ops/_op_impl/tbe/sgd.py +0 -62
  1023. mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
  1024. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
  1025. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
  1026. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
  1027. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
  1028. mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
  1029. mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
  1030. mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
  1031. mindspore/ops/_op_impl/tbe/sign.py +0 -38
  1032. mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
  1033. mindspore/ops/_op_impl/tbe/sin.py +0 -37
  1034. mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
  1035. mindspore/ops/_op_impl/tbe/sinh.py +0 -37
  1036. mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
  1037. mindspore/ops/_op_impl/tbe/slice.py +0 -58
  1038. mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
  1039. mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
  1040. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
  1041. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
  1042. mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
  1043. mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
  1044. mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
  1045. mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
  1046. mindspore/ops/_op_impl/tbe/softmax.py +0 -37
  1047. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
  1048. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
  1049. mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
  1050. mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
  1051. mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
  1052. mindspore/ops/_op_impl/tbe/softplus.py +0 -37
  1053. mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
  1054. mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
  1055. mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
  1056. mindspore/ops/_op_impl/tbe/softsign.py +0 -37
  1057. mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
  1058. mindspore/ops/_op_impl/tbe/sort.py +0 -38
  1059. mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
  1060. mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
  1061. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
  1062. mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
  1063. mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
  1064. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
  1065. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
  1066. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
  1067. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
  1068. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
  1069. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
  1070. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
  1071. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
  1072. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
  1073. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
  1074. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
  1075. mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
  1076. mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
  1077. mindspore/ops/_op_impl/tbe/split_d.py +0 -38
  1078. mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
  1079. mindspore/ops/_op_impl/tbe/split_v.py +0 -39
  1080. mindspore/ops/_op_impl/tbe/splitv.py +0 -39
  1081. mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
  1082. mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
  1083. mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
  1084. mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
  1085. mindspore/ops/_op_impl/tbe/square.py +0 -38
  1086. mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
  1087. mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
  1088. mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
  1089. mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
  1090. mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
  1091. mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
  1092. mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
  1093. mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
  1094. mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
  1095. mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
  1096. mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
  1097. mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
  1098. mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
  1099. mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
  1100. mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
  1101. mindspore/ops/_op_impl/tbe/sub.py +0 -39
  1102. mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
  1103. mindspore/ops/_op_impl/tbe/tan.py +0 -38
  1104. mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
  1105. mindspore/ops/_op_impl/tbe/tanh.py +0 -37
  1106. mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
  1107. mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
  1108. mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
  1109. mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
  1110. mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
  1111. mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
  1112. mindspore/ops/_op_impl/tbe/tile.py +0 -37
  1113. mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
  1114. mindspore/ops/_op_impl/tbe/top_k.py +0 -42
  1115. mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
  1116. mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
  1117. mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
  1118. mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
  1119. mindspore/ops/_op_impl/tbe/transpose.py +0 -60
  1120. mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
  1121. mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
  1122. mindspore/ops/_op_impl/tbe/trunc.py +0 -39
  1123. mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
  1124. mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
  1125. mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
  1126. mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
  1127. mindspore/ops/_op_impl/tbe/unpack.py +0 -38
  1128. mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
  1129. mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
  1130. mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
  1131. mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
  1132. mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
  1133. mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
  1134. mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
  1135. mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
  1136. mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
  1137. mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
  1138. mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
  1139. mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
  1140. mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
  1141. mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
  1142. mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
  1143. mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
  1144. mindspore/ops/_tracefunc.py +0 -241
  1145. mindspore/ops/arg_dtype_cast.py +0 -54
  1146. mindspore/rewrite/api/tree_node_helper.py +0 -60
  1147. mindspore/rewrite/ast_creator_register.py +0 -37
  1148. mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
  1149. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
  1150. mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
  1151. mindspore/rewrite/namespace.py +0 -53
  1152. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/WHEEL +0 -0
  1153. {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
- # Copyright 2022 Huawei Technologies Co., Ltd
+ # Copyright 2022-2023 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -35,14 +35,11 @@ from mindspore.ops.operations._sequence_ops import TensorToList
  from mindspore.ops.operations.array_ops import (
  UniqueConsecutive,
  SearchSorted,
- NonZero,
  MatrixDiagV3,
  MatrixDiagPartV3,
  MatrixSetDiagV3,
  Fills,
  Col2Im,
- ArgMaxWithValue,
- ArgMinWithValue,
  ScatterNdMax,
  ScatterNdMul,
  IndexFill,
@@ -52,7 +49,9 @@ from mindspore.ops.operations.array_ops import (
  Lstsq,
  Mvlgamma,
  Tril,
- Argmax
+ Argmax,
+ ArgMaxWithValue,
+ ArgMinWithValue
  )
  from mindspore.ops.operations.array_ops import TensorScatterElements
  from mindspore.common import Tensor
@@ -61,53 +60,66 @@ from mindspore import _checkparam as validator
  from mindspore._c_expression import Tensor as Tensor_
  from mindspore.ops._utils.utils import ms_arrange

- tuple_to_tensor_ = TupleToTensor()
+ from mindspore.ops.auto_generate import cat, range, scatter_nd, deepcopy, masked_fill, diagonal, expand_dims, \
+ nonzero, reverse, transpose, unsorted_segment_sum, diag, gather, gather_d, gather_nd, reshape, broadcast_to, \
+ strided_slice
+ from mindspore.ops.operations.manually_defined import tile, rank, scalar_cast
+
+ arg_max_with_value_ = ArgMaxWithValue()
+ batch_to_space_nd_v2_ = P.BatchToSpaceNDV2()
+ cast_ = P.Cast()
+ diag_ = P.Diag()
+ dynamic_broadcast_to_ = DynamicBroadcastTo()
  eye_ = P.Eye()
  fills_ = Fills()
+ fillv2_ = P.FillV2()
+ flatten_ = P.Flatten()
+ gather_ = P.Gather()
+ gather_d_ = P.GatherD()
+ gather_nd_ = P.GatherNd()
+ ger_ = P.Ger()
+ index_fill_ = IndexFill()
+ lstsq_ = Lstsq()
+ masked_select_ = P.MaskedSelect()
+ matrix_band_part_ = P.array_ops.MatrixBandPart()
  ones_ = P.Ones()
  ones_like_ = P.OnesLike()
- tile_ = P.Tile()
- unique_with_pad_ = P.UniqueWithPad()
- size_ = P.Size()
- shape_ = P.Shape()
+ population_count_ = P.PopulationCount()
+ range_ = P.Range()
  rank_ = P.Rank()
- tensor_shape_ = P.TensorShape()
+ reduce_max_ = P.ReduceMax()
+ reduce_min_ = P.ReduceMin()
  reshape_ = P.Reshape()
- tensor_slice = P.Slice()
- expand_dims_ = P.ExpandDims()
- transpose_ = P.Transpose()
+ scalar_to_tensor_ = P.ScalarToTensor()
  scatter_add_ = P.ScatterAdd()
+ scatter_div_ = P.ScatterDiv()
  scatter_max_ = P.ScatterMax()
  scatter_min_ = P.ScatterMin()
  scatter_mul_ = P.ScatterMul()
- scatter_div_ = P.ScatterDiv()
  scatter_nd_ = P.ScatterNd()
- gather_ = P.Gather()
- gather_d_ = P.GatherD()
- gather_nd_ = P.GatherNd()
- nonzero_ = NonZero()
- scalar_cast_ = P.ScalarCast()
+ scatter_update_ = P.ScatterUpdate()
+ shape_ = P.Shape()
+ size_ = P.Size()
  tensor_scatter_add_ = P.TensorScatterAdd()
- tensor_scatter_sub_ = P.TensorScatterSub()
- tensor_scatter_mul_ = P.TensorScatterMul()
  tensor_scatter_div_ = P.TensorScatterDiv()
- tensor_scatter_min_ = P.TensorScatterMin()
  tensor_scatter_max_ = P.TensorScatterMax()
- scalar_to_tensor_ = P.ScalarToTensor()
- tuple_to_array_ = P.TupleToArray()
- masked_select_ = P.MaskedSelect()
- matrix_band_part_ = P.array_ops.MatrixBandPart()
- ger_ = P.Ger()
- diag_ = P.Diag()
- range_ = P.Range()
- zeros_like_ = P.ZerosLike()
- cast_ = P.Cast()
+ tensor_scatter_min_ = P.TensorScatterMin()
+ tensor_scatter_mul_ = P.TensorScatterMul()
+ tensor_scatter_sub_ = P.TensorScatterSub()
  tensor_select_ = P.Select()
- index_fill_ = IndexFill()
+ tensor_shape_ = P.TensorShape()
+ tensor_slice = P.Slice()
+ tile_ = P.Tile()
+ transpose_ = P.Transpose()
+ tuple_to_array_ = P.TupleToArray()
+ tuple_to_tensor_ = TupleToTensor()
+ unique_ = P.Unique()
+ unique_with_pad_ = P.UniqueWithPad()
+ unsorted_segment_max_ = P.UnsortedSegmentMax()
+ unsorted_segment_min_ = P.UnsortedSegmentMin()
+ unsorted_segment_prod_ = P.UnsortedSegmentProd()
  unsorted_segment_sum_ = P.UnsortedSegmentSum()
- population_count_ = P.PopulationCount()
- reduce_max = P.ReduceMax()
- reduce_min = P.ReduceMin()
+ zeros_like_ = P.ZerosLike()


  @_primexpr
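This hunk is the heart of the refactor: a batch of functional APIs (`cat`, `range`, `reshape`, `transpose`, `strided_slice`, `scatter_nd`, `expand_dims`, ...) now comes from `mindspore.ops.auto_generate` or `mindspore.ops.operations.manually_defined`, and the remaining primitives are instantiated once at module scope, alphabetically sorted. A minimal sketch of the singleton pattern the file now follows (names below are illustrative, not the shipped helpers):

    # Sketch: build each primitive once at import time, then reuse it on every call.
    import mindspore as ms
    from mindspore.ops import operations as P

    cast_ = P.Cast()  # module-level singleton, mirroring cast_ = P.Cast() above

    def to_float32(x):
        # Reuses the one Cast instance instead of constructing a primitive per call.
        return cast_(x, ms.float32)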
@@ -187,8 +199,11 @@ def arange(start=0, end=None, step=1, *, dtype=None):

  Keyword Args:
  dtype (mindspore.dtype, optional): The required data type of returned Tensor. Default: ``None`` .
- If the value is not specified or is ``None`` , the type with the highest precision in the
- `start`, `end`, and `step` parameters is inferred.
+ When `dtype` is not specified or ``None``:
+
+ If `start`, `end`, and `step` are all integers, the dtype of output is int64,
+
+ If `start`, `end`, and `step` contain at least one floating-point number, the dtype of output is float32.

  Returns:
  A 1-D Tensor, with the same type as the inputs.
@@ -225,7 +240,7 @@ def arange(start=0, end=None, step=1, *, dtype=None):
  >>> print(output)
  [12. 11. 10. 9. 8. 7. 6. 5. 4. 3.]
  >>> print(output.dtype)
- Float64
+ Float32
  """
  if end is None:
  start, end = 0, start
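The docstring change above pins down the inference rule that previously read "highest precision": all-integer inputs yield int64, any float input yields float32. A quick check of the documented behavior (sketch; assumes the 2.3.0rc1 semantics stated in the new docstring):

    from mindspore import ops

    print(ops.arange(0, 5, 1).dtype)          # all-integer inputs -> Int64
    print(ops.arange(12.0, 2.0, -1.0).dtype)  # any float present  -> Float32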
@@ -237,67 +252,24 @@ def arange(start=0, end=None, step=1, *, dtype=None):
  if start.shape != () or end.shape != () or step.shape != ():
  raise ValueError(f"For arange, the input args must be a TensorScalar,"
  f" but got start shape:{start.shape}, end shape:{end.shape}, step shape:{step.shape}")
- range_op = _get_cache_prim(P.Range)()
- data = range_op(start, end, step)
+ data = range_(start, end, step)
  if dtype is not None:
  data = cast_(data, dtype)
  return data


- def cat(tensors, axis=0):
- r"""
- Connect input tensors along with the given axis.
-
- The input data is a tuple or a list of tensors. These tensors have the same rank :math:`R`.
- Set the given axis as :math:`m`, and :math:`0 \le m < R`. Set the number of input tensors as :math:`N`.
- For the :math:`i`-th tensor :math:`t_i`, it has the shape of :math:`(x_1, x_2, ..., x_{mi}, ..., x_R)`.
- :math:`x_{mi}` is the :math:`m`-th dimension of the :math:`t_i`. Then, the shape of the output tensor is
-
- .. math::
-
- (x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)
-
- Args:
- tensors (Union[tuple, list]): A tuple or a list of input tensors.
- Suppose there are two tensors in this tuple or list, namely t1 and t2.
- To perform `concat` in the axis 0 direction, except for the :math:`0`-th axis,
- all other dimensions should be equal, that is,
- :math:`t1.shape[1] = t2.shape[1], t1.shape[2] = t2.shape[2], ..., t1.shape[R-1] = t2.shape[R-1]`,
- where :math:`R` represents the rank of tensor.
- axis (int): The specified axis, whose value is in range :math:`[-R, R)`. Default: ``0`` .
-
- Returns:
- Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
- The data type is the same with `tensors`.
-
- Raises:
- TypeError: If `axis` is not an int.
- ValueError: If `tensors` have different dimension of tensor.
- ValueError: If `axis` not in range :math:`[-R, R)`.
- RuntimeError: If tensor's shape in `tensors` except for `axis` are different.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ def concat(tensors, axis=0):
+ """
+ Alias for :func:`mindspore.ops.cat()`.

- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> input_x1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
- >>> input_x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
- >>> output = ops.cat((input_x1, input_x2))
- >>> print(output)
- [[0. 1.]
- [2. 1.]
- [0. 1.]
- [2. 1.]]
- >>> output = ops.cat((input_x1, input_x2), 1)
- >>> print(output)
- [[0. 1. 0. 1.]
- [2. 1. 2. 1.]]
+ Tutorial Examples:
+ - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/r2.3.q1/beginner/tensor.html#tensor-operation>`_
+ - `Vision Transformer Image Classification - Building ViT as a whole
+ <https://mindspore.cn/tutorials/application/en/r2.3.q1/cv/vit.html#building-vit-as-a-whole>`_
+ - `Sentiment Classification Implemented by RNN - Dense
+ <https://mindspore.cn/tutorials/application/en/r2.3.q1/nlp/sentiment_analysis.html#dense>`_
  """
- _concat = _get_cache_prim(P.Concat)(axis)
- return _concat(tensors)
+ return cat(tensors, axis)


  def eye(n, m=None, dtype=None):
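`cat` is no longer hand-written here; `concat` is re-declared as a thin alias that forwards to the generated `cat` kernel. A usage sketch of the equivalence, reusing the inputs from the removed docstring:

    import numpy as np
    from mindspore import Tensor, ops

    x1 = Tensor(np.array([[0, 1], [2, 1]], dtype=np.float32))
    x2 = Tensor(np.array([[0, 1], [2, 1]], dtype=np.float32))
    # Both names now resolve to the same generated kernel.
    assert np.array_equal(ops.concat((x1, x2), 1).asnumpy(),
                          ops.cat((x1, x2), 1).asnumpy())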
@@ -305,14 +277,14 @@ def eye(n, m=None, dtype=None):
  Creates a tensor with ones on the diagonal and zeros in the rest.

  Note:
- Combines ReverseV2 operator to get an anti-diagonal Tensor,
- but ReverseV2 only supports Ascend and GPU platforms currently.
+ The data type of returned tensor can be float16, float32, int8, int16, int32, int64, uint8
+ or bool on Ascend platforms.

  Args:
  n (int): The number of rows of returned tensor. Constant value only.
- m (int): The number of columns of returned tensor. Constant value only.
+ m (int, optional): The number of columns of returned tensor. Constant value only.
  Default: ``None`` , if ``None`` , the number of columns is as the same as n.
- dtype (mindspore.dtype): MindSpore's dtype, the data type of the returned tensor.
+ dtype (mindspore.dtype, optional): MindSpore's dtype, the data type of the returned tensor.
  The data type can be bool or Number.
  Default: ``None`` , the data type of the returned tensor is mindspore.float32.

@@ -336,11 +308,11 @@ def eye(n, m=None, dtype=None):
  [0 1]]
  >>> print(output.dtype)
  Int32
- >>> output = ops.eye(1, 2, mindspore.float64)
+ >>> output = ops.eye(1, 2, mindspore.float32)
  >>> print(output)
  [[1. 0.]]
  >>> print(output.dtype)
- Float64
+ Float32
  >>> output = ops.eye(2, dtype=mindspore.int32)
  >>> print(output)
  [[1 0]
@@ -472,48 +444,7 @@ def where(condition, x, y):
  condition = broadcast_to(condition, output_shape)
  x = broadcast_to(x, output_shape)
  y = broadcast_to(y, output_shape)
- _select = P.Select()
- return _select(condition, x, y)
-
-
- def reverse(x, axis):
- """
- Reverses specific dimensions of a tensor.
-
- .. warning::
- The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "input_x".
-
- Args:
- x (Tensor): The target tensor.
- The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- axis (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.
-
- Outputs:
- Tensor, has the same shape and type as `x`.
-
- Raises:
- TypeError: If `axis` is neither list nor tuple.
- TypeError: If element of `axis` is not an int.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
- >>> output = ops.reverse(input_x, axis=[1])
- >>> print(output)
- [[4 3 2 1]
- [8 7 6 5]]
- >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
- >>> output = ops.reverse(input_x, axis=[1, 0])
- >>> print(output)
- [[8 7 6 5]
- [4 3 2 1]]
- """
- return P.ReverseV2(axis)(x)
+ return tensor_select_(condition, x, y)


  def ravel(input):
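`reverse` is deleted here because it now comes from `mindspore.ops.auto_generate` (see the import hunk above), and `where` reuses the module-level `Select` singleton instead of constructing `P.Select()` per call. Behavior appears unchanged; for example:

    import numpy as np
    from mindspore import Tensor, ops

    cond = Tensor(np.array([True, False, True]))
    x = Tensor(np.array([1.0, 2.0, 3.0], dtype=np.float32))
    y = Tensor(np.array([10.0, 20.0, 30.0], dtype=np.float32))
    print(ops.where(cond, x, y))  # [ 1. 20.  3.]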
@@ -659,8 +590,9 @@ def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
  other locations take value `off_value`.

  Note:
- If the input indices is rank `N`, the output will have rank `N+1`. The new axis is created at dimension `axis`.
- On Ascend, if `on_value` is int64 dtype, `indices` must be int64 dtype.
+ If the input `indices` has rank `N`, the output will have rank `N+1`.
+ The new axis is created at dimension `axis`. On Ascend, if `on_value` is int64 dtype, `indices` must be
+ int64 dtype, and the value for `on_value` and `off_value` can only be 1 and 0.

  Args:
  indices(Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
@@ -682,6 +614,7 @@ def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
  Raises:
  TypeError: If `axis` or `depth` is not an int.
  TypeError: If dtype of `indices` is not int32 or int64.
+ TypeError: If dtype of `on_value` is not int32, int64, float16 or float32.
  TypeError: If `indices`, `on_value` or `off_value` is not a Tensor.
  ValueError: If `axis` is not in range [-1, ndim].
  ValueError: If `depth` is less than 0.
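The tightened note and the new `TypeError` appear to document an Ascend-specific constraint rather than change the common path. Typical usage stays as before (sketch):

    import mindspore as ms
    from mindspore import Tensor, ops

    indices = Tensor([0, 1, 2], ms.int32)
    on, off = Tensor(1.0, ms.float32), Tensor(0.0, ms.float32)
    print(ops.one_hot(indices, 3, on, off))
    # [[1. 0. 0.]
    #  [0. 1. 0.]
    #  [0. 0. 1.]]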
@@ -715,8 +648,8 @@ def fill(type, shape, value): # pylint: disable=redefined-outer-name

  Args:
  type (mindspore.dtype): The specified type of output tensor. The data type only supports
- `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ and
- `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
+ `bool_ <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_ and
+ `number <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_ .
  shape (Union(Tensor, tuple[int])): The specified shape of output tensor.
  value (Union(Tensor, number.Number, bool)): Value to fill the returned tensor.

@@ -743,7 +676,7 @@ def fill(type, shape, value): # pylint: disable=redefined-outer-name
  [0. 0. 0.]]
  """
  value = cast_(value, type)
- return _get_cache_prim(P.FillV2)()(shape, value)
+ return fillv2_(shape, value)


  def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-name
@@ -883,21 +816,21 @@ def chunk(input, chunks, axis=0):
  length_along_dim = arr_shape[arr_axis]

  if chunks > length_along_dim:
- res = P.Split(arr_axis, length_along_dim)(input)
+ res = _get_cache_prim(P.Split)(arr_axis, length_along_dim)(input)
  elif length_along_dim % chunks == 0:
- res = P.Split(arr_axis, chunks)(input)
+ res = _get_cache_prim(P.Split)(arr_axis, chunks)(input)
  else:
  block_size = int(np.ceil(length_along_dim / chunks))
  true_chunks = int(length_along_dim // block_size)
  length1 = true_chunks * block_size
  length2 = length_along_dim - length1
- start1 = _list_comprehensions(rank(input), 0, True)
+ start1 = _list_comprehensions(rank_(input), 0, True)
  size1 = _tuple_setitem(arr_shape, arr_axis, length1)
  start2 = _tuple_setitem(start1, arr_axis, length1)
  size2 = _tuple_setitem(arr_shape, arr_axis, length2)
- res = P.Split(arr_axis, true_chunks)(tensor_slice(input, start1, size1))
+ res = _get_cache_prim(P.Split)(arr_axis, true_chunks)(tensor_slice(input, start1, size1))
  if length2:
- res += P.Split(arr_axis, 1)(tensor_slice(input, start2, size2))
+ res += _get_cache_prim(P.Split)(arr_axis, 1)(tensor_slice(input, start2, size2))
  return res


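`chunk` previously built a fresh `P.Split` on every call; it now goes through `_get_cache_prim`, which memoizes primitive instances by type and constructor arguments. A minimal sketch of that caching pattern (illustrative only; the real helper lives in `mindspore.ops._primitive_cache`):

    _PRIM_CACHE = {}

    def _get_cache_prim_sketch(prim_cls, *args):
        # Construct each (class, args) combination once, reuse it afterwards.
        key = (prim_cls, args)
        if key not in _PRIM_CACHE:
            _PRIM_CACHE[key] = prim_cls(*args)
        return _PRIM_CACHE[key]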
@@ -952,15 +885,17 @@ def ones(shape, dtype=None): # pylint: disable=redefined-outer-name
  [1. 1.]]
  """
  _dtype = mstype.float32 if dtype is None else dtype
- ones_op = _get_cache_prim(P.FillV2)()
  value = Tensor(1, _dtype)
  if isinstance(shape, int):
  shape = tuple([shape])
  elif isinstance(shape, list):
- shape = Tensor(shape, dtype=mstype.int64)
+ if not shape:
+ shape = Tensor_(shape, dtype=mstype.int64)
+ else:
+ shape = Tensor(shape, dtype=mstype.int64)
  elif isinstance(shape, Tensor) and shape.ndim == 0 and shape.size == 1:
  shape = shape.reshape(1)
- output = ones_op(shape, value)
+ output = fillv2_(shape, value)
  return output


@@ -993,8 +928,7 @@ def ones_like(input, *, dtype=None):
  [[1 1]
  [1 1]]
  """
- ones_like_op = _get_cache_prim(P.OnesLike)()
- output = ones_like_op(input)
+ output = ones_like_(input)
  _dtype = input.dtype if dtype is None else dtype
  output = cast_(output, _dtype)
  return output
@@ -1028,22 +962,24 @@ def zeros(size, dtype=None): # pylint: disable=redefined-outer-name
  [[0. 0.]
  [0. 0.]]
  """
- zero_op = _get_cache_prim(P.FillV2)()
  _dtype = mstype.float32 if dtype is None else dtype
  value = Tensor(0, _dtype)
  if isinstance(size, int):
  size = tuple([size])
  elif isinstance(size, list):
- size = Tensor(size, dtype=mstype.int64)
+ if not size:
+ size = Tensor_(size, dtype=mstype.int64)
+ else:
+ size = Tensor(size, dtype=mstype.int64)
  elif isinstance(size, Tensor) and size.ndim == 0 and size.size == 1:
  size = size.reshape(1)
- output = zero_op(size, value)
+ output = fillv2_(size, value)
  return output


  def zeros_like(input, *, dtype=None):
  r"""
- Creates a tensor filled with 0, with the same size as x, and the given dtype.
+ Creates a tensor filled with 0, with the same size as input, and the given dtype.

  If `dtype = None`, the tensor will have the same dtype as input `input`.

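Both `ones` and `zeros` gain the same empty-list branch: an empty Python list is converted through the C++ `Tensor_` binding rather than the Python `Tensor` wrapper, presumably so a zero-length shape still reaches `FillV2` as a valid request. Ordinary calls are unaffected (sketch):

    import mindspore as ms
    from mindspore import ops

    print(ops.ones((2, 2), ms.int32))  # tuple shape
    print(ops.zeros([2, 2]))           # list shape, defaults to float32
    print(ops.ones(3).shape)           # (3,)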
@@ -1074,127 +1010,11 @@ def zeros_like(input, *, dtype=None):
  [0. 0.]]
  """
  _dtype = input.dtype if dtype is None else dtype
- _zeros_like = _get_cache_prim(P.ZerosLike)()
- _cast = _get_cache_prim(P.Cast)()
- output = _zeros_like(input)
- output = _cast(output, _dtype)
+ output = zeros_like_(input)
+ output = cast_(output, _dtype)
  return output


- def tile(input, multiples):
- r"""
- Replicates an input tensor with given multiples times.
-
- Creates a new tensor by replicating `input` `multiples` times. The i'th dimension of
- output tensor has `input.shape[i] * multiples[i]` elements, and the values of `input`
- are replicated `multiples[i]` times along the i'th dimension.
-
- Note:
- The length of `multiples` must be greater or equal to the length of dimension in `input`.
-
- Args:
- input (Tensor): 1-D or higher dimensional Tensor. Set the shape of input tensor as
- :math:`(x_1, x_2, ..., x_S)` .
-
- multiples (tuple[int]): The parameter that specifies the number of replications,
- the parameter type is tuple, and the data type is int, i.e., :math:`(y_1, y_2, ..., y_S)`.
- The length of `multiples` cannot be smaller than the length of the shape of `input`.
- Only constant value is allowed.
-
- Returns:
- Tensor, has the same data type as the `input`. Suppose the length of `multiples` is `d`,
- the dimension of `input` is `input.dim`, and the shape of `input` is :math:`(x_1, x_2, ..., x_S)`.
-
- - If `input.dim = d`, then the shape of their corresponding positions can be multiplied, and
- the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_S)`.
- - If `input.dim < d`, fill in multiple 1 in the length of the shape of `input` until their
- lengths are consistent. Such as set the shape of `input` as :math:`(1, ..., x_1, x_2, ..., x_S)`,
- then the shape of their corresponding positions can be multiplied, and the shape of Outputs is
- :math:`(1*y_1, ..., x_R*y_R, x_S*y_S)`.
-
- Raises:
- TypeError: If `multiples` is not a tuple or its elements are not all int.
- ValueError: If the elements of `multiples` are not all greater than 0.
- ValueError: If the length of `multiples` are smaller than the length of dimension in `input`.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> input = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
- >>> multiples = (2, 3)
- >>> output = ops.tile(input, multiples)
- >>> print(output)
- [[1. 2. 1. 2. 1. 2.]
- [3. 4. 3. 4. 3. 4.]
- [1. 2. 1. 2. 1. 2.]
- [3. 4. 3. 4. 3. 4.]]
- >>> multiples = (2, 3, 2)
- >>> output = ops.tile(input, multiples)
- >>> print(output)
- [[[1. 2. 1. 2.]
- [3. 4. 3. 4.]
- [1. 2. 1. 2.]
- [3. 4. 3. 4.]
- [1. 2. 1. 2.]
- [3. 4. 3. 4.]]
- [[1. 2. 1. 2.]
- [3. 4. 3. 4.]
- [1. 2. 1. 2.]
- [3. 4. 3. 4.]
- [1. 2. 1. 2.]
- [3. 4. 3. 4.]]]
- """
- tile_op = _get_cache_prim(P.Tile)()
- return tile_op(input, multiples)
-
-
- def range(start, end, step):
- r"""
- Creates a sequence of numbers that begins at `start` and extends by increments of
- `limit` up to but not including `end`.
-
- The types of all 3 inputs must be the same. The type of the resulting tensor is
- the same as the type of the inputs.
-
- Args:
- start (Tensor): A scalar Tensor. The first number in the sequence. Must have
- type: int32 ,int64, float32 or float64.
- end (Tensor): A scalar Tensor. Upper limit of the sequence, exclusive. Must
- have type: int32 ,int64, float32 or float64.
- step (Tensor): A scalar Tensor. Number that increments `start`. Must have
- type: int32 ,int64, float32 or float64.
-
- Returns:
- A 1-D Tensor, with the same type as the inputs.
-
- Raises:
- TypeError: If `start`, `end` or `step` is not scalar Tensor.
- TypeError: If datatype of `start`, `end` or `step` is not same.
- TypeError: If datatype of `start`, `end` or `step` is not supported.
- ValueError: If `step` = 0.
- ValueError: If `start` >= `end` when `step` > 0.
- ValueError: If `start` <= `end` when `step` < 0.
-
- Supported Platforms:
- ``GPU`` ``CPU``
-
- Examples:
- >>> from mindspore import Tensor, ops
- >>> from mindspore import dtype as mstype
- >>> start = Tensor(0, mstype.int32)
- >>> end = Tensor(10, mstype.int32)
- >>> step = Tensor(4, mstype.int32)
- >>> output = ops.range(start, end, step)
- >>> print(output)
- [0 4 8]
- """
- return range_(start, end, step)
-
-
  ##############################
  # Tensor Operation Functions.
  ##############################
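The long-form `tile` and `range` wrappers are dropped because `tile` now comes from `mindspore.ops.operations.manually_defined` and `range` from `mindspore.ops.auto_generate` (see the import hunk). The removed docstring example should still hold for the surviving function:

    from mindspore import Tensor, ops
    from mindspore import dtype as mstype

    start = Tensor(0, mstype.int32)
    end = Tensor(10, mstype.int32)
    step = Tensor(4, mstype.int32)
    print(ops.range(start, end, step))  # [0 4 8]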
@@ -1246,15 +1066,11 @@ def unique(input):
  >>> print(idx)
  [0 1 2 1]
  """
-
- unique_op = _get_cache_prim(P.Unique)()
- reshape_op = _get_cache_prim(P.Reshape)()
-
  shape_x = input.shape
  length_x = get_x_shape(shape_x)
- input = reshape_op(input, length_x)
- y, idx = unique_op(input)
- idx = reshape_op(idx, shape_x)
+ input = reshape_(input, length_x)
+ y, idx = unique_(input)
+ idx = reshape_(idx, shape_x)
  return y, idx


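`unique` now calls the new module-level `unique_` and `reshape_` singletons instead of cached primitives; the contract is unchanged, as in the docstring fragment above (sketch):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.array([1, 2, 5, 2]), ms.int32)
    y, idx = ops.unique(x)  # y: de-duplicated values; idx: position of each input element in y
    print(y)    # [1 2 5]
    print(idx)  # [0 1 2 1]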
@@ -1381,7 +1197,7 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):

  Returns:
  Tensor containing the indices from the innermost dimension of `sorted_sequence` such that,
- if insert the corresponding value in the `values` tensor, the order of `sorted_sequence` would be preserved,
+ if insert the corresponding value in the `values` Tensor, the order of `sorted_sequence` would be preserved,
  whose datatype is int32 if out_int32 is ``True`` , otherwise int64, and shape is the same as the shape of
  `values`.

@@ -1457,7 +1273,7 @@ def size(input_x):

  Args:
  input_x (Tensor): Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
- `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
+ `number <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_.

  Returns:
  int. A scalar representing the elements' size of `input_x`, tensor is the number of elements
@@ -1538,76 +1354,6 @@ def dyn_shape(input_x):
  return tensor_shape_(input_x)


- def rank(input_x):
- """
- Returns the rank of a tensor.
-
- Returns a 0-D int32 Tensor representing the rank of input; the rank of a tensor
- is the number of indices required to uniquely select each element of the tensor.
-
- Args:
- input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is Number.
-
- Returns:
- Tensor. 0-D int32 Tensor representing the rank of input, i.e., :math:`R`. The data type is an int.
-
- Raises:
- TypeError: If `input_x` is not a Tensor.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
- >>> output = ops.rank(input_tensor)
- >>> print(output)
- 2
- >>> print(type(output))
- <class 'int'>
- """
- return rank_(input_x)
-
-
- def reshape(input, shape):
- """
- Rearranges the input Tensor based on the given shape.
-
- The 'shape' can only have one -1 at most, in which case it's inferred from the remaining dimensions and
- the number of elements in the input.
-
- Args:
- input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
- shape (Union[tuple[int], Tensor[int]]): Constructed by multiple
- integers, i.e., :math:`(y_1, y_2, ..., y_S)`. Only constant value is allowed.
-
- Returns:
- Tensor, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
-
- Raises:
- ValueError: Given a shape tuple, if it has several -1; or if the product
- of its elements is less than or equal to 0 or cannot be divided by the product
- of the input tensor shape; or if it does not match the input's array size.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
- >>> output = ops.reshape(input, (3, 2))
- >>> print(output)
- [[-0.1 0.3]
- [ 3.6 0.4]
- [ 0.5 -3.2]]
- """
- return reshape_(input, shape)
-
-
  def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
  r"""
  Reverses variable length slices.
@@ -1672,7 +1418,7 @@ def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
  [[4. 3. 2. 1.]
  [8. 7. 6. 5.]]
  """
- return P.ReverseSequence(seq_dim=seq_dim, batch_dim=batch_dim)(x, seq_lengths)
+ return _get_cache_prim(P.ReverseSequence)(seq_dim=seq_dim, batch_dim=batch_dim)(x, seq_lengths)


  def flatten(input, order='C', *, start_dim=1, end_dim=-1):
@@ -1696,7 +1442,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
  Raises:
  TypeError: If `input` is not a Tensor.
  TypeError: If `order` is not string type.
- ValueError: If `order` is string type, but not 'C' or 'F'.
+ ValueError: If `order` is string type, but not ``'C'`` or ``'F'``.
  TypeError: If `start_dim` or `end_dim` is not int.
  ValueError: If `start_dim` is greater than `end_dim` after canonicalized.
  ValueError: If `start_dim` or `end_dim` is not in range of [-input.dim, input.dim-1].
@@ -1741,7 +1487,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
  return reshape_(input, (-1,))
  perm = ops.make_range(0, x_rank)
  new_order = ops.tuple_reversed(perm)
- input = _get_cache_prim(P.Transpose)()(input, new_order)
+ input = transpose_(input, new_order)

  # Handle the default case.
  x_shape = shape_(input)
@@ -1749,7 +1495,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
  if start_dim == 1 and end_dim == -1:
  if x_rank in (0, 1):
  return reshape_(input, (-1,))
- return _get_cache_prim(P.Flatten)()(input)
+ return flatten_(input)

  # Check axis.
  start_dim = canonicalize_axis(start_dim, x_rank)
@@ -1936,176 +1682,6 @@ def select(cond, x, y):
  return tensor_select_(cond, input_x, input_y)


- def strided_slice(input_x,
- begin,
- end,
- strides,
- begin_mask=0,
- end_mask=0,
- ellipsis_mask=0,
- new_axis_mask=0,
- shrink_axis_mask=0):
- r"""
- Extracts a strided slice of a Tensor based on `begin/end` index and `strides`.
-
- This operation extracts a fragment of size (end-begin)/strides from the given 'input_tensor'.
- Starting from the beginning position, the fragment continues adding strides to the index until
- all dimensions are not less than the ending position.
-
- Note:
- - `begin` , `end` and `strides` must have the same shape.
- - `begin` , `end` and `strides` are all 1-D Tensor, and their shape size
- must not greater than the dim of `input_x`.
-
- During the slicing process, the fragment (end-begin)/strides are extracted from each dimension.
-
- Example: For Tensor `input_x` with shape :math:`(5, 6, 7)`,
- set `begin`, `end` and `strides` to (1, 3, 2), (3, 5, 6),
- (1, 1, 2) respectively, then elements from index 1 to 3 are extrected for dim 0, index 3 to 5
- are extrected for dim 1 and index 2 to 6 with a `stirded` of 2 are extrected for dim 2, this
- process is equivalent to a pythonic slice `input_x[1:3, 3:5, 2:6:2]`.
-
- If the length of `begin` 、 `end` and `strides` is smaller than the dim of `input_x`,
- then all elements are extracted from the missing dims, it behaves like all the
- missing dims are filled with zeros, size of that missing dim and ones.
-
- Example: For Tensor `input_x` with shape :math:`(5, 6, 7)`,
- set `begin`, `end` and `strides` to (1, 3),
- (3, 5), (1, 1) respectively, then elements from index 1 to 3 are extrected
- for dim 0, index 3 to 5 are extrected for dim 1 and index 3 to 5 are extrected
- for dim 2, this process is equivalent to a pythonic slice `input_x[1:3, 3:5, 0:7]`.
-
- Here's how a mask works:
- For each specific mask, it will be converted to a binary representation internally, and then
- reverse the result to start the calculation. For Tensor `input_x` with
- shape :math:`(5, 6, 7)`. Given mask value of 3 which
- can be represented as 0b011. Reverse that we get 0b110, which implies the first and second dim of the
- original Tensor will be effected by this mask. See examples below, for simplicity all mask mentioned
- below are all in their reverted binary form:
-
- - `begin_mask` and `end_mask`
-
- If the ith bit of `begin_mask` is 1, `begin[i]` is ignored and the fullest
- possible range in that dimension is used instead. `end_mask` is analogous,
- except with the end range. For Tensor `input_x` with shape :math:`(5, 6, 7, 8)`, if `begin_mask`
- is 0b110, `end_mask` is 0b011, the slice `input_x[0:3, 0:6, 2:7:2]` is produced.
-
- - `ellipsis_mask`
-
- If the ith bit of `ellipsis_mask` is 1, as many unspecified dimensions as needed
- will be inserted between other dimensions. Only one non-zero bit is allowed
- in `ellipsis_mask`. For Tensor `input_x` with shape :math:`(5, 6, 7, 8)`, `input_x[2:,...,:6]`
- is equivalent to `input_x[2:5,:,:,0:6]` , `input_x[2:,...]` is equivalent
- to `input_x[2:5,:,:,:]`.
-
- - `new_axis_mask`
-
- If the ith bit of `new_axis_mask` is 1, `begin`, `end` and `strides` are
- ignored and a new length 1 dimension is added at the specified position
- in the output Tensor. For Tensor `input_x` with shape :math:`(5, 6, 7)`, if `new_axis_mask`
- is 0b110, a new dim is added to the second dim, which will produce
- a Tensor with shape :math:`(5, 1, 6, 7)`.
-
- - `shrink_axis_mask`
-
- If the ith bit of `shrink_axis_mask` is 1, `begin`, `end` and `strides`
- are ignored and dimension i will be shrunk to 0.
- For Tensor `input_x` with shape :math:`(5, 6, 7)`,
- if `shrink_axis_mask` is 0b010, it is equivalent to slice `x[:, 5, :]`
- and results in an output shape of :math:`(5, 7)`.
-
- Note:
- `new_axis_mask` and `shrink_axis_mask` are not recommended to
- use at the same time, it might incur unexpected result.
-
- Args:
- input_x (Tensor): The input Tensor to be extracted from.
- begin (tuple[int]): A tuple which represents the location where to start.
- end (tuple[int]): A tuple or which represents the maximum location where to end.
- strides (tuple[int]): A tuple which represents the strides is continuously added
- before reaching the maximum location. Only int is allowed, it can be negative
- which results in reversed slicing.
- begin_mask (int, optional): Starting index of the slice. Default: ``0`` .
- end_mask (int, optional): Ending index of the slice. Default: ``0`` .
- ellipsis_mask (int, optional): An int mask, ignore slicing operation when set to 1. Default: ``0`` .
- new_axis_mask (int, optional): An int mask for adding new dims. Default: ``0`` .
- shrink_axis_mask (int, optional): An int mask for shrinking dims. Default: ``0`` .
-
- Returns:
- Tensor, return the extracts a strided slice of a Tensor based on `begin/end` index and `strides`.
-
- Raises:
- TypeError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or
- `shrink_axis_mask` is not an int.
- TypeError: If `begin`, `end` or `strides` is not tuple[int].
- ValueError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or
- `shrink_axis_mask` is less than 0.
- ValueError: If `begin`, `end` and `strides` have different shapes.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> from mindspore import Tensor, ops
- >>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
- ... [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
- >>> output = ops.strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1))
- >>> # Take this " output = strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1)) " as an example,
- >>> # start = [1, 0, 2] , end = [3, 1, 3], strides = [1, 1, 1], Find a segment of (start, end),
- >>> # note that end is an open interval
- >>> # To facilitate understanding, this operator can be divided into three steps:
- >>> # Step 1: Calculation of the first dimension:
- >>> # start = 1, end = 3, strides = 1, So can take 1st, 2nd rows, and then gets the final output at this time.
- >>> # output_1th =
- >>> # [
- >>> # [
- >>> # [3,3,3]
- >>> # [4,4,4]
- >>> # ]
- >>> # [
- >>> # [5,5,5]
- >>> # [6,6,6]
- >>> # ]
- >>> # ]
- >>> # Step 2: Calculation of the second dimension
- >>> # 2nd dimension, start = 0, end = 1, strides = 1. So only 0th rows
- >>> # can be taken, and the output at this time.
- >>> # output_2nd =
- >>> # [
- >>> # [
- >>> # [3,3,3]
- >>> # ]
- >>> # [
- >>> # [5,5,5]
- >>> # ]
- >>> # ]
- >>> # Step 3: Calculation of the third dimension
- >>> # 3nd dimension,start = 2, end = 3, strides = 1, So can take 2th cols,
- >>> # and you get the final output at this time.
- >>> # output_3ed =
- >>> # [
- >>> # [
- >>> # [3]
- >>> # ]
- >>> # [
- >>> # [5]
- >>> # ]
- >>> # ]
- >>> # The final output after finishing is:
- >>> print(output)
- [[[3.]]
- [[5.]]]
- >>> # another example like :
- >>> output = strided_slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
- >>> print(output)
- [[[3. 3. 3.]]]
- """
- strided_slice_ = _get_cache_prim(P.StridedSlice)(
- begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask)
- return strided_slice_(input_x, begin, end, strides)
-
-
  def slice(input_x, begin, size):
  r"""
  Slices a tensor in the specified shape.
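`strided_slice` moves to `mindspore.ops.auto_generate`, taking the long mask walkthrough above with it. The removed example should remain valid against the generated function (sketch):

    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
                [[5, 5, 5], [6, 6, 6]]], ms.float32)
    out = ops.strided_slice(x, (1, 0, 2), (3, 1, 3), (1, 1, 1))
    print(out)  # [[[3.]] [[5.]]], i.e. the pythonic slice x[1:3, 0:1, 2:3]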
@@ -2160,20 +1736,6 @@ def slice(input_x, begin, size):
  return tensor_slice(input_x, begin, size)


- def concat(tensors, axis=0):
- """
- Alias for :func:`mindspore.ops.cat()`.
-
- Tutorial Examples:
- - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/r2.2/beginner/tensor.html#tensor-operation>`_
- - `Vision Transformer Image Classification - Building ViT as a whole
- <https://mindspore.cn/tutorials/application/en/r2.2/cv/vit.html#building-vit-as-a-whole>`_
- - `Sentiment Classification Implemented by RNN - Dense
- <https://mindspore.cn/tutorials/application/en/r2.2/nlp/sentiment_analysis.html#dense>`_
- """
- return cat(tensors, axis)
-
-
  def stack(tensors, axis=0):
  r"""
  Stacks a list of tensors in specified axis.
@@ -2284,45 +1846,6 @@ def unbind(input, dim=0):
  return _unstack(input)


- def expand_dims(input_x, axis):
- """
- Adds an additional dimension to `input_x` at the given axis, the dimension
- of `input_x` should be greater than or equal to 1.
-
- Note:
- If the specified axis is a negative number, the index is counted
- backward from the end and starts at 1.
-
- Args:
- input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
- axis (int): Specifies the dimension index at which to expand
- the shape of `input_x`. The value of axis must be in the range
- `[-input_x.ndim-1, input_x.ndim]`. Only constant value is allowed.
-
- Returns:
- Tensor, the shape of tensor is :math:`(1, x_1, x_2, ..., x_R)` if the
- value of `axis` is 0. It has the same data type as `input_x`.
-
- Raises:
- TypeError: If `axis` is not an int.
- ValueError: If `axis` is not in the valid range :math:`[-a.ndim-1, a.ndim]`.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
- >>> output = ops.expand_dims(input_tensor, 0)
- >>> print(output)
- [[[2. 2.]
- [2. 2.]]]
- """
- return expand_dims_(input_x, axis)
-
-
  def unsqueeze(input, dim):
  """
  Adds an additional dimension to `input` at the given dim.
@@ -2354,7 +1877,7 @@ def unsqueeze(input, dim):
  [[[2. 2.]
  [2. 2.]]]
  """
- return expand_dims_(input, dim)
+ return expand_dims(input, dim)


  def squeeze(input, axis=None):
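`expand_dims` is likewise served from `mindspore.ops.auto_generate` now, so `unsqueeze` forwards to the function instead of a cached `P.ExpandDims` instance (sketch):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    t = Tensor(np.array([[2, 2], [2, 2]]), ms.float32)
    print(ops.unsqueeze(t, 0).shape)    # (1, 2, 2)
    print(ops.expand_dims(t, 0).shape)  # (1, 2, 2), same generated kernel underneath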
@@ -2411,57 +1934,6 @@ def squeeze(input, axis=None):
  return squeeze_(input)


- def transpose(input, input_perm):
- """
- Permutes the dimensions of the input tensor according to input permutation.
-
- For a 1-D array this has no effect, as a transposed vector is simply the same vector.
- To convert a 1-D array into a 2D column vector please refer the class: mindspore.ops.ExpandDims.
- For a 2-D array, this is a standard matrix transpose. For an n-D array, if axes are given,
- their order indicates how the axes are permuted (see Examples).
- If axes are not provided and a.shape is :math:`(i[0], i[1], ... i[n-2], i[n-1])`,
- then a.transpose().shape is :math:`(i[n-1], i[n-2], ... i[1], i[0])`.
-
- Note:
- On GPU and CPU, if the value of `input_perm` is negative, its actual value is `input_perm[i] + rank(input)`.
- Negative value of `input_perm` is not supported on Ascend.
-
- Args:
- input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
- input_perm (tuple[int]): The permutation to be converted. The elements in `input_perm` are composed of
- the indexes of each dimension of `input`. The length of `input_perm` and the shape of `input` must be
- the same. Only constant value is allowed. Must be in the range [-rank(input), rank(input)).
-
- Returns:
- Tensor, the type of output tensor is the same as `input` and the shape of output tensor is decided by the
- shape of `input` and the value of `input_perm`.
-
- Raises:
- TypeError: If `input_perm` is not a tuple.
- ValueError: If length of shape of `input` is not equal to length of shape of `input_perm`.
- ValueError: If the same element exists in `input_perm`.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> input = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
- >>> input_perm = (0, 2, 1)
- >>> output = ops.transpose(input, input_perm)
- >>> print(output)
- [[[ 1. 4.]
- [ 2. 5.]
- [ 3. 6.]]
- [[ 7. 10.]
- [ 8. 11.]
- [ 9. 12.]]]
- """
- return transpose_(input, input_perm)
-
-
  def scatter_mul(input_x, indices, updates):
  r"""
  Using given values to update tensor value through the mul operation, along with the input indices.
@@ -2792,111 +2264,6 @@ def scatter_div(input_x, indices, updates):
  return scatter_div_(input_x, indices, updates)


- def scatter_nd(indices, updates, shape):
- r"""
- Scatters a tensor into a new tensor depending on the specified indices.
-
- Creates an empty tensor with the given `shape`, and set values by scattering the update tensor
- depending on indices. The empty tensor has rank :math:`P` and `indices` has rank :math:`Q`.
-
- The `shape` is :math:`(s_0, s_1, ..., s_{P-1})`, where :math:`P \ge 1`.
-
- `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)`, where :math:`Q \ge 2` and :math:`N \le P`.
-
- The last dimension of `indices` (with length :math:`N` ) indicates slices along the :math:`N` th dimension of the
- empty tensor.
-
- `updates` is a tensor of rank :math:`Q-1+P-N`, and
- its shape is :math:`(i_0, i_1, ..., i_{Q-2}, s_N, s_{N+1}, ..., s_{P-1})`.
-
- If `indices` contains duplicates, the duplicate `updates` are summed.
-
- The following figure shows the calculation process of inserting two new value matrices into the first dimension
- with rank-3:
-
- .. image:: ScatterNd.png
-
- Args:
- indices (Tensor): Define the index of scattering in the new tensor with int32 or int64 data type.
- The rank of `indices` must be at least 2 and `indices.shape[-1] <= len(shape)`.
- updates (Tensor): Define the source Tensor to be updated.
- It has shape `indices.shape[:-1] + shape[indices.shape[-1]:]`.
- shape (tuple[int]): Define the shape of the output tensor, has the same data type as indices.
- `shape` can not be empty, and the elements in `shape` must be greater than or equal to 1.
-
- Returns:
- Tensor, the new tensor, has the same type as `update` and the same shape as `shape`.
-
- Raises:
- TypeError: If `shape` is not a tuple.
- ValueError: If any element of `shape` is less than 1.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
- >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2],
- ... [3, 3, 3, 3], [4, 4, 4, 4]],
- ... [[1, 1, 1, 1], [2, 2, 2, 2],
- ... [3, 3, 3, 3], [4, 4, 4, 4]]]), mindspore.float32)
- >>> shape = (4, 4, 4)
- >>> output = ops.scatter_nd(indices, updates, shape)
- >>> print(output)
- [[[1. 1. 1. 1.]
- [2. 2. 2. 2.]
- [3. 3. 3. 3.]
- [4. 4. 4. 4.]]
- [[0. 0. 0. 0.]
- [0. 0. 0. 0.]
- [0. 0. 0. 0.]
- [0. 0. 0. 0.]]
- [[1. 1. 1. 1.]
- [2. 2. 2. 2.]
- [3. 3. 3. 3.]
- [4. 4. 4. 4.]]
- [[0. 0. 0. 0.]
- [0. 0. 0. 0.]
- [0. 0. 0. 0.]
- [0. 0. 0. 0.]]]
- >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
- >>> updates = Tensor(np.array([3.2, 1.1]), mindspore.float32)
- >>> shape = (3, 3)
- >>> output = ops.scatter_nd(indices, updates, shape)
- >>> # In order to facilitate understanding, explain the operator pseudo-operation process step by step:
- >>> # Step 1: Generate an empty Tensor of the specified shape according to the shape
- >>> # [
- >>> # [0. 0. 0.]
- >>> # [0. 0. 0.]
- >>> # [0. 0. 0.]
- >>> # ]
- >>> # Step 2: Modify the data at the specified location according to the indicators
- >>> # 0th row of indices is [0, 1], 0th row of updates is 3.2.
- >>> # means that the empty tensor in the 0th row and 1st col set to 3.2
- >>> # [
- >>> # [0. 3.2. 0.]
- >>> # [0. 0. 0.]
- >>> # [0. 0. 0.]
- >>> # ]
- >>> # 1th row of indices is [1, 1], 1th row of updates is 1.1.
- >>> # means that the empty tensor in the 1th row and 1st col set to 1.1
- >>> # [
- >>> # [0. 3.2. 0.]
- >>> # [0. 1.1 0.]
- >>> # [0. 0. 0.]
- >>> # ]
- >>> # The final result is as follows:
- >>> print(output)
- [[0. 3.2 0.]
- [0. 1.1 0.]
- [0. 0. 0.]]
- """
- return scatter_nd_(indices, updates, shape)
-
-
  def scatter_update(input_x, indices, updates):
  r"""
  Updates tensor values by using input indices and value.
@@ -2946,8 +2313,7 @@ def scatter_update(input_x, indices, updates):
2946
2313
  [[2. 1.2 1.]
2947
2314
  [3. 1.2 1.]]
2948
2315
  """
2949
- scatter_update_inner = _get_cache_prim(P.ScatterUpdate)()
2950
- return scatter_update_inner(input_x, indices, updates)
2316
+ return scatter_update_(input_x, indices, updates)
2951
2317
 
2952
2318
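A recurring refactor in this file swaps per-call `_get_cache_prim(P.X)()` lookups for a module-level primitive instance such as `scatter_update_` above. A minimal sketch of the pattern, assuming the usual `mindspore.ops.operations` import; the wrapper name is illustrative:

    from mindspore.ops import operations as P

    # Built once at import time; every call reuses the same primitive object
    # instead of constructing it (or looking it up in a cache) per call.
    scatter_update_ = P.ScatterUpdate()

    def my_scatter_update(input_x, indices, updates):
        # Thin functional wrapper over the shared primitive instance.
        return scatter_update_(input_x, indices, updates)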
 
2953
2319
  def scatter_nd_add(input_x, indices, updates, use_locking=False):
@@ -3414,8 +2780,8 @@ def sort(input_x, axis=-1, descending=False):
3414
2780
  are sorted in descending order, or else sorted in ascending order. Default: ``False`` .
3415
2781
 
3416
2782
  .. warning::
3417
- Currently, the data types of Float16, UInt8, Int8, Int16, Int32, Int64 are well supported.
3418
- If use Float32, it may cause loss of accuracy.
2783
+ Currently, the data types of float16, uint8, int8, int16, int32, int64 are well supported.
2784
+ Using float32 may cause loss of accuracy.
3419
2785
 
3420
2786
  Returns:
3421
2787
 
@@ -3472,126 +2838,17 @@ def argsort(input, axis=-1, descending=False):
3472
2838
  Examples:
3473
2839
  >>> import mindspore
3474
2840
  >>> import numpy as np
3475
- >>> from mindspore import Tensor, ops
3476
- >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
3477
- >>> sort = ops.argsort(x)
3478
- >>> print(sort)
3479
- [[2 1 0]
3480
- [2 0 1]
3481
- [0 1 2]]
3482
- """
3483
- _sort = _get_cache_prim(P.Sort)(axis, descending)
3484
- _, arg_sort = _sort(input)
3485
- return arg_sort
3486
-
3487
-
3488
- def gather(input_params, input_indices, axis, batch_dims=0):
3489
- r"""
3490
- Returns the slice of the input tensor corresponding to the elements of `input_indices` on the specified `axis`.
3491
-
3492
- The following figure shows the calculation process of Gather commonly:
3493
-
3494
- .. image:: Gather.png
3495
-
3496
- where params represents the input `input_params`, and indices represents the index to be sliced `input_indices`.
3497
-
3498
- .. note::
3499
- 1. The value of input_indices must be in the range of `[0, input_param.shape[axis])`.
3500
- On CPU and GPU, an error is raised if an out of bound indice is found. On Ascend, the results may be
3501
- undefined.
3502
-
3503
- 2. The data type of input_params cannot be
3504
- `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ on Ascend
3505
- platform currently.
3506
-
3507
- Args:
3508
- input_params (Tensor): The original Tensor. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
3509
- input_indices (Tensor): Index tensor to be sliced, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
3510
- Specifies the indices of elements of the original Tensor. The data type can be int32 or int64.
3511
- axis (Union(int, Tensor[int])): Specifies the dimension index to gather indices.
3512
- It must be greater than or equal to `batch_dims`.
3513
- When `axis` is a Tensor, the size must be 1.
3514
- batch_dims (int): Specifies the number of batch dimensions. It must be less than or euqal to the rank
3515
- of `input_indices`. Default: ``0`` .
3516
-
3517
- Returns:
3518
- Tensor, the shape of tensor is
3519
- :math:`input\_params.shape[:axis] + input\_indices.shape[batch\_dims:] + input\_params.shape[axis + 1:]`.
3520
-
3521
- Raises:
3522
- TypeError: If `axis` is not an int or Tensor.
3523
- ValueError: If `axis` is a Tensor and its size is not 1.
3524
- TypeError: If `input_params` is not a tensor.
3525
- TypeError: If `input_indices` is not a tensor of type int.
3526
- RuntimeError: If `input_indices` is out of range `[0, input_param.shape[axis])` on CPU or GPU.
3527
-
3528
- Supported Platforms:
3529
- ``Ascend`` ``GPU`` ``CPU``
3530
-
3531
- Examples:
3532
- >>> import mindspore
3533
- >>> import numpy as np
3534
- >>> from mindspore import Tensor, ops
3535
- >>> # case1: input_indices is a Tensor with shape (5, ).
3536
- >>> input_params = Tensor(np.array([1, 2, 3, 4, 5, 6, 7]), mindspore.float32)
3537
- >>> input_indices = Tensor(np.array([0, 2, 4, 2, 6]), mindspore.int32)
3538
- >>> axis = 0
3539
- >>> output = ops.gather(input_params, input_indices, axis)
3540
- >>> print(output)
3541
- [1. 3. 5. 3. 7.]
3542
- >>> # case2: input_indices is a Tensor with shape (2, 2). When the input_params has one dimension,
3543
- >>> # the output shape is equal to the input_indices shape.
3544
- >>> input_indices = Tensor(np.array([[0, 2], [2, 6]]), mindspore.int32)
3545
- >>> axis = 0
3546
- >>> output = ops.gather(input_params, input_indices, axis)
3547
- >>> print(output)
3548
- [[1. 3.]
3549
- [3. 7.]]
3550
- >>> # case3: input_indices is a Tensor with shape (2, ) and
3551
- >>> # input_params is a Tensor with shape (3, 4) and axis is 0.
3552
- >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
3553
- >>> input_indices = Tensor(np.array([0, 2]), mindspore.int32)
3554
- >>> axis = 0
3555
- >>> output = ops.gather(input_params, input_indices, axis)
3556
- >>> print(output)
3557
- [[ 1. 2. 3. 4.]
3558
- [ 9. 10. 11. 12.]]
3559
- >>> # case4: input_indices is a Tensor with shape (2, ) and
3560
- >>> # input_params is a Tensor with shape (3, 4) and axis is 1, batch_dims is 1.
3561
- >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
3562
- >>> input_indices = Tensor(np.array([0, 2, 1]), mindspore.int32)
3563
- >>> axis = 1
3564
- >>> batch_dims = 1
3565
- >>> output = ops.gather(input_params, input_indices, axis, batch_dims)
3566
- >>> print(output)
3567
- [ 1. 7. 10.]
3568
- """
3569
- _gather = _get_cache_prim(P.Gather)(batch_dims)
3570
- return _gather(input_params, input_indices, axis)
3571
-
3572
-
3573
- def gather_d(x, dim, index):
3574
- """
3575
- Gathers elements along an axis specified by dim.
3576
-
3577
- Refer to :func:`mindspore.ops.gather_elements` for more detail.
3578
-
3579
- Supported Platforms:
3580
- ``Ascend`` ``GPU`` ``CPU``
3581
-
3582
- Examples:
3583
- >>> import mindspore
3584
- >>> import numpy as np
3585
- >>> from mindspore import Tensor, ops
3586
- >>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
3587
- >>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
3588
- >>> dim = 1
3589
- >>> output = ops.gather_d(x, dim, index)
3590
- >>> print(output)
3591
- [[1 1]
3592
- [4 3]]
2841
+ >>> from mindspore import Tensor, ops
2842
+ >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
2843
+ >>> sort = ops.argsort(x)
2844
+ >>> print(sort)
2845
+ [[2 1 0]
2846
+ [2 0 1]
2847
+ [0 1 2]]
3593
2848
  """
3594
- return gather_d_(x, dim, index)
2849
+ _sort = _get_cache_prim(P.Sort)(axis, descending)
2850
+ _, arg_sort = _sort(input)
2851
+ return arg_sort
3595
2852
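The restored `argsort` body instantiates `P.Sort` through the prim cache and keeps only the index output. At the functional level the same result can be had from `ops.sort` by discarding the sorted values; a small sketch reusing values from the example above:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([[8, 2, 1], [5, 9, 3]]), mindspore.float16)
    values, indices = ops.sort(x)   # ascending sort along the last axis
    print(indices)                  # identical to ops.argsort(x)
    # [[2 1 0]
    #  [2 0 1]]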
 
3596
2853
 
3597
2854
  def gather_elements(input, dim, index):
@@ -3608,26 +2865,29 @@ def gather_elements(input, dim, index):
3608
2865
 
3609
2866
  output[i][j][k] = x[i][j][index[i][j][k]] # if dim == 2
3610
2867
 
3611
- `input` and `index` have the same length of dimensions, and all dimensions except `dim` have the same size.
3612
- If `dim` = i, `input` is an n-D tensor with shape :math:`(z_0, z_1, ..., z_i, ..., z_{n-1})`,
3613
- the `index` must be an n-D tensor with shape :math:`(z_0, z_1, ..., y, ..., z_{n-1})`
3614
- where `y`>=1 and the output will have the same shape with `index`.
2868
+ `input` and `index` have the same number of dimensions, and `index.shape[axis] <= input.shape[axis]`
2869
+ where `axis` ranges over all dimensions of `input` except `dim`.
2870
+
2871
+ .. warning::
2872
+ On Ascend, the behavior is unpredictable in the following cases:
2873
+
2874
+ - the value of `index` is not in the range `[-input.shape[dim], input.shape[dim])` in forward;
2875
+ - the value of `index` is not in the range `[0, input.shape[dim])` in backward.
3615
2876
 
3616
2877
  Args:
3617
2878
  input (Tensor): The input tensor.
3618
- dim (int): The axis along which to index. It must be int32 or int64. The value range is [-input.ndim,
3619
- input.ndim).
2879
+ dim (int): The axis along which to index. It must be int32 or int64. The value range is `[-input.ndim,
2880
+ input.ndim)`.
3620
2881
  index (Tensor): The indices of elements to gather. It can be one of the following data types:
3621
- int32, int64. The value range of each index element is [-input.shape(dim), input.shape(dim)).
2882
+ int32, int64. The value range of each index element is `[-input.shape[dim], input.shape[dim])`.
3622
2883
 
3623
2884
  Returns:
3624
- Tensor, has the same shape as index tensor, the shape of tensor is :math:`(z_0, z_1, ..., y, ..., z_{n-1})`,
3625
- and has the same data type with `input`.
2885
+ Tensor, has the same shape as the `index` tensor and the same data type as `input`.
3626
2886
 
3627
2887
  Raises:
3628
2888
  TypeError: If dtype of `dim` or `index` is neither int32 nor int64.
3629
2889
  ValueError: If length of shape of `input` is not equal to length of shape of `index`.
3630
- ValueError: If the size of the dimension except `dim` is not equal between `input` and `index`.
2890
+ ValueError: If, for any dimension other than `dim`, the size of `input` is less than the size of `index`.
3631
2891
  ValueError: If the value of `dim` is not in the expected range.
3632
2892
 
3633
2893
  Supported Platforms:
@@ -3648,48 +2908,6 @@ def gather_elements(input, dim, index):
3648
2908
  return gather_d_(input, dim, index)
3649
2909
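The indexing rule above is easiest to verify with a hand-rolled loop. This NumPy sketch mirrors the `dim == 1` case of the formula; it is illustrative only, not the operator's implementation:

    import numpy as np

    x = np.array([[1, 2], [3, 4]])
    index = np.array([[0, 0], [1, 0]])
    out = np.empty_like(index)
    for i in range(index.shape[0]):
        for j in range(index.shape[1]):
            out[i][j] = x[i][index[i][j]]   # output[i][j] = x[i][index[i][j]] for dim == 1
    print(out)
    # [[1 1]
    #  [4 3]]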
 
3650
2910
 
3651
- def gather_nd(input_x, indices):
3652
- r"""
3653
- Gathers slices from a tensor by indices.
3654
-
3655
- Using given indices to gather slices from a tensor with a specified shape.
3656
-
3657
- `indices` is an K-dimensional integer tensor. Supposes it as a (K-1)-dimensional tensor and each element of it
3658
- defines a slice of `input_x`:
3659
-
3660
- .. math::
3661
- output[(i_0, ..., i_{K-2})] = input\_x[indices[(i_0, ..., i_{K-2})]]
3662
-
3663
- The last dimension of `indices` can not more than the rank of `input_x`:
3664
- :math:`indices.shape[-1] <= input\_x.rank`.
3665
-
3666
- Args:
3667
- input_x (Tensor): The target tensor to gather values.
3668
- indices (Tensor): The index tensor, with int32 or int64 data type.
3669
-
3670
- Returns:
3671
- Tensor, has the same type as `input_x` and the shape is
3672
- :math:`indices\_shape[:-1] + input\_x\_shape[indices\_shape[-1]:]`.
3673
-
3674
- Raises:
3675
- ValueError: If length of shape of `input_x` is less than the last dimension of `indices`.
3676
-
3677
- Supported Platforms:
3678
- ``Ascend`` ``GPU`` ``CPU``
3679
-
3680
- Examples:
3681
- >>> import mindspore
3682
- >>> import numpy as np
3683
- >>> from mindspore import Tensor, ops
3684
- >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
3685
- >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
3686
- >>> output = ops.gather_nd(input_x, indices)
3687
- >>> print(output)
3688
- [-0.1 0.5]
3689
- """
3690
- return gather_nd_(input_x, indices)
3691
-
3692
-
3693
2911
  def tensor_scatter_add(input_x, indices, updates):
3694
2912
  r"""
3695
2913
  Creates a new tensor by adding the values from the positions in `input_x` indicated by
@@ -3700,7 +2918,7 @@ def tensor_scatter_add(input_x, indices, updates):
3700
2918
 
3701
2919
  The last axis of `indices` is the depth of each index vectors. For each index vector,
3702
2920
  there must be a corresponding value in `updates`. The shape of `updates` should be
3703
- equal to the shape of `input_x[indices]`. For more details, see use cases.
2921
+ equal to the shape of `input_x[indices]`. For more details, see Examples.
3704
2922
 
3705
2923
  .. math::
3706
2924
  output\left [indices \right ] = input\_x + update
@@ -3758,7 +2976,7 @@ def tensor_scatter_sub(input_x, indices, updates):
3758
2976
 
3759
2977
  The last axis of `indices` is the depth of each index vectors. For each index vector,
3760
2978
  there must be a corresponding value in `updates`. The shape of `updates` should be
3761
- equal to the shape of `input_x[indices]`. For more details, see use cases.
2979
+ equal to the shape of `input_x[indices]`. For more details, see Examples.
3762
2980
 
3763
2981
  .. math::
3764
2982
  output[indices] = input\_x - update
@@ -3943,14 +3161,12 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
3943
3161
  nondeterministic.
3944
3162
  - On Ascend, the reduction only support set to "none" for now.
3945
3163
  - On Ascend, the data type of `input_x` must be float16 or float32.
3164
+ - This is an experimental API that is subject to change or deletion.
3946
3165
 
3947
3166
  Note:
3948
3167
  If some values of the `indices` exceed the upper or lower bounds of the index of `input_x`, instead of raising
3949
3168
  an index error, the corresponding `updates` will not be updated to `input_x`.
3950
3169
 
3951
- .. warning::
3952
- This is an experimental API that is subject to change or deletion.
3953
-
3954
3170
  Args:
3955
3171
  input_x (Tensor): The target tensor. The rank must be at least 1.
3956
3172
  indices (Tensor): The index of `input_x` to do scatter operation whose data type must be mindspore.int32 or
@@ -4074,7 +3290,7 @@ def _get_slice_scatter_const(x_shape, axis, start, end, step):
4074
3290
  start = start if start is not None else 0
4075
3291
  start = start if start >= 0 else start + x_rank
4076
3292
  end = end if end is not None else x_shape[axis]
4077
- end = end if end >= 0 else end + x_rank
3293
+ end = end if end >= 0 else end + x_shape[axis]
4078
3294
  end = end if end < x_shape[axis] else x_shape[axis]
4079
3295
  index = list(builtins.range(start, end, step))
4080
3296
  return x_rank, index, axis
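The one-line fix above matters whenever `end` is negative: a negative slice bound must be normalized by the size of the sliced dimension, not by the tensor rank. A worked sketch with hypothetical values:

    # Rank-2 tensor whose axis 1 has size 8; slice bound end = -1.
    x_shape = (2, 8)
    axis, end = 1, -1
    wrong = end + len(x_shape)     # 1: old behavior, end + rank
    right = end + x_shape[axis]    # 7: fixed behavior, end + dimension size
    print(wrong, right)            # 1 7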
@@ -4121,6 +3337,8 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
4121
3337
  [1. 0. 1. 0. 1. 0.]
4122
3338
  [1. 0. 1. 0. 1. 0.]]
4123
3339
  """
3340
+ _check_is_tensor("input", input, "slice_scatter")
3341
+ _check_is_tensor("src", src, "slice_scatter")
4124
3342
  input_shape = input.shape
4125
3343
  input_rank, index, axis = _get_slice_scatter_const(input_shape, axis, start, end, step)
4126
3344
 
@@ -4136,6 +3354,8 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
4136
3354
  for _ in builtins.range(input_rank - axis - 1):
4137
3355
  index_tensor = index_tensor.expand_dims(-1)
4138
3356
  index_tensor = index_tensor.broadcast_to(src.shape)
3357
+ if index_tensor.dtype not in mstype.int_type:
3358
+ index_tensor = index_tensor.astype(mstype.int64)
4139
3359
  return tensor_scatter_elements(input, axis=axis, indices=index_tensor, updates=src)
4140
3360
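For reference, a usage sketch of `ops.slice_scatter` consistent with the strided-index construction above; the values are illustrative:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    a = Tensor(np.zeros((2, 6)), mindspore.float32)
    src = Tensor(np.ones((2, 3)), mindspore.float32)
    # Write src into columns 0, 2 and 4 of a.
    out = ops.slice_scatter(a, src, axis=1, start=0, end=6, step=2)
    print(out)
    # [[1. 0. 1. 0. 1. 0.]
    #  [1. 0. 1. 0. 1. 0.]]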
 
4141
3361
 
@@ -4174,10 +3394,12 @@ def select_scatter(input, src, axis, index):
4174
3394
  [1. 1. 1.]
4175
3395
  [0. 0. 0.]]]
4176
3396
  """
3397
+ _check_is_tensor("input", input, "select_scatter")
3398
+ _check_is_tensor("src", src, "select_scatter")
4177
3399
  src = src.expand_dims(axis=axis)
4178
3400
  x_rank = input.ndim
4179
3401
  axis = axis if axis >= 0 else axis + x_rank
4180
- index = index if index >= 0 else index + x_rank
3402
+ index = index if index >= 0 else index + input.shape[axis]
4181
3403
  return slice_scatter(input, src, axis, start=index, end=index + 1)
4182
3404
 
4183
3405
 
@@ -4303,49 +3525,11 @@ def batch_to_space_nd(input_x, block_shape, crops):
4303
3525
  [3. 4.]]]]
4304
3526
  """
4305
3527
  if isinstance(block_shape, Tensor):
4306
- _batch_to_space_ndv2 = _get_cache_prim(P.BatchToSpaceNDV2)()
4307
- return _batch_to_space_ndv2(input_x, block_shape, crops)
3528
+ return batch_to_space_nd_v2_(input_x, block_shape, crops)
4308
3529
  _batch_to_space_nd = _get_cache_prim(P.BatchToSpaceND)(block_shape, crops)
4309
3530
  return _batch_to_space_nd(input_x)
4310
3531
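The branch above dispatches on the kind of `block_shape`: a Tensor cannot be baked into a primitive attribute, so the V2 primitive takes it as a runtime operand, while the static variant fixes it at construction time. A schematic sketch of the dispatch (the helper is illustrative; primitive names are those in the hunk):

    from mindspore import Tensor

    def pick_primitive(block_shape):
        # Dynamic data -> operand-style primitive; static Python value ->
        # attribute-style primitive constructed with block_shape baked in.
        if isinstance(block_shape, Tensor):
            return "BatchToSpaceNDV2"
        return "BatchToSpaceND"

    print(pick_primitive([2, 2]))          # BatchToSpaceND
    print(pick_primitive(Tensor([2, 2])))  # BatchToSpaceNDV2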
 
4311
3532
 
4312
- def nonzero(input):
4313
- """
4314
- Return a Tensor of the positions of all non-zero values.
4315
-
4316
- Args:
4317
- input (Tensor): The input Tensor, its rank should be greater than or eaqual to 1.
4318
-
4319
- Returns:
4320
- Tensor, a 2-D Tensor whose data type is int64, containing the positions of all non-zero values of the input.
4321
-
4322
- Raises:
4323
- TypeError: If `input` is not Tensor.
4324
- ValueError: If dim of `x` equals to 0.
4325
-
4326
- Supported Platforms:
4327
- ``Ascend`` ``GPU`` ``CPU``
4328
-
4329
- Examples:
4330
- >>> import mindspore
4331
- >>> import numpy as np
4332
- >>> from mindspore import Tensor
4333
- >>> import mindspore.ops as ops
4334
- >>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
4335
- >>> output = ops.nonzero(x)
4336
- >>> print(output)
4337
- [[0 0 0]
4338
- [0 1 0]]
4339
- >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
4340
- >>> output = ops.nonzero(x)
4341
- >>> print(output)
4342
- [[0]
4343
- [2]
4344
- [4]]
4345
- """
4346
- return nonzero_(input)
4347
-
4348
-
4349
3533
  def matrix_diag(x, k=0, num_rows=-1, num_cols=-1, padding_value=0, align="RIGHT_LEFT"):
4350
3534
  r"""
4351
3535
  Returns a Tensor with the contents in `x` as k[0]-th to k[1]-th diagonals of a matrix, with everything else padded
@@ -4605,18 +3789,19 @@ def meshgrid(*inputs, indexing='xy'):
4605
3789
 
4606
3790
  Keyword Args:
4607
3791
  indexing (str, optional): Cartesian ('xy', default) or
4608
- matrix ('ij') indexing of output. Valid options: xy' or 'ij'. In the 2-D case with
3792
+ matrix ('ij') indexing of output. Valid options: ``'xy'`` or ``'ij'``. In the 2-D case with
4609
3793
  inputs of length `M` and `N`, the outputs are of shape :math:`(N, M)`
4610
- for 'xy' indexing and :math:`(M, N)` for 'ij' indexing. In the 3-D
3794
+ for ``'xy'`` indexing and :math:`(M, N)` for ``'ij'`` indexing. In the 3-D
4611
3795
  case with inputs of length `M`, `N` and `P`, outputs are of shape
4612
- :math:`(N, M, P)` for 'xy' indexing and :math:`(M, N, P)` for 'ij' indexing. Default: ``'xy'`` .
3796
+ :math:`(N, M, P)` for ``'xy'`` indexing and :math:`(M, N, P)` for ``'ij'`` indexing.
3797
+ Default: ``'xy'`` .
4613
3798
 
4614
3799
  Returns:
4615
3800
  Tensors, a Tuple of N N-D Tensor objects. The data type is the same as the inputs.
4616
3801
 
4617
3802
  Raises:
4618
3803
  TypeError: If `indexing` is not a str or `inputs` is not a tuple.
4619
- ValueError: If `indexing` is neither 'xy' nor 'ij'.
3804
+ ValueError: If `indexing` is neither ``'xy'`` nor ``'ij'``.
4620
3805
 
4621
3806
  Supported Platforms:
4622
3807
  ``Ascend`` ``GPU`` ``CPU``
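A quick shape check makes the ``'xy'`` versus ``'ij'`` rule concrete; a sketch with M = 2 and N = 3:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([1, 2]), mindspore.int32)        # length M = 2
    y = Tensor(np.array([10, 20, 30]), mindspore.int32)  # length N = 3
    gx, gy = ops.meshgrid(x, y, indexing='xy')
    print(gx.shape, gy.shape)   # (3, 2) (3, 2): 'xy' gives (N, M)
    gx, gy = ops.meshgrid(x, y, indexing='ij')
    print(gx.shape, gy.shape)   # (2, 3) (2, 3): 'ij' gives (M, N)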
@@ -4723,87 +3908,6 @@ def affine_grid(theta, size, align_corners=False):
4723
3908
  return affine_grid_op(theta, size)
4724
3909
 
4725
3910
 
4726
- def broadcast_to(input, shape): # pylint: disable=redefined-outer-name
4727
- """
4728
- Broadcasts input tensor to a given shape. The dim of input shape must be smaller
4729
- than or equal to that of target shape. Suppose input shape is :math:`(x_1, x_2, ..., x_m)`,
4730
- target shape is :math:`(*, y_1, y_2, ..., y_m)`, where :math:`*` means any additional dimension.
4731
- The broadcast rules are as follows:
4732
-
4733
- Compare the value of :math:`x_m` and :math:`y_m`, :math:`x_{m-1}` and :math:`y_{m-1}`, ...,
4734
- :math:`x_1` and :math:`y_1` consecutively and
4735
- decide whether these shapes are broadcastable and what the broadcast result is.
4736
-
4737
- If the value pairs at a specific dim are equal, then that value goes right into that dim of output shape.
4738
- With an input shape :math:`(2, 3)`, target shape :math:`(2, 3)` , the inferred output shape is :math:`(2, 3)`.
4739
-
4740
- If the value pairs are unequal, there are three cases:
4741
-
4742
- Case 1: If the value of the target shape in the dimension is -1, the value of the
4743
- output shape in the dimension is the value of the corresponding input shape in the dimension.
4744
- With an input shape :math:`(3, 3)`, target
4745
- shape :math:`(-1, 3)`, the output shape is :math:`(3, 3)`.
4746
-
4747
- Case 2: If the value of target shape in the dimension is not -1, but the corresponding
4748
- value in the input shape is 1, then the corresponding value of the output shape
4749
- is that of the target shape. With an input shape :math:`(1, 3)`, target
4750
- shape :math:`(8, 3)`, the output shape is :math:`(8, 3)`.
4751
-
4752
- Case 3: If the corresponding values of the two shapes do not satisfy the above cases,
4753
- it means that broadcasting from the input shape to the target shape is not supported.
4754
-
4755
- So far we got the last m dims of the outshape, now focus on the first :math:`*` dims, there are
4756
- two cases:
4757
-
4758
- If the first :math:`*` dims of output shape does not have -1 in it, then fill the input
4759
- shape with ones until their length are the same, and then refer to
4760
- Case 2 mentioned above to calculate the output shape. With target shape :math:`(3, 1, 4, 1, 5, 9)`,
4761
- input shape :math:`(1, 5, 9)`, the filled input shape will be :math:`(1, 1, 1, 1, 5, 9)` and thus the
4762
- output shape is :math:`(3, 1, 4, 1, 5, 9)`.
4763
-
4764
- If the first :math:`*` dims of output shape have -1 in it, it implies this -1 is corresponding to
4765
- a non-existing dim so they're not broadcastable. With target shape :math:`(3, -1, 4, 1, 5, 9)`,
4766
- input shape :math:`(1, 5, 9)`, instead of operating the dim-filling process first, it raises errors directly.
4767
-
4768
- Args:
4769
- input (Tensor): The input Tensor.
4770
- shape (tuple): The target shape to broadcast. Can be fully specified, or have -1 in one position
4771
- where it will be substituted by the input tensor's shape in that position, see example.
4772
-
4773
- Returns:
4774
- Tensor, with the given `shape` and the same data type as `input`.
4775
-
4776
- Raises:
4777
- TypeError: If `shape` is not a tuple.
4778
- ValueError: If the target and input shapes are incompatible, or if a - 1 in the target shape is in an invalid
4779
- location.
4780
-
4781
- Supported Platforms:
4782
- ``Ascend`` ``GPU`` ``CPU``
4783
-
4784
- Examples:
4785
- >>> import numpy as np
4786
- >>> from mindspore import Tensor, ops
4787
- >>> shape = (2, 3)
4788
- >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
4789
- >>> output = ops.broadcast_to(x, shape)
4790
- >>> print(output)
4791
- [[1. 2. 3.]
4792
- [1. 2. 3.]]
4793
- >>> shape = (-1, 2)
4794
- >>> x = Tensor(np.array([[1], [2]]).astype(np.float32))
4795
- >>> output = ops.broadcast_to(x, shape)
4796
- >>> print(output)
4797
- [[1. 1.]
4798
- [2. 2.]]
4799
- """
4800
- if isinstance(shape, Tensor) or ops.is_sequence_value_unknown(shape):
4801
- _dyn_broadcast_to = _get_cache_prim(DynamicBroadcastTo)()
4802
- return _dyn_broadcast_to(input, shape)
4803
- _broadcast_to = _get_cache_prim(P.BroadcastTo)(shape)
4804
- return _broadcast_to(input)
4805
-
4806
-
4807
3911
  def unsorted_segment_min(x, segment_ids, num_segments):
4808
3912
  r"""
4809
3913
  Computes the minimum of a tensor along segments.
@@ -4827,14 +3931,13 @@ def unsorted_segment_min(x, segment_ids, num_segments):
4827
3931
  x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
4828
3932
  segment_ids (Tensor): The label that indicates the segment to which each element belongs.
4829
3933
  Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
4830
- num_segments (int): The value specifies the number of distinct `segment_ids`.
3934
+ num_segments (Union[int, Tensor], optional): The number of segments, denoted :math:`z` below; it can be an int or a 0-D Tensor.
4831
3935
 
4832
3936
  Returns:
4833
- Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
3937
+ Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
4834
3938
 
4835
3939
  Raises:
4836
3940
  TypeError: If `num_segments` is not an int.
4837
- ValueError: If length of shape of `segment_ids` is not equal to 1.
4838
3941
 
4839
3942
  Supported Platforms:
4840
3943
  ``Ascend`` ``GPU`` ``CPU``
@@ -4851,7 +3954,6 @@ def unsorted_segment_min(x, segment_ids, num_segments):
4851
3954
  [[1. 2. 3.]
4852
3955
  [4. 2. 1.]]
4853
3956
  """
4854
- unsorted_segment_min_ = P.UnsortedSegmentMin()
4855
3957
  return unsorted_segment_min_(x, segment_ids, num_segments)
4856
3958
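The reduction above can be described by a reference loop: row `j` of `x` is min-reduced into output slot `segment_ids[j]`. A NumPy sketch of these semantics (all segments are populated here, so no fill value for empty segments is needed):

    import numpy as np

    x = np.array([[1., 2., 3.], [4., 5., 6.], [4., 2., 1.]])
    segment_ids = np.array([0, 1, 1])
    num_segments = 2
    out = np.full((num_segments,) + x.shape[1:], np.inf)
    for j, seg in enumerate(segment_ids):
        out[seg] = np.minimum(out[seg], x[j])   # min-reduce row j into its segment
    print(out)
    # [[1. 2. 3.]
    #  [4. 2. 1.]]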
 
4857
3959
 
@@ -4878,14 +3980,13 @@ def unsorted_segment_max(x, segment_ids, num_segments):
4878
3980
  x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
4879
3981
  segment_ids (Tensor): The label that indicates the segment to which each element belongs.
4880
3982
  Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
4881
- num_segments (int): The value specifies the number of distinct `segment_ids`.
3983
+ num_segments (Union[int, Tensor], optional): The number of segments, denoted :math:`z` below; it can be an int or a 0-D Tensor.
4882
3984
 
4883
3985
  Returns:
4884
- Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
3986
+ Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
4885
3987
 
4886
3988
  Raises:
4887
3989
  TypeError: If `num_segments` is not an int.
4888
- ValueError: If length of shape of `segment_ids` is not equal to 1.
4889
3990
 
4890
3991
  Supported Platforms:
4891
3992
  ``Ascend`` ``GPU`` ``CPU``
@@ -4902,7 +4003,6 @@ def unsorted_segment_max(x, segment_ids, num_segments):
4902
4003
  [[1. 2. 3.]
4903
4004
  [4. 5. 6.]]
4904
4005
  """
4905
- unsorted_segment_max_ = P.UnsortedSegmentMax()
4906
4006
  return unsorted_segment_max_(x, segment_ids, num_segments)
4907
4007
 
4908
4008
 
@@ -4920,16 +4020,15 @@ def unsorted_segment_prod(x, segment_ids, num_segments):
4920
4020
 
4921
4021
  Args:
4922
4022
  x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
4923
- segment_ids (Tensor): A `1-D` tensor whose shape is :math:`(x_1)`,
4924
- the value must be non-negative tensor. The data type must be int32.
4925
- num_segments (int): The value specifies the number of distinct `segment_ids`.
4023
+ segment_ids (Tensor): The label that indicates the segment to which each element belongs.
4024
+ Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R. The data type must be int32.
4025
+ num_segments (Union[int, Tensor], optional): The number of segments, denoted :math:`z` below; it can be an int or a 0-D Tensor.
4926
4026
 
4927
4027
  Returns:
4928
- Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
4028
+ Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
4929
4029
 
4930
4030
  Raises:
4931
4031
  TypeError: If `num_segments` is not an int.
4932
- ValueError: If length of shape of `segment_ids` is not equal to 1.
4933
4032
 
4934
4033
  Supported Platforms:
4935
4034
  ``Ascend`` ``GPU`` ``CPU``
@@ -4946,7 +4045,6 @@ def unsorted_segment_prod(x, segment_ids, num_segments):
4946
4045
  [[4. 4. 3.]
4947
4046
  [4. 5. 6.]]
4948
4047
  """
4949
- unsorted_segment_prod_ = P.UnsortedSegmentProd()
4950
4048
  return unsorted_segment_prod_(x, segment_ids, num_segments)
4951
4049
 
4952
4050
 
@@ -5158,33 +4256,6 @@ def is_nonzero(input):
5158
4256
  return bool(out)
5159
4257
 
5160
4258
 
5161
- def scalar_cast(input_x, input_y):
5162
- """
5163
- Casts the input scalar to another type.
5164
-
5165
- Args:
5166
- input_x (scalar): The input scalar. Only constant value is allowed.
5167
- input_y (mindspore.dtype): The type to be cast. Only constant value is allowed.
5168
-
5169
- Returns:
5170
- Scalar. The type is the same as the python type corresponding to `input_y`.
5171
-
5172
- Raises:
5173
- TypeError: If neither `input_x` nor `input_y` is a constant value.
5174
-
5175
- Supported Platforms:
5176
- ``Ascend`` ``GPU`` ``CPU``
5177
-
5178
- Examples:
5179
- >>> import mindspore
5180
- >>> from mindspore import ops
5181
- >>> output = ops.scalar_cast(255.0, mindspore.int32)
5182
- >>> print(output)
5183
- 255
5184
- """
5185
- return scalar_cast_(input_x, input_y)
5186
-
5187
-
5188
4259
  def tensor_scatter_mul(input_x, indices, updates):
5189
4260
  r"""
5190
4261
  Creates a new tensor by multiplying the values from the positions in `input_x` indicated by
@@ -5194,10 +4265,10 @@ def tensor_scatter_mul(input_x, indices, updates):
5194
4265
 
5195
4266
  The last axis of `indices` is the depth of each index vectors. For each index vector,
5196
4267
  there must be a corresponding value in `updates`. The shape of `updates` should be
5197
- equal to the shape of `input_x[indices]`. For more details, see use cases.
4268
+ equal to the shape of `input_x[indices]`. For more details, see Examples.
5198
4269
 
5199
4270
  .. math::
5200
- output[indices] = input\_x \times update
4271
+ output\left [indices \right ] = input\_x \times update
5201
4272
 
5202
4273
  Note:
5203
4274
  - If some values of the `indices` are out of bound, instead of raising an index error,
@@ -5254,7 +4325,7 @@ def tensor_scatter_div(input_x, indices, updates):
5254
4325
 
5255
4326
  The last axis of `indices` is the depth of each index vectors. For each index vector,
5256
4327
  there must be a corresponding value in `updates`. The shape of `updates` should be
5257
- equal to the shape of `input_x[indices]`. For more details, see use cases.
4328
+ equal to the shape of `input_x[indices]`. For more details, see Examples.
5258
4329
 
5259
4330
  .. math::
5260
4331
  output\left [indices \right ] = input\_x \div update
@@ -5407,83 +4478,6 @@ def masked_select(input, mask):
5407
4478
  return masked_select_(input, mask)
5408
4479
 
5409
4480
 
5410
- def masked_fill(input_x, mask, value):
5411
- """
5412
- Fills elements of Tensor with value where mask is True.
5413
- The shapes of `input_x` and `mask` need to be the same or broadcastable.
5414
-
5415
- Args:
5416
- input_x (Tensor): The source Tensor whose data type is one of bool, uint8, int8, int16, int32,
5417
- int64, float16, float32, float64, complex64, complex128.
5418
- mask (Tensor[bool]): The boolean mask.
5419
- value (Union[float, Tensor]): The value to fill in with, which dtype is the same as `input_x`.
5420
-
5421
- Returns:
5422
- Tensor, has the same type and shape as `input_x`.
5423
-
5424
- Raises:
5425
- TypeError: If dtype of `mask` is not bool.
5426
- TypeError: If `input_x` or `mask` is not a Tensor.
5427
- ValueError: If the shapes of `input_x` and `mask` could not be broadcast.
5428
- TypeError: If dtype of `input_x` or `value` is not one of bool, uint8, int8, int16, int32,
5429
- int64, float16, float32, float64, complex64, complex128.
5430
- TypeError: If dtype of `value` is different from that of `input_x`.
5431
- TypeError: If `value` is neither float number nor Tensor.
5432
-
5433
- Supported Platforms:
5434
- ``Ascend`` ``GPU`` ``CPU``
5435
-
5436
- Examples:
5437
- >>> import mindspore
5438
- >>> import numpy as np
5439
- >>> from mindspore import Tensor, ops
5440
- >>> input_x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
5441
- >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
5442
- >>> output = ops.masked_fill(input_x, mask, 0.5)
5443
- >>> print(output)
5444
- [0.5 0.5 3. 0.5]
5445
- """
5446
- if isinstance(value, (float, int)) and isinstance(input_x, Tensor):
5447
- value = scalar_to_tensor_(value, input_x.dtype)
5448
- masked_fill_ = _get_cache_prim(P.MaskedFill)()
5449
- return masked_fill_(input_x, mask, value)
5450
-
5451
-
5452
- def diag(input):
5453
- r"""
5454
- Constructs a diagonal tensor with a given diagonal values.
5455
-
5456
- Assume `input` has dimensions :math:`(D_1,... D_k)` , the output is a tensor of
5457
- rank 2k with dimensions :math:`(D_1,..., D_k, D_1,..., D_k)` where:
5458
- :math:`output[i_1,..., i_k, i_1,..., i_k] = input[i_1,..., i_k]` and 0 everywhere else.
5459
-
5460
- Args:
5461
- input (Tensor): The input tensor.
5462
-
5463
- Returns:
5464
- Tensor, has the same dtype as the `input`.
5465
-
5466
- Raises:
5467
- TypeError: If `input` is not a Tensor.
5468
- ValueError: If rank of `input` is less than 1.
5469
-
5470
- Supported Platforms:
5471
- ``Ascend`` ``GPU`` ``CPU``
5472
-
5473
- Examples:
5474
- >>> from mindspore import Tensor
5475
- >>> import mindspore.ops as ops
5476
- >>> input_x = Tensor([1, 2, 3, 4]).astype('int32')
5477
- >>> output = ops.diag(input_x)
5478
- >>> print(output)
5479
- [[1 0 0 0]
5480
- [0 2 0 0]
5481
- [0 0 3 0]
5482
- [0 0 0 4]]
5483
- """
5484
- return diag_(input)
5485
-
5486
-
5487
4481
  def diagflat(input, offset=0):
5488
4482
  r"""
5489
4483
  Create a 2-D Tensor which diagonal is the flattened `input` .
@@ -5542,7 +4536,7 @@ def col2im(input_x, output_size, kernel_size, dilation, padding_value, stride):
5542
4536
  Combines an array of sliding local blocks into a large containing tensor.
5543
4537
 
5544
4538
  Args:
5545
- input_x (Tensor): 4D tensor with data type float16 or float.
4539
+ input_x (Tensor): 4D tensor with data type float16 or float32.
5546
4540
  output_size (Tensor): 1D tensor with 2 elements of data type int.
5547
4541
  kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two int
5548
4542
  for height and width. If type is int, it means that height equal with width. Must be specified.
@@ -5598,7 +4592,7 @@ def _split_int(x, split_size_or_sections, axis):
5598
4592
  num_sections = length_along_dim // split_size_or_sections
5599
4593
  length1 = num_sections * split_size_or_sections
5600
4594
  length2 = length_along_dim - length1
5601
- start1 = _list_comprehensions(rank(x), 0, True)
4595
+ start1 = _list_comprehensions(rank_(x), 0, True)
5602
4596
  size1 = _tuple_setitem(arr_shape, axis, length1)
5603
4597
  start2 = _tuple_setitem(start1, axis, length1)
5604
4598
  size2 = _tuple_setitem(arr_shape, axis, length2)
@@ -5650,9 +4644,9 @@ def split(tensor, split_size_or_sections, axis=0):
5650
4644
  TypeError: If argument `tensor` is not Tensor.
5651
4645
  TypeError: If argument `axis` is not Tensor.
5652
4646
  ValueError: If argument `axis` is out of range of :math:`[-tensor.ndim, tensor.ndim)` .
5653
- TypeError: If each element in 'split_size_or_sections' is not integer.
5654
- TypeError: If argument `indices_or_sections` is not int, tuple(int) or list(int).
5655
- ValueError: The sum of 'split_size_or_sections' is not equal to x.shape[axis].
4647
+ TypeError: If each element in `split_size_or_sections` is not an integer.
4648
+ TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
4649
+ ValueError: If the sum of `split_size_or_sections` is not equal to x.shape[axis].
5656
4650
 
5657
4651
  Supported Platforms:
5658
4652
  ``Ascend`` ``GPU`` ``CPU``
@@ -5918,24 +4912,24 @@ def _tensor_split_sub_int(x, indices_or_sections, axis):
5918
4912
  arr_shape = x.shape
5919
4913
  length_along_dim = arr_shape[axis]
5920
4914
  if indices_or_sections > length_along_dim:
5921
- res = P.Split(axis, length_along_dim)(x)
4915
+ res = _get_cache_prim(P.Split)(axis, length_along_dim)(x)
5922
4916
  indices_or_sections_n = [length_along_dim, length_along_dim + 1]
5923
4917
  res2 = _tensor_split_sub_tensors(x, indices_or_sections_n, axis)
5924
4918
  for _ in np.arange(length_along_dim, indices_or_sections):
5925
4919
  res += tuple(res2)[1:]
5926
4920
  elif length_along_dim % indices_or_sections == 0:
5927
- res = P.Split(axis, indices_or_sections)(x)
4921
+ res = _get_cache_prim(P.Split)(axis, indices_or_sections)(x)
5928
4922
  else:
5929
4923
  num_long_tensor = length_along_dim % indices_or_sections
5930
4924
  num_short_tensor = indices_or_sections - num_long_tensor
5931
4925
  length1 = num_long_tensor * (length_along_dim // indices_or_sections + 1)
5932
4926
  length2 = length_along_dim - length1
5933
- start1 = _list_comprehensions(rank(x), 0, True)
4927
+ start1 = _list_comprehensions(rank_(x), 0, True)
5934
4928
  size1 = _tuple_setitem(arr_shape, axis, length1)
5935
4929
  start2 = _tuple_setitem(start1, axis, length1)
5936
4930
  size2 = _tuple_setitem(arr_shape, axis, length2)
5937
- res = P.Split(axis, num_long_tensor)(tensor_slice(x, start1, size1)) + \
5938
- P.Split(axis, num_short_tensor)(tensor_slice(x, start2, size2))
4931
+ res = _get_cache_prim(P.Split)(axis, num_long_tensor)(tensor_slice(x, start1, size1)) + \
4932
+ _get_cache_prim(P.Split)(axis, num_short_tensor)(tensor_slice(x, start2, size2))
5939
4933
  return res
5940
4934
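The uneven case above composes two even `P.Split` calls over two slices. The arithmetic for splitting a length-10 axis into 3 sections, as a worked sketch:

    length, n = 10, 3
    num_long = length % n                   # 1 section of size length // n + 1
    num_short = n - num_long                # 2 sections of size length // n
    length1 = num_long * (length // n + 1)  # 4: handled by the first Split
    length2 = length - length1              # 6: handled by the second Split
    print(num_long, num_short, length1, length2)   # 1 2 4 6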
 
5941
4935
 
@@ -5949,11 +4943,11 @@ def tensor_split(input, indices_or_sections, axis=0):
5949
4943
 
5950
4944
  - If `indices_or_sections` is an integer n, input tensor will be split into n sections.
5951
4945
 
5952
- - If :math:`input.shape(axis)` can be divisible by n, sub-sections will have equal size
5953
- :math:`input.shape(axis) / n` .
5954
- - If :math:`input.shape(axis)` is not divisible by n, the first :math:`input.shape(axis) % n` sections
5955
- will have size :math:`input.shape(axis) // n + 1` , and the rest will have
5956
- size :math:`input.shape(axis) // n` .
4946
+ - If :math:`input.shape[axis]` is divisible by n, sub-sections will have equal size
4947
+ :math:`input.shape[axis] / n` .
4948
+ - If :math:`input.shape[axis]` is not divisible by n, the first :math:`input.shape[axis] \bmod n` sections
4949
+ will have size :math:`input.shape[axis] // n + 1` , and the rest will have
4950
+ size :math:`input.shape[axis] // n` .
5957
4951
  - If `indices_or_sections` is of type tuple(int) or list(int), the input tensor will be split at the
5958
4952
  indices in the list or tuple. For example, given parameters :math:`indices\_or\_sections=[1, 4]`
5959
4953
  and :math:`axis=0` , the input tensor will be split into sections :math:`input[:1]` ,
@@ -6166,7 +5160,7 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylin
6166
5160
  tensor.
6167
5161
 
6168
5162
  - values (Tensor) - The maximum value of input tensor, with the same shape as index, and same dtype as x.
6169
- - index (Tensor) - The index for the maximum value of the input tensor, with dtype int32. If `keepdims`
5163
+ - index (Tensor) - The index for the maximum value of the input tensor, with dtype int64. If `keepdims`
6170
5164
  is true, the shape of output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1, input_{axis+1},
6171
5165
  ..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1},
6172
5166
  ..., input_N)` .
@@ -6195,16 +5189,15 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylin
6195
5189
  [[3.2 0.4 0.4 2.9 4. ]] [[1 1 0 1 1]]
6196
5190
  """
6197
5191
  if not input.shape:
6198
- return (input, Tensor(0, dtype=mstype.int32))
5192
+ return (input, Tensor(0, dtype=mstype.int64))
6199
5193
  if axis is None:
6200
- reduce_max_op = _get_cache_prim(P.ReduceMax)()
6201
- return (reduce_max_op(input), Tensor(0, dtype=mstype.int32))
5194
+ return (reduce_max_(input), Tensor(0, dtype=mstype.int64))
6202
5195
  if initial is not None and not isinstance(initial, numbers.Number):
6203
5196
  raise TypeError(f"For 'max', 'initial' must be a scalar, but got {type(initial)}")
6204
5197
  if axis is not None and not isinstance(axis, int):
6205
5198
  raise TypeError(f"For 'max', 'axis' must be int, but got {type(axis)}")
6206
5199
  input = _init_and_select_elem(input, initial, where, ops.maximum)
6207
- argmax_with_value_op = ArgMaxWithValue(axis, keepdims)
5200
+ argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(axis, keepdims)
6208
5201
  indices, values = argmax_with_value_op(input)
6209
5202
  return values, indices
6210
5203
 
@@ -6250,7 +5243,7 @@ def argmax(input, dim=None, keepdim=False):
6250
5243
  is_dim_none = True
6251
5244
  out = _get_cache_prim(Argmax)(dim, mstype.int64)(input)
6252
5245
  if keepdim and not is_dim_none:
6253
- out = expand_dims_(out, dim)
5246
+ out = expand_dims(out, dim)
6254
5247
  return out
6255
5248
 
6256
5249
 
@@ -6312,16 +5305,16 @@ def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pylin
6312
5305
  0.0 0
6313
5306
  """
6314
5307
  if not input.shape:
6315
- return (input, Tensor(0, dtype=mstype.int32))
5308
+ return (input, Tensor(0, dtype=mstype.int64))
6316
5309
  if axis is None:
6317
- return (reduce_min(input), Tensor(0, dtype=mstype.int32))
5310
+ return (reduce_min_(input), Tensor(0, dtype=mstype.int64))
6318
5311
  if initial is not None and not isinstance(initial, numbers.Number):
6319
5312
  raise TypeError(f"For 'min', 'initial' must be a scalar, but got {type(initial)}")
6320
5313
  if axis is not None and not isinstance(axis, int):
6321
5314
  raise TypeError(f"For 'min', 'axis' must be int, but got {type(axis)}")
6322
5315
  input = _init_and_select_elem(input, initial, where, ops.minimum)
6323
- argmin_with_value_ = ArgMinWithValue(axis=axis, keep_dims=keepdims)
6324
- indices, values = argmin_with_value_(input)
5316
+ argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(axis, keepdims)
5317
+ indices, values = argmin_with_value_op(input)
6325
5318
  return values, indices
6326
5319
 
6327
5320
 
@@ -6379,8 +5372,8 @@ def aminmax(input, *, axis=0, keepdims=False):
6379
5372
  output0 = ops.reshape(output0, [1] * input.ndim)
6380
5373
  output1 = ops.reshape(output1, [1] * input.ndim)
6381
5374
  return output0, output1
6382
- argmin_with_value_op = P.ArgMinWithValue(axis, keepdims)
6383
- argmax_with_value_op = P.ArgMaxWithValue(axis, keepdims)
5375
+ argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(axis, keepdims)
5376
+ argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(axis, keepdims)
6384
5377
  _, output0 = argmin_with_value_op(input)
6385
5378
  _, output1 = argmax_with_value_op(input)
6386
5379
  if keepdims is True and input.ndim == 0:
@@ -6435,66 +5428,7 @@ def narrow(input, axis, start, length):
6435
5428
  begins[axis] = start
6436
5429
  sizes = list(input.shape)
6437
5430
  sizes[axis] = length
6438
- return P.Slice()(input, begins, sizes)
6439
-
6440
-
6441
- def unsorted_segment_sum(input_x, segment_ids, num_segments):
6442
- r"""
6443
- Computes the sum of a tensor along segments.
6444
-
6445
- Calculates a tensor such that :math:`\text{output}[i] = \sum_{segment\_ids[j] == i} \text{data}[j, \ldots]`, where
6446
- :math:`j,...` is a tuple describing the index of element in data.
6447
- `segment_ids` selects which elements in data to sum
6448
- up. Segment_ids does not need to be sorted, and it does not need to cover all values in the entire valid value
6449
- range.
6450
-
6451
- The following figure shows the calculation process of unsorted_segment_sum:
6452
-
6453
- .. image:: UnsortedSegmentSum.png
6454
-
6455
- Note:
6456
- - If the segment_id i is absent in the segment_ids, then output[i] will be filled with 0.
6457
- - On Ascend, if the value of segment_id is less than 0 or greater than the length of the input data shape, an
6458
- execution error will occur.
6459
-
6460
- If the sum of the given segment_ids :math:`i` is empty, then :math:`\text{output}[i] = 0`. If the given segment_ids
6461
- is negative, the value will be ignored. 'num_segments' must be equal to the number of different segment_ids.
6462
-
6463
- Args:
6464
- input_x (Tensor): Input Tensor contains the data to be summed.
6465
- The shape is :math:`(x_1, x_2, ..., x_R)`.
6466
- segment_ids (Tensor): TThe label indicates the segment to which each element belongs.
6467
- Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
6468
- num_segments (Union[int, Tensor], optional): Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
6469
-
6470
- Returns:
6471
- Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
6472
-
6473
- Raises:
6474
- TypeError: If `num_segments` is not an int or 0-D Tensor.
6475
- ValueError: If length of shape of `segment_ids` is less than 1.
6476
-
6477
- Supported Platforms:
6478
- ``Ascend`` ``GPU`` ``CPU``
6479
-
6480
- Examples:
6481
- >>> from mindspore import Tensor
6482
- >>> from mindspore import ops
6483
- >>> import mindspore
6484
- >>> input_x = Tensor([1, 2, 3, 4], mindspore.float32)
6485
- >>> segment_ids = Tensor([0, 0, 1, 2], mindspore.int32)
6486
- >>> num_segments = 4
6487
- >>> output = ops.unsorted_segment_sum(input_x, segment_ids, num_segments)
6488
- >>> print(output)
6489
- [3. 3. 4. 0.]
6490
- >>> input_x = Tensor([1, 2, 3, 4, 2, 5], mindspore.float32)
6491
- >>> segment_ids = Tensor([0, 0, 1, 2, 3, 4], mindspore.int32)
6492
- >>> num_segments = 6
6493
- >>> output = ops.unsorted_segment_sum(input_x, segment_ids, num_segments)
6494
- >>> print(output)
6495
- [3. 3. 4. 2. 5. 0.]
6496
- """
6497
- return unsorted_segment_sum_(input_x, segment_ids, num_segments)
5431
+ return tensor_slice(input, begins, sizes)
6498
5432
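`narrow` now routes through the shared `tensor_slice` handle, but its semantics are unchanged: `begins` is all zeros except `start` at `axis`, and `sizes` copies the input shape except `length` at `axis`. A usage sketch:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.int32)
    out = ops.narrow(x, axis=0, start=1, length=2)   # keep rows 1..2
    print(out)
    # [[4 5 6]
    #  [7 8 9]]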
 
6499
5433
 
6500
5434
  def topk(input, k, dim=None, largest=True, sorted=True):
@@ -6521,7 +5455,7 @@ def topk(input, k, dim=None, largest=True, sorted=True):
6521
5455
 
6522
5456
  Args:
6523
5457
  input (Tensor): Input to be computed, data type must be float16, float32 or int32.
6524
- k (int): The number of top or bottom elements to be computed along the last dimension.
5458
+ k (int): The number of top or bottom elements to be computed along the last dimension; it must be a constant.
6525
5459
  dim (int, optional): The dimension to sort along. Default: ``None`` .
6526
5460
  largest (bool, optional): If largest is ``False`` then the k smallest elements are returned.
6527
5461
  Default: ``True`` .
@@ -6728,9 +5662,7 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
6728
5662
  .. warning::
6729
5663
  - The output is a 3-dimensional Tensor whose shape is
6730
5664
  :math:`(N, C \times \prod(\text{kernel_size}), L)` .
6731
-
6732
- .. warning::
6733
- This is an experimental API that is subject to change or deletion.
5665
+ - This is an experimental API that is subject to change or deletion.
6734
5666
 
6735
5667
  Args:
6736
5668
  input (Tensor): 4-D Tensor, supported dtypes: float16, float32, float64, complex64 and complex128.
@@ -6739,10 +5671,11 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
6739
5671
  dilation (Union[int, tuple[int], list[int]], optional): The dilation of the window, should be two int
6740
5672
  for height and width. If the type is int, height equals width. Default: ``1`` .
6741
5673
  padding (Union[int, tuple[int], list[int]], optional): The pad of the window, that must be
6742
- a tuple/list of one or two `int` for height and width.
6743
- If one int, pad_height = pad_width.
6744
- If two int, pad_height = padding[0], pad_width = padding[1].
6745
- Default: ``0`` .
5674
+ a tuple/list of one or two `int` for height and width. Default: ``0`` .
5675
+
5676
+ - If it is one int, pad_height = pad_width.
5677
+ - If it is two ints, pad_height = padding[0], pad_width = padding[1].
5678
+
6746
5679
  stride (Union[int, tuple[int], list[int]], optional): The stride of the window, should be two int
6747
5680
  for height and width. If the type is int, height equals width. Default: ``1`` .
6748
5681
 
@@ -6789,98 +5722,6 @@ def _check_diagonal_axes(dim1, dim2, x_ndim):
6789
5722
  return axes
6790
5723
 
6791
5724
 
6792
- def diagonal(input, offset=0, dim1=0, dim2=1):
6793
- """
6794
- Returns specified diagonals of `input`.
6795
-
6796
- If `input` is 2-D, returns the diagonal of `input` with the given offset.
6797
- If `input` has more than two
6798
- dimensions, then the axes specified by `dim1` and `dim2` are used to determine
6799
- the 2-D sub-array whose diagonal is returned. In this case, remove the `dim1` and `dim2` dimensions of `input`
6800
- and insert the last dimension of `input` by the diagonal elements determined by `dim1` and `dim2`.
6801
-
6802
- Args:
6803
- input (Tensor): Array from which the diagonals are taken.
6804
- offset (int, optional): Offset of the diagonal from the main diagonal.
6805
- Can be positive or negative. Default: ``0`` .
6806
- dim1 (int, optional): Axis to be used as the first axis of the 2-D
6807
- sub-arrays from which the diagonals should be taken. Defaults to
6808
- first axis (0). Default: ``0`` .
6809
- dim2 (int, optional): Axis to be used as the second axis of the 2-D
6810
- sub-arrays from which the diagonals should be taken. Defaults to
6811
- second axis (1). Default: ``1`` .
6812
-
6813
- Returns:
6814
- Tensor, if `input` is 2-D, then `input` 1-D array containing the diagonal. If
6815
- ``input.ndim > 2``, then the dimensions specified by `dim1` and `dim2` are removed,
6816
- and a new axis inserted at the end corresponding to the diagonal.
6817
-
6818
- Raises:
6819
- TypeError: if `dim1` or `dim2` are not an int.
6820
- ValueError: if the input tensor has less than two dimensions.
6821
-
6822
- Supported Platforms:
6823
- ``Ascend`` ``GPU`` ``CPU``
6824
-
6825
- Examples:
6826
- >>> from mindspore import Tensor, ops
6827
- >>> from mindspore import dtype as mstype
6828
- >>> x = Tensor([[0, 1], [2, 3]], mstype.float32)
6829
- >>> output = ops.diagonal(x)
6830
- >>> print(output)
6831
- [0 3]
6832
- """
6833
- x_ndim = input.ndim
6834
- if x_ndim < 2:
6835
- raise ValueError(f"For 'ops.diagonal', the original tensor requires at least two dimensions, but got {x_ndim}")
6836
- _check_attr_dtype("dim1", dim1, [int], "diagonal")
6837
- _check_attr_dtype("dim2", dim2, [int], "diagonal")
6838
- dtype = input.dtype
6839
-
6840
- axes = _check_diagonal_axes(dim1, dim2, x_ndim)
6841
- perm = ()
6842
- for i in ms_arrange(x_ndim):
6843
- if i not in axes:
6844
- perm += (i,)
6845
- perm += axes
6846
- input = input.transpose(perm)
6847
-
6848
- x_shape = input.shape
6849
- n, m = x_shape[-2:]
6850
-
6851
- e = ops.eye(n, m, dtype)
6852
- if offset >= m or offset <= -n:
6853
- zero_shape = x_shape[:-2] + (0,)
6854
- return ops.zeros(zero_shape, dtype)
6855
- if offset != 0:
6856
- e = e.astype(mstype.float32)
6857
- if offset > 0:
6858
- e_left = ops.fill(mstype.float32, (n, offset), 0)
6859
- e_right = e[..., 0:m - offset:1]
6860
- e = ops.cat((e_left, e_right), 1).astype(dtype)
6861
- elif offset < 0:
6862
- e_upper = ops.fill(mstype.float32, (-offset, m), 0)
6863
- e_lower = e[0:n + offset:1, ...]
6864
- e = ops.cat((e_upper, e_lower), 0).astype(dtype)
6865
- e = ops.broadcast_to(e, x_shape)
6866
-
6867
- prod_val = ops.mul(input, e)
6868
- res = ops.ReduceSum()(prod_val.astype(mstype.float32), -1)
6869
-
6870
- begin = ()
6871
- for _ in ms_arrange(x_ndim - 2):
6872
- begin += (0,)
6873
- last_dim_begin = builtins.max(0, -offset)
6874
- begin += (last_dim_begin,)
6875
- res_size = res.shape[:-1]
6876
- last_dim_end = builtins.min(x_shape[-2], builtins.max(0, x_shape[-1] - offset)) - last_dim_begin
6877
- if last_dim_end <= 0:
6878
- return Tensor([])
6879
- res_size += (last_dim_end,)
6880
- res = ops.slice(res, begin, res_size)
6881
- return res.astype(dtype)
6882
-
6883
-
6884
5725
  def _check_is_tensor(param_name, input, cls_name):
6885
5726
  """Returns True if input is Tensor."""
6886
5727
  if not isinstance(input, Tensor):
@@ -6900,6 +5741,9 @@ def diagonal_scatter(input, src, offset=0, dim1=0, dim2=1):
6900
5741
  the elements in these two dimensions will be treated as elements of a matrix,
6901
5742
  and `src` is embedded on the diagonal of the matrix.
6902
5743
 
5744
+ Note:
5745
+ Currently, ``inf`` values in `input` or `src` are not supported.
5746
+
6903
5747
  Args:
6904
5748
  input (Tensor): Input Tensor, whose dimension is larger than 1.
6905
5749
  src (Tensor): The source Tensor to embed.
@@ -6936,16 +5780,39 @@ def diagonal_scatter(input, src, offset=0, dim1=0, dim2=1):
6936
5780
  """
6937
5781
  _check_is_tensor("input", input, "diagonal_scatter")
6938
5782
  _check_is_tensor("src", src, "diagonal_scatter")
6939
- _check_is_int(offset, "offset", "diagonal_scatter")
6940
- _check_is_int(dim1, "dim1", "diagonal_scatter")
6941
- _check_is_int(dim2, "dim2", "diagonal_scatter")
6942
5783
  input_diag = input.diagonal(offset, dim1, dim2)
6943
5784
  _check_diagonal_scatter_shape(input_diag.shape, src.shape)
6944
- embed = ones_like(src)
6945
- embed = ops.diag_embed(embed, offset, dim1, dim2)
6946
- embed = input * embed
5785
+ input_shape = input.shape
5786
+ zeros_shape = list(input_shape)
5787
+ m, n = input_shape[dim1], input_shape[dim2]
5788
+ if m == n:
5789
+ src = src - input_diag
5790
+ src = ops.diag_embed(src, offset, dim1, dim2)
5791
+ return input + src
5792
+ if m > n:
5793
+ axis = dim2
5794
+ zeros_shape[axis] = m - n
5795
+ else:
5796
+ axis = dim1
5797
+ zeros_shape[axis] = n - m
5798
+ zeros_tensor = zeros(zeros_shape, dtype=input.dtype)
5799
+ input = concat((input, zeros_tensor), axis)
5800
+ input_diag = input.diagonal(offset, dim1, dim2)
5801
+ if src.shape != input_diag.shape:
5802
+ zeros_shape = []
5803
+ for i, ax in enumerate(src.shape):
5804
+ if ax == input_diag.shape[i]:
5805
+ zeros_shape.append(ax)
5806
+ else:
5807
+ axis = i
5808
+ zeros_shape.append(input_diag.shape[i] - ax)
5809
+ zeros_tensor = zeros(zeros_shape, dtype=src.dtype)
5810
+ src = concat((src, zeros_tensor), axis)
5811
+ src = src - input_diag
6947
5812
  src = ops.diag_embed(src, offset, dim1, dim2)
6948
- return input + src - embed
5813
+ input = input + src
5814
+ begin = (0,) * input.ndim
5815
+ return slice(input, begin, input_shape)
6949
5816
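The rewritten `diagonal_scatter` pads a non-square input to square, embeds `src` minus the current diagonal with `diag_embed`, then slices back to the original shape. A usage sketch on a square input, where no padding path is taken (values illustrative):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.zeros((3, 3)), mindspore.int64)
    src = Tensor(np.ones(2), mindspore.int64)
    out = ops.diagonal_scatter(x, src, offset=1)   # write src on the first superdiagonal
    print(out)
    # [[0 1 0]
    #  [0 0 1]
    #  [0 0 0]]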
 
6950
5817
 
6951
5818
  def lstsq(input, A):
@@ -7004,8 +5871,7 @@ def lstsq(input, A):
7004
5871
  [-6.5000005 -4.500001 ]
7005
5872
  [-3.500002 -2.5000017]]
7006
5873
  """
7007
- lstsq_op = _get_cache_prim(Lstsq)()
7008
- return lstsq_op(input, A)
5874
+ return lstsq_(input, A)
7009
5875
 
7010
5876
 
7011
5877
  def mvlgamma(input, p):
@@ -7080,7 +5946,7 @@ def argwhere(input):
7080
5946
  [[0 0 0]
7081
5947
  [0 1 0]]
7082
5948
  """
7083
- return nonzero_(input)
5949
+ return nonzero(input)
7084
5950
 
7085
5951
 
7086
5952
  def column_stack(tensors):
@@ -7117,14 +5983,13 @@ def column_stack(tensors):
7117
5983
  raise TypeError(f"For column_stack, the input must be list or tuple of tensors, but got {type(tensors)}.")
7118
5984
 
7119
5985
  trans_x = ()
7120
- _expand_dims = _get_cache_prim(P.ExpandDims)()
7121
5986
  for tensor in tensors:
7122
5987
  if not isinstance(tensor, Tensor):
7123
5988
  raise TypeError(f"For column_stack, the input element must be tensor, but got {type(tensor)}.")
7124
5989
  if tensor.ndim < 1:
7125
- tensor = _expand_dims(tensor, 0)
5990
+ tensor = expand_dims(tensor, 0)
7126
5991
  if tensor.ndim == 1:
7127
- tensor = _expand_dims(tensor, 1)
5992
+ tensor = expand_dims(tensor, 1)
7128
5993
  trans_x += (tensor,)
7129
5994
  if not trans_x:
7130
5995
  raise ValueError(f"For column_stack, the input must have at least 1 tensor, but got 0.")
@@ -7170,7 +6035,7 @@ def hstack(tensors):
         if not isinstance(tensor, Tensor):
             raise TypeError(f"For hstack, the input element must be tensor, but got {type(tensor)}.")
         if tensor.ndim < 1:
-            tensor = expand_dims_(tensor, 0)
+            tensor = expand_dims(tensor, 0)
         tuple_of_tensor += (tensor,)
     if not tuple_of_tensor:
         raise ValueError("For hstack, the input must have at least 1 tensor, but got 0.")
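hstack only lifts 0-d inputs to 1-d; 1-d tensors are then joined end to end. Sketch (values illustrative):

```python
import mindspore
from mindspore import Tensor, ops

a = Tensor([1, 2, 3], dtype=mindspore.int32)
b = Tensor([4, 5, 6], dtype=mindspore.int32)
print(ops.hstack((a, b)))
# [1 2 3 4 5 6]
```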
@@ -7270,7 +6135,7 @@ def movedim(x, source, destination):
             f"For `source` and `destination` arguments, the number of elements must be the same, but got 'source':"
             f" {len(source)} and 'destination': {len(destination)}.")
     perm = _get_moved_perm(ndim, source, destination)
-    return _get_cache_prim(P.Transpose)()(x, perm)
+    return transpose_(x, perm)
 
 
 def moveaxis(x, source, destination):
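movedim reduces to a single transpose over a computed permutation; a sketch of the visible effect (shapes are assumptions for illustration):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.zeros((3, 4, 5)), mindspore.float32)
y = ops.movedim(x, 0, 2)  # _get_moved_perm yields (1, 2, 0) here
print(y.shape)            # (4, 5, 3)
```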
@@ -7345,7 +6210,7 @@ def swapaxes(input, axis0, axis1):
     new_perm = perm[0:axis0] + perm[axis1:axis1 + 1] + \
                perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1]
 
-    return _get_cache_prim(P.Transpose)()(input, new_perm)
+    return transpose_(input, new_perm)
 
 
 def swapdims(input, dim0, dim1):
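swapaxes likewise builds a permutation that exchanges axis0 and axis1 and applies one transpose. Sketch (assumed shape):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.zeros((2, 3, 4)), mindspore.float32)
print(ops.swapaxes(x, 0, 2).shape)  # perm (2, 1, 0) -> (4, 3, 2)
```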
@@ -7455,7 +6320,7 @@ def repeat_interleave(input, repeats, axis=None):
 
 def repeat_elements(x, rep, axis=0):
     """
-    Repeat elements of a tensor along an axis, like `np.repeat` .
+    Repeat elements of a tensor along an axis, like `numpy.repeat` .
 
     Args:
         x (Tensor): The tensor to repeat values for. Must be of type: float16,
@@ -7493,34 +6358,19 @@ def repeat_elements(x, rep, axis=0):
     const_utils.check_type_valid(ops.dtype(x), mstype.number_type, 'input x')
     rep = _check_positive_int(rep, "rep", "repeat_elements")
     axis = _check_is_int(axis, "axis", "repeat_elements")
-    shape_op = P.Shape()
-    rank_op = P.Rank()
-    tile_op = P.Tile()
-    expand_dims_op = P.ExpandDims()
-    reshape_op = P.Reshape()
-    x_rank = rank_op(x)
+    x_rank = rank_(x)
     axis = _check_axis_range(axis, x_rank, "axis", "repeat_elements")
+    axis = axis + x.ndim if axis < 0 else axis
     expand_axis = axis + 1
-    x_expand = expand_dims_op(x, expand_axis)
+    x_expand = expand_dims(x, expand_axis)
     rep_dims = _cal_repeat_dims(x_rank, rep, expand_axis)
-    x_expand = tile_op(x_expand, rep_dims)
-    x_shape = shape_op(x)
+    x_expand = tile_(x_expand, rep_dims)
+    x_shape = shape_(x)
     x_reshape = _cal_reshape(x_shape, rep, axis)
-    x_rep = reshape_op(x_expand, x_reshape)
+    x_rep = reshape_(x_expand, x_reshape)
     return x_rep
 
 
-@_primexpr
-def _check_sequence_mask_input_len(input_shape, prim_name=None):
-    msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
-    if not input_shape:
-        raise ValueError(f"{msg_prefix} input_shape must be greater than 0, but got {input_shape}.")
-    # broadcast only supports 7d shape
-    shape_size = len(input_shape)
-    if shape_size >= 7:
-        raise ValueError(f"{msg_prefix} dimension of input_shape must be less than 7, but got {shape_size}d.")
-
-
 def sequence_mask(lengths, maxlen=None):
     """
     Returns a mask tensor representing the first N positions of each cell.
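The repeat_elements body above is the usual expand-tile-reshape trick. A NumPy rendering of the same three steps, as a sketch (values illustrative):

```python
import numpy as np

x = np.array([[1, 2], [3, 4]])          # repeat along axis 0, rep = 2
x_expand = np.expand_dims(x, 1)         # (2, 1, 2): new axis right after `axis`
x_tiled = np.tile(x_expand, (1, 2, 1))  # (2, 2, 2): copy rep times on that axis
out = x_tiled.reshape(4, 2)             # collapse: each row now appears twice
assert (out == np.repeat(x, 2, axis=0)).all()
```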
@@ -7573,29 +6423,19 @@ def sequence_mask(lengths, maxlen=None):
         [[ True True False False ]
          [ True True True True ]]]
     """
-
-    argmax_op = P.ArgMaxWithValue()
-    reshape_op = P.Reshape()
-    range_op = P.Range()
-    expand_op = P.ExpandDims()
-    cast_op = P.Cast()
-    to_tensor_op = P.ScalarToTensor()
-    shape_op = P.Shape()
-
     const_utils.check_type_valid(ops.dtype(lengths), [mstype.int64, mstype.int32], 'lengths')
-    _check_sequence_mask_input_len(shape_op(lengths), "sequence_mask")
 
     if maxlen is None:
-        flatten_data = reshape_op(lengths, (-1,))
-        flatten_data = cast_op(flatten_data, mstype.float32)
-        _, value = argmax_op(flatten_data)
-        maxlen = cast_op(value, mstype.int32)
+        flatten_data = reshape_(lengths, (-1,))
+        flatten_data = cast_(flatten_data, mstype.float32)
+        _, value = arg_max_with_value_(flatten_data)
+        maxlen = cast_(value, mstype.int32)
     else:
         maxlen = _check_positive_int(maxlen, "maxlen", "sequence_mask")
-        maxlen = to_tensor_op(maxlen, mstype.int32)
+        maxlen = scalar_to_tensor_(maxlen, mstype.int32)
 
-    range_vector = range_op(to_tensor_op(0, mstype.int32), maxlen, to_tensor_op(1, mstype.int32))
-    mask = expand_op(lengths, -1)
+    range_vector = range_(scalar_to_tensor_(0, mstype.int32), maxlen, scalar_to_tensor_(1, mstype.int32))
+    mask = expand_dims(lengths, -1)
     result = range_vector < mask
     return result
 
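After this cleanup, sequence_mask is just a broadcast comparison of a [0, maxlen) range against lengths expanded on a trailing axis (the 7-d guard helper was dropped along with its call site). A NumPy sketch of the comparison:

```python
import numpy as np

lengths = np.array([1, 3, 2])
maxlen = 4
range_vector = np.arange(maxlen)        # [0 1 2 3]
mask = range_vector < lengths[:, None]  # (3, 1) broadcast against (4,)
print(mask)
# [[ True False False False]
#  [ True  True  True False]
#  [ True  True False False]]
```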
@@ -7608,35 +6448,6 @@ def top_k(input_x, k, sorted=True):
     return top_k_(input_x, k)
 
 
-def deepcopy(input_x):
-    """
-    Returns a deepcopy of input tensor.
-
-    Args:
-        input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-
-    Returns:
-        Tensor, a deepcopy of `input_x`.
-
-    Raises:
-        TypeError: If `input_x` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
-        >>> output = ops.deepcopy(input)
-        >>> print(output)
-        [[0 1]
-         [2 1]]
-    """
-    _deepcopy = _get_cache_prim(P.Identity)()
-    return _deepcopy(input_x)
-
-
 __all__ = [
     'unique',
     'unique_with_pad',
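ops.deepcopy (an Identity call under the hood) is removed in this release. An independent copy can still be taken from the Tensor API; a sketch assuming `Tensor.copy()` remains available in 2.3:

```python
import mindspore
from mindspore import Tensor

x = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
y = x.copy()  # independent copy; mutating y leaves x untouched
```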
@@ -7663,8 +6474,8 @@ __all__ = [
     'full_like',
     'dyn_shape',
     'rank',
-    'range',
     'arange',
+    'range',
     'reshape',
     'reshape_',
     'flatten',
  'flatten',