mindspore 2.3.0rc1__cp37-none-any.whl → 2.3.0rc2__cp37-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (316)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +1 -1
  3. mindspore/_akg/akg/utils/tbe_codegen_utils.py +13 -3
  4. mindspore/_c_dataengine.cpython-37m-aarch64-linux-gnu.so +0 -0
  5. mindspore/_c_expression.cpython-37m-aarch64-linux-gnu.so +0 -0
  6. mindspore/_checkparam.py +20 -0
  7. mindspore/_extends/parse/parser.py +1 -1
  8. mindspore/_extends/parse/standard_method.py +6 -5
  9. mindspore/_mindspore_offline_debug.cpython-37m-aarch64-linux-gnu.so +0 -0
  10. mindspore/amp.py +5 -5
  11. mindspore/bin/cache_admin +0 -0
  12. mindspore/bin/cache_server +0 -0
  13. mindspore/boost/boost_cell_wrapper.py +1 -1
  14. mindspore/boost/group_loss_scale_manager.py +1 -1
  15. mindspore/common/__init__.py +4 -2
  16. mindspore/common/_register_for_recompute.py +48 -0
  17. mindspore/common/_stub_tensor.py +1 -0
  18. mindspore/common/api.py +56 -4
  19. mindspore/common/dtype.py +5 -3
  20. mindspore/common/dump.py +2 -2
  21. mindspore/common/hook_handle.py +51 -4
  22. mindspore/common/initializer.py +1 -1
  23. mindspore/common/jit_config.py +17 -6
  24. mindspore/common/parameter.py +7 -2
  25. mindspore/common/recompute.py +247 -0
  26. mindspore/common/sparse_tensor.py +2 -2
  27. mindspore/common/symbol.py +1 -1
  28. mindspore/common/tensor.py +74 -36
  29. mindspore/communication/__init__.py +3 -3
  30. mindspore/communication/management.py +30 -30
  31. mindspore/context.py +28 -15
  32. mindspore/dataset/__init__.py +5 -5
  33. mindspore/dataset/audio/__init__.py +2 -2
  34. mindspore/dataset/audio/transforms.py +51 -51
  35. mindspore/dataset/callback/ds_callback.py +2 -2
  36. mindspore/dataset/engine/cache_client.py +1 -1
  37. mindspore/dataset/engine/datasets.py +3 -3
  38. mindspore/dataset/engine/datasets_audio.py +14 -14
  39. mindspore/dataset/engine/datasets_standard_format.py +3 -3
  40. mindspore/dataset/engine/datasets_text.py +38 -38
  41. mindspore/dataset/engine/datasets_user_defined.py +3 -3
  42. mindspore/dataset/engine/datasets_vision.py +68 -68
  43. mindspore/dataset/text/__init__.py +3 -3
  44. mindspore/dataset/text/transforms.py +26 -26
  45. mindspore/dataset/transforms/__init__.py +1 -1
  46. mindspore/dataset/vision/__init__.py +3 -3
  47. mindspore/dataset/vision/transforms.py +92 -92
  48. mindspore/dataset/vision/utils.py +1 -1
  49. mindspore/experimental/optim/adadelta.py +2 -2
  50. mindspore/experimental/optim/adagrad.py +2 -2
  51. mindspore/experimental/optim/adam.py +2 -2
  52. mindspore/experimental/optim/adamax.py +2 -2
  53. mindspore/experimental/optim/adamw.py +2 -2
  54. mindspore/experimental/optim/asgd.py +2 -2
  55. mindspore/experimental/optim/lr_scheduler.py +24 -20
  56. mindspore/experimental/optim/nadam.py +2 -2
  57. mindspore/experimental/optim/optimizer.py +1 -1
  58. mindspore/experimental/optim/radam.py +2 -2
  59. mindspore/experimental/optim/rmsprop.py +2 -2
  60. mindspore/experimental/optim/rprop.py +2 -2
  61. mindspore/experimental/optim/sgd.py +2 -2
  62. mindspore/hal/stream.py +2 -0
  63. mindspore/include/mindapi/base/types.h +5 -0
  64. mindspore/lib/libdnnl.so.2 +0 -0
  65. mindspore/lib/libmindspore.so +0 -0
  66. mindspore/lib/libmindspore_backend.so +0 -0
  67. mindspore/lib/libmindspore_common.so +0 -0
  68. mindspore/lib/libmindspore_core.so +0 -0
  69. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  70. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  71. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  72. mindspore/lib/libmindspore_shared_lib.so +0 -0
  73. mindspore/lib/libopencv_core.so.4.5 +0 -0
  74. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  75. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6 -6
  76. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  77. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  78. mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
  79. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  80. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
  81. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
  82. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
  83. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +101787 -98559
  84. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
  85. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
  86. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/base/op_register.h +2 -2
  87. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/params/mix.h +8 -1
  88. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/params/norm.h +5 -3
  89. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/params/reduce.h +2 -2
  90. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/backend/backend.h +3 -3
  91. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/backend/rtbackend.h +3 -3
  92. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/base/types.h +0 -1
  93. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/module/module.h +3 -3
  94. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/svector/svector.h +3 -2
  95. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
  96. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
  97. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/tiling/add_tiling.h +9 -9
  98. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +2 -6
  99. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb.h +2 -2
  100. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_base.h +460 -0
  101. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_bf16.h +217 -0
  102. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp16.h +116 -0
  103. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_tiling.h +16 -24
  104. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_value.h +27 -0
  105. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -4
  106. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/FlashAttentionScore_impl.h → flash_attention_score/flash_attention_score_impl.h} +2 -1
  107. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/bs_attention_tiling.h → flash_attention_score/flash_attention_score_tiling.h} +15 -19
  108. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/gelu/tiling/gelu_tiling.h +7 -9
  109. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/lccl/lccl_wrapper.h +58 -0
  110. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +19 -8
  111. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{matmul → matmul_common}/pp_matmul_common_tiling.h +18 -8
  112. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{matmul → matmul_common}/pp_matmul_info.h +7 -4
  113. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{matmul → matmul_common}/tiling_data.h +44 -6
  114. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_utils.h +65 -0
  115. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +10 -6
  116. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +4 -1
  117. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +41 -0
  118. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/PagedAttention_impl.h → paged_attention/paged_attention_impl.h} +1 -1
  119. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +63 -0
  120. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/add_param.h +2 -2
  121. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention_param.h → param/attention_param.h} +11 -2
  122. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +37 -0
  123. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +45 -0
  124. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache/reshape_and_cache_tiling.h +1 -2
  125. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm.h +23 -0
  126. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm_base.h +175 -0
  127. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm_normal.h +276 -0
  128. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm_split_d.h +280 -0
  129. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/tiling_data.h +35 -0
  130. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +45 -0
  131. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/kernel/sub_kernel.h +20 -0
  132. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +47 -0
  133. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_tiling.h +25 -0
  134. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +323 -23
  135. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/types.h +15 -4
  136. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +8 -0
  137. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
  138. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
  139. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
  140. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
  141. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
  142. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
  143. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
  144. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
  145. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
  146. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
  147. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
  148. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
  149. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
  150. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
  151. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
  152. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
  153. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
  154. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
  155. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
  156. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
  157. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
  158. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
  159. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
  160. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
  161. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
  162. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcal.h +22 -0
  163. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcal_comm.h +70 -0
  164. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcal_types.h +103 -0
  165. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lccl.h +47 -0
  166. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lccl_wrapper.h +58 -0
  167. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcoc.h +154 -0
  168. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
  169. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
  170. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  171. mindspore/log.py +2 -2
  172. mindspore/mint/__init__.py +457 -0
  173. mindspore/mint/nn/__init__.py +430 -0
  174. mindspore/mint/nn/functional.py +424 -0
  175. mindspore/mint/optim/__init__.py +24 -0
  176. mindspore/mint/optim/adamw.py +186 -0
  177. mindspore/multiprocessing/__init__.py +4 -0
  178. mindspore/nn/__init__.py +3 -0
  179. mindspore/nn/cell.py +51 -47
  180. mindspore/nn/extend/__init__.py +29 -0
  181. mindspore/nn/extend/basic.py +140 -0
  182. mindspore/nn/extend/embedding.py +143 -0
  183. mindspore/nn/extend/layer/__init__.py +27 -0
  184. mindspore/nn/extend/layer/normalization.py +107 -0
  185. mindspore/nn/extend/pooling.py +117 -0
  186. mindspore/nn/generator.py +297 -0
  187. mindspore/nn/layer/basic.py +109 -1
  188. mindspore/nn/layer/container.py +2 -2
  189. mindspore/nn/layer/conv.py +6 -6
  190. mindspore/nn/layer/embedding.py +1 -1
  191. mindspore/nn/layer/normalization.py +21 -43
  192. mindspore/nn/layer/padding.py +4 -0
  193. mindspore/nn/optim/ada_grad.py +2 -2
  194. mindspore/nn/optim/adadelta.py +1 -1
  195. mindspore/nn/optim/adafactor.py +1 -1
  196. mindspore/nn/optim/adam.py +7 -7
  197. mindspore/nn/optim/adamax.py +2 -2
  198. mindspore/nn/optim/adasum.py +2 -2
  199. mindspore/nn/optim/asgd.py +2 -2
  200. mindspore/nn/optim/ftrl.py +1 -1
  201. mindspore/nn/optim/lamb.py +3 -3
  202. mindspore/nn/optim/lars.py +1 -1
  203. mindspore/nn/optim/lazyadam.py +2 -2
  204. mindspore/nn/optim/momentum.py +2 -2
  205. mindspore/nn/optim/optimizer.py +2 -2
  206. mindspore/nn/optim/proximal_ada_grad.py +2 -2
  207. mindspore/nn/optim/rmsprop.py +2 -2
  208. mindspore/nn/optim/rprop.py +2 -2
  209. mindspore/nn/optim/sgd.py +2 -2
  210. mindspore/nn/optim/thor.py +2 -2
  211. mindspore/nn/wrap/cell_wrapper.py +9 -9
  212. mindspore/nn/wrap/grad_reducer.py +5 -5
  213. mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
  214. mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -2
  215. mindspore/ops/_vmap/vmap_math_ops.py +27 -8
  216. mindspore/ops/_vmap/vmap_nn_ops.py +66 -8
  217. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +73 -1
  218. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +12 -3
  219. mindspore/ops/auto_generate/gen_arg_handler.py +24 -0
  220. mindspore/ops/auto_generate/gen_extend_func.py +274 -0
  221. mindspore/ops/auto_generate/gen_ops_def.py +889 -22
  222. mindspore/ops/auto_generate/gen_ops_prim.py +3541 -253
  223. mindspore/ops/auto_generate/pyboost_inner_prim.py +282 -0
  224. mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -1
  225. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +9 -0
  226. mindspore/ops/extend/__init__.py +9 -1
  227. mindspore/ops/extend/array_func.py +134 -27
  228. mindspore/ops/extend/math_func.py +3 -3
  229. mindspore/ops/extend/nn_func.py +363 -2
  230. mindspore/ops/function/__init__.py +19 -2
  231. mindspore/ops/function/array_func.py +463 -439
  232. mindspore/ops/function/clip_func.py +7 -18
  233. mindspore/ops/function/grad/grad_func.py +5 -5
  234. mindspore/ops/function/linalg_func.py +4 -4
  235. mindspore/ops/function/math_func.py +260 -243
  236. mindspore/ops/function/nn_func.py +825 -62
  237. mindspore/ops/function/random_func.py +73 -4
  238. mindspore/ops/function/sparse_unary_func.py +1 -1
  239. mindspore/ops/function/vmap_func.py +1 -1
  240. mindspore/ops/functional.py +2 -2
  241. mindspore/ops/op_info_register.py +1 -31
  242. mindspore/ops/operations/__init__.py +2 -3
  243. mindspore/ops/operations/_grad_ops.py +2 -107
  244. mindspore/ops/operations/_inner_ops.py +5 -5
  245. mindspore/ops/operations/_sequence_ops.py +2 -2
  246. mindspore/ops/operations/array_ops.py +11 -233
  247. mindspore/ops/operations/comm_ops.py +32 -32
  248. mindspore/ops/operations/custom_ops.py +7 -89
  249. mindspore/ops/operations/manually_defined/ops_def.py +329 -4
  250. mindspore/ops/operations/math_ops.py +13 -163
  251. mindspore/ops/operations/nn_ops.py +9 -316
  252. mindspore/ops/operations/random_ops.py +1 -1
  253. mindspore/ops/operations/sparse_ops.py +3 -3
  254. mindspore/ops/primitive.py +2 -2
  255. mindspore/ops_generate/arg_dtype_cast.py +12 -3
  256. mindspore/ops_generate/arg_handler.py +24 -0
  257. mindspore/ops_generate/gen_ops_inner_prim.py +2 -0
  258. mindspore/ops_generate/gen_pyboost_func.py +13 -6
  259. mindspore/ops_generate/pyboost_utils.py +2 -17
  260. mindspore/parallel/__init__.py +3 -2
  261. mindspore/parallel/_auto_parallel_context.py +106 -1
  262. mindspore/parallel/_parallel_serialization.py +34 -2
  263. mindspore/parallel/_utils.py +16 -0
  264. mindspore/parallel/algo_parameter_config.py +4 -4
  265. mindspore/parallel/checkpoint_transform.py +249 -77
  266. mindspore/parallel/cluster/process_entity/_api.py +1 -1
  267. mindspore/parallel/parameter_broadcast.py +1 -1
  268. mindspore/parallel/shard.py +1 -1
  269. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +1 -0
  270. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +17 -5
  271. mindspore/profiler/parser/ascend_msprof_exporter.py +3 -3
  272. mindspore/profiler/parser/ascend_msprof_generator.py +10 -3
  273. mindspore/profiler/parser/ascend_op_generator.py +26 -9
  274. mindspore/profiler/parser/ascend_timeline_generator.py +7 -4
  275. mindspore/profiler/parser/profiler_info.py +11 -1
  276. mindspore/profiler/profiling.py +13 -5
  277. mindspore/rewrite/api/node.py +12 -12
  278. mindspore/rewrite/api/symbol_tree.py +11 -11
  279. mindspore/run_check/_check_version.py +1 -1
  280. mindspore/safeguard/rewrite_obfuscation.py +2 -2
  281. mindspore/train/amp.py +4 -4
  282. mindspore/train/anf_ir_pb2.py +8 -2
  283. mindspore/train/callback/_backup_and_restore.py +2 -2
  284. mindspore/train/callback/_callback.py +4 -4
  285. mindspore/train/callback/_checkpoint.py +2 -2
  286. mindspore/train/callback/_early_stop.py +2 -2
  287. mindspore/train/callback/_landscape.py +4 -4
  288. mindspore/train/callback/_loss_monitor.py +2 -2
  289. mindspore/train/callback/_on_request_exit.py +2 -2
  290. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  291. mindspore/train/callback/_summary_collector.py +2 -2
  292. mindspore/train/callback/_time_monitor.py +2 -2
  293. mindspore/train/dataset_helper.py +8 -3
  294. mindspore/train/loss_scale_manager.py +2 -2
  295. mindspore/train/metrics/metric.py +3 -3
  296. mindspore/train/mind_ir_pb2.py +22 -17
  297. mindspore/train/model.py +15 -15
  298. mindspore/train/serialization.py +18 -18
  299. mindspore/train/summary/summary_record.py +7 -7
  300. mindspore/train/train_thor/convert_utils.py +3 -3
  301. mindspore/version.py +1 -1
  302. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +1 -1
  303. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +307 -260
  304. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/tiling_data.h +0 -59
  305. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_bf16_BNSD_mix.o +0 -0
  306. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_bf16_BSH_mix.o +0 -0
  307. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_fp16_BNSD_mix.o +0 -0
  308. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_fp16_BSH_mix.o +0 -0
  309. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_bf16_BNSD_mix.o +0 -0
  310. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_bf16_BSH_mix.o +0 -0
  311. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_fp16_BNSD_mix.o +0 -0
  312. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_fp16_BSH_mix.o +0 -0
  313. /mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/bs_attention_mix_hwsync.h → flash_attention_score/kernel/flash_attention_score_mix_hwsync.h} +0 -0
  314. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
  315. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
  316. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
@@ -313,6 +313,38 @@ def apply_rotary_pos_emb_(query, key, cos, sin, position_ids, cos_format=0):
     return apply_rotary_pos_emb_op(query, key, cos, sin, position_ids)
 
 
+def argmax(input, dim=None, keepdim=False):
+    r"""
+    Return the indices of the maximum values of a tensor across a dimension.
+
+    Args:
+        input (Tensor): Input tensor.
+        dim (Union[int, None], optional): The dimension to reduce. If `dim` is ``None`` , the indices of the maximum
+            value within the flattened input will be returned. Default: ``None`` .
+        keepdim (bool, optional): Whether the output tensor retains the specified
+            dimension. Ignored if `dim` is None. Default: ``False`` .
+
+    Returns:
+        Tensor, indices of the maximum values across a dimension.
+
+    Raises:
+        TypeError: If `keepdim` is not bool.
+        ValueError: If `dim` is out of range.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
+        >>> output = ops.argmax(x, dim=-1)
+        >>> print(output)
+        [1 0 0]
+    """
+    return argmax_ext_op(input, dim, keepdim)
+
+
 def asin(input):
     r"""
     Computes arcsine of input tensors element-wise.
@@ -578,6 +610,42 @@ def atanh(input):
     return atanh_op(input)
 
 
+def bmm_ext(input, mat2):
+    r"""
+    Performs batch matrix-matrix multiplication of two three-dimensional tensors.
+
+    .. math::
+        \text{output}[b, i, j] = \sum_{k} \text{input}[b, i, k] \cdot \text{mat2}[b, k, j]
+
+    Args:
+        input (Tensor): The first batch of matrices to be multiplied. Must be a three-dimensional tensor.
+        mat2 (Tensor): The second batch of matrices to be multiplied. Must be a three-dimensional tensor.
+
+    Returns:
+        Tensor, the output tensor of shape `(b, n, p)`, where each matrix is the product of the corresponding matrices in the input batches.
+
+    Raises:
+        TypeError: If `input` or `mat2` is not a three-dimensional tensor.
+        ValueError: If the length of the third dimension of `input` is not equal to the length of the second dimension of `mat2`.
+        ValueError: If the batch sizes of the inputs do not match.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> from mindspore.ops.extend import bmm
+        >>> a = Tensor(np.ones(shape=[2, 3, 4]), mindspore.float32)
+        >>> b = Tensor(np.ones(shape=[2, 4, 5]), mindspore.float32)
+        >>> output = bmm(a, b)
+        >>> print(output.shape)
+        (2, 3, 5)
+    """
+    return bmm_ext_op(input, mat2)
+
+
 def broadcast_to(input, shape):
     r"""
     Broadcasts input tensor to a given shape. The dim of input shape must be smaller
@@ -784,6 +852,120 @@ def celu(x, alpha=1.0):
784
852
  return celu_op(x)
785
853
 
786
854
 
855
+ def clamp_scalar(input, min=None, max=None):
856
+ r"""
857
+ Clamps tensor values between the specified minimum value and maximum value.
858
+
859
+ Limits the value of :math:`input` to a range, whose lower limit is `min` and upper limit is `max` .
860
+
861
+ .. math::
862
+
863
+ out_i= \left\{
864
+ \begin{array}{align}
865
+ max & \text{ if } input_i\ge max \\
866
+ input_i & \text{ if } min \lt input_i \lt max \\
867
+ min & \text{ if } input_i \le min \\
868
+ \end{array}\right.
869
+
870
+ Note:
871
+ - `min` and `max` cannot be None at the same time;
872
+ - When `min` is None and `max` is not None, the elements in Tensor larger than `max` will become `max`;
873
+ - When `min` is not None and `max` is None, the elements in Tensor smaller than `min` will become `min`;
874
+ - If `min` is greater than `max`, the value of all elements in Tensor will be set to `max`;
875
+ - The data type of `input`, `min` and `max` should support implicit type conversion and cannot be bool type.
876
+
877
+ Args:
878
+ input (Tensor): Input data, which type is Tensor. Tensors of arbitrary dimensions are supported.
879
+ min (Union(float, int), optional): The minimum value. Default: ``None`` .
880
+ max (Union(float, int), optional): The maximum value. Default: ``None`` .
881
+
882
+ Returns:
883
+ Tensor, a clipped Tensor.
884
+ The data type and shape are the same as input.
885
+
886
+ Raises:
887
+ ValueError: If both `min` and `max` are None.
888
+ TypeError: If the type of `input` is not Tensor.
889
+ TypeError: If the type of `min` is not in None, float or int.
890
+ TypeError: If the type of `max` is not in None, float or int.
891
+
892
+ Supported Platforms:
893
+ ``Ascend`` ``GPU`` ``CPU``
894
+
895
+ Examples:
896
+ >>> # case 1: the data type of input is number
897
+ >>> import mindspore
898
+ >>> from mindspore import Tensor
899
+ >>> from mindspore.ops.auto_generate import clamp_scalar
900
+ >>> import numpy as np
901
+ >>> min_value = 5
902
+ >>> max_value = 20
903
+ >>> input = Tensor(np.array([[1., 25., 5., 7.], [4., 11., 6., 21.]]), mindspore.float32)
904
+ >>> output = clamp_scalar(input, min_value, max_value)
905
+ >>> print(output)
906
+ [[ 5. 20. 5. 7.]
907
+ [ 5. 11. 6. 20.]]
908
+ """
909
+ return clamp_scalar_op(input, min, max)
910
+
911
+
912
+ def clamp_tensor(input, min=None, max=None):
913
+ r"""
914
+ Clamps tensor values between the specified minimum value and maximum value.
915
+
916
+ Limits the value of :math:`input` to a range, whose lower limit is `min` and upper limit is `max` .
917
+
918
+ .. math::
919
+
920
+ out_i= \left\{
921
+ \begin{array}{align}
922
+ max & \text{ if } input_i\ge max \\
923
+ input_i & \text{ if } min \lt input_i \lt max \\
924
+ min & \text{ if } input_i \le min \\
925
+ \end{array}\right.
926
+
927
+ Note:
928
+ - `min` and `max` cannot be None at the same time;
929
+ - When `min` is None and `max` is not None, the elements in Tensor larger than `max` will become `max`;
930
+ - When `min` is not None and `max` is None, the elements in Tensor smaller than `min` will become `min`;
931
+ - If `min` is greater than `max`, the value of all elements in Tensor will be set to `max`;
932
+ - The data type of `input`, `min` and `max` should support implicit type conversion and cannot be bool type.
933
+
934
+ Args:
935
+ input (Tensor): Input data, which type is Tensor. Tensors of arbitrary dimensions are supported.
936
+ min (Tensor, optional): The minimum value. Default: ``None`` .
937
+ max (Tensor, optional): The maximum value. Default: ``None`` .
938
+
939
+ Returns:
940
+ Tensor, a clipped Tensor.
941
+ The data type and shape are the same as input.
942
+
943
+ Raises:
944
+ ValueError: If both `min` and `max` are None.
945
+ TypeError: If the type of `input` is not Tensor.
946
+ TypeError: If the type of `min` is not in None, Tensor.
947
+ TypeError: If the type of `max` is not in None, Tensor.
948
+
949
+ Supported Platforms:
950
+ ``Ascend`` ``GPU`` ``CPU``
951
+
952
+ Examples:
953
+ >>> # case 1: the data type of input is Tensor
954
+ >>> import mindspore
955
+ >>> from mindspore import Tensor
956
+ >>> from mindspore.ops.auto_generate import clamp_tensor
957
+ >>> import numpy as np
958
+ >>> min_value = Tensor(5, mindspore.float32)
959
+ >>> max_value = Tensor(20, mindspore.float32)
960
+ >>> input = Tensor(np.array([[1., 25., 5., 7.], [4., 11., 6., 21.]]), mindspore.float32)
961
+ >>> output = clamp_tensor(input, min_value, max_value)
962
+ >>> print(output)
963
+ [[ 5. 20. 5. 7.]
964
+ [ 5. 11. 6. 20.]]
965
+ """
966
+ return clamp_tensor_op(input, min, max)
967
+
968
+
787
969
  def cat(tensors, axis=0):
788
970
  r"""
789
971
  Connect input tensors along with the given axis.
@@ -1145,6 +1327,50 @@ def decoder_k_v_cache(cache, update, valid_seq_len, batch_index, seq_len_axis, n
1145
1327
  return decoder_k_v_cache_op(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len)
1146
1328
 
1147
1329
 
1330
+ def dense(input, weight, bias=None):
1331
+ r"""
1332
+ Applies the dense connected operation to the `input`. The dense function is defined as:
1333
+
1334
+ .. math::
1335
+ output = input * weight^{T} + bias
1336
+
1337
+ .. warning::
1338
+ This is an experimental API that is subject to change or deletion.
1339
+
1340
+ Args:
1341
+ input (Tensor): Input Tensor of shape :math:`(*, in\_channels)`,
1342
+ where :math:`*` means any number of additional dimensions.
1343
+ weight (Tensor): The weight applied to the input.
1344
+ The shape is :math:`(out\_channels, in\_channels)` or :math:`(in\_channels)`.
1345
+ bias (Tensor, optional): Additive biases to the output.
1346
+ The shape is :math:`(out\_channels)` or :math:`()`. Defaults: ``None``, the `bias` is 0.
1347
+
1348
+ Returns:
1349
+ Output whose shape is determined by the shape of the input and the weight.
1350
+
1351
+ Raises:
1352
+ TypeError: If `input` is not Tensor.
1353
+ TypeError: If `weight` is not Tensor.
1354
+ TypeError: If `bias` is not Tensor.
1355
+
1356
+ Supported Platforms:
1357
+ ``Ascend`` ``GPU`` ``CPU``
1358
+
1359
+ Examples:
1360
+ >>> import numpy as np
1361
+ >>> import mindspore
1362
+ >>> from mindspore import Tensor, ops
1363
+ >>> input = Tensor([[-1., 1., 2.], [-3., -3., 1.]], mindspore.float32)
1364
+ >>> weight = Tensor([[-2., -2., -2.], [0., -1., 0.]], mindspore.float32)
1365
+ >>> bias = Tensor([0., 1.], mindspore.float32)
1366
+ >>> output = ops.dense(input, weight, bias)
1367
+ >>> print(output)
1368
+ [[-4. 0.]
1369
+ [10. 4.]]
1370
+ """
1371
+ return dense_op(input, weight, bias)
1372
+
1373
+
1148
1374
  def diag(input):
1149
1375
  r"""
1150
1376
  Constructs a diagonal tensor with a given diagonal values.
@@ -1227,6 +1453,13 @@ def diagonal(input, offset=0, dim1=0, dim2=1):
     return diagonal_op(input)
 
 
+def dot(input, other):
+    r"""
+
+    """
+    return dot_op(input, other)
+
+
 def elu(input_x, alpha=1.0):
     r"""
     Exponential Linear Unit activation function.
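The `dot` wrapper added above currently has an empty docstring. For orientation only, here is a minimal NumPy sketch of the 1-D inner-product semantics such a wrapper conventionally provides; the exact behaviour of `dot_op` is not documented in this diff, so treat this as an assumption rather than a specification.

    import numpy as np

    # Assumed semantics: inner product of two 1-D vectors of equal length.
    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    y = np.array([4.0, 5.0, 6.0], dtype=np.float32)
    print(np.dot(x, y))  # 32.0 = 1*4 + 2*5 + 3*6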
@@ -1344,15 +1577,20 @@ def erf(input):
         input (Tensor): The input tensor of Gaussian error function. :math:`x` in the following formula.
             Supported dtypes:
 
-            - Ascend: float16, float32.
+            - Ascend: float16, float32, int64, bool.
             - GPU/CPU: float16, float32, float64.
 
     Returns:
-        Tensor, has the same shape and dtype as the `input`.
+        Tensor. If the input is int64 or bool, the return value type is float32.
+        Otherwise, the return value type is the same as the input type.
+
 
     Raises:
         TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16, float32 or float64.
+        TypeError: If dtype of `input` is not as follows
+
+            - Ascend: float16, float32, int64, bool.
+            - GPU/CPU: float16, float32, float64.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
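The erf change above extends the accepted dtypes on Ascend and introduces a promotion rule for integer and bool inputs. A minimal sketch of that rule, assuming the call runs on an Ascend device (the diff lists int64/bool support for Ascend only):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([0, 1, 2]), mindspore.int64)
    y = ops.erf(x)   # int64/bool inputs are accepted on Ascend per this diff
    print(y.dtype)   # expected: Float32, as described in the updated docstring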
@@ -1637,6 +1875,13 @@ def fast_gelu(x):
     return fast_gelu_op(x)
 
 
+def ffn_ext(x, weight1, weight2, expertTokens=None, bias1=None, bias2=None, scale=None, offset=None, deqScale1=None, deqScale2=None, antiquant_scale1=None, antiquant_scale2=None, antiquant_offset1=None, antiquant_offset2=None, activation='fastgelu', inner_precise=0):
+    r"""
+
+    """
+    return ffn_ext_impl(x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2, activation, inner_precise)
+
+
 def fft2(input, s=None, dim=(-2, -1), norm=None):
     r"""
     Calculates the two dimensional discrete Fourier transform of `input`.
@@ -1846,6 +2091,42 @@ def fftshift(input, dim=None):
1846
2091
  return fftshift_op(input, dim)
1847
2092
 
1848
2093
 
2094
+ def flatten_ext(input, start_dim=0, end_dim=-1):
2095
+ r"""
2096
+ Flatten a tensor along dimensions from `start_dim` to `end_dim`.
2097
+
2098
+ Args:
2099
+ input (Tensor): The input Tensor.
2100
+
2101
+ Keyword Args:
2102
+ start_dim (int, optional): The first dimension to flatten. Default: ``0`` .
2103
+ end_dim (int, optional): The last dimension to flatten. Default: ``-1`` .
2104
+
2105
+ Returns:
2106
+ Tensor. If no dimensions are flattened, returns the original `input`, otherwise return the flattened Tensor.
2107
+ If `input` is a 0-dimensional Tensor, a 1-dimensional Tensor will be returned.
2108
+
2109
+ Raises:
2110
+ TypeError: If `input` is not a Tensor.
2111
+ TypeError: If `start_dim` or `end_dim` is not int.
2112
+ ValueError: If `start_dim` is greater than `end_dim` after canonicalized.
2113
+ ValueError: If `start_dim` or `end_dim` is not in range of [-input.dim, input.dim-1].
2114
+
2115
+ Supported Platforms:
2116
+ ``Ascend`` ``GPU`` ``CPU``
2117
+
2118
+ Examples:
2119
+ >>> import mindspore
2120
+ >>> import numpy as np
2121
+ >>> from mindspore import Tensor, mint
2122
+ >>> input_x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
2123
+ >>> output = mint.flatten(input_x)
2124
+ >>> print(output.shape)
2125
+ (24,)
2126
+ """
2127
+ return flatten_ext_op(input, start_dim, end_dim)
2128
+
2129
+
1849
2130
  def floor_divide(input, other):
1850
2131
  r"""
1851
2132
  Divides the first input tensor by the second input tensor element-wise and round down to the closest integer.
@@ -2568,12 +2849,20 @@ def irfft(input, n=None, dim=-1, norm=None):
     return irfft_op(input, n, dim, norm)
 
 
-def is_finite(x):
+def isfinite(x):
     r"""
-    Determine which elements are finite for each position.
+    Determine which elements are finite for each position. If elements are not ``NaN`` , ``-INF`` , ``INF``,
+    they are finite.
+
+    .. math::
+
+        out_i = \begin{cases}
+          & \text{ if } x_{i} = \text{Finite},\ \ True \\
+          & \text{ if } x_{i} \ne \text{Finite},\ \ False
+        \end{cases}
 
     Args:
-        - **x** (Tensor) - The input tensor.
+        x (Tensor): The input tensor.
 
     Returns:
         Tensor, has the same shape of input, and the dtype is bool.
@@ -2586,15 +2875,65 @@ def is_finite(x):
 
     Examples:
         >>> import mindspore
+        >>> import numpy as np
        >>> from mindspore import Tensor, ops
-        >>> is_finite = ops.IsFinite()
         >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
-        >>> output = is_finite(x)
+        >>> output = ops.isfinite(x)
         >>> print(output)
         [False True False]
-
+        >>> x = Tensor(2.1, mindspore.float64)
+        >>> output = ops.isfinite(x)
+        >>> print(output)
+        True
+    """
+    return isfinite_op(x)
+
+
+def leaky_relu_ext(input, negative_slope=0.01):
+    r"""
+    leaky_relu activation function. Elements of `input` that are less than 0 are multiplied by `negative_slope` .
+
+    The activation function is defined as:
+
+    .. math::
+        \text{leaky_relu}(input) = \begin{cases}input, &\text{if } input \geq 0; \cr
+        \text{negative\_slope} * input, &\text{otherwise.}\end{cases}
+
+    where :math:`\text{negative\_slope}` represents the `negative_slope` parameter.
+
+    For more details, see `Rectifier Nonlinearities Improve Neural Network Acoustic Models
+    <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`_.
+
+    LeakyReLU Activation Function Graph:
+
+    .. image:: ../images/LeakyReLU.png
+        :align: center
+
+    Args:
+        input (Tensor): The input of leaky_relu is a Tensor of any dimension.
+        negative_slope (Union[int, float]): Slope of the activation function when the element of `input` is less than 0.
+            Default: ``0.01`` .
+
+    Returns:
+        Tensor, has the same type and shape as the `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `negative_slope` is not a float or an int.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> input = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
+        >>> print(mint.leaky_relu(input, negative_slope=0.2))
+        [[-0.2 4. -1.6]
+        [ 2. -1. 9. ]]
     """
-    return is_finite_op(x)
+    return leaky_relu_ext_op(input, negative_slope)
 
 
 def less_equal(input, other):
@@ -2827,6 +3166,13 @@ def masked_fill(input_x, mask, value):
     return masked_fill_op(input_x, mask, value)
 
 
+def matmul_ext(input, mat2):
+    r"""
+
+    """
+    return matmul_ext_op(input, mat2)
+
+
 def matrix_exp(input):
     r"""
     Computes the exponential of a single or a batch of square matrices.
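`matmul_ext` is also added without a docstring. As rough orientation, here is a NumPy sketch of the broadcasting matrix-multiply semantics that a general matmul wrapper typically follows; whether `matmul_ext_op` matches this exactly is an assumption, not something stated in the diff.

    import numpy as np

    a = np.ones((2, 3, 4), dtype=np.float32)  # batch of two 3x4 matrices
    b = np.ones((4, 5), dtype=np.float32)     # broadcast against the batch dimension
    print(np.matmul(a, b).shape)              # (2, 3, 5)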
@@ -2867,6 +3213,15 @@ def matrix_exp(input):
     return matrix_exp_op(input)
 
 
+def max_(input):
+    r"""
+    Calculates the maximum value of the input tensor.
+
+    Also see :func:`mindspore.ops.extend.max`.
+    """
+    return max_op(input)
+
+
 def maximum(input, other):
     r"""
     Computes the maximum of input tensors element-wise.
@@ -2921,6 +3276,102 @@ def maximum(input, other):
2921
3276
  return maximum_op(input, other)
2922
3277
 
2923
3278
 
3279
+ def mean_ext(input, axis=None, keep_dims=False, dtype=None):
3280
+ r"""
3281
+ Reduces all dimension of a tensor by averaging all elements in the dimension, by default.
3282
+ And reduce a dimension of `input` along the specified `axis`. `keep_dims`
3283
+ determines whether the dimensions of the output and input are the same.
3284
+
3285
+ Note:
3286
+ The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
3287
+
3288
+ Args:
3289
+ input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
3290
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
3291
+ axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` ,
3292
+ reduce all dimensions. Only constant value is allowed. Assume the rank of `input` is r,
3293
+ and the value range is [-r,r).
3294
+ keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
3295
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
3296
+ dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
3297
+
3298
+ Returns:
3299
+ Tensor, has the same data type as input tensor.
3300
+
3301
+ - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
3302
+ the output is a 0-D tensor representing the mean of all elements in the input tensor.
3303
+ - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
3304
+ the shape of output is :math:`(x_0, x_2, ..., x_R)`.
3305
+ - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
3306
+ the shape of output is :math:`(x_0, x_3, ..., x_R)`.
3307
+ - If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
3308
+ the shape of output is :math:`(x_0, x_3, ..., x_R)`.
3309
+
3310
+ Raises:
3311
+ TypeError: If `input` is not a Tensor.
3312
+ TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
3313
+ TypeError: If `keep_dims` is not a bool.
3314
+ ValueError: If `axis` is out of range.
3315
+
3316
+ Supported Platforms:
3317
+ ``Ascend`` ``GPU`` ``CPU``
3318
+
3319
+ Examples:
3320
+ >>> import mindspore
3321
+ >>> import numpy as np
3322
+ >>> from mindspore import Tensor, ops
3323
+ >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
3324
+ >>> output = ops.mean(x, 1, keep_dims=True)
3325
+ >>> result = output.shape
3326
+ >>> print(result)
3327
+ (3, 1, 5, 6)
3328
+ >>> # case 1: Reduces a dimension by averaging all elements in the dimension.
3329
+ >>> x = Tensor(np.array([[[2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]],
3330
+ ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
3331
+ ... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
3332
+ ... mindspore.float32)
3333
+ >>> output = ops.mean(x)
3334
+ >>> print(output)
3335
+ 5.0
3336
+ >>> print(output.shape)
3337
+ ()
3338
+ >>> # case 2: Reduces a dimension along the axis 0
3339
+ >>> output = ops.mean(x, 0, True)
3340
+ >>> print(output)
3341
+ [[[4. 4. 4. 4. 4. 4.]
3342
+ [5. 5. 5. 5. 5. 5.]
3343
+ [6. 6. 6. 6. 6. 6.]]]
3344
+ >>> # case 3: Reduces a dimension along the axis 1
3345
+ >>> output = ops.mean(x, 1, True)
3346
+ >>> print(output)
3347
+ [[[2. 2. 2. 2. 2. 2.]]
3348
+ [[5. 5. 5. 5. 5. 5.]]
3349
+ [[8. 8. 8. 8. 8. 8.]]]
3350
+ >>> # case 4: Reduces a dimension along the axis 2
3351
+ >>> output = ops.mean(x, 2, True)
3352
+ >>> print(output)
3353
+ [[[ 2.]
3354
+ [ 2.]
3355
+ [ 2.]]
3356
+ [[ 4.]
3357
+ [ 5.]
3358
+ [ 6.]]
3359
+ [[ 6.]
3360
+ [ 8.]
3361
+ [10.]]]
3362
+ """
3363
+ return mean_ext_op(input, axis, keep_dims, dtype)
3364
+
3365
+
3366
+ def min_(input):
3367
+ r"""
3368
+ Calculates the minimum value of the input tensor.
3369
+
3370
+ Also see :func:`mindspore.ops.extend.min`.
3371
+ """
3372
+ return min_op(input)
3373
+
3374
+
2924
3375
  def minimum(input, other):
2925
3376
  r"""
2926
3377
  Computes the minimum of input tensors element-wise.
@@ -2973,6 +3424,13 @@ def minimum(input, other):
     return minimum_op(input, other)
 
 
+def moe_finalize_routing(expanded_x, x1, x2=None, bias=None, scales=None, expanded_row_idx=None, expanded_expert_idx=None):
+    r"""
+
+    """
+    return moe_finalize_routing_op(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
+
+
 def mul(input, other):
     r"""
     Multiplies two tensors element-wise.
@@ -3023,6 +3481,13 @@ def mul(input, other):
     return mul_op(input, other)
 
 
+def mv(input, vec):
+    r"""
+
+    """
+    return mv_op(input, vec)
+
+
 def neg(input):
     r"""
     Returns a tensor with negative values of the input tensor element-wise.
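`mv` likewise lands with an empty docstring. Assuming the usual matrix-vector product convention (2-D `input`, 1-D `vec`), the expected result looks like this NumPy reference:

    import numpy as np

    m = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)  # shape (2, 2)
    v = np.array([1.0, 1.0], dtype=np.float32)                # shape (2,)
    print(m @ v)  # [3. 7.] -- row-wise inner products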
@@ -3147,6 +3612,44 @@ def not_equal(input, other):
3147
3612
  """
3148
3613
  return not_equal_op(input, other)
3149
3614
 
3615
+ ones_op=Ones()
3616
+
3617
+ def ones(shape, dtype=None):
3618
+ r"""
3619
+ Creates a tensor filled with value ones.
3620
+
3621
+ Creates a tensor with shape described by the first argument and fills it with value ones in type of the second
3622
+ argument.
3623
+
3624
+ .. warning::
3625
+ For argument `shape`, Tensor type input will be deprecated in the future version.
3626
+
3627
+ Args:
3628
+ shape (Union[tuple[int], list[int], int, Tensor]): The specified shape of output tensor. Only positive integer or
3629
+ tuple or Tensor containing positive integers are allowed. If it is a Tensor,
3630
+ it must be a 0-D or 1-D Tensor with int32 or int64 dtypes.
3631
+ dtype (:class:`mindspore.dtype`): The specified type of output tensor. If `dtype` is ``None`` ,
3632
+ `mindspore.float32` will be used. Default: ``None`` .
3633
+
3634
+ Returns:
3635
+ Tensor, whose dtype and size are defined by input.
3636
+
3637
+ Raises:
3638
+ TypeError: If `shape` is neither an int nor a tuple/list/Tensor of int.
3639
+
3640
+ Supported Platforms:
3641
+ ``Ascend`` ``GPU`` ``CPU``
3642
+
3643
+ Examples:
3644
+ >>> import mindspore
3645
+ >>> from mindspore import ops
3646
+ >>> output = ops.ones((2, 2), mindspore.float32)
3647
+ >>> print(output)
3648
+ [[1. 1.]
3649
+ [1. 1.]]
3650
+ """
3651
+ return ones_op(shape, dtype)
3652
+
3150
3653
 
3151
3654
  def paged_attention_mask(query, key_cache, value_cache, block_tables, context_lens, alibi_mask, head_num, scale_value, kv_head_num):
3152
3655
  r"""
@@ -3410,8 +3913,7 @@ def quant_batch_matmul(x1, x2, scale, offset=None, bias=None, transpose_x1=False
     r"""
 
     """
-    quant_batch_matmul_op = _get_cache_prim(QuantBatchMatmul)(transpose_x1, transpose_x2, dtype)
-    return quant_batch_matmul_op(x1, x2, scale, offset, bias)
+    return quant_batch_matmul_impl(x1, x2, scale, offset, bias, transpose_x1, transpose_x2, dtype)
 
 
 def randperm(n, seed=0, offset=0, dtype=mstype.int64):
@@ -3535,6 +4037,66 @@ def real(input):
3535
4037
  return real_op(input)
3536
4038
 
3537
4039
 
4040
+ def all(input, axis=None, keep_dims=False):
4041
+ r"""
4042
+ Reduces a dimension of `input` by the "logical AND" of all elements in the dimension, by default. And also can
4043
+ reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
4044
+ same by controlling `keep_dims`.
4045
+
4046
+ Note:
4047
+ The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
4048
+
4049
+ Args:
4050
+ input (Tensor): Input Tensor, has the shape :math:`(N, *)` where :math:`*` means,
4051
+ any number of additional dimensions.
4052
+ axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
4053
+ Suppose the rank of `input` is r, `axis` must be in the range [-rank(input), rank(input)).
4054
+ Default: ``None`` , all dimensions are reduced.
4055
+ keep_dims (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
4056
+ If ``False`` , don't keep these dimensions. Default : ``False`` .
4057
+
4058
+ Returns:
4059
+ Tensor, the dtype is bool.
4060
+
4061
+ - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
4062
+ the output is a 0-D Tensor representing the "logical AND" of all elements in the input Tensor.
4063
+ - If `axis` is int, such as 2, and `keep_dims` is ``False`` ,
4064
+ the shape of output is :math:`(input_1, input_3, ..., input_R)`.
4065
+ - If `axis` is tuple(int), such as (2, 3), and `keep_dims` is ``False`` ,
4066
+ the shape of output is :math:`(input_1, input_4, ..., input_R)`.
4067
+ - If `axis` is 1-D Tensor, such as [2, 3], and `keep_dims` is ``False`` ,
4068
+ the shape of output is :math:`(input_1, input_4, ..., input_R)`.
4069
+
4070
+ Raises:
4071
+ TypeError: If `keep_dims` is not a bool.
4072
+ TypeError: If `input` is not a Tensor.
4073
+ TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
4074
+
4075
+ Supported Platforms:
4076
+ ``Ascend`` ``GPU`` ``CPU``
4077
+
4078
+ Examples:
4079
+ >>> import numpy as np
4080
+ >>> from mindspore import Tensor, ops
4081
+ >>> x = Tensor(np.array([[True, False], [True, True]]))
4082
+ >>> # case 1: Reduces a dimension by the "logicalAND" of all elements in the dimension.
4083
+ >>> output = ops.all(x, keep_dims=True)
4084
+ >>> print(output)
4085
+ [[False]]
4086
+ >>> print(output.shape)
4087
+ (1, 1)
4088
+ >>> # case 2: Reduces a dimension along axis 0.
4089
+ >>> output = ops.all(x, axis=0)
4090
+ >>> print(output)
4091
+ [ True False]
4092
+ >>> # case 3: Reduces a dimension along axis 1.
4093
+ >>> output = ops.all(x, axis=1)
4094
+ >>> print(output)
4095
+ [False True]
4096
+ """
4097
+ return reduce_all_impl(input, axis, keep_dims)
4098
+
4099
+
3538
4100
  def relu6(x):
3539
4101
  r"""
3540
4102
  Computes ReLU (Rectified Linear Unit) upper bounded by 6 of input tensors element-wise.
@@ -3619,6 +4181,13 @@ def relu(input):
     return relu_op(input)
 
 
+def repeat_interleave(input, repeats, axis=None, output_size=None):
+    r"""
+
+    """
+    return repeat_interleave_op(input, repeats, axis, output_size)
+
+
 def reshape_and_cache(key, value, key_cache, value_cache, slot_mapping):
     r"""
     The ReshapeAndCache is used for updating the block-wise KVCache of transformer network.
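`repeat_interleave` is exposed here with an `output_size` argument but no docstring. A NumPy sketch of the usual element-repetition semantics (assumed; `output_size` is presumably a static-shape hint and is not illustrated):

    import numpy as np

    x = np.array([[1, 2], [3, 4]])
    print(np.repeat(x, 2))          # [1 1 2 2 3 3 4 4] -- flattened, each element twice
    print(np.repeat(x, 2, axis=0))  # each row repeated twice along axis 0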
@@ -3704,20 +4273,20 @@ def reshape(input, shape):
     return reshape_op(input, shape)
 
 
-def reverse(x, axis):
+def flip(input, axis):
     r"""
     Reverses specific dimensions of a tensor.
 
     .. warning::
-        The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "x".
+        The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "input".
 
     Args:
-        x (Tensor): The target tensor.
+        input (Tensor): The target tensor.
             The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
         axis (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.
 
     Outputs:
-        Tensor, has the same shape and type as `x`.
+        Tensor, has the same shape and type as `input`.
 
     Raises:
         TypeError: If `axis` is neither list nor tuple.
@@ -3731,18 +4300,17 @@ def reverse(x, axis):
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
         >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
-        >>> output = ops.reverse(input_x, axis=[1])
+        >>> output = ops.flip(input_x, axis=[1])
         >>> print(output)
         [[4 3 2 1]
         [8 7 6 5]]
         >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
-        >>> output = ops.reverse(input_x, axis=[1, 0])
+        >>> output = ops.flip(input_x, axis=[1, 0])
         >>> print(output)
         [[8 7 6 5]
         [4 3 2 1]]
     """
-    reverse_v2_op = _get_cache_prim(ReverseV2)(axis)
-    return reverse_v2_op(x)
+    return reverse_v2_impl(input, axis)
 
 
 def rfft(input, n=None, dim=-1, norm=None):
@@ -4001,6 +4569,57 @@ def scatter_nd(indices, updates, shape):
4001
4569
  return scatter_nd_op(indices, updates, shape)
4002
4570
 
4003
4571
 
4572
+ def select(condition, input, other):
4573
+ r"""
4574
+ The conditional tensor determines whether the corresponding element in the output must be
4575
+ selected from `input` (if True) or `other` (if False) based on the value of each
4576
+ element.
4577
+
4578
+ It can be defined as:
4579
+
4580
+ .. math::
4581
+ out_i = \begin{cases}
4582
+ input_i, & \text{if } condition_i \\
4583
+ other_i, & \text{otherwise}
4584
+ \end{cases}
4585
+
4586
+ Args:
4587
+ condition (Tensor[bool]): The condition tensor, decides which element is chosen.
4588
+ The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
4589
+ input (Union[Tensor, int, float]): The first Tensor to be selected.
4590
+ If input is a Tensor, its shape should be or be broadcastable to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
+ If input is int or float, it will be cast to int32 or float32, and broadcast to the same shape as `other`.
+ There must be at least one Tensor between `input` and `other`.
+ other (Union[Tensor, int, float]): The second Tensor to be selected.
+ If other is a Tensor, its shape should be or be broadcastable to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
+ If other is int or float, it will be cast to int32 or float32, and broadcast to the same shape as `input`.
+ There must be at least one Tensor between `input` and `other`.
4597
+
4598
+ Returns:
4599
+ Tensor, has the same shape as `condition`.
4600
+
4601
+ Raises:
4602
+ TypeError: If input or other is not a Tensor.
4603
+ ValueError: The shape of inputs cannot be broadcast.
4604
+
4605
+ Supported Platforms:
4606
+ ``Ascend`` ``GPU`` ``CPU``
4607
+
4608
+ Examples:
4609
+ >>> import mindspore
4610
+ >>> from mindspore import Tensor, ops
4611
+ >>> # Both inputs are Tensor
4612
+ >>> cond = Tensor([True, False])
4613
+ >>> x = Tensor([2,3], mindspore.float32)
4614
+ >>> y = Tensor([1,2], mindspore.float32)
4615
+ >>> output = ops.select(cond, x, y)
4616
+ >>> print(output)
4617
+ [2. 2.]
4618
+
4619
+ """
4620
+ return select_op(condition, input, other)
4621
+
4622
+
4004
4623
  def sequence_concat(x, axis=0):
4005
4624
  r"""
4006
4625
  Support sequence Concat operation.
@@ -4214,6 +4833,13 @@ def sinh(input):
     return sinh_op(input)
 
 
+def softplus_ext(input, beta=1, threshold=20):
+    r"""
+
+    """
+    return softplus_ext_op(input, beta, threshold)
+
+
 def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False):
     r"""
     Solve the linear system :math:`a x = b` for `x`, Assuming `a` is a triangular matrix.
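`softplus_ext(input, beta=1, threshold=20)` has no docstring yet. Assuming it follows the common softplus convention, softplus(x) = (1/beta) * log(1 + exp(beta * x)), with a linear fallback once beta * x exceeds `threshold` to avoid overflow (an assumption, since the wrapper is undocumented here), a NumPy reference sketch:

    import numpy as np

    def softplus_reference(x, beta=1.0, threshold=20.0):
        # Assumed semantics: smooth approximation of ReLU, linear above the threshold.
        x = np.asarray(x, dtype=np.float32)
        return np.where(beta * x > threshold, x, np.log1p(np.exp(beta * x)) / beta)

    print(softplus_reference([-1.0, 0.0, 1.0, 30.0]))  # ~[0.3133 0.6931 1.3133 30.]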
@@ -4335,6 +4961,46 @@ def square(input):
4335
4961
  return square_op(input)
4336
4962
 
4337
4963
 
4964
+ def stack_ext(tensors, dim=0):
+ r"""
+ Stacks a list of tensors along the specified dim.
+
+ Stacks the list of input tensors, each of the same rank `R`, into an output tensor of rank `(R+1)`.
+
+ Given `N` input tensors, each of shape :math:`(x_1, x_2, ..., x_R)`.
+ If :math:`dim \ge 0`, the shape of the output tensor is
+ :math:`(x_1, x_2, ..., x_{dim}, N, x_{dim+1}, ..., x_R)`.
+
+ Args:
+ tensors (Union[tuple, list]): A tuple or list of Tensor objects with the same shape and type.
+ dim (int): Dimension to stack along. The range is [-(R+1), R+1). Default: ``0`` .
+
+ Returns:
+ Tensor. A stacked Tensor with the same type as the elements of `tensors`.
+
+ Raises:
+ TypeError: If the data types of elements in `tensors` are not the same.
+ ValueError: If the length of `tensors` is not greater than zero;
+ or if `dim` is out of the range [-(R+1), R+1);
+ or if the shapes of elements in `tensors` are not the same.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, mint
+ >>> import numpy as np
+ >>> data1 = Tensor(np.array([0, 1]).astype(np.float32))
+ >>> data2 = Tensor(np.array([2, 3]).astype(np.float32))
+ >>> output = mint.stack([data1, data2], 0)
+ >>> print(output)
+ [[0. 1.]
+ [2. 3.]]
+ """
+ return stack_ext_impl(tensors, dim)
+
+
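As a quick shape check for the `dim` argument described above (a minimal sketch; the values follow directly from the example data):

    import numpy as np
    from mindspore import Tensor, mint

    data1 = Tensor(np.array([0, 1]).astype(np.float32))
    data2 = Tensor(np.array([2, 3]).astype(np.float32))
    # dim=0 inserts the new axis first:  [[0, 1], [2, 3]]
    # dim=1 interleaves the inputs:      [[0, 2], [1, 3]]
    print(mint.stack([data1, data2], 0).shape)  # (2, 2)
    print(mint.stack([data1, data2], 1))
    # [[0. 2.]
    #  [1. 3.]]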
  def strided_slice(input_x, begin, end, strides, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0):
  r"""
  Extracts a strided slice of a Tensor based on `begin/end` index and `strides`.
@@ -4598,6 +5264,52 @@ def sub(input, other):
  return sub_op(input, other)


+ def tanh(input):
+ r"""
+ Computes hyperbolic tangent of input element-wise. The Tanh function is defined as:
+
+ .. math::
+
+ tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
+
+ where :math:`x_i` is an element of the input Tensor.
+
+ Tanh Activation Function Graph:
+
+ .. image:: ../images/Tanh.png
+ :align: center
+
+ Args:
+ input (Tensor): Input of Tanh.
+
+ Returns:
+ Tensor, with the same type and shape as the `input`.
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
+ >>> output = ops.tanh(input)
+ >>> print(output)
+ [0.7615941 0.9640276 0.9950547 0.9993293 0.9999092]
+ """
+ return tanh_op(input)
+
+
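The two equivalent forms in the definition above can be checked numerically against NumPy (a minimal sketch, independent of MindSpore):

    import numpy as np

    x = np.array([1, 2, 3, 4, 5], dtype=np.float32)
    lhs = (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))
    rhs = (np.exp(2 * x) - 1) / (np.exp(2 * x) + 1)
    # Both forms match np.tanh and the docstring output above.
    print(np.allclose(lhs, np.tanh(x)), np.allclose(rhs, np.tanh(x)))  # True True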
+ def topk_ext(input, k, dim=-1, largest=True, sorted=True):
+ r"""
+
+ """
+ return topk_ext_op(input, k, dim, largest, sorted)
+
+
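Editor's note: this docstring is also empty in this release. Assuming the parameters behave like the existing `ops.topk` (the `k` largest entries along `dim`, returned as values and indices) rather than something new, a sketch with the public `ops.topk` for comparison:

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([1.0, 5.0, 3.0, 2.0], dtype=np.float32))
    # ops.topk returns the k largest values and their indices along the last dim;
    # topk_ext is assumed (not confirmed here) to mirror this behaviour.
    values, indices = ops.topk(x, 2)
    print(values)   # [5. 3.]
    print(indices)  # [1 2]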
  def trace(input):
  r"""
  Returns a new tensor that is the sum of the elements on the main diagonal (the trace) of `input`.
@@ -4689,6 +5401,127 @@ def transpose(input, input_perm):
  return transpose_op(input, input_perm)


+ def tril(input, diagonal=0):
+ r"""
+ Returns the lower triangular part of `input` (the elements on and below the specified diagonal),
+ and sets the other elements to zero.
+
+ Args:
+ input (Tensor): A Tensor with shape :math:`(x_1, x_2, ..., x_R)`. The rank must be at least 2.
+ Supporting all number types including bool.
+ diagonal (int, optional): The diagonal to consider. Default: ``0``,
+ indicating the main diagonal.
+
+ Returns:
+ Tensor, the same shape and data type as `input`.
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+ TypeError: If `diagonal` is not an int.
+ TypeError: If the type of `input` is neither number nor bool.
+ ValueError: If the rank of `input` is less than 2.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> x = Tensor(np.array([[ 1, 2, 3, 4],
+ ... [ 5, 6, 7, 8],
+ ... [10, 11, 12, 13],
+ ... [14, 15, 16, 17]]))
+ >>> result = ops.tril(x)
+ >>> print(result)
+ [[ 1 0 0 0]
+ [ 5 6 0 0]
+ [10 11 12 0]
+ [14 15 16 17]]
+ >>> x = Tensor(np.array([[ 1, 2, 3, 4],
+ ... [ 5, 6, 7, 8],
+ ... [10, 11, 12, 13],
+ ... [14, 15, 16, 17]]))
+ >>> result = ops.tril(x, diagonal=1)
+ >>> print(result)
+ [[ 1 2 0 0]
+ [ 5 6 7 0]
+ [10 11 12 13]
+ [14 15 16 17]]
+ >>> x = Tensor(np.array([[ 1, 2, 3, 4],
+ ... [ 5, 6, 7, 8],
+ ... [10, 11, 12, 13],
+ ... [14, 15, 16, 17]]))
+ >>> result = ops.tril(x, diagonal=-1)
+ >>> print(result)
+ [[ 0 0 0 0]
+ [ 5 0 0 0]
+ [10 11 0 0]
+ [14 15 16 0]]
+ """
+ return tril_impl(input, diagonal)
+
+
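A common use of the behaviour documented above is building a lower-triangular (causal) mask; a minimal sketch:

    import numpy as np
    from mindspore import Tensor, ops

    # Position (i, j) is kept only when j <= i.
    mask = ops.tril(Tensor(np.ones((4, 4), dtype=np.float32)))
    print(mask)
    # [[1. 0. 0. 0.]
    #  [1. 1. 0. 0.]
    #  [1. 1. 1. 0.]
    #  [1. 1. 1. 1.]]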
+ def triu(input, diagonal=0):
+ r"""
+ Returns the upper triangular part of `input` (the elements on and above the specified diagonal),
+ and sets the other elements to zero.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): The input tensor with shape :math:`(M, N, *)` where * means any number of additional dimensions.
+ diagonal (int, optional): The diagonal to consider. Default: ``0``,
+ indicating the main diagonal.
+
+ Returns:
+ Tensor, with the same shape and data type as `input`.
+
+ Raises:
+ TypeError: If `diagonal` is not an int.
+ TypeError: If `input` is not a Tensor.
+ ValueError: If the dimension of `input` is less than 2.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> x = Tensor(np.array([[ 1, 2, 3, 4],
+ ... [ 5, 6, 7, 8],
+ ... [10, 11, 12, 13],
+ ... [14, 15, 16, 17]]))
+ >>> result = ops.triu(x)
+ >>> print(result)
+ [[ 1 2 3 4]
+ [ 0 6 7 8]
+ [ 0 0 12 13]
+ [ 0 0 0 17]]
+ >>> x = Tensor(np.array([[ 1, 2, 3, 4],
+ ... [ 5, 6, 7, 8],
+ ... [10, 11, 12, 13],
+ ... [14, 15, 16, 17]]))
+ >>> result = ops.triu(x, diagonal=1)
+ >>> print(result)
+ [[ 0 2 3 4]
+ [ 0 0 7 8]
+ [ 0 0 0 13]
+ [ 0 0 0 0]]
+ >>> x = Tensor(np.array([[ 1, 2, 3, 4],
+ ... [ 5, 6, 7, 8],
+ ... [10, 11, 12, 13],
+ ... [14, 15, 16, 17]]))
+ >>> result = ops.triu(x, diagonal=-1)
+ >>> print(result)
+ [[ 1 2 3 4]
+ [ 5 6 7 8]
+ [ 0 11 12 13]
+ [ 0 0 16 17]]
+ """
+ return triu_impl(input, diagonal)
+
+
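Combining the two functions documented above extracts a band of diagonals; a minimal sketch that keeps only the tridiagonal band:

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.arange(1, 17).reshape(4, 4).astype(np.float32))
    # Keep elements with i - 1 <= j <= i + 1:
    # triu(..., -1) drops everything more than one below the diagonal,
    # tril(..., 1) then drops everything more than one above it.
    band = ops.tril(ops.triu(x, diagonal=-1), diagonal=1)
    print(band)
    # [[ 1.  2.  0.  0.]
    #  [ 5.  6.  7.  0.]
    #  [ 0. 10. 11. 12.]
    #  [ 0.  0. 15. 16.]]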
  def tuple_to_tensor(input_tuple, dtype=None):
  r"""

@@ -4782,5 +5615,39 @@ def weight_quant_batch_matmul(x, weight, antiquant_scale, antiquant_offset=None,
  r"""

  """
- weight_quant_batch_matmul_op = _get_cache_prim(WeightQuantBatchMatmul)(transpose_x, transpose_weight, antiquant_group_size)
- return weight_quant_batch_matmul_op(x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias)
+ return weight_quant_batch_matmul_impl(x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, transpose_x, transpose_weight, antiquant_group_size)
+
+ zeros_op=Zeros()
+
+ def zeros(size, dtype=None):
+ r"""
+ Creates a tensor of shape `size`, filled with the value 0 and with data type `dtype`.
+
+ .. warning::
+ For argument `size`, Tensor type input will be deprecated in a future version.
+
+ Args:
+ size (Union[tuple[int], list[int], int, Tensor]): The specified shape of the output tensor. Only positive integers or
+ tuples/lists/Tensors containing positive integers are allowed. If it is a Tensor,
+ it must be a 0-D or 1-D Tensor with int32 or int64 dtype.
+ dtype (:class:`mindspore.dtype`, optional): The specified type of the output tensor. If `dtype` is ``None`` ,
+ mindspore.float32 will be used. Default: ``None`` .
+
+ Returns:
+ Tensor, whose shape and dtype are defined by `size` and `dtype`.
+
+ Raises:
+ TypeError: If `size` is neither an int nor a tuple/list/Tensor of int.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import ops
+ >>> output = ops.zeros((2, 2), mindspore.float32)
+ >>> print(output)
+ [[0. 0.]
+ [0. 0.]]
+ """
+ return zeros_op(size, dtype)
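Following the default described above (`dtype=None` falls back to mindspore.float32), a minimal sketch with an int `size`:

    import mindspore
    from mindspore import ops

    out = ops.zeros(3)              # 1-D tensor of length 3, default dtype float32
    print(out)                      # [0. 0. 0.]
    print(out.dtype)                # Float32
    out_int = ops.zeros((2,), mindspore.int32)
    print(out_int)                  # [0 0]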