mindspore 2.3.0rc1__cp37-none-any.whl → 2.3.0rc2__cp37-none-any.whl

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (316)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +1 -1
  3. mindspore/_akg/akg/utils/tbe_codegen_utils.py +13 -3
  4. mindspore/_c_dataengine.cpython-37m-aarch64-linux-gnu.so +0 -0
  5. mindspore/_c_expression.cpython-37m-aarch64-linux-gnu.so +0 -0
  6. mindspore/_checkparam.py +20 -0
  7. mindspore/_extends/parse/parser.py +1 -1
  8. mindspore/_extends/parse/standard_method.py +6 -5
  9. mindspore/_mindspore_offline_debug.cpython-37m-aarch64-linux-gnu.so +0 -0
  10. mindspore/amp.py +5 -5
  11. mindspore/bin/cache_admin +0 -0
  12. mindspore/bin/cache_server +0 -0
  13. mindspore/boost/boost_cell_wrapper.py +1 -1
  14. mindspore/boost/group_loss_scale_manager.py +1 -1
  15. mindspore/common/__init__.py +4 -2
  16. mindspore/common/_register_for_recompute.py +48 -0
  17. mindspore/common/_stub_tensor.py +1 -0
  18. mindspore/common/api.py +56 -4
  19. mindspore/common/dtype.py +5 -3
  20. mindspore/common/dump.py +2 -2
  21. mindspore/common/hook_handle.py +51 -4
  22. mindspore/common/initializer.py +1 -1
  23. mindspore/common/jit_config.py +17 -6
  24. mindspore/common/parameter.py +7 -2
  25. mindspore/common/recompute.py +247 -0
  26. mindspore/common/sparse_tensor.py +2 -2
  27. mindspore/common/symbol.py +1 -1
  28. mindspore/common/tensor.py +74 -36
  29. mindspore/communication/__init__.py +3 -3
  30. mindspore/communication/management.py +30 -30
  31. mindspore/context.py +28 -15
  32. mindspore/dataset/__init__.py +5 -5
  33. mindspore/dataset/audio/__init__.py +2 -2
  34. mindspore/dataset/audio/transforms.py +51 -51
  35. mindspore/dataset/callback/ds_callback.py +2 -2
  36. mindspore/dataset/engine/cache_client.py +1 -1
  37. mindspore/dataset/engine/datasets.py +3 -3
  38. mindspore/dataset/engine/datasets_audio.py +14 -14
  39. mindspore/dataset/engine/datasets_standard_format.py +3 -3
  40. mindspore/dataset/engine/datasets_text.py +38 -38
  41. mindspore/dataset/engine/datasets_user_defined.py +3 -3
  42. mindspore/dataset/engine/datasets_vision.py +68 -68
  43. mindspore/dataset/text/__init__.py +3 -3
  44. mindspore/dataset/text/transforms.py +26 -26
  45. mindspore/dataset/transforms/__init__.py +1 -1
  46. mindspore/dataset/vision/__init__.py +3 -3
  47. mindspore/dataset/vision/transforms.py +92 -92
  48. mindspore/dataset/vision/utils.py +1 -1
  49. mindspore/experimental/optim/adadelta.py +2 -2
  50. mindspore/experimental/optim/adagrad.py +2 -2
  51. mindspore/experimental/optim/adam.py +2 -2
  52. mindspore/experimental/optim/adamax.py +2 -2
  53. mindspore/experimental/optim/adamw.py +2 -2
  54. mindspore/experimental/optim/asgd.py +2 -2
  55. mindspore/experimental/optim/lr_scheduler.py +24 -20
  56. mindspore/experimental/optim/nadam.py +2 -2
  57. mindspore/experimental/optim/optimizer.py +1 -1
  58. mindspore/experimental/optim/radam.py +2 -2
  59. mindspore/experimental/optim/rmsprop.py +2 -2
  60. mindspore/experimental/optim/rprop.py +2 -2
  61. mindspore/experimental/optim/sgd.py +2 -2
  62. mindspore/hal/stream.py +2 -0
  63. mindspore/include/mindapi/base/types.h +5 -0
  64. mindspore/lib/libdnnl.so.2 +0 -0
  65. mindspore/lib/libmindspore.so +0 -0
  66. mindspore/lib/libmindspore_backend.so +0 -0
  67. mindspore/lib/libmindspore_common.so +0 -0
  68. mindspore/lib/libmindspore_core.so +0 -0
  69. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  70. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  71. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  72. mindspore/lib/libmindspore_shared_lib.so +0 -0
  73. mindspore/lib/libopencv_core.so.4.5 +0 -0
  74. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  75. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6 -6
  76. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  77. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  78. mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
  79. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  80. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
  81. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
  82. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
  83. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +101787 -98559
  84. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
  85. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
  86. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/base/op_register.h +2 -2
  87. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/params/mix.h +8 -1
  88. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/params/norm.h +5 -3
  89. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/params/reduce.h +2 -2
  90. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/backend/backend.h +3 -3
  91. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/backend/rtbackend.h +3 -3
  92. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/base/types.h +0 -1
  93. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/module/module.h +3 -3
  94. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/svector/svector.h +3 -2
  95. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
  96. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
  97. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/tiling/add_tiling.h +9 -9
  98. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +2 -6
  99. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb.h +2 -2
  100. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_base.h +460 -0
  101. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_bf16.h +217 -0
  102. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp16.h +116 -0
  103. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_tiling.h +16 -24
  104. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_value.h +27 -0
  105. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -4
  106. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/FlashAttentionScore_impl.h → flash_attention_score/flash_attention_score_impl.h} +2 -1
  107. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/bs_attention_tiling.h → flash_attention_score/flash_attention_score_tiling.h} +15 -19
  108. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/gelu/tiling/gelu_tiling.h +7 -9
  109. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/lccl/lccl_wrapper.h +58 -0
  110. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +19 -8
  111. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{matmul → matmul_common}/pp_matmul_common_tiling.h +18 -8
  112. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{matmul → matmul_common}/pp_matmul_info.h +7 -4
  113. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{matmul → matmul_common}/tiling_data.h +44 -6
  114. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_utils.h +65 -0
  115. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +10 -6
  116. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +4 -1
  117. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +41 -0
  118. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/PagedAttention_impl.h → paged_attention/paged_attention_impl.h} +1 -1
  119. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +63 -0
  120. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/add_param.h +2 -2
  121. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention_param.h → param/attention_param.h} +11 -2
  122. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +37 -0
  123. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +45 -0
  124. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache/reshape_and_cache_tiling.h +1 -2
  125. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm.h +23 -0
  126. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm_base.h +175 -0
  127. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm_normal.h +276 -0
  128. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm_split_d.h +280 -0
  129. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/tiling_data.h +35 -0
  130. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +45 -0
  131. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/kernel/sub_kernel.h +20 -0
  132. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +47 -0
  133. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_tiling.h +25 -0
  134. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +323 -23
  135. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/types.h +15 -4
  136. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +8 -0
  137. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
  138. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
  139. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
  140. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
  141. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
  142. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
  143. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
  144. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
  145. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
  146. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
  147. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
  148. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
  149. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
  150. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
  151. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
  152. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
  153. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
  154. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
  155. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
  156. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
  157. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
  158. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
  159. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
  160. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
  161. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
  162. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcal.h +22 -0
  163. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcal_comm.h +70 -0
  164. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcal_types.h +103 -0
  165. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lccl.h +47 -0
  166. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lccl_wrapper.h +58 -0
  167. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcoc.h +154 -0
  168. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
  169. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
  170. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  171. mindspore/log.py +2 -2
  172. mindspore/mint/__init__.py +457 -0
  173. mindspore/mint/nn/__init__.py +430 -0
  174. mindspore/mint/nn/functional.py +424 -0
  175. mindspore/mint/optim/__init__.py +24 -0
  176. mindspore/mint/optim/adamw.py +186 -0
  177. mindspore/multiprocessing/__init__.py +4 -0
  178. mindspore/nn/__init__.py +3 -0
  179. mindspore/nn/cell.py +51 -47
  180. mindspore/nn/extend/__init__.py +29 -0
  181. mindspore/nn/extend/basic.py +140 -0
  182. mindspore/nn/extend/embedding.py +143 -0
  183. mindspore/nn/extend/layer/__init__.py +27 -0
  184. mindspore/nn/extend/layer/normalization.py +107 -0
  185. mindspore/nn/extend/pooling.py +117 -0
  186. mindspore/nn/generator.py +297 -0
  187. mindspore/nn/layer/basic.py +109 -1
  188. mindspore/nn/layer/container.py +2 -2
  189. mindspore/nn/layer/conv.py +6 -6
  190. mindspore/nn/layer/embedding.py +1 -1
  191. mindspore/nn/layer/normalization.py +21 -43
  192. mindspore/nn/layer/padding.py +4 -0
  193. mindspore/nn/optim/ada_grad.py +2 -2
  194. mindspore/nn/optim/adadelta.py +1 -1
  195. mindspore/nn/optim/adafactor.py +1 -1
  196. mindspore/nn/optim/adam.py +7 -7
  197. mindspore/nn/optim/adamax.py +2 -2
  198. mindspore/nn/optim/adasum.py +2 -2
  199. mindspore/nn/optim/asgd.py +2 -2
  200. mindspore/nn/optim/ftrl.py +1 -1
  201. mindspore/nn/optim/lamb.py +3 -3
  202. mindspore/nn/optim/lars.py +1 -1
  203. mindspore/nn/optim/lazyadam.py +2 -2
  204. mindspore/nn/optim/momentum.py +2 -2
  205. mindspore/nn/optim/optimizer.py +2 -2
  206. mindspore/nn/optim/proximal_ada_grad.py +2 -2
  207. mindspore/nn/optim/rmsprop.py +2 -2
  208. mindspore/nn/optim/rprop.py +2 -2
  209. mindspore/nn/optim/sgd.py +2 -2
  210. mindspore/nn/optim/thor.py +2 -2
  211. mindspore/nn/wrap/cell_wrapper.py +9 -9
  212. mindspore/nn/wrap/grad_reducer.py +5 -5
  213. mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
  214. mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -2
  215. mindspore/ops/_vmap/vmap_math_ops.py +27 -8
  216. mindspore/ops/_vmap/vmap_nn_ops.py +66 -8
  217. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +73 -1
  218. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +12 -3
  219. mindspore/ops/auto_generate/gen_arg_handler.py +24 -0
  220. mindspore/ops/auto_generate/gen_extend_func.py +274 -0
  221. mindspore/ops/auto_generate/gen_ops_def.py +889 -22
  222. mindspore/ops/auto_generate/gen_ops_prim.py +3541 -253
  223. mindspore/ops/auto_generate/pyboost_inner_prim.py +282 -0
  224. mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -1
  225. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +9 -0
  226. mindspore/ops/extend/__init__.py +9 -1
  227. mindspore/ops/extend/array_func.py +134 -27
  228. mindspore/ops/extend/math_func.py +3 -3
  229. mindspore/ops/extend/nn_func.py +363 -2
  230. mindspore/ops/function/__init__.py +19 -2
  231. mindspore/ops/function/array_func.py +463 -439
  232. mindspore/ops/function/clip_func.py +7 -18
  233. mindspore/ops/function/grad/grad_func.py +5 -5
  234. mindspore/ops/function/linalg_func.py +4 -4
  235. mindspore/ops/function/math_func.py +260 -243
  236. mindspore/ops/function/nn_func.py +825 -62
  237. mindspore/ops/function/random_func.py +73 -4
  238. mindspore/ops/function/sparse_unary_func.py +1 -1
  239. mindspore/ops/function/vmap_func.py +1 -1
  240. mindspore/ops/functional.py +2 -2
  241. mindspore/ops/op_info_register.py +1 -31
  242. mindspore/ops/operations/__init__.py +2 -3
  243. mindspore/ops/operations/_grad_ops.py +2 -107
  244. mindspore/ops/operations/_inner_ops.py +5 -5
  245. mindspore/ops/operations/_sequence_ops.py +2 -2
  246. mindspore/ops/operations/array_ops.py +11 -233
  247. mindspore/ops/operations/comm_ops.py +32 -32
  248. mindspore/ops/operations/custom_ops.py +7 -89
  249. mindspore/ops/operations/manually_defined/ops_def.py +329 -4
  250. mindspore/ops/operations/math_ops.py +13 -163
  251. mindspore/ops/operations/nn_ops.py +9 -316
  252. mindspore/ops/operations/random_ops.py +1 -1
  253. mindspore/ops/operations/sparse_ops.py +3 -3
  254. mindspore/ops/primitive.py +2 -2
  255. mindspore/ops_generate/arg_dtype_cast.py +12 -3
  256. mindspore/ops_generate/arg_handler.py +24 -0
  257. mindspore/ops_generate/gen_ops_inner_prim.py +2 -0
  258. mindspore/ops_generate/gen_pyboost_func.py +13 -6
  259. mindspore/ops_generate/pyboost_utils.py +2 -17
  260. mindspore/parallel/__init__.py +3 -2
  261. mindspore/parallel/_auto_parallel_context.py +106 -1
  262. mindspore/parallel/_parallel_serialization.py +34 -2
  263. mindspore/parallel/_utils.py +16 -0
  264. mindspore/parallel/algo_parameter_config.py +4 -4
  265. mindspore/parallel/checkpoint_transform.py +249 -77
  266. mindspore/parallel/cluster/process_entity/_api.py +1 -1
  267. mindspore/parallel/parameter_broadcast.py +1 -1
  268. mindspore/parallel/shard.py +1 -1
  269. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +1 -0
  270. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +17 -5
  271. mindspore/profiler/parser/ascend_msprof_exporter.py +3 -3
  272. mindspore/profiler/parser/ascend_msprof_generator.py +10 -3
  273. mindspore/profiler/parser/ascend_op_generator.py +26 -9
  274. mindspore/profiler/parser/ascend_timeline_generator.py +7 -4
  275. mindspore/profiler/parser/profiler_info.py +11 -1
  276. mindspore/profiler/profiling.py +13 -5
  277. mindspore/rewrite/api/node.py +12 -12
  278. mindspore/rewrite/api/symbol_tree.py +11 -11
  279. mindspore/run_check/_check_version.py +1 -1
  280. mindspore/safeguard/rewrite_obfuscation.py +2 -2
  281. mindspore/train/amp.py +4 -4
  282. mindspore/train/anf_ir_pb2.py +8 -2
  283. mindspore/train/callback/_backup_and_restore.py +2 -2
  284. mindspore/train/callback/_callback.py +4 -4
  285. mindspore/train/callback/_checkpoint.py +2 -2
  286. mindspore/train/callback/_early_stop.py +2 -2
  287. mindspore/train/callback/_landscape.py +4 -4
  288. mindspore/train/callback/_loss_monitor.py +2 -2
  289. mindspore/train/callback/_on_request_exit.py +2 -2
  290. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  291. mindspore/train/callback/_summary_collector.py +2 -2
  292. mindspore/train/callback/_time_monitor.py +2 -2
  293. mindspore/train/dataset_helper.py +8 -3
  294. mindspore/train/loss_scale_manager.py +2 -2
  295. mindspore/train/metrics/metric.py +3 -3
  296. mindspore/train/mind_ir_pb2.py +22 -17
  297. mindspore/train/model.py +15 -15
  298. mindspore/train/serialization.py +18 -18
  299. mindspore/train/summary/summary_record.py +7 -7
  300. mindspore/train/train_thor/convert_utils.py +3 -3
  301. mindspore/version.py +1 -1
  302. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +1 -1
  303. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +307 -260
  304. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/tiling_data.h +0 -59
  305. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_bf16_BNSD_mix.o +0 -0
  306. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_bf16_BSH_mix.o +0 -0
  307. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_fp16_BNSD_mix.o +0 -0
  308. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_fp16_BSH_mix.o +0 -0
  309. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_bf16_BNSD_mix.o +0 -0
  310. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_bf16_BSH_mix.o +0 -0
  311. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_fp16_BNSD_mix.o +0 -0
  312. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_fp16_BSH_mix.o +0 -0
  313. /mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/bs_attention_mix_hwsync.h → flash_attention_score/kernel/flash_attention_score_mix_hwsync.h} +0 -0
  314. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
  315. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
  316. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
@@ -24,52 +24,163 @@ from mindspore.ops.auto_generate.gen_arg_dtype_cast import type_it
  from mindspore.ops.auto_generate.gen_arg_handler import *
  from mindspore._c_expression import OpDtype
  from mindspore.common._stub_tensor import _convert_stub
+ from mindspore._c_expression import pyboost_abs
+ from mindspore._c_expression import pyboost_adam_weight_decay_ext
  from mindspore._c_expression import pyboost_add_ext
+ from mindspore._c_expression import pyboost_arange
+ from mindspore._c_expression import pyboost_argmax_ext
  from mindspore._c_expression import pyboost_argmax_with_value
  from mindspore._c_expression import pyboost_argmin_with_value
+ from mindspore._c_expression import pyboost_avg_pool2d_grad
+ from mindspore._c_expression import pyboost_avg_pool2d
+ from mindspore._c_expression import pyboost_batch_mat_mul
+ from mindspore._c_expression import pyboost_batch_norm_ext
+ from mindspore._c_expression import pyboost_batch_norm_grad_ext
+ from mindspore._c_expression import pyboost_bmm_ext
  from mindspore._c_expression import pyboost_broadcast_to
  from mindspore._c_expression import pyboost_cast
  from mindspore._c_expression import pyboost_ceil
+ from mindspore._c_expression import pyboost_chunk
+ from mindspore._c_expression import pyboost_clamp_scalar
+ from mindspore._c_expression import pyboost_clamp_tensor
  from mindspore._c_expression import pyboost_concat
+ from mindspore._c_expression import pyboost_constant_pad_nd
  from mindspore._c_expression import pyboost_contiguous
+ from mindspore._c_expression import pyboost_convolution_grad
+ from mindspore._c_expression import pyboost_convolution
  from mindspore._c_expression import pyboost_copy
  from mindspore._c_expression import pyboost_cos
+ from mindspore._c_expression import pyboost_dense
  from mindspore._c_expression import pyboost_div
+ from mindspore._c_expression import pyboost_divmod
+ from mindspore._c_expression import pyboost_dot
+ from mindspore._c_expression import pyboost_dropout_do_mask_ext
+ from mindspore._c_expression import pyboost_dropout_ext
+ from mindspore._c_expression import pyboost_dropout_gen_mask_ext
+ from mindspore._c_expression import pyboost_dropout_grad_ext
+ from mindspore._c_expression import pyboost_embedding_dense_backward
+ from mindspore._c_expression import pyboost_embedding
  from mindspore._c_expression import pyboost_equal
+ from mindspore._c_expression import pyboost_erf
  from mindspore._c_expression import pyboost_erfinv
  from mindspore._c_expression import pyboost_exp
+ from mindspore._c_expression import pyboost_ffn_ext
+ from mindspore._c_expression import pyboost_fill_scalar
+ from mindspore._c_expression import pyboost_fill_tensor
+ from mindspore._c_expression import pyboost_flash_attention_score_grad
+ from mindspore._c_expression import pyboost_flash_attention_score
+ from mindspore._c_expression import pyboost_flatten_ext
  from mindspore._c_expression import pyboost_gather_d_grad_v2
  from mindspore._c_expression import pyboost_gather_d
  from mindspore._c_expression import pyboost_gelu_grad
  from mindspore._c_expression import pyboost_gelu
  from mindspore._c_expression import pyboost_greater_equal
  from mindspore._c_expression import pyboost_greater
+ from mindspore._c_expression import pyboost_grid_sampler_2d_grad
+ from mindspore._c_expression import pyboost_grid_sampler_2d
+ from mindspore._c_expression import pyboost_grid_sampler_3d_grad
+ from mindspore._c_expression import pyboost_grid_sampler_3d
+ from mindspore._c_expression import pyboost_group_norm_grad
+ from mindspore._c_expression import pyboost_group_norm
+ from mindspore._c_expression import pyboost_isfinite
+ from mindspore._c_expression import pyboost_layer_norm_ext
+ from mindspore._c_expression import pyboost_layer_norm_grad_ext
+ from mindspore._c_expression import pyboost_leaky_relu_ext
+ from mindspore._c_expression import pyboost_leaky_relu_grad_ext
  from mindspore._c_expression import pyboost_less_equal
+ from mindspore._c_expression import pyboost_less
+ from mindspore._c_expression import pyboost_lin_space_ext
  from mindspore._c_expression import pyboost_log
  from mindspore._c_expression import pyboost_logical_and
  from mindspore._c_expression import pyboost_logical_not
  from mindspore._c_expression import pyboost_logical_or
  from mindspore._c_expression import pyboost_masked_fill
+ from mindspore._c_expression import pyboost_matmul_ext
+ from mindspore._c_expression import pyboost_matmul
+ from mindspore._c_expression import pyboost_max
+ from mindspore._c_expression import pyboost_max_pool_grad_with_indices
+ from mindspore._c_expression import pyboost_max_pool_grad_with_mask
+ from mindspore._c_expression import pyboost_max_pool_with_indices
+ from mindspore._c_expression import pyboost_max_pool_with_mask
+ from mindspore._c_expression import pyboost_maximum
+ from mindspore._c_expression import pyboost_mean_ext
+ from mindspore._c_expression import pyboost_min
+ from mindspore._c_expression import pyboost_minimum
  from mindspore._c_expression import pyboost_mul
+ from mindspore._c_expression import pyboost_mv
  from mindspore._c_expression import pyboost_neg
+ from mindspore._c_expression import pyboost_norm
+ from mindspore._c_expression import pyboost_normal_ext
  from mindspore._c_expression import pyboost_not_equal
+ from mindspore._c_expression import pyboost_one_hot_ext
+ from mindspore._c_expression import pyboost_ones_like_ext
+ from mindspore._c_expression import pyboost_ones
+ from mindspore._c_expression import pyboost_pow
+ from mindspore._c_expression import pyboost_prod_ext
+ from mindspore._c_expression import pyboost_quant_batch_matmul
  from mindspore._c_expression import pyboost_reciprocal
+ from mindspore._c_expression import pyboost_reduce_all
  from mindspore._c_expression import pyboost_reduce_any
+ from mindspore._c_expression import pyboost_reflection_pad_1d_grad
+ from mindspore._c_expression import pyboost_reflection_pad_1d
+ from mindspore._c_expression import pyboost_reflection_pad_2d_grad
+ from mindspore._c_expression import pyboost_reflection_pad_2d
+ from mindspore._c_expression import pyboost_reflection_pad_3d_grad
+ from mindspore._c_expression import pyboost_reflection_pad_3d
  from mindspore._c_expression import pyboost_relu_grad
  from mindspore._c_expression import pyboost_relu
+ from mindspore._c_expression import pyboost_repeat_interleave
+ from mindspore._c_expression import pyboost_replication_pad_1d_grad
+ from mindspore._c_expression import pyboost_replication_pad_1d
+ from mindspore._c_expression import pyboost_replication_pad_2d_grad
+ from mindspore._c_expression import pyboost_replication_pad_2d
+ from mindspore._c_expression import pyboost_replication_pad_3d_grad
+ from mindspore._c_expression import pyboost_replication_pad_3d
+ from mindspore._c_expression import pyboost_reverse_v2
+ from mindspore._c_expression import pyboost_rsqrt
+ from mindspore._c_expression import pyboost_scatter_add_ext
  from mindspore._c_expression import pyboost_scatter
+ from mindspore._c_expression import pyboost_select
  from mindspore._c_expression import pyboost_sigmoid_grad
  from mindspore._c_expression import pyboost_sigmoid
  from mindspore._c_expression import pyboost_silu_grad
  from mindspore._c_expression import pyboost_silu
  from mindspore._c_expression import pyboost_sin
+ from mindspore._c_expression import pyboost_slice_ext
  from mindspore._c_expression import pyboost_softmax_backward
  from mindspore._c_expression import pyboost_softmax
+ from mindspore._c_expression import pyboost_softplus_ext
+ from mindspore._c_expression import pyboost_softplus_grad_ext
+ from mindspore._c_expression import pyboost_split_tensor
+ from mindspore._c_expression import pyboost_split_with_size
  from mindspore._c_expression import pyboost_sqrt
  from mindspore._c_expression import pyboost_square
+ from mindspore._c_expression import pyboost_stack_ext
  from mindspore._c_expression import pyboost_sub_ext
+ from mindspore._c_expression import pyboost_sum_ext
+ from mindspore._c_expression import pyboost_tanh_grad
+ from mindspore._c_expression import pyboost_tanh
  from mindspore._c_expression import pyboost_tile
+ from mindspore._c_expression import pyboost_topk_ext
  from mindspore._c_expression import pyboost_transpose
+ from mindspore._c_expression import pyboost_tril
+ from mindspore._c_expression import pyboost_triu
+ from mindspore._c_expression import pyboost_uniform_ext
+ from mindspore._c_expression import pyboost_upsample_bilinear2d_grad
+ from mindspore._c_expression import pyboost_upsample_bilinear2d
+ from mindspore._c_expression import pyboost_upsample_linear1d_grad
+ from mindspore._c_expression import pyboost_upsample_linear1d
+ from mindspore._c_expression import pyboost_upsample_nearest1d_grad
+ from mindspore._c_expression import pyboost_upsample_nearest1d
+ from mindspore._c_expression import pyboost_upsample_nearest2d_grad
+ from mindspore._c_expression import pyboost_upsample_nearest2d
+ from mindspore._c_expression import pyboost_upsample_nearest3d_grad
+ from mindspore._c_expression import pyboost_upsample_nearest3d
+ from mindspore._c_expression import pyboost_upsample_trilinear3d_grad
+ from mindspore._c_expression import pyboost_upsample_trilinear3d
+ from mindspore._c_expression import pyboost_weight_quant_batch_matmul
+ from mindspore._c_expression import pyboost_zeros_like_ext
+ from mindspore._c_expression import pyboost_zeros


  class ACosGrad(Primitive):
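The long run of new `pyboost_*` imports above, and the `Abs.__call__` change in the next hunk, follow one pattern: each generated Primitive now forwards its call to a dedicated pyboost C++ binding and wraps the result with `_convert_stub` (which presumably turns the asynchronous kernel output into a stub Tensor). A minimal sketch of that dispatch shape, using a plain Python stand-in instead of the real `_c_expression` binding, which is only available inside a built wheel:

    # Sketch only: mimics the generated dispatch pattern; not the MindSpore implementation.
    def fake_pyboost_abs(prim, args):
        # The real binding launches the Abs kernel; here we just compute on a float.
        (x,) = args
        return abs(x)

    def convert_stub_sketch(value):
        # The real _convert_stub wraps the kernel output; this stand-in passes it through.
        return value

    class AbsSketch:
        def __call__(self, x):
            # Same shape as the generated code:
            #   return _convert_stub(pyboost_abs(self, [input]))
            return convert_stub_sketch(fake_pyboost_abs(self, [x]))

    print(AbsSketch()(-3.0))  # 3.0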
@@ -125,8 +236,7 @@ class Abs(Primitive):
  pass

  def __call__(self, input):
- return super().__call__(input)
-
+ return _convert_stub(pyboost_abs(self, [input]))

  abs_op=Abs()

@@ -198,6 +308,106 @@ class Acosh(Primitive):
  acosh_op=Acosh()


+ class AdamWeightDecayExt(Primitive):
+ r"""
+ Implements Adam Weight Decay algorithm.
+
+ .. math::
+ \begin{aligned}
+ &\textbf{input} : \gamma \text{(lr)}, \: \beta_1, \beta_2
+ \text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)},
+ \: \epsilon \text{ (epsilon)} \\
+ &\hspace{13mm} \lambda \text{(weight decay)}, \: \textit{amsgrad},
+ \: \textit{maximize} \\
+ &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0
+ \text{ ( second moment)}, \: \widehat{v_0}^{max}\leftarrow 0 \\[-1.ex]
+ &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
+ &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\
+ &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\
+ &\hspace{5mm}\textbf{else} \\
+ &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
+ &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\
+ &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
+ &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\
+ &\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\
+ &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\
+ &\hspace{5mm}\textbf{if} \: amsgrad \\
+ &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
+ \widehat{v_t}) \\
+ &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
+ \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\
+ &\hspace{5mm}\textbf{else} \\
+ &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
+ \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\
+ &\bf{return} \: \theta_t \\[-1.ex]
+ \end{aligned}
+
+ .. warning::
+ This is an experimental optimizer API that is subject to change.
+ This module must be used with lr scheduler module in `LRScheduler Class
+ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.experimental.html#lrscheduler-class>`_ .
+
+ Inputs:
+ - **var** (Parameter) - Weights to be updated. The shape is :math:`(N, *)` where :math:`*` means,
+ any number of additional dimensions. The data type can be float16 or float32.
+ - **m** (Parameter) - The 1st moment vector in the updating formula,
+ it should have the the shape as `var`. The data type can be float16 or float32.
+ - **v** (Parameter) - The 2nd moment vector in the updating formula,
+ it should have the same shape as `m`.
+ - **max_v** (Parameter) - The 2nd moment vector in the updating formula,
+ it should have the same shape as `m`.
+ - **gradient** (Tensor) - Gradient, has the same shape as `var`
+ - **step** (float, int) - step
+ - **lr** (float) - :math:`lr` in the updating formula. The paper suggested value is :math:`10^{-8}`,
+ the data type should be float32.
+ - **beta1** (float) - The exponential decay rate for the 1st moment estimations,
+ the data type should be float32. The paper suggested value is :math:`0.9`
+ - **beta2** (float) - The exponential decay rate for the 2nd moment estimations,
+ the data type should be float32. The paper suggested value is :math:`0.999`
+ - **decay** (float) - weight decay (L2 penalty), must be a scalar tensor with float32 data type.
+ - **eps** (float) - Term added to the denominator to improve numerical stability,
+ the data type should be float32.
+ - **amsgrad** (bool) - whether to use the AMSGrad algorithm. Default: ``False``.
+ - **maximize** (bool) - maximize the params based on the objective, instead of minimizing.
+ Default: ``False``.
+ .
+
+ Outputs:
+ Tuple of 3 Tensor, the updated parameters.
+
+ - **var** (Tensor) - The same shape and data type as `var`.
+ - **m** (Tensor) - The same shape and data type as `m`.
+ - **v** (Tensor) - The same shape and data type as `v`.
+
+ Supported Platforms:
+ ``Ascend``
+ """
+ __mindspore_signature__ = (
+ sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
+ sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
+ sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
+ sig.make_sig('max_v', dtype=sig.sig_dtype.T1),
+ sig.make_sig('gradient', dtype=sig.sig_dtype.T),
+ sig.make_sig('step', dtype=sig.sig_dtype.T2),
+ sig.make_sig('lr', dtype=sig.sig_dtype.T3),
+ sig.make_sig('beta1', dtype=sig.sig_dtype.T3),
+ sig.make_sig('beta2', dtype=sig.sig_dtype.T3),
+ sig.make_sig('decay', dtype=sig.sig_dtype.T3),
+ sig.make_sig('eps', dtype=sig.sig_dtype.T3),
+ sig.make_sig('amsgrad', dtype=sig.sig_dtype.T4, default=False),
+ sig.make_sig('maximize', dtype=sig.sig_dtype.T5, default=False),
+ )
+
+ @prim_arg_register
+ def __init__(self):
+ self.add_prim_attr("side_effect_mem", True)
+
+ def __call__(self, var, m, v, max_v, gradient, step, lr, beta1, beta2, decay, eps, amsgrad=False, maximize=False):
+ return _convert_stub(pyboost_adam_weight_decay_ext(self, [var, m, v, max_v, gradient, step, lr, beta1, beta2, decay, eps, amsgrad, maximize]))
+
+ adam_weight_decay_ext_op=AdamWeightDecayExt()
+
+
  class AdamWeightDecay(Primitive):
  r"""
  Updates gradients by the Adaptive Moment Estimation algorithm with weight decay (AdamWeightDecay).
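For reference, a minimal NumPy sketch of one step of the decoupled-weight-decay update written out in the AdamWeightDecayExt docstring above (without the AMSGrad branch); variable names are illustrative and do not mirror the operator's interface:

    import numpy as np

    def adamw_step(theta, m, v, g, t, lr=1e-3, beta1=0.9, beta2=0.999, decay=1e-2, eps=1e-8):
        # theta_t <- theta_{t-1} - lr * decay * theta_{t-1}  (decoupled weight decay)
        theta = theta - lr * decay * theta
        # moment updates and bias correction, as in the docstring math
        m = beta1 * m + (1 - beta1) * g
        v = beta2 * v + (1 - beta2) * g ** 2
        m_hat = m / (1 - beta1 ** t)
        v_hat = v / (1 - beta2 ** t)
        theta = theta - lr * m_hat / (np.sqrt(v_hat) + eps)
        return theta, m, v

    theta, m, v = np.array([0.5, -0.5]), np.zeros(2), np.zeros(2)
    grad = np.array([0.1, -0.2])
    theta, m, v = adamw_step(theta, m, v, grad, t=1)
    print(theta)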
@@ -302,7 +512,7 @@ class AdamWeightDecay(Primitive):
  sig.make_sig('beta2', dtype=sig.sig_dtype.T2),
  sig.make_sig('epsilon', dtype=sig.sig_dtype.T2),
  sig.make_sig('decay', dtype=sig.sig_dtype.T2),
- sig.make_sig('gradient', dtype=sig.sig_dtype.T3),
+ sig.make_sig('gradient', dtype=sig.sig_dtype.T),
  )

  @prim_arg_register
@@ -830,6 +1040,53 @@ class ApplyRotaryPosEmb(Primitive):
  return super().__call__(query, key, cos, sin, position_ids, self.cos_format)


+ class Arange(Primitive):
+ r"""
+
+ """
+ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T, sig.sig_dtype.T)
+
+ @prim_arg_register
+ def __init__(self):
+ pass
+
+ def __call__(self, start, end, step):
+ return _convert_stub(pyboost_arange(self, [start, end, step]))
+
+ arange_op=Arange()
+
+
+ class ArgMaxExt(Primitive):
+ r"""
+ .. code-block::
+
+ prim = ops.ArgMaxExt()
+ out = prim(input, dim, keepdim)
+
+ is equivalent to
+
+ .. code-block::
+
+ ops.argmax(input, dim, keepdim)
+
+ Refer to :func:`mindspore.ops.argmax` for more details.
+ """
+ __mindspore_signature__ = (
+ sig.make_sig('input'),
+ sig.make_sig('dim', default=None),
+ sig.make_sig('keepdim', default=False),
+ )
+
+ @prim_arg_register
+ def __init__(self):
+ pass
+
+ def __call__(self, input, dim=None, keepdim=False):
+ return _convert_stub(pyboost_argmax_ext(self, [input, dim, keepdim]))
+
+ argmax_ext_op=ArgMaxExt()
+
+
  class Argmax(Primitive):
  r"""
  Returns the indices of the maximum value along a specified `axis` of a Tensor.
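ArgMaxExt is documented above as a thin wrapper over :func:`mindspore.ops.argmax` with `dim`/`keepdim` arguments, and Arange takes `(start, end, step)` positionally. A NumPy sketch of the behaviour those signatures suggest (an assumption about semantics, since the Arange hunk carries no docstring):

    import numpy as np

    x = np.array([[1.0, 9.0, 3.0],
                  [7.0, 2.0, 5.0]], dtype=np.float32)
    # argmax over dim=1 with keepdim=False, the defaults spelled out in ArgMaxExt.__call__
    print(x.argmax(axis=1))    # [1 0]
    # Arange(start, end, step): presumably the usual half-open range [start, end)
    print(np.arange(0, 5, 1))  # [0 1 2 3 4]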
@@ -1278,6 +1535,111 @@ class Atanh(Primitive):
  atanh_op=Atanh()


+ class AvgPool2DGrad(Primitive):
+ r"""
+
+ """
+ __mindspore_signature__ = (
+ sig.make_sig('grad'),
+ sig.make_sig('image'),
+ sig.make_sig('kernel_size'),
+ sig.make_sig('stride'),
+ sig.make_sig('padding', default=0),
+ sig.make_sig('ceil_mode', default=False),
+ sig.make_sig('count_include_pad', default=True),
+ sig.make_sig('divisor_override', default=None),
+ )
+
+ @prim_arg_register
+ def __init__(self):
+ pass
+
+ def __call__(self, grad, image, kernel_size, stride, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None):
+ return _convert_stub(pyboost_avg_pool2d_grad(self, [grad, image, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override]))
+
+ avg_pool2d_grad_op=AvgPool2DGrad()
+
+
+ class AvgPool2D(Primitive):
+ r"""
+ Applies a 2D average pooling over an input Tensor which can be regarded as a composition of 2D input planes.
+ Typically the input is of shape :math:`(N, C, H_{in}, W_{in})`, outputs regional average in the
+ :math:`(H_{in}, W_{in})`-dimension. Given kernel size :math:`(k_{H}, k_{W})` and `stride` , the operation
+ is as follows.
+
+ .. math::
+ \text{output}(N_i, C_j, h, w) = \frac{1}{k_{H} * k_{W}} \sum_{m=0}^{k_{H}-1} \sum_{n=0}^{k_{W}-1}
+ \text{input}(N_i, C_j, stride[0] \times h + m, stride[1] \times w + n)
+
+ Inputs:
+ input (Tensor): Tensor of shape :math:`(N, C, H_{in}, W_{in})`.
+ kernel_size (Union[int, tuple[int], list[int]]): The size of kernel used to take the average value. Can be
+ a single number or a tuple (kH, kW).
+ stride (Union[int, tuple[int], list[int]]): The distance of kernel moving. Can be a single number or
+ a tuple (sH, sW).
+ padding (Union(int, tuple[int], list[int])): Implicit zero padding to be added on both sides. Can be a single
+ number or a tuple (padH, padW). Default: 0.
+ ceil_mode (bool): If True, apply ceil instead of floor to compute the output shape. Default: ``False``.
+ count_include_pad (bool): If True, include the zero-padding in the averaging calculation. Default: ``True`` .
+ divisor_override (int): If specified, it will be used as divisor in the averaging calculation, otherwise
+ `kernel_size` will be used. Default: ``None``.
+
+ Outputs:
+ Tensor, with shape :math:`(N, C, H_{out}, W_{out})`.
+
+ .. math::
+
+ H_{out} = \frac{H_{in} + 2 \times padding[0] - kernel_size[0]}{stride[0]} + 1
+ W_{out} = \frac{W_{in} + 2 \times padding[1] - kernel_size[1]}{stride[1]} + 1
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+ TypeError: If `kernel_size` or `stride` is neither int nor tuple.
+ TypeError: If `ceil_mode` or `count_include_pad` is not a bool.
+ TypeError: If `divisor_override` is not an int.
+ ValueError: If length of shape of `input` is not equal to `4` or `3`.
+ ValueError: If `kernel_size` or `stride` is less than 1.
+ ValueError: If `kernel_size` or `stride` is a tuple whose length is not equal to `2` or `1`.
+ ValueError: If `padding` is neither a int nor a tuple whose length is equal to `2` or `1`.
+ ValueError: If value of `padding` is less than `0`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mindspore.float32)
+ >>> output = ops.auto_generate.AvgPool2D()(x, 2, 1)
+ >>> print(output)
+ [[[[ 2.5 3.5 4.5]
+ [ 6.5 7.5 8.5]]
+ [[14.5 15.5 16.5]
+ [18.5 19.5 20.5]]
+ [[26.5 27.5 28.5]
+ [30.5 31.5 32.5]]]]
+ """
+ __mindspore_signature__ = (
+ sig.make_sig('input'),
+ sig.make_sig('kernel_size'),
+ sig.make_sig('stride'),
+ sig.make_sig('padding', default=0),
+ sig.make_sig('ceil_mode', default=False),
+ sig.make_sig('count_include_pad', default=True),
+ sig.make_sig('divisor_override', default=None),
+ )
+
+ @prim_arg_register
+ def __init__(self):
+ pass
+
+ def __call__(self, input, kernel_size, stride, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None):
+ return _convert_stub(pyboost_avg_pool2d(self, [input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override]))
+
+ avg_pool2d_op=AvgPool2D()
+
+
  class AvgPoolGrad(Primitive):
  r"""
  Gradients of the avg pool operation.
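The output-shape formula in the AvgPool2D docstring above can be checked against its own example: a 1x3x3x4 input with kernel 2 and stride 1 gives H_out = (3 - 2)/1 + 1 = 2 and W_out = (4 - 2)/1 + 1 = 3, matching the printed 2x3 maps per channel. A small sketch of that arithmetic:

    def avg_pool2d_out_shape(h_in, w_in, kernel, stride, padding=0):
        # Shape formula from the AvgPool2D docstring (floor division, ceil_mode=False)
        h_out = (h_in + 2 * padding - kernel) // stride + 1
        w_out = (w_in + 2 * padding - kernel) // stride + 1
        return h_out, w_out

    # Matches the docstring example: 3x4 spatial input, kernel 2, stride 1 -> 2x3 output
    print(avg_pool2d_out_shape(3, 4, kernel=2, stride=1))  # (2, 3)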
@@ -1373,6 +1735,102 @@ class AvgPool(Primitive):
  return super().__call__(x, self.kernel_size, self.strides, self.pad_mode, self.data_format)


+ class BatchMatMul(Primitive):
+ r"""
+ Computes matrix multiplication between two tensors by batch.
+
+ .. math::
+
+ \text{output}[..., :, :] = \text{matrix}(x[..., :, :]) * \text{matrix}(y[..., :, :])
+
+ The rank of both two input tensors must be same and not less than `2`.
+
+ Args:
+ transpose_a (bool): If ``True`` , the last two dimensions of `x` is transposed before multiplication.
+ Default: ``False`` .
+ transpose_b (bool): If ``True`` , the last two dimensions of `y` is transposed before multiplication.
+ Default: ``False`` .
+
+ Inputs:
+ - **x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(*B, N, C)`,
+ where :math:`*B` represents the batch size which can be multidimensional, :math:`N` and :math:`C` are the
+ size of the last two dimensions. If `transpose_a` is ``True`` , its shape must be :math:`(*B, C, N)`.
+ - **y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(*B, C, M)`. If
+ `transpose_b` is ``True`` , its shape must be :math:`(*B, M, C)`.
+
+ Outputs:
+ Tensor, the shape of the output tensor is :math:`(*B, N, M)`.
+
+ Raises:
+ TypeError: If `transpose_a` or `transpose_b` is not a bool.
+ ValueError: If length of shape of `x` is not equal to length of shape of `y` or
+ length of shape of inputs is less than 2.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> x = Tensor(np.ones(shape=[2, 4, 1, 3]), mindspore.float32)
+ >>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
+ >>> batmatmul = ops.BatchMatMul()
+ >>> output = batmatmul(x, y)
+ >>> print(output.shape)
+ (2, 4, 1, 4)
+ >>> x = Tensor(np.ones(shape=[2, 4, 3, 1]), mindspore.float32)
+ >>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
+ >>> batmatmul = ops.BatchMatMul(transpose_a=True)
+ >>> output = batmatmul(x, y)
+ >>> print(output.shape)
+ (2, 4, 1, 4)
+ """
+ @prim_arg_register
+ def __init__(self, transpose_a=False, transpose_b=False):
+ self._set_prim_arg("transpose_a", transpose_a)
+ self._set_prim_arg("transpose_b", transpose_b)
+
+ def __call__(self, x, y):
+ return _convert_stub(pyboost_batch_mat_mul(self, [x, y, self.transpose_a, self.transpose_b]))
+
+ class BatchNormExt(Primitive):
+ r"""
+
+ """
+ __mindspore_signature__ = (
+ sig.make_sig('input'),
+ sig.make_sig('weight'),
+ sig.make_sig('bias'),
+ sig.make_sig('running_mean'),
+ sig.make_sig('runnning_var'),
+ sig.make_sig('training', default=False),
+ sig.make_sig('momentum', default=0.1),
+ sig.make_sig('epsilon', default=1e-5),
+ )
+
+ @prim_arg_register
+ def __init__(self):
+ pass
+
+ def __call__(self, input, weight, bias, running_mean, runnning_var, training=False, momentum=0.1, epsilon=1e-5):
+ return _convert_stub(pyboost_batch_norm_ext(self, [input, weight, bias, running_mean, runnning_var, training, momentum, epsilon]))
+
+ batch_norm_ext_op=BatchNormExt()
+
+
+ class BatchNormGradExt(Primitive):
+ r"""
+
+ """
+ @prim_arg_register
+ def __init__(self, training=False, eps=1e-5):
+ self._set_prim_arg("training", training)
+ self._set_prim_arg("eps", eps)
+
+ def __call__(self, dout, input, weight, running_mean, running_var, saved_mean, saved_rstd):
+ return _convert_stub(pyboost_batch_norm_grad_ext(self, [dout, input, weight, running_mean, running_var, saved_mean, saved_rstd, self.training, self.eps]))
+
  class BatchNormGradGrad(Primitive):
  r"""
  Performs grad of BatchNormGrad operation.
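The BatchMatMul contraction documented above matches NumPy's batched `matmul`, which makes an easy cross-check of the documented shapes (this is a sketch of the shape behaviour only, not the MindSpore kernel):

    import numpy as np

    x = np.ones((2, 4, 1, 3), dtype=np.float32)
    y = np.ones((2, 4, 3, 4), dtype=np.float32)
    # Same contraction as the BatchMatMul docstring example: (*B, N, C) @ (*B, C, M) -> (*B, N, M)
    out = np.matmul(x, y)
    print(out.shape)  # (2, 4, 1, 4), as printed in the docstring example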
@@ -1529,6 +1987,31 @@ class BiasAdd(Primitive):
  return super().__call__(input_x, bias, self.data_format)


+ class BatchMatMulExt(Primitive):
+ r"""
+ .. code-block::
+
+ prim = ops.BatchMatMulExt()
+ out = prim(input, mat2)
+
+ is equivalent to
+
+ .. code-block::
+
+ ops.bmm_ext(input, mat2)
+
+ Refer to :func:`mindspore.ops.bmm_ext` for more details.
+ """
+ @prim_arg_register
+ def __init__(self):
+ pass
+
+ def __call__(self, input, mat2):
+ return _convert_stub(pyboost_bmm_ext(self, [input, mat2]))
+
+ bmm_ext_op=BatchMatMulExt()
+
+
  class BoolNot(Primitive):
  r"""
  Returns bool_not `not` of bool input.
@@ -1738,6 +2221,119 @@ class Cholesky(Primitive):
  return super().__call__(input_x, self.upper)


+ class Chunk(Primitive):
+ r"""
+ Cut the input Tensor into `chunks` sub-tensors along the specified axis.
+
+ Note:
+ This function may return less than the specified number of chunks!
+
+ Inputs:
+ input (Tensor): A Tensor to be cut.
+ chunks (int): Number of sub-tensors to cut.
+ dim (int, optional): Specify the dimensions that you want to split. Default: ``0`` .
+
+ Returns:
+ A tuple of sub-tensors.
+
+ Raises:
+ TypeError: If argument `input` is not Tensor.
+ TypeError: The sum of `chunks` is not int.
+ TypeError: If argument `dim` is not int.
+ ValueError: If argument `dim` is out of range of :math:`[-input.ndim, input.ndim)` .
+ ValueError: If argument `chunks` is not positive number.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import ops, Tensor
+ >>> input_x = np.arange(9).astype("float32")
+ >>> output = ops.Chunk()(Tensor(input_x), 3)
+ >>> print(output)
+ (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
+ Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
+ Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
+ """
+ __mindspore_signature__ = (
+ sig.make_sig('input'),
+ sig.make_sig('chunks'),
+ sig.make_sig('dim', default=0),
+ )
+
+ @prim_arg_register
+ def __init__(self):
+ pass
+
+ def __call__(self, input, chunks, dim=0):
+ return _convert_stub(pyboost_chunk(self, [input, chunks, dim]))
+
+ chunk_op=Chunk()
+
+
+ class ClampScalar(Primitive):
+ r"""
+ .. code-block::
+
+ prim = ops.ClampScalar()
+ out = prim(input, min, max)
+
+ is equivalent to
+
+ .. code-block::
+
+ ops.clamp_scalar(input, min, max)
+
+ Refer to :func:`mindspore.ops.clamp_scalar` for more details.
+ """
+ __mindspore_signature__ = (
+ sig.make_sig('input'),
+ sig.make_sig('min', default=None),
+ sig.make_sig('max', default=None),
+ )
+
+ @prim_arg_register
+ def __init__(self):
+ pass
+
+ def __call__(self, input, min=None, max=None):
+ return _convert_stub(pyboost_clamp_scalar(self, [input, min, max]))
+
+ clamp_scalar_op=ClampScalar()
+
+
+ class ClampTensor(Primitive):
+ r"""
+ .. code-block::
+
+ prim = ops.ClampTensor()
+ out = prim(input, min, max)
+
+ is equivalent to
+
+ .. code-block::
+
+ ops.clamp_tensor(input, min, max)
+
+ Refer to :func:`mindspore.ops.clamp_tensor` for more details.
+ """
+ __mindspore_signature__ = (
+ sig.make_sig('input'),
+ sig.make_sig('min', default=None),
+ sig.make_sig('max', default=None),
+ )
+
+ @prim_arg_register
+ def __init__(self):
+ pass
+
+ def __call__(self, input, min=None, max=None):
+ return _convert_stub(pyboost_clamp_tensor(self, [input, min, max]))
+
+ clamp_tensor_op=ClampTensor()
+
+
  class Complex(Primitive):
  r"""
  Returns a complex Tensor from the real part and the imag part.
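The note in the Chunk docstring ("may return less than the specified number of chunks") is easiest to see with a concrete split. A sketch of the usual chunking arithmetic, assuming torch-style semantics where each chunk takes ceil(length / chunks) elements (the accepted behaviour is not spelled out in this file):

    import math

    def chunk_sizes(length, chunks):
        # Assumed rule: chunk size is ceil(length / chunks); the tail chunk is
        # smaller and trailing empty chunks are dropped.
        size = math.ceil(length / chunks)
        sizes = []
        remaining = length
        while remaining > 0:
            sizes.append(min(size, remaining))
            remaining -= size
        return sizes

    print(chunk_sizes(9, 3))  # [3, 3, 3] -> 3 chunks, as in the docstring example
    print(chunk_sizes(6, 4))  # [2, 2, 2] -> only 3 chunks despite chunks=4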
@@ -1829,6 +2425,26 @@ class Conj(Primitive):
  conj_op=Conj()


+ class ConstantPadND(Primitive):
+ r"""
+
+ """
+ __mindspore_signature__ = (
+ sig.make_sig('input'),
+ sig.make_sig('padding'),
+ sig.make_sig('value', default=0.0),
+ )
+
+ @prim_arg_register
+ def __init__(self):
+ pass
+
+ def __call__(self, input, padding, value=0.0):
+ return _convert_stub(pyboost_constant_pad_nd(self, [input, padding, value]))
+
+ constant_pad_nd_op=ConstantPadND()
+
+
  class Contiguous(Primitive):
  r"""
  .. code-block::
@@ -1854,7 +2470,53 @@ class Contiguous(Primitive):
1854
2470
  contiguous_op=Contiguous()
1855
2471
 
1856
2472
 
1857
- class Copy(Primitive):
2473
+ class ConvolutionGrad(Primitive):
2474
+ r"""
2475
+
2476
+ """
2477
+ __mindspore_signature__ = (
2478
+ sig.make_sig('dout'),
2479
+ sig.make_sig('input'),
2480
+ sig.make_sig('weight'),
2481
+ sig.make_sig('bias', default=None),
2482
+ )
2483
+
2484
+ @prim_arg_register
2485
+ def __init__(self, stride=1, padding=0, dilation=1, transposed=False, output_padding=0, groups=1, output_mask=()):
2486
+ self._set_prim_arg_with_handler("stride", stride, to_strides)
2487
+ self._set_prim_arg_with_handler("padding", padding, to_2d_paddings)
2488
+ self._set_prim_arg_with_handler("dilation", dilation, to_dilations)
2489
+ self._set_prim_arg("transposed", transposed)
2490
+ self._set_prim_arg_with_handler("output_padding", output_padding, to_output_padding)
2491
+ self._set_prim_arg("groups", groups)
2492
+ self._set_prim_arg("output_mask", output_mask)
2493
+
2494
+ def __call__(self, dout, input, weight, bias=None):
2495
+ return _convert_stub(pyboost_convolution_grad(self, [dout, input, weight, bias, self.stride, self.padding, self.dilation, self.transposed, self.output_padding, self.groups, self.output_mask]))
2496
+
2497
+ class Convolution(Primitive):
2498
+ r"""
2499
+
2500
+ """
2501
+ __mindspore_signature__ = (
2502
+ sig.make_sig('input'),
2503
+ sig.make_sig('weight'),
2504
+ sig.make_sig('bias', default=None),
2505
+ )
2506
+
2507
+ @prim_arg_register
2508
+ def __init__(self, stride=1, padding=0, dilation=1, transposed=False, output_padding=0, groups=1):
2509
+ self._set_prim_arg_with_handler("stride", stride, to_strides)
2510
+ self._set_prim_arg_with_handler("padding", padding, to_2d_paddings)
2511
+ self._set_prim_arg_with_handler("dilation", dilation, to_dilations)
2512
+ self._set_prim_arg("transposed", transposed)
2513
+ self._set_prim_arg_with_handler("output_padding", output_padding, to_output_padding)
2514
+ self._set_prim_arg("groups", groups)
2515
+
2516
+ def __call__(self, input, weight, bias=None):
2517
+ return _convert_stub(pyboost_convolution(self, [input, weight, bias, self.stride, self.padding, self.dilation, self.transposed, self.output_padding, self.groups]))
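Convolution and ConvolutionGrad also arrive without docstrings. A minimal sketch of the forward primitive, assuming NCHW inputs and OIHW weights (suggested by the `to_strides`/`to_2d_paddings` handlers but not stated here) and the `ops.auto_generate` export path used by the Embedding example later in this file:

    import numpy as np
    import mindspore as ms
    from mindspore import ops

    conv = ops.auto_generate.Convolution(stride=1, padding=1, dilation=1, groups=1)
    x = ms.Tensor(np.random.randn(1, 3, 8, 8).astype(np.float32))   # assumed N, C_in, H, W
    w = ms.Tensor(np.random.randn(4, 3, 3, 3).astype(np.float32))   # assumed C_out, C_in, kH, kW
    out = conv(x, w)                                                # bias defaults to None
    print(out.shape)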
2518
+
2519
+ class Copy(Primitive):
1858
2520
  r"""
1859
2521
  .. code-block::
1860
2522
 
@@ -2214,6 +2876,37 @@ class DecoderKVCache(Primitive):
2214
2876
  decoder_k_v_cache_op=DecoderKVCache()
2215
2877
 
2216
2878
 
2879
+ class Dense(Primitive):
2880
+ r"""
2881
+ .. code-block::
2882
+
2883
+ prim = ops.Dense()
2884
+ out = prim(input, weight, bias)
2885
+
2886
+ is equivalent to
2887
+
2888
+ .. code-block::
2889
+
2890
+ ops.dense(input, weight, bias)
2891
+
2892
+ Refer to :func:`mindspore.ops.dense` for more details.
2893
+ """
2894
+ __mindspore_signature__ = (
2895
+ sig.make_sig('input'),
2896
+ sig.make_sig('weight'),
2897
+ sig.make_sig('bias', default=None),
2898
+ )
2899
+
2900
+ @prim_arg_register
2901
+ def __init__(self):
2902
+ pass
2903
+
2904
+ def __call__(self, input, weight, bias=None):
2905
+ return _convert_stub(pyboost_dense(self, [input, weight, bias]))
2906
+
2907
+ dense_op=Dense()
2908
+
2909
+
2217
2910
  class Diag(Primitive):
2218
2911
  r"""
2219
2912
  .. code-block::
@@ -2328,6 +3021,114 @@ class Div(Primitive):
2328
3021
  div_op=Div()
2329
3022
 
2330
3023
 
3024
+ class DivMod(Primitive):
3025
+ r"""
3026
+
3027
+ """
3028
+ __mindspore_signature__ = (
3029
+ sig.make_sig('x', dtype=sig.sig_dtype.T),
3030
+ sig.make_sig('y', dtype=sig.sig_dtype.T),
3031
+ sig.make_sig('rounding_mode', dtype=sig.sig_dtype.T1, default=None),
3032
+ )
3033
+
3034
+ @prim_arg_register
3035
+ def __init__(self):
3036
+ pass
3037
+
3038
+ def __call__(self, x, y, rounding_mode=None):
3039
+ return _convert_stub(pyboost_divmod(self, [x, y, rounding_mode if rounding_mode is None else str_to_enum('DivMod', 'rounding_mode', rounding_mode)]))
3040
+
3041
+ divmod_op=DivMod()
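DivMod has no docstring either; judging from its signature it is a division with an optional rounding mode. A sketch, assuming the mode strings mirror `mindspore.ops.div` ('floor'/'trunc'), which is an assumption since only the `str_to_enum` conversion is visible here:

    import mindspore as ms
    from mindspore import ops

    div = ops.auto_generate.DivMod()                 # assumed export path
    x = ms.Tensor([7.0, -7.0], ms.float32)
    y = ms.Tensor([2.0, 2.0], ms.float32)
    print(div(x, y))                                 # plain division when rounding_mode is None
    print(div(x, y, 'floor'))                        # floor division, if 'floor' is an accepted mode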
3042
+
3043
+
3044
+ class Dot(Primitive):
3045
+ r"""
3046
+ .. code-block::
3047
+
3048
+ prim = ops.Dot()
3049
+ out = prim(input, other)
3050
+
3051
+ is equivalent to
3052
+
3053
+ .. code-block::
3054
+
3055
+ ops.dot(input, other)
3056
+
3057
+ Refer to :func:`mindspore.ops.dot` for more details.
3058
+ """
3059
+ @prim_arg_register
3060
+ def __init__(self):
3061
+ pass
3062
+
3063
+ def __call__(self, input, other):
3064
+ return _convert_stub(pyboost_dot(self, [input, other]))
3065
+
3066
+ dot_op=Dot()
3067
+
3068
+
3069
+ class DropoutDoMaskExt(Primitive):
3070
+ r"""
3071
+
3072
+ """
3073
+ @prim_arg_register
3074
+ def __init__(self):
3075
+ pass
3076
+
3077
+ def __call__(self, input, mask, p):
3078
+ return _convert_stub(pyboost_dropout_do_mask_ext(self, [input, mask, p]))
3079
+
3080
+ dropout_do_mask_ext_op=DropoutDoMaskExt()
3081
+
3082
+
3083
+ class DropoutExt(Primitive):
3084
+ r"""
3085
+
3086
+ """
3087
+ __mindspore_signature__ = (
3088
+ sig.make_sig('input'),
3089
+ sig.make_sig('p', default=0.5),
3090
+ sig.make_sig('seed', default=0),
3091
+ sig.make_sig('offset', default=0),
3092
+ )
3093
+
3094
+ @prim_arg_register
3095
+ def __init__(self):
3096
+ self.add_prim_attr("side_effect_hidden", True)
3097
+
3098
+ def __call__(self, input, p=0.5, seed=0, offset=0):
3099
+ return _convert_stub(pyboost_dropout_ext(self, [input, p, seed, offset]))
3100
+
3101
+ dropout_ext_op=DropoutExt()
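DropoutExt is likewise undocumented; the signature suggests dropout with an explicit drop probability plus seed/offset state. A sketch under that assumption (the structure of the returned value is not shown in this diff):

    import numpy as np
    import mindspore as ms
    from mindspore import ops

    dropout = ops.auto_generate.DropoutExt()         # assumed export path
    x = ms.Tensor(np.ones((2, 4), np.float32))
    out = dropout(x, p=0.3, seed=1, offset=0)        # assumed: zeroes elements with probability p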
3102
+
3103
+
3104
+ class DropoutGenMaskExt(Primitive):
3105
+ r"""
3106
+
3107
+ """
3108
+ @prim_arg_register
3109
+ def __init__(self):
3110
+ self.add_prim_attr("side_effect_hidden", True)
3111
+
3112
+ def __call__(self, shape, p, seed, offset, dtype):
3113
+ return _convert_stub(pyboost_dropout_gen_mask_ext(self, [shape, p, seed, offset, dtype_to_type_id('DropoutGenMaskExt', 'dtype', dtype)]))
3114
+
3115
+ dropout_gen_mask_ext_op=DropoutGenMaskExt()
3116
+
3117
+
3118
+ class DropoutGradExt(Primitive):
3119
+ r"""
3120
+
3121
+ """
3122
+ @prim_arg_register
3123
+ def __init__(self):
3124
+ pass
3125
+
3126
+ def __call__(self, x, mask, p):
3127
+ return _convert_stub(pyboost_dropout_grad_ext(self, [x, mask, p]))
3128
+
3129
+ dropout_grad_ext_op=DropoutGradExt()
3130
+
3131
+
2331
3132
  class Dropout(Primitive):
2332
3133
  r"""
2333
3134
  During training, randomly zeroes some of the elements of the input tensor
@@ -2471,6 +3272,93 @@ class Elu(Primitive):
2471
3272
  return super().__call__(input_x, self.alpha)
2472
3273
 
2473
3274
 
3275
+ class EmbeddingDenseBackward(Primitive):
3276
+ r"""
3277
+
3278
+ """
3279
+ __mindspore_signature__ = (
3280
+ sig.make_sig('grad'),
3281
+ sig.make_sig('indices'),
3282
+ sig.make_sig('num_weights'),
3283
+ sig.make_sig('padding_idx', default=None),
3284
+ sig.make_sig('scale_grad_by_freq', default=False),
3285
+ )
3286
+
3287
+ @prim_arg_register
3288
+ def __init__(self):
3289
+ pass
3290
+
3291
+ def __call__(self, grad, indices, num_weights, padding_idx=None, scale_grad_by_freq=False):
3292
+ return _convert_stub(pyboost_embedding_dense_backward(self, [grad, indices, num_weights, padding_idx, scale_grad_by_freq]))
3293
+
3294
+ embedding_dense_backward_op=EmbeddingDenseBackward()
3295
+
3296
+
3297
+ class Embedding(Primitive):
3298
+ r"""
3299
+ Retrieve the word embeddings in `weight` using indices specified in `input`.
3300
+
3301
+ .. warning::
3302
+ On Ascend, the behavior is unpredictable when the value of `input` is invalid.
3303
+
3304
+ Args:
3305
+ input (Tensor): The indices used to lookup in the `weight`. The data type must be mindspore.int32 or mindspore.int64,
3306
+ and the value should be in range `[0, weight.shape[0])`.
3307
+         weight (Parameter): The matrix to look up from. The shape must be 2D.
3308
+ padding_idx (int, optional): If the value is not None, the corresponding row of `weight` will not be updated in training.
3309
+ The value should be in range `[-weight.shape[0], weight.shape[0])` if it's not ``None``. Default ``None``.
3310
+         max_norm (float, optional): If not None, first compute the p-norm of the `weight` rows specified by `input`, where p is specified by `norm_type`;
3311
+             if the result is larger than `max_norm`, update the `weight` with :math:`\frac{max\_norm}{result+1e^{-7}}` in-place. Default ``None``.
3312
+ norm_type (float, optional): Indicates the value of p in p-norm. Default ``2.0``.
3313
+         scale_grad_by_freq (bool, optional): If ``True`` , the gradients will be scaled by the inverse of the frequency of the indices in `input`. Default ``False``.
3314
+
3315
+ Returns:
3316
+ Tensor, has the same data type as `weight`, the shape is :math:`(*input.shape, weight.shape[1])`.
3317
+
3318
+ Raises:
3319
+ ValueError: If `padding_idx` is out of valid range.
3320
+ ValueError: If the shape of `weight` is invalid.
3321
+         TypeError: If `weight` is not a :class:`mindspore.Parameter`.
3322
+
3323
+ Supported Platforms:
3324
+ ``Ascend``
3325
+
3326
+ Examples:
3327
+ >>> import mindspore
3328
+ >>> import numpy as np
3329
+ >>> from mindspore import Tensor, Parameter, ops
3330
+ >>> input = Tensor([[1, 0, 1, 1], [0, 0, 1, 0]])
3331
+ >>> weight = Parameter(np.random.randn(3, 3).astype(np.float32))
3332
+ >>> output = ops.auto_generate.Embedding()(input, weight, max_norm=0.4)
3333
+ >>> print(output)
3334
+ [[[ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01],
3335
+ [ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01],
3336
+ [ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01],
3337
+ [ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01]],
3338
+ [[ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01],
3339
+ [ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01],
3340
+ [ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01],
3341
+ [ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01]]]
3342
+ """
3343
+ __mindspore_signature__ = (
3344
+ sig.make_sig('input'),
3345
+ sig.make_sig('weight', sig.sig_rw.RW_WRITE),
3346
+ sig.make_sig('padding_idx', default=None),
3347
+ sig.make_sig('max_norm', default=None),
3348
+ sig.make_sig('norm_type', default=2.0),
3349
+ sig.make_sig('scale_grad_by_freq', default=False),
3350
+ )
3351
+
3352
+ @prim_arg_register
3353
+ def __init__(self):
3354
+ self.add_prim_attr("side_effect_mem", True)
3355
+
3356
+ def __call__(self, input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False):
3357
+ return _convert_stub(pyboost_embedding(self, [input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq]))
3358
+
3359
+ embedding_op=Embedding()
3360
+
3361
+
2474
3362
  class Equal(Primitive):
2475
3363
  r"""
2476
3364
  .. code-block::
@@ -2518,8 +3406,7 @@ class Erf(Primitive):
2518
3406
  pass
2519
3407
 
2520
3408
  def __call__(self, input):
2521
- return super().__call__(input)
2522
-
3409
+ return _convert_stub(pyboost_erf(self, [input]))
2523
3410
 
2524
3411
  erf_op=Erf()
2525
3412
 
@@ -2768,6 +3655,46 @@ class FastGeLU(Primitive):
2768
3655
  fast_gelu_op=FastGeLU()
2769
3656
 
2770
3657
 
3658
+ class FFNExt(Primitive):
3659
+ r"""
3660
+ .. code-block::
3661
+
3662
+ prim = ops.FFNExt(activation, inner_precise)
3663
+ out = prim(x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2)
3664
+
3665
+ is equivalent to
3666
+
3667
+ .. code-block::
3668
+
3669
+ ops.ffn_ext(x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2, activation, inner_precise)
3670
+
3671
+ Refer to :func:`mindspore.ops.ffn_ext` for more details.
3672
+ """
3673
+ __mindspore_signature__ = (
3674
+ sig.make_sig('x'),
3675
+ sig.make_sig('weight1'),
3676
+ sig.make_sig('weight2'),
3677
+ sig.make_sig('expertTokens', default=None),
3678
+ sig.make_sig('bias1', default=None),
3679
+ sig.make_sig('bias2', default=None),
3680
+ sig.make_sig('scale', default=None),
3681
+ sig.make_sig('offset', default=None),
3682
+ sig.make_sig('deqScale1', default=None),
3683
+ sig.make_sig('deqScale2', default=None),
3684
+ sig.make_sig('antiquant_scale1', default=None),
3685
+ sig.make_sig('antiquant_scale2', default=None),
3686
+ sig.make_sig('antiquant_offset1', default=None),
3687
+ sig.make_sig('antiquant_offset2', default=None),
3688
+ )
3689
+
3690
+ @prim_arg_register
3691
+ def __init__(self, activation='fastgelu', inner_precise=0):
3692
+ self._set_prim_arg_with_handler("activation", activation, str_to_enum)
3693
+ self._set_prim_arg("inner_precise", inner_precise)
3694
+
3695
+ def __call__(self, x, weight1, weight2, expertTokens=None, bias1=None, bias2=None, scale=None, offset=None, deqScale1=None, deqScale2=None, antiquant_scale1=None, antiquant_scale2=None, antiquant_offset1=None, antiquant_offset2=None):
3696
+ return _convert_stub(pyboost_ffn_ext(self, [x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2, self.activation, self.inner_precise]))
3697
+
2771
3698
  class FFT2(Primitive):
2772
3699
  r"""
2773
3700
  .. code-block::
@@ -3043,77 +3970,383 @@ class FFTShift(Primitive):
3043
3970
  fftshift_op=FFTShift()
3044
3971
 
3045
3972
 
3046
- class Flatten(Primitive):
3973
+ class FillScalar(Primitive):
3047
3974
  r"""
3048
- Flattens a tensor without changing its batch size on the 0-th axis.
3975
+ Create a Tensor of the specified shape and fill it with the specified scalar value.
3049
3976
 
3050
- Refer to :func:`mindspore.ops.flatten` for more details.
3977
+ Args:
3978
+ size (Union(tuple[int], list[int])): The specified shape of output tensor.
3979
+ fill_value (number.Number): Value to fill the returned tensor. Complex numbers are not supported for now.
3051
3980
 
3052
- Inputs:
3053
- - **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)` to be flattened, where :math:`N` is batch size.
3981
+ Keyword Args:
3982
+ dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for
3983
+ details, please refer to :class:`mindspore.dtype` . Default: ``None`` .
3054
3984
 
3055
- Outputs:
3056
- Tensor, the shape of the output tensor is :math:`(N, X)`, where :math:`X` is
3057
- the product of the remaining dimension.
3985
+ Returns:
3986
+ Tensor.
3058
3987
 
3059
- Supported Platforms:
3060
- ``Ascend`` ``GPU`` ``CPU``
3988
+ Raises:
3989
+ TypeError: If `size` is not a tuple or list.
3990
+         ValueError: If any element of `size` is less than 0.
3061
3991
 
3062
- Examples:
3063
- >>> import mindspore
3064
- >>> import numpy as np
3065
- >>> from mindspore import Tensor, ops
3066
- >>> input_x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
3067
- >>> flatten = ops.Flatten()
3068
- >>> output = flatten(input_x)
3069
- >>> print(output.shape)
3070
- (1, 24)
3992
+ Supported Platforms:
3993
+ ``Ascend``
3071
3994
  """
3995
+ __mindspore_signature__ = (
3996
+ sig.make_sig('size'),
3997
+ sig.make_sig('fill_value'),
3998
+ sig.make_sig('dtype', default=None),
3999
+ )
4000
+
3072
4001
  @prim_arg_register
3073
4002
  def __init__(self):
3074
4003
  pass
3075
4004
 
3076
- def __call__(self, input_x):
3077
- return super().__call__(input_x)
4005
+ def __call__(self, size, fill_value, dtype=None):
4006
+ return _convert_stub(pyboost_fill_scalar(self, [size, fill_value, dtype if dtype is None else dtype_to_type_id('FillScalar', 'dtype', dtype)]))
3078
4007
 
3079
-
3080
- flatten_op=Flatten()
4008
+ fill_scalar_op=FillScalar()
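A minimal usage sketch for FillScalar based on the docstring above; the `ops.auto_generate` export path is an assumption (mirroring the Embedding example in this file):

    import mindspore as ms
    from mindspore import ops

    fill = ops.auto_generate.FillScalar()
    t = fill((2, 3), 7, dtype=ms.float32)            # 2x3 tensor filled with 7.0
    print(t)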
3081
4009
 
3082
4010
 
3083
- class FloorDiv(Primitive):
4011
+ class FillTensor(Primitive):
3084
4012
  r"""
3085
- .. code-block::
3086
-
3087
- prim = ops.FloorDiv()
3088
- out = prim(input, other)
3089
-
3090
- is equivalent to
3091
-
3092
- .. code-block::
3093
-
3094
- ops.floor_divide(input, other)
3095
-
3096
- Refer to :func:`mindspore.ops.floor_divide` for more details.
4013
+ Create a Tensor of the specified shape and fill it with the specified tensor value.
4014
+
4015
+ Args:
4016
+ size (Union(tuple[int], list[int])): The specified shape of output tensor.
4017
+ fill_value (Tensor): Value to fill the returned tensor. Complex numbers are not supported for now. Must be
4018
+             a scalar Tensor or a 1-D Tensor with shape [1].
4019
+
4020
+ Keyword Args:
4021
+ dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for
4022
+ details, please refer to :class:`mindspore.dtype` . Default: ``None`` .
4023
+
4024
+ Returns:
4025
+ Tensor.
4026
+
4027
+ Raises:
4028
+ TypeError: If `size` is not a tuple or list.
4029
+         ValueError: If any element of `size` is less than 0.
4030
+
4031
+ Supported Platforms:
4032
+ ``Ascend``
3097
4033
  """
3098
- __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
4034
+ __mindspore_signature__ = (
4035
+ sig.make_sig('size'),
4036
+ sig.make_sig('fill_value'),
4037
+ sig.make_sig('dtype', default=None),
4038
+ )
3099
4039
 
3100
4040
  @prim_arg_register
3101
4041
  def __init__(self):
3102
4042
  pass
3103
4043
 
3104
- def __call__(self, input, other):
3105
- return super().__call__(input, other)
3106
-
4044
+ def __call__(self, size, fill_value, dtype=None):
4045
+ return _convert_stub(pyboost_fill_tensor(self, [size, fill_value, dtype if dtype is None else dtype_to_type_id('FillTensor', 'dtype', dtype)]))
3107
4046
 
3108
- floor_div_op=FloorDiv()
4047
+ fill_tensor_op=FillTensor()
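The same pattern works for FillTensor, whose fill value must be a scalar Tensor (or a 1-D Tensor of shape [1]) per the docstring above; the export path is again an assumption:

    import mindspore as ms
    from mindspore import ops

    fill = ops.auto_generate.FillTensor()
    value = ms.Tensor(3.0, ms.float32)               # scalar Tensor, as required above
    t = fill((2, 2), value, dtype=ms.float32)
    print(t)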
3109
4048
 
3110
4049
 
3111
- class FloorMod(Primitive):
4050
+ class FlashAttentionScoreGrad(Primitive):
3112
4051
  r"""
3113
- .. code-block::
3114
-
3115
- prim = ops.FloorMod()
3116
- out = prim(x, y)
4052
+     Calculates the gradient of the FlashAttentionScore operation.
4053
+ .. warning::
4054
+ This is an experimental API that is subject to change or deletion.
4055
+
4056
+ Supported Platforms:
4057
+ ``Ascend``
4058
+ """
4059
+ __mindspore_signature__ = (
4060
+ sig.make_sig('query'),
4061
+ sig.make_sig('key'),
4062
+ sig.make_sig('value'),
4063
+ sig.make_sig('dy'),
4064
+ sig.make_sig('pse_shift', default=None),
4065
+ sig.make_sig('drop_mask', default=None),
4066
+ sig.make_sig('padding_mask', default=None),
4067
+ sig.make_sig('atten_mask', default=None),
4068
+ sig.make_sig('softmax_max', default=None),
4069
+ sig.make_sig('softmax_sum', default=None),
4070
+ sig.make_sig('softmax_in', default=None),
4071
+ sig.make_sig('attention_in', default=None),
4072
+ sig.make_sig('prefix', default=None),
4073
+ sig.make_sig('actual_seq_qlen', default=None),
4074
+ sig.make_sig('actual_seq_kvlen', default=None),
4075
+ )
4076
+
4077
+ @prim_arg_register
4078
+ def __init__(self, head_num, keep_prob=1.0, scale_value=1.0, pre_tokens=65536, next_tokens=65536, inner_precise=1, input_layout='BSH', sparse_mode=0):
4079
+ self._set_prim_arg("head_num", head_num)
4080
+ self._set_prim_arg("keep_prob", keep_prob)
4081
+ self._set_prim_arg("scale_value", scale_value)
4082
+ self._set_prim_arg("pre_tokens", pre_tokens)
4083
+ self._set_prim_arg("next_tokens", next_tokens)
4084
+ self._set_prim_arg("inner_precise", inner_precise)
4085
+ self._set_prim_arg_with_handler("input_layout", input_layout, str_to_enum)
4086
+ self._set_prim_arg("sparse_mode", sparse_mode)
4087
+
4088
+ def __call__(self, query, key, value, dy, pse_shift=None, drop_mask=None, padding_mask=None, atten_mask=None, softmax_max=None, softmax_sum=None, softmax_in=None, attention_in=None, prefix=None, actual_seq_qlen=None, actual_seq_kvlen=None):
4089
+ return _convert_stub(pyboost_flash_attention_score_grad(self, [query, key, value, dy, pse_shift, drop_mask, padding_mask, atten_mask, softmax_max, softmax_sum, softmax_in, attention_in, prefix, actual_seq_qlen, actual_seq_kvlen, self.head_num, self.keep_prob, self.scale_value, self.pre_tokens, self.next_tokens, self.inner_precise, self.input_layout, self.sparse_mode]))
4090
+
4091
+ class FlashAttentionScore(Primitive):
4092
+ r"""
4093
+ FlashAttentionScore.
4094
+
4095
+ .. math::
4096
+ \begin{array}{ll} \\
4097
+             y = Dropout(Softmax(Mask(scale\_value \cdot (real\_shift + query \cdot key), attn\_mask), -1), keep\_prob) \\
4098
+             \cdot value \\
4099
+ \end{array}
4100
+
4101
+ B -- Batch size. Value range 1 to 2k.
4102
+ S1 -- Sequence length of query. Value range 1 to 512k.
4103
+ S2 -- Sequence length of key and value. Value range 1 to 512k.
4104
+ N1 -- Num heads of query. Value range 1 to 256.
4105
+ N2 -- Num heads of key and value, and N2 must be a factor of N1.
4106
+     D -- Head size. The value must be a multiple of 16, with a maximum of 512.
4107
+ H1 -- Hidden size of query, which equals to N1 * D.
4108
+ H2 -- Hidden size of key and value, which equals to N2 * D.
4109
+
4110
+ .. warning::
4111
+ This is an experimental API that is subject to change or deletion. Only support on Atlas A2 training series.
4112
+
4113
+ Args:
4114
+ head_num (int): The head num of query, equal to N1.
4115
+ keep_prob (float): The keep probability of dropout. Value range is (0.0, 1.0]. Default: 1.0. When keep_prob
4116
+             is 1.0, drop_mask should be None.
4117
+ scale_value (float): The scale factor of score. Generally, the value is 1.0 / (D ** 0.5). Default: 1.0.
4118
+ pre_tokens (int): Parameter for sparse computation, represents how many tokens are counted forward.
4119
+ When sparse_mode is set to 1, 2, 3, or 5, this parameter does not take effect. Default: 2147483647.
4120
+ next_tokens (int): Parameter for sparse computation, represents how many tokens are counted backward.
4121
+ When sparse_mode is set to 1, 2, 3, or 5, this parameter does not take effect. Default: 2147483647.
4122
+ The value of pre_tokens corresponds to S1, and the value of next_tokens corresponds to S2. They define the
4123
+ valid area on the attn_mask matrix. It must ensure that the band is not empty.
4124
+ The following values are not allowed:
4125
+
4126
+ - pre_tokens < 0 and next_tokens < 0.
4127
+ - (pre_tokens < 0 and next_tokens >= 0) and (next_tokens < abs(pre_tokens) or abs(pre_tokens) >= S2).
4128
+ - (pre_tokens >= 0 and next_tokens < 0) and (abs(next_tokens) > pre_tokens or abs(next_tokens) >= S1).
4129
+
4130
+ inner_precise (int): The parameter is reserved and not implemented yet. Default: 0.
4131
+ input_layout (str): Specifies the layout of input `query`, key and value. The value can be "BSH", "BNSD", "SBH",
4132
+ "BSND" or "TND". "TND" is an experimental format. Default: "BSH".
4133
+ When input_layout is "TND", the following restrictions must be met.
4134
+ There are two lists that represent the length of the input sequence: list_seq_q and list_seq_k. Each
4135
+ value in the list indicates the length of the sequence in the batch. For example, list_seq_q = [4, 2, 6],
4136
+ list_seq_k = [10, 3, 9]. The element of list indicate S. T1 is sum(list_seq_q) = 12, T2 is
4137
+ sum(list_seq_k) = 22.
4138
+ max_seqlen_q = max(list_seq_q), max_seqlen_k = max(list_seq_k).
4139
+             qk_pointer = sum(list_seq_q * list_seq_k), which is the sum of the element-wise products.
4140
+
4141
+ - The lengths of two lists are the same, and size of list is batch. batch is less than or equal to 1024.
4142
+ - When input_layout is "TND", actual_seq_qlen and actual_seq_kvlen must be not none.
4143
+               Otherwise, they must be None.
4144
+             - The actual_seq_qlen and actual_seq_kvlen are cumulative sums of the sequence lengths, so they must
4145
+ be non-decreasing.
4146
+             - If real_shift is not None, list_seq_q and list_seq_k must be the same. The maximum value of list_seq_q and
4147
+ list_seq_k is greater than 1024. Real_shift should be `(B, N1, 1024, S2)` and `(1, N1, 1024, S2)`, and
4148
+ S2 is equal to max_seqlen_k.
4149
+             - Attn mask must be a lower triangular matrix, so sparse_mode should be 2 or 3. The shape of attn_mask
4150
+ should be `(2048, 2048)`.
4151
+ - The shape of drop_mask is (qk_pointer * N1 // 8,).
4152
+ - Prefix is none.
4153
+ - Next_tokens is 0, and pre_tokens is not less than max_seqlen_q.
4154
+ - When sparse_mode is 3, S1 of each batch should be less than or equal to S2.
4155
+ - 0 should not exist in list_seq_k.
4156
+
4157
+ sparse_mode (int): Indicates sparse mode. Default 0.
4158
+
4159
+ - 0: Indicates the defaultMask mode. If attn_mask is not passed, the mask operation is not performed,
4160
+               and preTokens and nextTokens (internally assigned as INT_MAX) are ignored. If passed in, the full attn_mask
4161
+ matrix (S1 * S2) needs to be passed in, indicating that the part between preTokens and nextTokens needs to
4162
+ be calculated.
4163
+ - 1: Represents allMask, that is, passing in the complete attn_mask matrix.
4164
+             - 2: Represents the leftUpCausal mode, which corresponds to the lower triangle scenario divided by the left
4165
+ vertex, and the optimized attn_mask matrix (2048*2048) is required.
4166
+             - 3: Represents the rightDownCausal mode, which corresponds to the lower triangle scenario divided by the lower
4167
+ right vertex, and the optimized attn_mask matrix (2048*2048) is required.
4168
+             - 4: Represents the band scenario, that is, only the part between preTokens and nextTokens is computed, and the
4169
+               optimized attn_mask matrix (2048*2048) is required.
4170
+ - 5: Represents the prefix scenario, that is, on the basis of rightDownCasual, a matrix with length S1 and
4171
+ width N is added to the left side. The value of N is obtained by the new input prefix, and the N value of
4172
+ each Batch axis is different, not implemented yet.
4173
+ - 6: Represents the global scenario, not implemented yet.
4174
+ - 7: Represents the dilated scenario, not implemented yet.
4175
+ - 8: Represents the block_local scenario, not implemented yet.
4176
+
4177
+ Inputs:
4178
+ - **query** (Tensor[float16, bfloat16]) - The query tensor.
4179
+ Input tensor of shape :math:`(B, S1, H1)`, `(B, N1, S1, D)`, `(S1, B, H1)`, `(B, S1, N1, D)` or `(T1, N1, D)`.
4180
+ - **key** (Tensor[float16, bfloat16]) - The key tensor.
4181
+ Input tensor of shape :math:`(B, S2, H2)`, `(B, N2, S2, D)`, `(S2, B, H2)`, `(B, S2, N2, D)` or `(T2, N2, D)`.
4182
+ - **value** (Tensor[float16, bfloat16]) - The value tensor.
4183
+ Input tensor of shape :math:`(B, S2, H2)`, `(B, N2, S2, D)`, `(S2, B, H2)`, `(B, S2, N2, D)` or `(T2, N2, D)`.
4184
+ The key and value have the same shape.
4185
+ - **real_shift** (Union[Tensor[float16, bfloat16], None]) - Also known as pse. The position embedding code. If S
4186
+ is greater than 1024 and the mask of the lower triangle is used, enter only the inverse 1024 lines of
4187
+ the lower triangle for memory optimization. Input tensor of shape :math:`(B, N1, S1, S2)`,
4188
+ `(1, N1, S1, S2)`, `(B, N1, 1024, S2)`, `(1, N1, 1024, S2)`.
4189
+
4190
+ - ALiBi scenario: real_shift must meet the ALiBi rule, and sparse_mode is 2 or 3 for the lower triangle.
4191
+ In this scenario, real_shift is `(B, N1, 1024, S2)`, `(1, N1, 1024, S2)`.
4192
+ - Non-ALiBi scenario: real_shift is `(B, N1, S1, S2)`, `(1, N1, S1, S2)`.
4193
+
4194
+ The shape of `real_shift` should be `(B, N1, 1024, S2)` and `(1, N1, 1024, S2)` when input_layout is `TND`.
4195
+ - **drop_mask** (Union[Tensor[uint8], None]) - The dropout mask tensor.
4196
+ Input tensor of shape :math:`(B, N1, S1, S2 // 8) or None`. S2 is a multiple of 8 when not None.
4197
+ - **padding_mask** (None) - Reserved parameter. Not implemented yet.
4198
+ - **attn_mask** (Union[Tensor[uint8], Tensor[bool], None]) - The attention mask tensor. For each element, 0
4199
+ indicates retention and 1 indicates discard. Input tensor of shape :math:`(B, N1, S1, S2)`, `(B, 1, S1, S2)`,
4200
+           `(S1, S2)` or (2048, 2048). In the compression scenario (sparse_mode 2, 3, or 4), attn_mask must be
4201
+ `(2048, 2048)`. When sparse_mode is 5, attn_mask must be `(B, N1, S1, S2)`, `(B, 1, S1, S2)`. When sparse_mode
4202
+           is 0 or 1, attn_mask should be `(B, N1, S1, S2)`, `(B, 1, S1, S2)` or `(S1, S2)`.
4203
+ - **prefix** (Union[List[int64], Tuple[int64], None]) - N value of each Batch in the prefix sparse calculation
4204
+ scenario. Input tensor of shape :math:`(B,)`. B max value 32. Not none only when sparse_mode is 5.
4205
+ If S1 > S2, N ranges from 0 to S2. If S1 <= S2, N ranges from S2 - S1 to S2.
4206
+ - **actual_seq_qlen** (Union[List[int64], Tuple[int64], None]) - Size of query corresponding to each batch, array
4207
+ with increasing values and the last value equal to T1.
4208
+ - **actual_seq_kvlen** (Union[List[int64], Tuple[int64], None]) - Size of key and value corresponding to each
4209
+ batch, array with increasing values and the last value equal to T2.
4210
+
4211
+ Outputs:
4212
+ - **softmax_max** (Tensor[float32]) - (B, N1, S1, 8) when input_layout is not `TND` else (T1, N1, D)
4213
+ - **softmax_sum** (Tensor[float32]) - (B, N1, S1, 8) when input_layout is not `TND` else (T1, N1, D)
4214
+ - **softmax_out** (Tensor[float16, bfloat16]) - Useless output, ignore it. Output tensor of shape : `()`
4215
+         - **attention_out** (Tensor[float16, bfloat16]) - The output of attention; its shape and data type
4216
+           are the same as those of the query.
4217
+
4218
+ Supported Platforms:
4219
+ ``Ascend``
4220
+ """
4221
+ __mindspore_signature__ = (
4222
+ sig.make_sig('query'),
4223
+ sig.make_sig('key'),
4224
+ sig.make_sig('value'),
4225
+ sig.make_sig('real_shift', default=None),
4226
+ sig.make_sig('drop_mask', default=None),
4227
+ sig.make_sig('padding_mask', default=None),
4228
+ sig.make_sig('attn_mask', default=None),
4229
+ sig.make_sig('prefix', default=None),
4230
+ sig.make_sig('actual_seq_qlen', default=None),
4231
+ sig.make_sig('actual_seq_kvlen', default=None),
4232
+ )
4233
+
4234
+ @prim_arg_register
4235
+ def __init__(self, head_num, keep_prob=1.0, scale_value=1.0, pre_tokens=2147483647, next_tokens=2147483647, inner_precise=0, input_layout='BSH', sparse_mode=0):
4236
+ self._set_prim_arg("head_num", head_num)
4237
+ self._set_prim_arg("keep_prob", keep_prob)
4238
+ self._set_prim_arg("scale_value", scale_value)
4239
+ self._set_prim_arg("pre_tokens", pre_tokens)
4240
+ self._set_prim_arg("next_tokens", next_tokens)
4241
+ self._set_prim_arg("inner_precise", inner_precise)
4242
+ self._set_prim_arg_with_handler("input_layout", input_layout, str_to_enum)
4243
+ self._set_prim_arg("sparse_mode", sparse_mode)
4244
+
4245
+ def __call__(self, query, key, value, real_shift=None, drop_mask=None, padding_mask=None, attn_mask=None, prefix=None, actual_seq_qlen=None, actual_seq_kvlen=None):
4246
+ return _convert_stub(pyboost_flash_attention_score(self, [query, key, value, real_shift, drop_mask, padding_mask, attn_mask, prefix, actual_seq_qlen, actual_seq_kvlen, self.head_num, self.keep_prob, self.scale_value, self.pre_tokens, self.next_tokens, self.inner_precise, self.input_layout, self.sparse_mode]))
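A sketch of calling FlashAttentionScore in the default "BSH" layout, following the shapes and the four documented outputs above. It assumes the `ops.auto_generate` export path and runs only on an Ascend backend with float16/bfloat16 inputs:

    import numpy as np
    import mindspore as ms
    from mindspore import ops

    B, S, N, D = 1, 128, 4, 64                       # batch, seq length, heads, head size
    fa = ops.auto_generate.FlashAttentionScore(head_num=N, input_layout='BSH',
                                               scale_value=1.0 / D ** 0.5)
    q = ms.Tensor(np.random.randn(B, S, N * D).astype(np.float16))
    k = ms.Tensor(np.random.randn(B, S, N * D).astype(np.float16))
    v = ms.Tensor(np.random.randn(B, S, N * D).astype(np.float16))
    softmax_max, softmax_sum, _, attention_out = fa(q, k, v)   # outputs as documented above
    print(attention_out.shape)                                  # same shape as q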
4247
+
4248
+ class FlattenExt(Primitive):
4249
+ r"""
4250
+ .. code-block::
4251
+
4252
+ prim = ops.FlattenExt()
4253
+ out = prim(input, start_dim, end_dim)
4254
+
4255
+ is equivalent to
4256
+
4257
+ .. code-block::
4258
+
4259
+ ops.flatten_ext(input, start_dim, end_dim)
4260
+
4261
+ Refer to :func:`mindspore.ops.flatten_ext` for more details.
4262
+ """
4263
+ __mindspore_signature__ = (
4264
+ sig.make_sig('input'),
4265
+ sig.make_sig('start_dim', default=0),
4266
+ sig.make_sig('end_dim', default=-1),
4267
+ )
4268
+
4269
+ @prim_arg_register
4270
+ def __init__(self):
4271
+ pass
4272
+
4273
+ def __call__(self, input, start_dim=0, end_dim=-1):
4274
+ return _convert_stub(pyboost_flatten_ext(self, [input, start_dim, end_dim]))
4275
+
4276
+ flatten_ext_op=FlattenExt()
4277
+
4278
+
4279
+ class Flatten(Primitive):
4280
+ r"""
4281
+ Flattens a tensor without changing its batch size on the 0-th axis.
4282
+
4283
+ Refer to :func:`mindspore.ops.flatten` for more details.
4284
+
4285
+ Inputs:
4286
+ - **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)` to be flattened, where :math:`N` is batch size.
4287
+
4288
+ Outputs:
4289
+ Tensor, the shape of the output tensor is :math:`(N, X)`, where :math:`X` is
4290
+ the product of the remaining dimension.
4291
+
4292
+ Supported Platforms:
4293
+ ``Ascend`` ``GPU`` ``CPU``
4294
+
4295
+ Examples:
4296
+ >>> import mindspore
4297
+ >>> import numpy as np
4298
+ >>> from mindspore import Tensor, ops
4299
+ >>> input_x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
4300
+ >>> flatten = ops.Flatten()
4301
+ >>> output = flatten(input_x)
4302
+ >>> print(output.shape)
4303
+ (1, 24)
4304
+ """
4305
+ @prim_arg_register
4306
+ def __init__(self):
4307
+ pass
4308
+
4309
+ def __call__(self, input_x):
4310
+ return super().__call__(input_x)
4311
+
4312
+
4313
+ flatten_op=Flatten()
4314
+
4315
+
4316
+ class FloorDiv(Primitive):
4317
+ r"""
4318
+ .. code-block::
4319
+
4320
+ prim = ops.FloorDiv()
4321
+ out = prim(input, other)
4322
+
4323
+ is equivalent to
4324
+
4325
+ .. code-block::
4326
+
4327
+ ops.floor_divide(input, other)
4328
+
4329
+ Refer to :func:`mindspore.ops.floor_divide` for more details.
4330
+ """
4331
+ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
4332
+
4333
+ @prim_arg_register
4334
+ def __init__(self):
4335
+ pass
4336
+
4337
+ def __call__(self, input, other):
4338
+ return super().__call__(input, other)
4339
+
4340
+
4341
+ floor_div_op=FloorDiv()
4342
+
4343
+
4344
+ class FloorMod(Primitive):
4345
+ r"""
4346
+ .. code-block::
4347
+
4348
+ prim = ops.FloorMod()
4349
+ out = prim(x, y)
3117
4350
 
3118
4351
  is equivalent to
3119
4352
 
@@ -3464,8 +4697,7 @@ class GridSampler2DGrad(Primitive):
3464
4697
  self._set_prim_arg("align_corners", align_corners)
3465
4698
 
3466
4699
  def __call__(self, grad, input_x, grid):
3467
- return super().__call__(grad, input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners)
3468
-
4700
+ return _convert_stub(pyboost_grid_sampler_2d_grad(self, [grad, input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners]))
3469
4701
 
3470
4702
  class GridSampler2D(Primitive):
3471
4703
  r"""
@@ -3547,8 +4779,7 @@ class GridSampler2D(Primitive):
3547
4779
  self._set_prim_arg("align_corners", align_corners)
3548
4780
 
3549
4781
  def __call__(self, input_x, grid):
3550
- return super().__call__(input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners)
3551
-
4782
+ return _convert_stub(pyboost_grid_sampler_2d(self, [input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners]))
3552
4783
 
3553
4784
  class GridSampler3DGrad(Primitive):
3554
4785
  r"""
@@ -3594,8 +4825,7 @@ class GridSampler3DGrad(Primitive):
3594
4825
  self._set_prim_arg("align_corners", align_corners)
3595
4826
 
3596
4827
  def __call__(self, grad, input_x, grid):
3597
- return super().__call__(grad, input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners)
3598
-
4828
+ return _convert_stub(pyboost_grid_sampler_3d_grad(self, [grad, input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners]))
3599
4829
 
3600
4830
  class GridSampler3D(Primitive):
3601
4831
  r"""
@@ -3668,7 +4898,104 @@ class GridSampler3D(Primitive):
3668
4898
  self._set_prim_arg("align_corners", align_corners)
3669
4899
 
3670
4900
  def __call__(self, input_x, grid):
3671
- return super().__call__(input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners)
4901
+ return _convert_stub(pyboost_grid_sampler_3d(self, [input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners]))
4902
+
4903
+ class GroupNormGrad(Primitive):
4904
+ r"""
4905
+
4906
+ """
4907
+ __mindspore_signature__ = (
4908
+ sig.make_sig('dy'),
4909
+ sig.make_sig('x'),
4910
+ sig.make_sig('mean'),
4911
+ sig.make_sig('rstd'),
4912
+ sig.make_sig('gamma_opt'),
4913
+ sig.make_sig('num_groups'),
4914
+ sig.make_sig('dx_is_require', default=True),
4915
+ sig.make_sig('dgamma_is_require', default=True),
4916
+ sig.make_sig('dbeta_is_require', default=True),
4917
+ )
4918
+
4919
+ @prim_arg_register
4920
+ def __init__(self):
4921
+ pass
4922
+
4923
+ def __call__(self, dy, x, mean, rstd, gamma_opt, num_groups, dx_is_require=True, dgamma_is_require=True, dbeta_is_require=True):
4924
+ return _convert_stub(pyboost_group_norm_grad(self, [dy, x, mean, rstd, gamma_opt, num_groups, dx_is_require, dgamma_is_require, dbeta_is_require]))
4925
+
4926
+ group_norm_grad_op=GroupNormGrad()
4927
+
4928
+
4929
+ class GroupNorm(Primitive):
4930
+ r"""
4931
+ Group Normalization over a mini-batch of inputs.
4932
+
4933
+ Group Normalization is widely used in recurrent neural networks. It applies
4934
+ normalization on a mini-batch of inputs for each single training case as described
4935
+ in the paper `Group Normalization <https://arxiv.org/pdf/1803.08494.pdf>`_. Group Normalization
4936
+ divides the channels into groups and computes within each group the mean and variance for normalization,
4937
+     and it performs very stably over a wide range of batch sizes. :math:`\gamma` and :math:`\beta` are trainable scale
4938
+ and shift.
4939
+ It can be described using the following formula:
4940
+
4941
+ .. math::
4942
+ y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
4943
+
4944
+ where :math:`\gamma` is `weight`, :math:`\beta` is `bias`, :math:`\epsilon` is `eps`.
4945
+
4946
+ Args:
4947
+ input (Tensor): The input feature with shape :math:`(N, C, *)` where :math:`*` means, any number of
4948
+ additional dimensions.
4949
+ num_groups (int): The number of groups to be divided along the channel dimension.
4950
+ weight (Tensor, optional): The shape :math:`(C,)`, Default: ``None``, has the same data type with `input`.
4951
+ bias (Tensor, optional): The shape :math:`(C,)`, Default: ``None``, has the same data type with `input`.
4952
+ eps (float, optional): A value added to the denominator for numerical stability. Default: ``1e-5`` .
4953
+
4954
+ Returns:
4955
+ Tensor, the normalized and scaled offset tensor, has the same shape and data type as the `input`.
4956
+
4957
+ Raises:
4958
+ TypeError: If `num_groups` is not an int.
4959
+ TypeError: If `eps` is not a float.
4960
+ ValueError: If `num_groups` is less than 1.
4961
+         ValueError: If `C` (the channel dimension of `input`) is not divisible by `num_groups`.
4962
+
4963
+ Supported Platforms:
4964
+ ``Ascend`` ``GPU`` ``CPU``
4965
+
4966
+ Examples:
4967
+ >>> import mindspore as ms
4968
+ >>> import numpy as np
4969
+ >>> from mindspore import ops
4970
+ >>> x = ms.Tensor(np.ones([1, 2, 4, 4], np.float32))
4971
+ >>> group_norm_op = ops.GroupNorm()
4972
+ >>> output = group_norm_op(x, 2)[0]
4973
+ >>> print(output)
4974
+ [[[[0. 0. 0. 0.]
4975
+ [0. 0. 0. 0.]
4976
+ [0. 0. 0. 0.]
4977
+ [0. 0. 0. 0.]]
4978
+ [[0. 0. 0. 0.]
4979
+ [0. 0. 0. 0.]
4980
+ [0. 0. 0. 0.]
4981
+ [0. 0. 0. 0.]]]]
4982
+ """
4983
+ __mindspore_signature__ = (
4984
+ sig.make_sig('input', dtype=sig.sig_dtype.T),
4985
+ sig.make_sig('num_groups', dtype=sig.sig_dtype.T1),
4986
+ sig.make_sig('weight', dtype=sig.sig_dtype.T, default=None),
4987
+ sig.make_sig('bias', dtype=sig.sig_dtype.T, default=None),
4988
+ sig.make_sig('eps', dtype=sig.sig_dtype.T2, default=1e-5),
4989
+ )
4990
+
4991
+ @prim_arg_register
4992
+ def __init__(self):
4993
+ pass
4994
+
4995
+ def __call__(self, input, num_groups, weight=None, bias=None, eps=1e-5):
4996
+ return _convert_stub(pyboost_group_norm(self, [input, num_groups, weight, bias, eps]))
4997
+
4998
+ group_norm_op=GroupNorm()
3672
4999
 
3673
5000
 
3674
5001
  class HShrinkGrad(Primitive):
@@ -4065,19 +5392,108 @@ class IsFinite(Primitive):
4065
5392
 
4066
5393
  .. code-block::
4067
5394
 
4068
- ops.is_finite(x)
5395
+ ops.isfinite(x)
4069
5396
 
4070
- Refer to :func:`mindspore.ops.is_finite` for more details.
5397
+ Refer to :func:`mindspore.ops.isfinite` for more details.
4071
5398
  """
4072
5399
  @prim_arg_register
4073
5400
  def __init__(self):
4074
5401
  pass
4075
5402
 
4076
5403
  def __call__(self, x):
4077
- return super().__call__(x)
5404
+ return _convert_stub(pyboost_isfinite(self, [x]))
5405
+
5406
+ isfinite_op=IsFinite()
5407
+
5408
+
5409
+ class LayerNormExt(Primitive):
5410
+ r"""
5411
+ Applies the Layer Normalization to the input tensor.
5412
+
5413
+     This operator will normalize the input tensor on the given axis. LayerNorm is described in the paper
5414
+ `Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
5415
+
5416
+ .. math::
5417
+ y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
5418
+
5419
+ where :math:`\gamma` is weight, :math:`\beta` is bias, :math:`\epsilon` is eps.
5420
+
5421
+ Args:
5422
+ input (Tensor): Tensor of shape :math:`(N, \ldots)`. The input of LayerNorm.
5423
+ normalized_shape (Union(tuple[int], list[int])): The normalized shape of `input` for LayerNorm.
5424
+ weight (Tensor, optional): Learnable parameter :math:`\gamma` . Tensor of shape `normalized_shape`. Default: ``None`` .
5425
+ bias (Tensor, optional): Learnable parameter :math:`\beta` . Tensor of shape `normalized_shape`. Default: ``None`` .
5426
+ eps (float, optional): A value added to the denominator for numerical stability(:math:`\epsilon`). Default: ``1e-5`` .
5427
+
5428
+ Returns:
5429
+         tuple[Tensor], tuple of 3 tensors: the normalized input, the mean, and the reciprocal of the standard deviation.
5430
+
5431
+ - **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`.
5432
+ - **mean** (Tensor) - The first `begin_norm_axis` (The begin axis of the `input_x` to apply LayerNorm) dimensions of `mean` shape is the same as `input_x`,
5433
+ and the remaining dimensions are 1. Suppose the shape of the `input_x` is :math:`(x_1, x_2, \ldots, x_R)`,
5434
+ the shape of the `mean` is :math:`(x_1, \ldots, x_{begin_params_axis}, 1, \ldots, 1)`
5435
+ (when `begin_params_axis=0`, the shape of `mean` is :math:`(1, \ldots, 1)` ).
5436
+ - **rstd** (Tensor) - Shape is the same as `mean` .
5437
+
5438
+ Raises:
5439
+ TypeError: If `input` is not a Tensor.
5440
+ TypeError: If `normalized_shape` is not an integer, a list or a tuple.
5441
+ TypeError: If `eps` is not a float.
5442
+
5443
+ Supported Platforms:
5444
+ ``Ascend``
5445
+
5446
+ Examples:
5447
+ >>> import mindspore
5448
+ >>> import numpy as np
5449
+ >>> from mindspore import Tensor, ops
5450
+ >>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32)
5451
+ >>> normalized_shape = (3,)
5452
+ >>> gamma = Tensor(np.ones(normalized_shape), mindspore.float32)
5453
+ >>> beta = Tensor(np.zeros(normalized_shape), mindspore.float32)
5454
+ >>> eps = 1e-7
5455
+ >>> layer_norm = ops.LayerNormExt()
5456
+ >>> output, mean, rstd = layer_norm(input_x, normalized_shape, gamma, beta, eps)
5457
+ >>> print(output)
5458
+ [[-1.2247448 0. 1.2247448]
5459
+ [-1.2247448 0. 1.2247448]]
5460
+ >>> print(mean)
5461
+ [[2.]
5462
+ [2.]]
5463
+ >>> print(rstd)
5464
+ [[1.2247447]
5465
+ [1.2247447]]
5466
+ """
5467
+ __mindspore_signature__ = (
5468
+ sig.make_sig('input'),
5469
+ sig.make_sig('normalized_shape'),
5470
+ sig.make_sig('weight', default=None),
5471
+ sig.make_sig('bias', default=None),
5472
+ sig.make_sig('eps', default=1e-5),
5473
+ )
5474
+
5475
+ @prim_arg_register
5476
+ def __init__(self):
5477
+ pass
5478
+
5479
+ def __call__(self, input, normalized_shape, weight=None, bias=None, eps=1e-5):
5480
+ return _convert_stub(pyboost_layer_norm_ext(self, [input, normalized_shape, weight, bias, eps]))
5481
+
5482
+ layer_norm_ext_op=LayerNormExt()
5483
+
5484
+
5485
+ class LayerNormGradExt(Primitive):
5486
+ r"""
5487
+
5488
+ """
5489
+ @prim_arg_register
5490
+ def __init__(self):
5491
+ pass
4078
5492
 
5493
+ def __call__(self, dy, x, normalized_shape, mean, variance, gamma, beta):
5494
+ return _convert_stub(pyboost_layer_norm_grad_ext(self, [dy, x, normalized_shape, mean, variance, gamma, beta]))
4079
5495
 
4080
- is_finite_op=IsFinite()
5496
+ layer_norm_grad_ext_op=LayerNormGradExt()
4081
5497
 
4082
5498
 
4083
5499
  class LayerNormGradGrad(Primitive):
@@ -4321,36 +5737,97 @@ class LayerNormV3(Primitive):
4321
5737
  return super().__call__(input_x, gamma, beta, self.begin_norm_axis, self.begin_params_axis, self.epsilon)
4322
5738
 
4323
5739
 
4324
- class LessEqual(Primitive):
5740
+ class LeakyReLUExt(Primitive):
4325
5741
  r"""
4326
5742
  .. code-block::
4327
5743
 
4328
- prim = ops.LessEqual()
4329
- out = prim(input, other)
5744
+ prim = ops.LeakyReLUExt()
5745
+ out = prim(input, negative_slope)
4330
5746
 
4331
5747
  is equivalent to
4332
5748
 
4333
5749
  .. code-block::
4334
5750
 
4335
- ops.less_equal(input, other)
5751
+ ops.leaky_relu_ext(input, negative_slope)
4336
5752
 
4337
- Refer to :func:`mindspore.ops.less_equal` for more details.
5753
+ Refer to :func:`mindspore.ops.leaky_relu_ext` for more details.
4338
5754
  """
4339
- __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
5755
+ __mindspore_signature__ = (
5756
+ sig.make_sig('input'),
5757
+ sig.make_sig('negative_slope', default=0.01),
5758
+ )
4340
5759
 
4341
5760
  @prim_arg_register
4342
5761
  def __init__(self):
4343
5762
  pass
4344
5763
 
4345
- def __call__(self, input, other):
4346
- return _convert_stub(pyboost_less_equal(self, [input, other]))
5764
+ def __call__(self, input, negative_slope=0.01):
5765
+ return _convert_stub(pyboost_leaky_relu_ext(self, [input, negative_slope]))
4347
5766
 
4348
- less_equal_op=LessEqual()
5767
+ leaky_relu_ext_op=LeakyReLUExt()
4349
5768
 
4350
5769
 
4351
- class Less(Primitive):
5770
+ class LeakyReLUGradExt(Primitive):
4352
5771
  r"""
4353
- .. code-block::
5772
+ Computes gradient for the LeakyReLU activation.
5773
+
5774
+ Args:
5775
+ dy (Tensor): Input gradients tensor, has the same dtype and shape as `input`.
5776
+         input (Tensor): The original forward input tensor.
5777
+         negative_slope (Scalar): The negative_slope value used in the forward pass.
5778
+         is_result (bool): If ``True``, `input` is treated as the forward output rather than the original input.
5779
+
5780
+ Returns:
5781
+ Tensor, has the same dtype and shape as `input`.
5782
+
5783
+ """
5784
+ __mindspore_signature__ = (
5785
+ sig.make_sig('dy'),
5786
+ sig.make_sig('input'),
5787
+ sig.make_sig('negative_slope', default=0.01),
5788
+ sig.make_sig('is_result', default=False),
5789
+ )
5790
+
5791
+ @prim_arg_register
5792
+ def __init__(self):
5793
+ pass
5794
+
5795
+ def __call__(self, dy, input, negative_slope=0.01, is_result=False):
5796
+ return _convert_stub(pyboost_leaky_relu_grad_ext(self, [dy, input, negative_slope, is_result]))
5797
+
5798
+ leaky_relu_grad_ext_op=LeakyReLUGradExt()
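LeakyReLUGradExt is normally driven by autograd rather than called directly; a hand-written sketch, assuming `is_result` indicates whether `input` holds the forward output (an interpretation, not stated in the docstring) and the usual `ops.auto_generate` export-path assumption:

    import numpy as np
    import mindspore as ms
    from mindspore import ops

    grad_op = ops.auto_generate.LeakyReLUGradExt()
    x = ms.Tensor(np.array([-1.0, 2.0], np.float32))    # forward input
    dy = ms.Tensor(np.array([1.0, 1.0], np.float32))    # incoming gradient
    dx = grad_op(dy, x, negative_slope=0.01, is_result=False)
    print(dx)                                            # expected [0.01, 1.0] under this reading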
5799
+
5800
+
5801
+ class LessEqual(Primitive):
5802
+ r"""
5803
+ .. code-block::
5804
+
5805
+ prim = ops.LessEqual()
5806
+ out = prim(input, other)
5807
+
5808
+ is equivalent to
5809
+
5810
+ .. code-block::
5811
+
5812
+ ops.less_equal(input, other)
5813
+
5814
+ Refer to :func:`mindspore.ops.less_equal` for more details.
5815
+ """
5816
+ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
5817
+
5818
+ @prim_arg_register
5819
+ def __init__(self):
5820
+ pass
5821
+
5822
+ def __call__(self, input, other):
5823
+ return _convert_stub(pyboost_less_equal(self, [input, other]))
5824
+
5825
+ less_equal_op=LessEqual()
5826
+
5827
+
5828
+ class Less(Primitive):
5829
+ r"""
5830
+ .. code-block::
4354
5831
 
4355
5832
  prim = ops.Less()
4356
5833
  out = prim(input, other)
@@ -4370,12 +5847,32 @@ class Less(Primitive):
4370
5847
  pass
4371
5848
 
4372
5849
  def __call__(self, input, other):
4373
- return super().__call__(input, other)
4374
-
5850
+ return _convert_stub(pyboost_less(self, [input, other]))
4375
5851
 
4376
5852
  less_op=Less()
4377
5853
 
4378
5854
 
5855
+ class LinSpaceExt(Primitive):
5856
+ r"""
5857
+
5858
+ """
5859
+ __mindspore_signature__ = (
5860
+ sig.make_sig('start'),
5861
+ sig.make_sig('end'),
5862
+ sig.make_sig('steps'),
5863
+ sig.make_sig('dtype', default=None),
5864
+ )
5865
+
5866
+ @prim_arg_register
5867
+ def __init__(self):
5868
+ pass
5869
+
5870
+ def __call__(self, start, end, steps, dtype=None):
5871
+ return _convert_stub(pyboost_lin_space_ext(self, [start, end, steps, dtype if dtype is None else dtype_to_type_id('LinSpaceExt', 'dtype', dtype)]))
5872
+
5873
+ lin_space_ext_op=LinSpaceExt()
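LinSpaceExt carries no docstring; by analogy with the LinSpace primitive that follows, it presumably returns `steps` evenly spaced values from `start` to `end`, with an optional dtype. A sketch under that assumption (export path assumed as in the other examples):

    import mindspore as ms
    from mindspore import ops

    linspace = ops.auto_generate.LinSpaceExt()
    out = linspace(0.0, 1.0, 5, dtype=ms.float32)    # assumed: [0.0, 0.25, 0.5, 0.75, 1.0]
    print(out)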
5874
+
5875
+
4379
5876
  class LinSpace(Primitive):
4380
5877
  r"""
4381
5878
  Returns a Tensor whose value is `num` evenly spaced in the interval `start` and `stop` (including `start` and
@@ -4838,6 +6335,88 @@ class MaskedFill(Primitive):
4838
6335
  masked_fill_op=MaskedFill()
4839
6336
 
4840
6337
 
6338
+ class MatMulExt(Primitive):
6339
+ r"""
6340
+ .. code-block::
6341
+
6342
+ prim = ops.MatMulExt()
6343
+ out = prim(input, mat2)
6344
+
6345
+ is equivalent to
6346
+
6347
+ .. code-block::
6348
+
6349
+ ops.matmul_ext(input, mat2)
6350
+
6351
+ Refer to :func:`mindspore.ops.matmul_ext` for more details.
6352
+ """
6353
+ @prim_arg_register
6354
+ def __init__(self):
6355
+ pass
6356
+
6357
+ def __call__(self, input, mat2):
6358
+ return _convert_stub(pyboost_matmul_ext(self, [input, mat2]))
6359
+
6360
+ matmul_ext_op=MatMulExt()
6361
+
6362
+
6363
+ class MatMul(Primitive):
6364
+ r"""
6365
+ Multiplies matrix `a` and matrix `b`.
6366
+
6367
+ .. math::
6368
+
6369
+ Output_{i j}=\sum_{k=1}^{p} a_{i k} b_{k j}=a_{i 1} b_{1 j}+a_{i 2} b_{2 j}+\cdots+a_{i p} b_{p j}, p\in N
6370
+
6371
+ where the :math:`i,j` indicates the output of the i-th row and j-th column element.
6372
+
6373
+ Note:
6374
+         - If :math:`N * M` is not divisible by 16, performance will be poor in the Ascend environment.
6375
+         - The dtypes of the inputs must be the same.
6376
+         - On Ascend, float64 is not supported.
6377
+
6378
+ Args:
6379
+ transpose_a (bool): If ``True`` , `a` is transposed before multiplication. Default: ``False`` .
6380
+ transpose_b (bool): If ``True`` , `b` is transposed before multiplication. Default: ``False`` .
6381
+
6382
+ Inputs:
6383
+ - **a** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(N, C)`. If
6384
+ `transpose_a` is ``True`` , its shape must be :math:`(C, N)` after transpose.
6385
+ - **b** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(C, M)`. If
6386
+ `transpose_b` is ``True`` , its shape must be :math:`(M, C)` after transpose.
6387
+
6388
+ Outputs:
6389
+ Tensor, the shape of the output tensor is :math:`(N, M)`.
6390
+
6391
+ Raises:
6392
+ TypeError: If `transpose_a` or `transpose_b` is not a bool.
6393
+ TypeError: If the dtype of `a` and the dtype of `b` are not the same.
6394
+ ValueError: If the column of matrix dimensions of `a` is not equal to
6395
+ the row of matrix dimensions of `b`.
6396
+ ValueError: If length of shape of `a` or `b` is not equal to 2.
6397
+
6398
+ Supported Platforms:
6399
+ ``Ascend`` ``GPU`` ``CPU``
6400
+
6401
+ Examples:
6402
+ >>> import mindspore
6403
+ >>> import numpy as np
6404
+ >>> from mindspore import Tensor, ops
6405
+ >>> a = Tensor(np.ones(shape=[1, 3]), mindspore.float32)
6406
+ >>> b = Tensor(np.ones(shape=[3, 4]), mindspore.float32)
6407
+ >>> matmul = ops.MatMul()
6408
+ >>> output = matmul(a, b)
6409
+ >>> print(output)
6410
+ [[3. 3. 3. 3.]]
6411
+ """
6412
+ @prim_arg_register
6413
+ def __init__(self, transpose_a=False, transpose_b=False):
6414
+ self._set_prim_arg("transpose_a", transpose_a)
6415
+ self._set_prim_arg("transpose_b", transpose_b)
6416
+
6417
+ def __call__(self, input, mat2):
6418
+ return _convert_stub(pyboost_matmul(self, [input, mat2, self.transpose_a, self.transpose_b]))
6419
+
4841
6420
  class MatrixDeterminant(Primitive):
4842
6421
  r"""
4843
6422
  Calculates the value of the determinant for one or more square matrices.
@@ -4898,6 +6477,215 @@ class MatrixExp(Primitive):
4898
6477
  matrix_exp_op=MatrixExp()
4899
6478
 
4900
6479
 
6480
+ class Max(Primitive):
6481
+ r"""
6482
+ .. code-block::
6483
+
6484
+ prim = ops.Max()
6485
+ out = prim(input)
6486
+
6487
+ is equivalent to
6488
+
6489
+ .. code-block::
6490
+
6491
+ ops.max_(input)
6492
+
6493
+ Refer to :func:`mindspore.ops.max_` for more details.
6494
+ """
6495
+ @prim_arg_register
6496
+ def __init__(self):
6497
+ pass
6498
+
6499
+ def __call__(self, input):
6500
+ return _convert_stub(pyboost_max(self, [input]))
6501
+
6502
+ max_op=Max()
6503
+
6504
+
6505
+ class MaxPoolGradWithIndices(Primitive):
6506
+ r"""
6507
+ Gradients of the MaxPoolWithIndices operation.
6508
+ """
6509
+ @prim_arg_register
6510
+ def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64):
6511
+ self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size)
6512
+ self._set_prim_arg_with_handler("strides", strides, to_strides)
6513
+ self._set_prim_arg_with_handler("pads", pads, to_output_padding)
6514
+ self._set_prim_arg_with_handler("dilation", dilation, to_dilations)
6515
+ self._set_prim_arg("ceil_mode", ceil_mode)
6516
+ self._set_prim_arg_with_handler("argmax_type", argmax_type, dtype_to_type_id)
6517
+
6518
+ def __call__(self, x, grad, argmax):
6519
+ return _convert_stub(pyboost_max_pool_grad_with_indices(self, [x, grad, argmax, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type]))
6520
+
6521
+ class MaxPoolGradWithMask(Primitive):
6522
+ r"""
6523
+ Gradients of the MaxPoolWithMask operation.
6524
+ """
6525
+ @prim_arg_register
6526
+ def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64):
6527
+ self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size)
6528
+ self._set_prim_arg_with_handler("strides", strides, to_strides)
6529
+ self._set_prim_arg_with_handler("pads", pads, to_output_padding)
6530
+ self._set_prim_arg_with_handler("dilation", dilation, to_dilations)
6531
+ self._set_prim_arg("ceil_mode", ceil_mode)
6532
+ self._set_prim_arg_with_handler("argmax_type", argmax_type, dtype_to_type_id)
6533
+
6534
+ def __call__(self, x, grad, mask):
6535
+ return _convert_stub(pyboost_max_pool_grad_with_mask(self, [x, grad, mask, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type]))
6536
+
6537
+ class MaxPoolWithIndices(Primitive):
6538
+ r"""
6539
+ Performs max pooling on the input Tensor and returns both max values and indices.
6540
+
6541
+ Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs
6542
+ regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
6543
+ :math:`(h_{ker}, w_{ker})` and stride :math:`(s_0, s_1)`, the operation is as follows:
6544
+
6545
+ .. math::
6546
+ \text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
6547
+ \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
6548
+
6549
+ .. warning::
6550
+ This is an experimental API that is subject to change or deletion. Only support on Atlas training series.
6551
+
6552
+ Args:
6553
+ kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value and argmax
6554
+ value, is an int number that represents height and width of the kernel, or a tuple of
6555
+ two int numbers that represent height and width respectively.
6556
+ strides (Union[int, tuple[int]], optional): The distance of kernel moving, an int number that represents
6557
+ not only the height of movement but also the width of movement, or a tuple of two int numbers that
6558
+ represent height and width of movement respectively. Default: ``None`` , meaning that
6559
+ `strides = kernel_size`.
6560
+         pads (Union[int, tuple[int]], optional): The amount of implicit padding added on both sides of the input; an int
6561
+             applied to both the height and width dimensions, or a tuple of two int numbers that specify
6562
+             the height and width padding respectively.
6563
+ Default: 0.
6564
+ dilation (Union[int, tuple[int]], optional): Control the stride of elements in the kernel. Default: ``(1, 1)`` .
6565
+ ceil_mode (bool, optional): Whether to use ceil instead of floor to calculate output shape. Default: ``False`` .
6566
+ argmax_type (mindspore.dtype, optional) : The dtype for argmax.
6567
+ Default: ``mstype.int64`` . [Disabled in Ascend.]
6568
+
6569
+ Inputs:
6570
+ - **x** (Tensor) - Tensor of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})` with data type of float32 in Ascend.
6571
+
6572
+ Outputs:
6573
+ Tuple of 2 Tensors, representing the maxpool result and where the max values are generated.
6574
+
6575
+ - **output** (Tensor) - Maxpooling result, with shape :math:`(N_{out}, C_{out}, H_{out}, W_{out})`.
6576
+ It has the same data type as `x`.
6577
+
6578
+ .. math::
6579
+ H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{pads[0]} - \text{dilation[0]}
6580
+ \times (\text{kernel_size[0]} - 1) - 1}{\text{strides[0]}} + 1\right\rfloor
6581
+
6582
+ .. math::
6583
+ W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{pads[1]} - \text{dilation[1]}
6584
+ \times (\text{kernel_size[1]} - 1) - 1}{\text{strides[1]}} + 1\right\rfloor
6585
+
6586
+ - **argmax** (Tensor) - Index corresponding to the maximum value. Data type is int32 in Ascend.
6587
+
6588
+ Raises:
6589
+ TypeError: If `x` is not a Tensor.
6590
+ ValueError: If length of shape of `x` is not equal to 4.
6591
+ TypeError: If `kernel_size` , `strides` , `pads` or `dilation` is not int or tuple.
6592
+ ValueError: If `kernel_size`, `strides` or `dilation` is less than 1.
6593
+ ValueError: If `pads` is less than 0.
6594
+ ValueError: If `pads` is more than half of `kernel_size`.
6595
+ TypeError: If `ceil_mode` is not bool.
6596
+
6597
+ Supported Platforms:
6598
+ ``Ascend``
6599
+ """
6600
+ @prim_arg_register
6601
+ def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64):
6602
+ self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size)
6603
+ self._set_prim_arg_with_handler("strides", strides, to_strides)
6604
+ self._set_prim_arg_with_handler("pads", pads, to_output_padding)
6605
+ self._set_prim_arg_with_handler("dilation", dilation, to_dilations)
6606
+ self._set_prim_arg("ceil_mode", ceil_mode)
6607
+ self._set_prim_arg_with_handler("argmax_type", argmax_type, dtype_to_type_id)
6608
+
6609
+ def __call__(self, x):
6610
+ return _convert_stub(pyboost_max_pool_with_indices(self, [x, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type]))
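A usage sketch for MaxPoolWithIndices following the docstring above (float32 NCHW input, Ascend only); the `ops.auto_generate` export path is assumed as in the other examples:

    import numpy as np
    import mindspore as ms
    from mindspore import ops

    pool = ops.auto_generate.MaxPoolWithIndices(kernel_size=2, strides=2)
    x = ms.Tensor(np.random.randn(1, 3, 8, 8).astype(np.float32))
    output, argmax = pool(x)                         # pooled values and the index of each maximum
    print(output.shape)                              # (1, 3, 4, 4) with these settings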
6611
+
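A minimal usage sketch for the primitive above; the `ops.auto_generate` import path mirrors the style used elsewhere in this file and, together with the Ascend-only support noted in the docstring, is an assumption about the runtime environment:

.. code-block::

    import numpy as np
    from mindspore import Tensor, ops

    # Assumed import path: the class is generated into mindspore.ops.auto_generate.
    pool = ops.auto_generate.MaxPoolWithIndices(kernel_size=2, strides=2)
    x = Tensor(np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4))
    out, argmax = pool(x)  # pooled maxima and the indices where they occur
    print(out.shape, argmax.shape)  # expected: (1, 1, 2, 2) (1, 1, 2, 2)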
6612
+ class MaxPoolWithMask(Primitive):
6613
+ r"""
6614
+ Performs max pooling on the input Tensor and returns both max values and mask.
6615
+
6616
+ Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs
6617
+ regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
6618
+ :math:`(h_{ker}, w_{ker})` and stride :math:`(s_0, s_1)`, the operation is as follows:
6619
+
6620
+ .. math::
6621
+ \text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
6622
+ \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
6623
+
6624
+ .. warning::
6625
+ This is an experimental API that is subject to change or deletion. Only supported on Atlas training series products.
6626
+
6627
+ Args:
6628
+ kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value and argmax
6629
+ value, is an int number that represents height and width of the kernel, or a tuple of
6630
+ two int numbers that represent height and width respectively.
6631
+ strides (Union[int, tuple[int]], optional): The distance of kernel moving, an int number that represents
6632
+ not only the height of movement but also the width of movement, or a tuple of two int numbers that
6633
+ represent height and width of movement respectively. Default: ``None`` , meaning that `strides = kernel_size`.
6634
+ pads (Union[int, tuple[int]], optional): The number of pixels of implicit padding on both sides, an int
+ number that represents the padding on the height and width, or a tuple of two int numbers that
+ represent the height and width padding respectively.
+ Default: 0.
6638
+ dilation (Union[int, tuple[int]], optional): The spacing between the elements of the kernel, used to increase the receptive field of the pooling operation.
6639
+ Default: ``(1, 1)`` .
6640
+ ceil_mode (bool, optional): Whether to use ceil instead of floor to calculate output shape.
6641
+ Default: ``False`` .
6642
+ argmax_type (mindspore.dtype, optional): The dtype for argmax.
6643
+ Default: ``mstype.int64`` . [Disabled in Ascend.]
6644
+
6645
+ Inputs:
6646
+ - **x** (Tensor) - Tensor of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})` with data type of float16
6647
+ and float32 in Ascend.
6648
+
6649
+ Outputs:
6650
+ Tuple of 2 Tensors, representing the maxpool result and the corresponding mask.
6651
+
6652
+ - **output** (Tensor) - Maxpooling result, with shape :math:`(N_{out}, C_{out}, H_{out}, W_{out})`.
6653
+ It has the same data type as `x`.
6654
+
6655
+ .. math::
6656
+ H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{pads[0]} - \text{dilation[0]}
6657
+ \times (\text{kernel_size[0]} - 1) - 1}{\text{strides[0]}} + 1\right\rfloor
6658
+
6659
+ .. math::
6660
+ W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{pads[1]} - \text{dilation[1]}
6661
+ \times (\text{kernel_size[1]} - 1) - 1}{\text{strides[1]}} + 1\right\rfloor
6662
+
6663
+ - **mask** (Tensor) - Maxpooling mask. Data type is int8 in Ascend.
6664
+
6665
+ Raises:
6666
+ TypeError: If `x` is not a Tensor.
6667
+ ValueError: If length of shape of `x` is not equal to 4.
6668
+ TypeError: If `kernel_size` , `strides` , `pads` or `dilation` is not int or tuple.
6669
+ ValueError: If `kernel_size`, `strides` or `dilation` is less than 1.
6670
+ ValueError: If `pads` is less than 0.
6671
+ ValueError: If `pads` is more than half of `kernel_size`.
6672
+ TypeError: If `ceil_mode` is not bool.
6673
+
6674
+ Supported Platforms:
6675
+ ``Ascend``
6676
+ """
6677
+ @prim_arg_register
6678
+ def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64):
6679
+ self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size)
6680
+ self._set_prim_arg_with_handler("strides", strides, to_strides)
6681
+ self._set_prim_arg_with_handler("pads", pads, to_output_padding)
6682
+ self._set_prim_arg_with_handler("dilation", dilation, to_dilations)
6683
+ self._set_prim_arg("ceil_mode", ceil_mode)
6684
+ self._set_prim_arg_with_handler("argmax_type", argmax_type, dtype_to_type_id)
6685
+
6686
+ def __call__(self, x):
6687
+ return _convert_stub(pyboost_max_pool_with_mask(self, [x, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type]))
6688
+
4901
6689
  class MaximumGradGrad(Primitive):
4902
6690
  r"""
4903
6691
  Grad for maximum grad.
@@ -4946,12 +6734,68 @@ class Maximum(Primitive):
4946
6734
  pass
4947
6735
 
4948
6736
  def __call__(self, input, other):
4949
- return super().__call__(input, other)
4950
-
6737
+ return _convert_stub(pyboost_maximum(self, [input, other]))
4951
6738
 
4952
6739
  maximum_op=Maximum()
4953
6740
 
4954
6741
 
6742
+ class MeanExt(Primitive):
6743
+ r"""
6744
+ .. code-block::
6745
+
6746
+ prim = ops.MeanExt()
6747
+ out = prim(input, axis, keep_dims, dtype)
6748
+
6749
+ is equivalent to
6750
+
6751
+ .. code-block::
6752
+
6753
+ ops.mean_ext(input, axis, keep_dims, dtype)
6754
+
6755
+ Refer to :func:`mindspore.ops.mean_ext` for more details.
6756
+ """
6757
+ __mindspore_signature__ = (
6758
+ sig.make_sig('input'),
6759
+ sig.make_sig('axis', default=None),
6760
+ sig.make_sig('keep_dims', default=False),
6761
+ sig.make_sig('dtype', default=None),
6762
+ )
6763
+
6764
+ @prim_arg_register
6765
+ def __init__(self):
6766
+ pass
6767
+
6768
+ def __call__(self, input, axis=None, keep_dims=False, dtype=None):
6769
+ return _convert_stub(pyboost_mean_ext(self, [input, axis, keep_dims, dtype if dtype is None else dtype_to_type_id('MeanExt', 'dtype', dtype)]))
6770
+
6771
+ mean_ext_op=MeanExt()
6772
+
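A small sketch of calling the MeanExt primitive directly (assuming the `ops.auto_generate` path used by other examples in this file and a backend that supports this op):

.. code-block::

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    mean_ext = ops.auto_generate.MeanExt()
    x = Tensor(np.array([[1., 2., 3.], [4., 5., 6.]], dtype=np.float32))
    print(mean_ext(x))                       # mean of all elements -> 3.5
    print(mean_ext(x, 1, True, ms.float32))  # keep the reduced axis -> shape (2, 1)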
6773
+
6774
+ class Min(Primitive):
6775
+ r"""
6776
+ .. code-block::
6777
+
6778
+ prim = ops.Min()
6779
+ out = prim(input)
6780
+
6781
+ is equivalent to
6782
+
6783
+ .. code-block::
6784
+
6785
+ ops.min_(input)
6786
+
6787
+ Refer to :func:`mindspore.ops.min_` for more details.
6788
+ """
6789
+ @prim_arg_register
6790
+ def __init__(self):
6791
+ pass
6792
+
6793
+ def __call__(self, input):
6794
+ return _convert_stub(pyboost_min(self, [input]))
6795
+
6796
+ min_op=Min()
6797
+
6798
+
4955
6799
  class MinimumGrad(Primitive):
4956
6800
  r"""
4957
6801
  Grad for minimum.
@@ -4987,42 +6831,102 @@ class Minimum(Primitive):
4987
6831
  pass
4988
6832
 
4989
6833
  def __call__(self, input, other):
4990
- return super().__call__(input, other)
4991
-
6834
+ return _convert_stub(pyboost_minimum(self, [input, other]))
4992
6835
 
4993
6836
  minimum_op=Minimum()
4994
6837
 
4995
6838
 
4996
- class Mul(Primitive):
6839
+ class MoeFinalizeRouting(Primitive):
4997
6840
  r"""
4998
6841
  .. code-block::
4999
6842
 
5000
- prim = ops.Mul()
5001
- out = prim(input, other)
6843
+ prim = ops.MoeFinalizeRouting()
6844
+ out = prim(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
5002
6845
 
5003
6846
  is equivalent to
5004
6847
 
5005
6848
  .. code-block::
5006
6849
 
5007
- ops.mul(input, other)
6850
+ ops.moe_finalize_routing(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
5008
6851
 
5009
- Refer to :func:`mindspore.ops.mul` for more details.
6852
+ Refer to :func:`mindspore.ops.moe_finalize_routing` for more details.
5010
6853
  """
5011
6854
  __mindspore_signature__ = (
5012
- sig.make_sig('input', sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T),
5013
- sig.make_sig('other', sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T),
6855
+ sig.make_sig('expanded_x'),
6856
+ sig.make_sig('x1'),
6857
+ sig.make_sig('x2', default=None),
6858
+ sig.make_sig('bias', default=None),
6859
+ sig.make_sig('scales', default=None),
6860
+ sig.make_sig('expanded_row_idx', default=None),
6861
+ sig.make_sig('expanded_expert_idx', default=None),
5014
6862
  )
5015
6863
 
5016
6864
  @prim_arg_register
5017
6865
  def __init__(self):
5018
6866
  pass
5019
6867
 
5020
- def __call__(self, input, other):
6868
+ def __call__(self, expanded_x, x1, x2=None, bias=None, scales=None, expanded_row_idx=None, expanded_expert_idx=None):
6869
+ return super().__call__(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
6870
+
6871
+
6872
+ moe_finalize_routing_op=MoeFinalizeRouting()
6873
+
6874
+
6875
+ class Mul(Primitive):
6876
+ r"""
6877
+ .. code-block::
6878
+
6879
+ prim = ops.Mul()
6880
+ out = prim(input, other)
6881
+
6882
+ is equivalent to
6883
+
6884
+ .. code-block::
6885
+
6886
+ ops.mul(input, other)
6887
+
6888
+ Refer to :func:`mindspore.ops.mul` for more details.
6889
+ """
6890
+ __mindspore_signature__ = (
6891
+ sig.make_sig('input', sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T),
6892
+ sig.make_sig('other', sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T),
6893
+ )
6894
+
6895
+ @prim_arg_register
6896
+ def __init__(self):
6897
+ pass
6898
+
6899
+ def __call__(self, input, other):
5021
6900
  return _convert_stub(pyboost_mul(self, [input, other]))
5022
6901
 
5023
6902
  mul_op=Mul()
5024
6903
 
5025
6904
 
6905
+ class Mv(Primitive):
6906
+ r"""
6907
+ .. code-block::
6908
+
6909
+ prim = ops.Mv()
6910
+ out = prim(input, vec)
6911
+
6912
+ is equivalent to
6913
+
6914
+ .. code-block::
6915
+
6916
+ ops.mv(input, vec)
6917
+
6918
+ Refer to :func:`mindspore.ops.mv` for more details.
6919
+ """
6920
+ @prim_arg_register
6921
+ def __init__(self):
6922
+ pass
6923
+
6924
+ def __call__(self, input, vec):
6925
+ return _convert_stub(pyboost_mv(self, [input, vec]))
6926
+
6927
+ mv_op=Mv()
6928
+
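For reference, a short sketch of the matrix-vector product exposed by this primitive (the `ops.auto_generate` import path is assumed, as in the other sketches added to this file):

.. code-block::

    import numpy as np
    from mindspore import Tensor, ops

    mv = ops.auto_generate.Mv()
    m = Tensor(np.array([[1., 2.], [3., 4.]], dtype=np.float32))
    v = Tensor(np.array([1., 1.], dtype=np.float32))
    print(mv(m, v))  # matrix-vector product -> [3. 7.]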
6929
+
5026
6930
  class NanToNum(Primitive):
5027
6931
  r"""
5028
6932
  Replaces `NaN`, positive infinity and negative infinity values in the input Tensor with the values
@@ -5249,6 +7153,56 @@ class NonZero(Primitive):
5249
7153
  non_zero_op=NonZero()
5250
7154
 
5251
7155
 
7156
+ class Norm(Primitive):
7157
+ r"""
7158
+
7159
+ """
7160
+ __mindspore_signature__ = (
7161
+ sig.make_sig('input_x'),
7162
+ sig.make_sig('ord', default=None),
7163
+ sig.make_sig('dim', default=None),
7164
+ sig.make_sig('keepdim', default=False),
7165
+ sig.make_sig('dtype', default=None),
7166
+ )
7167
+
7168
+ @prim_arg_register
7169
+ def __init__(self):
7170
+ pass
7171
+
7172
+ def __call__(self, input_x, ord=None, dim=None, keepdim=False, dtype=None):
7173
+ return _convert_stub(pyboost_norm(self, [input_x, ord, dim, keepdim, dtype if dtype is None else dtype_to_type_id('Norm', 'dtype', dtype)]))
7174
+
7175
+ norm_op=Norm()
7176
+
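The docstring above is empty; below is a hedged usage sketch, assuming the primitive mirrors :func:`mindspore.ops.norm` (2-norm over the flattened input with the default arguments) and is importable from `ops.auto_generate`:

.. code-block::

    import numpy as np
    from mindspore import Tensor, ops

    norm = ops.auto_generate.Norm()
    x = Tensor(np.array([3., 4.], dtype=np.float32))
    # With the default ord/dim this is assumed to be the 2-norm of the
    # flattened input: sqrt(3**2 + 4**2) = 5.0
    print(norm(x))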
7177
+
7178
+ class NormalExt(Primitive):
7179
+ r"""
7180
+ Generates random numbers according to the Normal random number distribution.
7181
+
7182
+ Inputs:
7183
+ - **mean** (Union[float, Tensor]) - The mean of each output element's normal distribution.
7184
+ - **std** (Union[float, Tensor]) - The tensor of per-element standard deviations.
7185
+ - **generator** (Generator, optional) - Mindspore generator.
7186
+
7187
+ Outputs:
7188
+ - **output** (Tensor) - With the same type and shape as the 'mean'.
7189
+
7190
+ Raises:
7191
+ TypeError: If `mean` or `std` is not Union[float, Tensor].
7192
+
7193
+ Supported Platforms:
7194
+ ``Ascend``
7195
+ """
7196
+ @prim_arg_register
7197
+ def __init__(self):
7198
+ pass
7199
+
7200
+ def __call__(self, mean, std, seed, offset):
7201
+ return _convert_stub(pyboost_normal_ext(self, [mean, std, seed, offset]))
7202
+
7203
+ normal_ext_op=NormalExt()
7204
+
7205
+
5252
7206
  class NotEqual(Primitive):
5253
7207
  r"""
5254
7208
  .. code-block::
@@ -5444,6 +7398,49 @@ class NPUGetFloatStatusV2(Primitive):
5444
7398
  npu_get_float_status_v2_op=NPUGetFloatStatusV2()
5445
7399
 
5446
7400
 
7401
+ class OneHotExt(Primitive):
7402
+ r"""
7403
+ Computes a one-hot tensor.
7404
+
7405
+ The locations represented by the indices in `tensor` take value `1`, while all
7406
+ other locations take value `0`.
7407
+
7408
+ Args:
7409
+ - **tensor** (Tensor) - A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
7410
+ Data type must be int32 or int64.
7411
+ - **num_classes** (int) - A scalar defining the depth of the one-hot dimension.
7412
+
7413
+ Returns:
7414
+ Tensor, one-hot tensor.
7415
+
7416
+ Raises:
7417
+ TypeError: If `num_classes` is not an int.
7418
+ TypeError: If dtype of `tensor` is not int32 or int64.
7419
+ ValueError: If `num_classes` is less than 0.
7420
+
7421
+ Supported Platforms:
7422
+ ``Ascend`` ``GPU`` ``CPU``
7423
+
7424
+ Examples:
7425
+ >>> import mindspore
7426
+ >>> import numpy as np
7427
+ >>> import mindspore.ops as ops
7428
+ >>> from mindspore import Tensor
7429
+ >>> tensor = Tensor(np.array([0, 1, 2]), mindspore.int32)
7430
+ >>> num_classes = 3
7431
+ >>> output = ops.extend.one_hot(tensor, num_classes)
7432
+ >>> print(output)
7433
+ [[1. 0. 0.]
7434
+ [0. 1. 0.]
7435
+ [0. 0. 1.]]
7436
+ """
7437
+ @prim_arg_register
7438
+ def __init__(self, axis=-1):
7439
+ self._set_prim_arg("axis", axis)
7440
+
7441
+ def __call__(self, tensor, num_classes, on_value, off_value):
7442
+ return _convert_stub(pyboost_one_hot_ext(self, [tensor, num_classes, on_value, off_value, self.axis]))
7443
+
5447
7444
  class OneHot(Primitive):
5448
7445
  r"""
5449
7446
  Computes a one-hot tensor.
@@ -5504,6 +7501,36 @@ class OneHot(Primitive):
5504
7501
  return super().__call__(indices, depth, on_value, off_value, self.axis)
5505
7502
 
5506
7503
 
7504
+ class OnesLikeExt(Primitive):
7505
+ r"""
7506
+ Returns a Tensor filled with the value 1, whose shape and data type are the same as the input.
7507
+
7508
+ Refer to :func:`mindspore.ops.ones_like` for more details.
7509
+
7510
+ Args:
7511
+ - **input** (Tensor) - Tensor of any dimension.
+ - **dtype** (mindspore.dtype, optional) - The desired data type of the returned Tensor. Default: ``None`` .
7512
+
7513
+ Returns:
7514
+ Tensor, has the same shape as `input` but filled with ones; its data type is `dtype` if specified, otherwise that of `input`.
7515
+
7516
+ Supported Platforms:
7517
+ ``Ascend``
7518
+ """
7519
+ __mindspore_signature__ = (
7520
+ sig.make_sig('input'),
7521
+ sig.make_sig('dtype', default=None),
7522
+ )
7523
+
7524
+ @prim_arg_register
7525
+ def __init__(self):
7526
+ pass
7527
+
7528
+ def __call__(self, input, dtype=None):
7529
+ return _convert_stub(pyboost_ones_like_ext(self, [input, dtype if dtype is None else dtype_to_type_id('OnesLikeExt', 'dtype', dtype)]))
7530
+
7531
+ ones_like_ext_op=OnesLikeExt()
7532
+
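A short usage sketch (import path assumed as in the other sketches in this file; the docstring above lists Ascend as the supported platform):

.. code-block::

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    ones_like = ops.auto_generate.OnesLikeExt()
    x = Tensor(np.zeros((2, 3), dtype=np.float32))
    print(ones_like(x).shape)            # (2, 3), filled with ones
    print(ones_like(x, ms.int32).dtype)  # optional dtype override -> Int32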
7533
+
5507
7534
  class OnesLike(Primitive):
5508
7535
  r"""
5509
7536
  Returns a Tensor with a value of 1 and its shape and data type is the same as the input.
@@ -5611,8 +7638,7 @@ class Pow(Primitive):
5611
7638
  pass
5612
7639
 
5613
7640
  def __call__(self, input, exponent):
5614
- return super().__call__(input, exponent)
5615
-
7641
+ return _convert_stub(pyboost_pow(self, [input, exponent]))
5616
7642
 
5617
7643
  pow_op=Pow()
5618
7644
 
@@ -5673,6 +7699,98 @@ class PReLU(Primitive):
5673
7699
  prelu_op=PReLU()
5674
7700
 
5675
7701
 
7702
+ class ProdExt(Primitive):
7703
+ r"""
7704
+ Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can
7705
+ reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
7706
+ same by controlling `keep_dims`.
7707
+
7708
+ Args:
7709
+ input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
7710
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
7711
+ axis (int): The dimensions to reduce. Default: ``None`` , reduce all dimensions.
7712
+ Only constant value is allowed. Assume the rank of `input` is r, and the value range is [-r,r).
7713
+ keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
7714
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
7715
+ dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
7716
+
7717
+ Returns:
7718
+ Tensor, has the same data type as input tensor.
7719
+
7720
+ - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
7721
+ the output is a 0-D tensor representing the product of all elements in the input tensor.
7722
+ - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
7723
+ the shape of output is :math:`(input_0, input_2, ..., input_R)`.
7724
+
7725
+ Raises:
7726
+ TypeError: If `input` is not a Tensor.
7727
+ TypeError: If `axis` is not one of the following: int or None.
7728
+ TypeError: If `keep_dims` is not a bool.
7729
+ ValueError: If `axis` is out of range.
7730
+
7731
+ Supported Platforms:
7732
+ ``Ascend`` ``GPU`` ``CPU``
7733
+
7734
+ Examples:
7735
+ >>> import mindspore
7736
+ >>> import numpy as np
7737
+ >>> from mindspore import Tensor, ops
7738
+ >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
7739
+ >>> output = ops.ProdExt()(x, 1, keep_dims=True)
7740
+ >>> result = output.shape
7741
+ >>> print(result)
7742
+ (3, 1, 5, 6)
7743
+ >>> # case 1: Reduces a dimension by multiplying all elements in the dimension.
7744
+ >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
7745
+ ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
7746
+ ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
7747
+ >>> output = ops.ProdExt()(x)
7748
+ >>> print(output)
7749
+ 2.2833798e+33
7750
+ >>> print(output.shape)
7751
+ ()
7752
+ >>> # case 2: Reduces a dimension along axis 0.
7753
+ >>> output = ops.ProdExt()(x, 0, True)
7754
+ >>> print(output)
7755
+ [[[ 28. 28. 28. 28. 28. 28.]
7756
+ [ 80. 80. 80. 80. 80. 80.]
7757
+ [162. 162. 162. 162. 162. 162.]]]
7758
+ >>> # case 3: Reduces a dimension along axis 1.
7759
+ >>> output = ops.ProdExt()(x, 1, True)
7760
+ >>> print(output)
7761
+ [[[ 6. 6. 6. 6. 6. 6.]]
7762
+ [[120. 120. 120. 120. 120. 120.]]
7763
+ [[504. 504. 504. 504. 504. 504.]]]
7764
+ >>> # case 4: Reduces a dimension along axis 2.
7765
+ >>> output = ops.ProdExt()(x, 2, True)
7766
+ >>> print(output)
7767
+ [[[1.00000e+00]
7768
+ [6.40000e+01]
7769
+ [7.29000e+02]]
7770
+ [[4.09600e+03]
7771
+ [1.56250e+04]
7772
+ [4.66560e+04]]
7773
+ [[1.17649e+05]
7774
+ [2.62144e+05]
7775
+ [5.31441e+05]]]
7776
+ """
7777
+ __mindspore_signature__ = (
7778
+ sig.make_sig('input'),
7779
+ sig.make_sig('axis', default=None),
7780
+ sig.make_sig('keep_dims', default=False),
7781
+ sig.make_sig('dtype', default=None),
7782
+ )
7783
+
7784
+ @prim_arg_register
7785
+ def __init__(self):
7786
+ pass
7787
+
7788
+ def __call__(self, input, axis=None, keep_dims=False, dtype=None):
7789
+ return _convert_stub(pyboost_prod_ext(self, [input, axis, keep_dims, dtype if dtype is None else dtype_to_type_id('ProdExt', 'dtype', dtype)]))
7790
+
7791
+ prod_ext_op=ProdExt()
7792
+
7793
+
5676
7794
  class PromptKVCache(Primitive):
5677
7795
  r"""
5678
7796
  .. code-block::
@@ -5780,8 +7898,7 @@ class QuantBatchMatmul(Primitive):
5780
7898
  self._set_prim_arg_with_handler("dtype", dtype, dtype_to_type_id)
5781
7899
 
5782
7900
  def __call__(self, x1, x2, scale, offset=None, bias=None):
5783
- return super().__call__(x1, x2, scale, offset, bias, self.transpose_x1, self.transpose_x2, self.dtype)
5784
-
7901
+ return _convert_stub(pyboost_quant_batch_matmul(self, [x1, x2, scale, offset, bias, self.transpose_x1, self.transpose_x2, self.dtype]))
5785
7902
 
5786
7903
  class RandpermV2(Primitive):
5787
7904
  r"""
@@ -5957,7 +8074,34 @@ reciprocal_op=Reciprocal()
5957
8074
 
5958
8075
  class ReduceAll(Primitive):
5959
8076
  r"""
5960
- Reduces a dimension of a tensor by the "logicalAND" of all elements in the dimension, by default. And also can
8077
+ .. code-block::
8078
+
8079
+ prim = ops.ReduceAll(keep_dims)
8080
+ out = prim(input, axis)
8081
+
8082
+ is equivalent to
8083
+
8084
+ .. code-block::
8085
+
8086
+ ops.all(input, axis, keep_dims)
8087
+
8088
+ Refer to :func:`mindspore.ops.all` for more details.
8089
+ """
8090
+ __mindspore_signature__ = (
8091
+ sig.make_sig('input'),
8092
+ sig.make_sig('axis', default=None),
8093
+ )
8094
+
8095
+ @prim_arg_register
8096
+ def __init__(self, keep_dims=False):
8097
+ self._set_prim_arg("keep_dims", keep_dims)
8098
+
8099
+ def __call__(self, input, axis=None):
8100
+ return _convert_stub(pyboost_reduce_all(self, [input, axis, self.keep_dims]))
8101
+
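Since the detailed example no longer lives in this docstring, here is a brief sketch of the public `ops.ReduceAll` primitive; the printed values match the example carried by the previous version of the docstring:

.. code-block::

    import numpy as np
    from mindspore import Tensor, ops

    reduce_all = ops.ReduceAll(keep_dims=True)
    x = Tensor(np.array([[True, False], [True, True]]))
    print(reduce_all(x))     # logical AND over all elements -> [[False]]
    print(reduce_all(x, 0))  # along axis 0 -> [[ True False]]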
8102
+ class ReduceAny(Primitive):
8103
+ r"""
8104
+ Reduces a dimension of a tensor by the "logical OR" of all elements in the dimension, by default. And also can
5961
8105
  reduce a dimension of `x` along the `axis`. Determine whether the dimensions of the output and input are the
5962
8106
  same by controlling `keep_dims`.
5963
8107
 
@@ -5977,7 +8121,7 @@ class ReduceAll(Primitive):
5977
8121
  Tensor, the dtype is bool.
5978
8122
 
5979
8123
  - If `axis` is ``()`` , and `keep_dims` is ``False`` ,
5980
- the output is a 0-D tensor representing the "logical and" of all elements in the input tensor.
8124
+ the output is a 0-D tensor representing the "logical or" of all elements in the input tensor.
5981
8125
  - If `axis` is int, set as 2, and `keep_dims` is ``False`` ,
5982
8126
  the shape of output is :math:`(x_1, x_3, ..., x_R)`.
5983
8127
  - If `axis` is tuple(int), set as (2, 3), and `keep_dims` is ``False`` ,
@@ -5997,25 +8141,25 @@ class ReduceAll(Primitive):
5997
8141
  >>> import numpy as np
5998
8142
  >>> from mindspore import Tensor, ops
5999
8143
  >>> x = Tensor(np.array([[True, False], [True, True]]))
6000
- >>> op = ops.ReduceAll(keep_dims=True)
6001
- >>> # case 1: Reduces a dimension by the "logicalAND" of all elements in the dimension.
8144
+ >>> op = ops.ReduceAny(keep_dims=True)
8145
+ >>> # case 1: Reduces a dimension by the "logical OR" of all elements in the dimension.
6002
8146
  >>> output = op(x)
6003
8147
  >>> print(output)
6004
- [[False]]
8148
+ [[ True]]
6005
8149
  >>> print(output.shape)
6006
8150
  (1, 1)
6007
8151
  >>> # case 2: Reduces a dimension along axis 0.
6008
8152
  >>> output = op(x, 0)
6009
8153
  >>> print(output)
6010
- [[ True False]]
8154
+ [[ True True]]
6011
8155
  >>> # case 3: Reduces a dimension along axis 1.
6012
8156
  >>> output = op(x, 1)
6013
8157
  >>> print(output)
6014
- [[False]
8158
+ [[True]
6015
8159
  [ True]]
6016
8160
  >>> # case 4: input is a scalar.
6017
8161
  >>> x = Tensor(True)
6018
- >>> op = ops.ReduceAll()
8162
+ >>> op = ops.ReduceAny()
6019
8163
  >>> output = op(x)
6020
8164
  >>> print(output)
6021
8165
  True
@@ -6030,12 +8174,11 @@ class ReduceAll(Primitive):
6030
8174
  self._set_prim_arg("keep_dims", keep_dims)
6031
8175
 
6032
8176
  def __call__(self, x, axis=()):
6033
- return super().__call__(x, axis, self.keep_dims)
6034
-
8177
+ return _convert_stub(pyboost_reduce_any(self, [x, axis, self.keep_dims]))
6035
8178
 
6036
- class ReduceAny(Primitive):
8179
+ class ReduceMax(Primitive):
6037
8180
  r"""
6038
- Reduces a dimension of a tensor by the "logical OR" of all elements in the dimension, by default. And also can
8181
+ Reduces a dimension of a tensor by the maximum value in this dimension, by default. And also can
6039
8182
  reduce a dimension of `x` along the `axis`. Determine whether the dimensions of the output and input are the
6040
8183
  same by controlling `keep_dims`.
6041
8184
 
@@ -6047,89 +8190,12 @@ class ReduceAny(Primitive):
6047
8190
  If ``False`` , don't keep these dimensions. Default: ``False`` .
6048
8191
 
6049
8192
  Inputs:
6050
- - **x** (Tensor[bool]) - The input tensor. The dtype of the tensor to be reduced is bool.
6051
- - **axis** (Union[int, tuple(int), list(int), Tensor]) - The dimensions to reduce. Default: ``()`` ,
6052
- reduce all dimensions. Only constant value is allowed. Must be in the range [-rank(x), rank(x)).
8193
+ - **x** (Tensor[Number]) - The input tensor.
8194
+ - **axis** (Union[int, tuple(int), list(int), tensor]) - The dimensions to reduce. Default: ``()`` ,
8195
+ reduce all dimensions. Must be in the range [-r, r).
6053
8196
 
6054
8197
  Outputs:
6055
- Tensor, the dtype is bool.
6056
-
6057
- - If `axis` is ``()`` , and `keep_dims` is ``False`` ,
6058
- the output is a 0-D tensor representing the "logical or" of all elements in the input tensor.
6059
- - If `axis` is int, set as 2, and `keep_dims` is ``False`` ,
6060
- the shape of output is :math:`(x_1, x_3, ..., x_R)`.
6061
- - If `axis` is tuple(int), set as (2, 3), and `keep_dims` is ``False`` ,
6062
- the shape of output is :math:`(x_1, x_4, ..., x_R)`.
6063
- - If `axis` is 1-D Tensor, set as [2, 3], and `keep_dims` is ``False`` ,
6064
- the shape of output is :math:`(x_1, x_4, ..., x_R)`.
6065
-
6066
- Raises:
6067
- TypeError: If `keep_dims` is not a bool.
6068
- TypeError: If `x` is not a Tensor.
6069
- TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
6070
-
6071
- Supported Platforms:
6072
- ``Ascend`` ``GPU`` ``CPU``
6073
-
6074
- Examples:
6075
- >>> import numpy as np
6076
- >>> from mindspore import Tensor, ops
6077
- >>> x = Tensor(np.array([[True, False], [True, True]]))
6078
- >>> op = ops.ReduceAny(keep_dims=True)
6079
- >>> # case 1: Reduces a dimension by the "logical OR" of all elements in the dimension.
6080
- >>> output = op(x)
6081
- >>> print(output)
6082
- [[ True]]
6083
- >>> print(output.shape)
6084
- (1, 1)
6085
- >>> # case 2: Reduces a dimension along axis 0.
6086
- >>> output = op(x, 0)
6087
- >>> print(output)
6088
- [[ True True]]
6089
- >>> # case 3: Reduces a dimension along axis 1.
6090
- >>> output = op(x, 1)
6091
- >>> print(output)
6092
- [[True]
6093
- [ True]]
6094
- >>> # case 4: input is a scalar.
6095
- >>> x = Tensor(True)
6096
- >>> op = ops.ReduceAny()
6097
- >>> output = op(x)
6098
- >>> print(output)
6099
- True
6100
- """
6101
- __mindspore_signature__ = (
6102
- sig.make_sig('x'),
6103
- sig.make_sig('axis', default=()),
6104
- )
6105
-
6106
- @prim_arg_register
6107
- def __init__(self, keep_dims=False):
6108
- self._set_prim_arg("keep_dims", keep_dims)
6109
-
6110
- def __call__(self, x, axis=()):
6111
- return _convert_stub(pyboost_reduce_any(self, [x, axis, self.keep_dims]))
6112
-
6113
- class ReduceMax(Primitive):
6114
- r"""
6115
- Reduces a dimension of a tensor by the maximum value in this dimension, by default. And also can
6116
- reduce a dimension of `x` along the `axis`. Determine whether the dimensions of the output and input are the
6117
- same by controlling `keep_dims`.
6118
-
6119
- Note:
6120
- The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
6121
-
6122
- Args:
6123
- keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
6124
- If ``False`` , don't keep these dimensions. Default: ``False`` .
6125
-
6126
- Inputs:
6127
- - **x** (Tensor[Number]) - The input tensor.
6128
- - **axis** (Union[int, tuple(int), list(int), tensor]) - The dimensions to reduce. Default: ``()`` ,
6129
- reduce all dimensions. Must be in the range [-r, r).
6130
-
6131
- Outputs:
6132
- output(Tensor): has the same dtype as the `x`.
8198
+ output(Tensor): has the same dtype as the `x`.
6133
8199
 
6134
8200
  - If `axis` is ``()`` , and `keep_dims` is ``False`` ,
6135
8201
  the output is a 0-D tensor representing the maximum of all elements in the input tensor.
@@ -6656,6 +8722,90 @@ class ReduceSum(Primitive):
6656
8722
  return super().__call__(x, axis, self.keep_dims, self.skip_mode)
6657
8723
 
6658
8724
 
8725
+ class ReflectionPad1DGrad(Primitive):
8726
+ r"""
8727
+
8728
+ """
8729
+ @prim_arg_register
8730
+ def __init__(self):
8731
+ pass
8732
+
8733
+ def __call__(self, grad_output, input, padding):
8734
+ return _convert_stub(pyboost_reflection_pad_1d_grad(self, [grad_output, input, padding]))
8735
+
8736
+ reflection_pad_1d_grad_op=ReflectionPad1DGrad()
8737
+
8738
+
8739
+ class ReflectionPad1D(Primitive):
8740
+ r"""
8741
+
8742
+ """
8743
+ @prim_arg_register
8744
+ def __init__(self):
8745
+ pass
8746
+
8747
+ def __call__(self, input, padding):
8748
+ return _convert_stub(pyboost_reflection_pad_1d(self, [input, padding]))
8749
+
8750
+ reflection_pad_1d_op=ReflectionPad1D()
8751
+
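The padding primitives in this block carry empty docstrings; below is a hedged sketch for ReflectionPad1D, assuming `padding` is `(pad_left, pad_right)` with the usual reflect semantics of :class:`mindspore.nn.ReflectionPad1d` and the assumed `ops.auto_generate` import path:

.. code-block::

    import numpy as np
    from mindspore import Tensor, ops

    pad = ops.auto_generate.ReflectionPad1D()
    x = Tensor(np.array([[[0., 1., 2., 3.]]], dtype=np.float32))  # (N, C, W)
    # Reflection does not repeat the border element:
    # expected [[[2. 1. 0. 1. 2. 3. 2.]]]
    print(pad(x, (2, 1)))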
8752
+
8753
+ class ReflectionPad2DGrad(Primitive):
8754
+ r"""
8755
+
8756
+ """
8757
+ @prim_arg_register
8758
+ def __init__(self):
8759
+ pass
8760
+
8761
+ def __call__(self, grad_output, input, padding):
8762
+ return _convert_stub(pyboost_reflection_pad_2d_grad(self, [grad_output, input, padding]))
8763
+
8764
+ reflection_pad_2d_grad_op=ReflectionPad2DGrad()
8765
+
8766
+
8767
+ class ReflectionPad2D(Primitive):
8768
+ r"""
8769
+
8770
+ """
8771
+ @prim_arg_register
8772
+ def __init__(self):
8773
+ pass
8774
+
8775
+ def __call__(self, input, padding):
8776
+ return _convert_stub(pyboost_reflection_pad_2d(self, [input, padding]))
8777
+
8778
+ reflection_pad_2d_op=ReflectionPad2D()
8779
+
8780
+
8781
+ class ReflectionPad3DGrad(Primitive):
8782
+ r"""
8783
+
8784
+ """
8785
+ @prim_arg_register
8786
+ def __init__(self):
8787
+ pass
8788
+
8789
+ def __call__(self, grad_output, input, padding):
8790
+ return _convert_stub(pyboost_reflection_pad_3d_grad(self, [grad_output, input, padding]))
8791
+
8792
+ reflection_pad_3d_grad_op=ReflectionPad3DGrad()
8793
+
8794
+
8795
+ class ReflectionPad3D(Primitive):
8796
+ r"""
8797
+
8798
+ """
8799
+ @prim_arg_register
8800
+ def __init__(self):
8801
+ pass
8802
+
8803
+ def __call__(self, input, padding):
8804
+ return _convert_stub(pyboost_reflection_pad_3d(self, [input, padding]))
8805
+
8806
+ reflection_pad_3d_op=ReflectionPad3D()
8807
+
8808
+
6659
8809
  class ReLU6Grad(Primitive):
6660
8810
  r"""
6661
8811
  Computes gradient for the ReLU6 activation.
@@ -6750,6 +8900,122 @@ class ReLU(Primitive):
6750
8900
  relu_op=ReLU()
6751
8901
 
6752
8902
 
8903
+ class RepeatInterleave(Primitive):
8904
+ r"""
8905
+ .. code-block::
8906
+
8907
+ prim = ops.RepeatInterleave()
8908
+ out = prim(input, repeats, axis, output_size)
8909
+
8910
+ is equivalent to
8911
+
8912
+ .. code-block::
8913
+
8914
+ ops.repeat_interleave(input, repeats, axis, output_size)
8915
+
8916
+ Refer to :func:`mindspore.ops.repeat_interleave` for more details.
8917
+ """
8918
+ __mindspore_signature__ = (
8919
+ sig.make_sig('input'),
8920
+ sig.make_sig('repeats'),
8921
+ sig.make_sig('axis', default=None),
8922
+ sig.make_sig('output_size', default=None),
8923
+ )
8924
+
8925
+ @prim_arg_register
8926
+ def __init__(self):
8927
+ pass
8928
+
8929
+ def __call__(self, input, repeats, axis=None, output_size=None):
8930
+ return _convert_stub(pyboost_repeat_interleave(self, [input, repeats, axis, output_size]))
8931
+
8932
+ repeat_interleave_op=RepeatInterleave()
8933
+
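A brief sketch, assuming the primitive behaves like :func:`mindspore.ops.repeat_interleave` (flattened repeat when `axis` is None) and the assumed `ops.auto_generate` import path:

.. code-block::

    import numpy as np
    from mindspore import Tensor, ops

    repeat = ops.auto_generate.RepeatInterleave()
    x = Tensor(np.array([[1, 2], [3, 4]], dtype=np.int32))
    print(repeat(x, 2))     # no axis: flattened repeat -> [1 1 2 2 3 3 4 4]
    print(repeat(x, 2, 0))  # along axis 0 -> [[1 2] [1 2] [3 4] [3 4]]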
8934
+
8935
+ class ReplicationPad1DGrad(Primitive):
8936
+ r"""
8937
+
8938
+ """
8939
+ @prim_arg_register
8940
+ def __init__(self):
8941
+ pass
8942
+
8943
+ def __call__(self, grad_output, input, padding):
8944
+ return _convert_stub(pyboost_replication_pad_1d_grad(self, [grad_output, input, padding]))
8945
+
8946
+ replication_pad_1d_grad_op=ReplicationPad1DGrad()
8947
+
8948
+
8949
+ class ReplicationPad1D(Primitive):
8950
+ r"""
8951
+
8952
+ """
8953
+ @prim_arg_register
8954
+ def __init__(self):
8955
+ pass
8956
+
8957
+ def __call__(self, input, padding):
8958
+ return _convert_stub(pyboost_replication_pad_1d(self, [input, padding]))
8959
+
8960
+ replication_pad_1d_op=ReplicationPad1D()
8961
+
8962
+
8963
+ class ReplicationPad2DGrad(Primitive):
8964
+ r"""
8965
+
8966
+ """
8967
+ @prim_arg_register
8968
+ def __init__(self):
8969
+ pass
8970
+
8971
+ def __call__(self, grad_output, input, padding):
8972
+ return _convert_stub(pyboost_replication_pad_2d_grad(self, [grad_output, input, padding]))
8973
+
8974
+ replication_pad_2d_grad_op=ReplicationPad2DGrad()
8975
+
8976
+
8977
+ class ReplicationPad2D(Primitive):
8978
+ r"""
8979
+
8980
+ """
8981
+ @prim_arg_register
8982
+ def __init__(self):
8983
+ pass
8984
+
8985
+ def __call__(self, input, padding):
8986
+ return _convert_stub(pyboost_replication_pad_2d(self, [input, padding]))
8987
+
8988
+ replication_pad_2d_op=ReplicationPad2D()
8989
+
8990
+
8991
+ class ReplicationPad3DGrad(Primitive):
8992
+ r"""
8993
+
8994
+ """
8995
+ @prim_arg_register
8996
+ def __init__(self):
8997
+ pass
8998
+
8999
+ def __call__(self, grad_output, input, padding):
9000
+ return _convert_stub(pyboost_replication_pad_3d_grad(self, [grad_output, input, padding]))
9001
+
9002
+ replication_pad_3d_grad_op=ReplicationPad3DGrad()
9003
+
9004
+
9005
+ class ReplicationPad3D(Primitive):
9006
+ r"""
9007
+
9008
+ """
9009
+ @prim_arg_register
9010
+ def __init__(self):
9011
+ pass
9012
+
9013
+ def __call__(self, input, padding):
9014
+ return _convert_stub(pyboost_replication_pad_3d(self, [input, padding]))
9015
+
9016
+ replication_pad_3d_op=ReplicationPad3D()
9017
+
9018
+
6753
9019
  class ReshapeAndCache(Primitive):
6754
9020
  r"""
6755
9021
  .. code-block::
@@ -7218,23 +9484,22 @@ class ReverseV2(Primitive):
7218
9484
  .. code-block::
7219
9485
 
7220
9486
  prim = ops.ReverseV2(axis)
7221
- out = prim(x)
9487
+ out = prim(input)
7222
9488
 
7223
9489
  is equivalent to
7224
9490
 
7225
9491
  .. code-block::
7226
9492
 
7227
- ops.reverse(x, axis)
9493
+ ops.flip(input, axis)
7228
9494
 
7229
- Refer to :func:`mindspore.ops.reverse` for more details.
9495
+ Refer to :func:`mindspore.ops.flip` for more details.
7230
9496
  """
7231
9497
  @prim_arg_register
7232
9498
  def __init__(self, axis):
7233
9499
  self._set_prim_arg("axis", type_it('ReverseV2', 'axis', axis, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT))
7234
9500
 
7235
- def __call__(self, x):
7236
- return super().__call__(x, self.axis)
7237
-
9501
+ def __call__(self, input):
9502
+ return _convert_stub(pyboost_reverse_v2(self, [input, self.axis]))
7238
9503
 
7239
9504
  class RFFTGrad(Primitive):
7240
9505
  r"""
@@ -7453,8 +9718,7 @@ class Rsqrt(Primitive):
7453
9718
  pass
7454
9719
 
7455
9720
  def __call__(self, input):
7456
- return super().__call__(input)
7457
-
9721
+ return _convert_stub(pyboost_rsqrt(self, [input]))
7458
9722
 
7459
9723
  rsqrt_op=Rsqrt()
7460
9724
 
@@ -7485,6 +9749,20 @@ class ScalarCast(Primitive):
7485
9749
  scalar_cast_op=ScalarCast()
7486
9750
 
7487
9751
 
9752
+ class ScatterAddExt(Primitive):
9753
+ r"""
9754
+
9755
+ """
9756
+ @prim_arg_register
9757
+ def __init__(self):
9758
+ pass
9759
+
9760
+ def __call__(self, input, dim, index, src):
9761
+ return _convert_stub(pyboost_scatter_add_ext(self, [input, dim, index, src]))
9762
+
9763
+ scatter_add_ext_op=ScatterAddExt()
9764
+
9765
+
7488
9766
  class ScatterNd(Primitive):
7489
9767
  r"""
7490
9768
  .. code-block::
@@ -7527,56 +9805,25 @@ scatter_op=Scatter()
7527
9805
 
7528
9806
  class Select(Primitive):
7529
9807
  r"""
7530
- The conditional tensor determines whether the corresponding element in the output must be
7531
- selected from `x` (if True) or `y` (if False) based on the value of each
7532
- element.
7533
-
7534
- It can be defined as:
7535
-
7536
- .. math::
7537
- out_i = \begin{cases}
7538
- x_i, & \text{if } cond_i \\
7539
- y_i, & \text{otherwise}
7540
- \end{cases}
7541
-
7542
- Inputs:
7543
- - **cond** (Tensor[bool]): The condition tensor, decides which element is chosen.
7544
- The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
7545
- - **x** (Tensor): The first Tensor to be selected.
7546
- The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
7547
- - **y** (Tensor): The second Tensor to be selected.
7548
- The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
7549
-
7550
- Outputs:
7551
- Tensor, has the same shape as `cond`.
7552
-
7553
- Raises:
7554
- TypeError: If x or y is not a Tensor.
7555
- ValueError: The shape of inputs are different.
7556
-
7557
- Supported Platforms:
7558
- ``Ascend`` ``GPU`` ``CPU``
7559
-
7560
- Examples:
7561
- >>> import mindspore
7562
- >>> from mindspore import Tensor, ops
7563
- >>> # Both inputs are Tensor
7564
- >>> select = ops.Select()
7565
- >>> cond = Tensor([True, False])
7566
- >>> x = Tensor([2,3], mindspore.float32)
7567
- >>> y = Tensor([1,2], mindspore.float32)
7568
- >>> output = select(cond, x, y)
7569
- >>> print(output)
7570
- [2. 2.]
9808
+ .. code-block::
9809
+
9810
+ prim = ops.Select()
9811
+ out = prim(condition, input, other)
9812
+
9813
+ is equivalent to
9814
+
9815
+ .. code-block::
9816
+
9817
+ ops.select(condition, input, other)
7571
9818
 
9819
+ Refer to :func:`mindspore.ops.select` for more details.
7572
9820
  """
7573
9821
  @prim_arg_register
7574
9822
  def __init__(self):
7575
9823
  pass
7576
9824
 
7577
- def __call__(self, cond, x, y):
7578
- return super().__call__(cond, x, y)
7579
-
9825
+ def __call__(self, condition, input, other):
9826
+ return _convert_stub(pyboost_select(self, [condition, input, other]))
7580
9827
 
7581
9828
  select_op=Select()
7582
9829
 
@@ -7759,13 +10006,27 @@ class Sinh(Primitive):
7759
10006
  sinh_op=Sinh()
7760
10007
 
7761
10008
 
7762
- class SoftmaxBackward(Primitive):
10009
+ class SliceExt(Primitive):
7763
10010
  r"""
7764
10011
 
7765
10012
  """
7766
- __mindspore_signature__ = (
7767
- sig.make_sig('dout'),
7768
- sig.make_sig('out'),
10013
+ @prim_arg_register
10014
+ def __init__(self):
10015
+ pass
10016
+
10017
+ def __call__(self, input, dim, start, end, step):
10018
+ return _convert_stub(pyboost_slice_ext(self, [input, dim, start, end, step]))
10019
+
10020
+ slice_ext_op=SliceExt()
10021
+
10022
+
10023
+ class SoftmaxBackward(Primitive):
10024
+ r"""
10025
+
10026
+ """
10027
+ __mindspore_signature__ = (
10028
+ sig.make_sig('dout'),
10029
+ sig.make_sig('out'),
7769
10030
  sig.make_sig('dim', default=-1),
7770
10031
  )
7771
10032
 
@@ -7815,6 +10076,58 @@ class Softmax(Primitive):
7815
10076
  def __call__(self, input):
7816
10077
  return _convert_stub(pyboost_softmax(self, [input, self.axis]))
7817
10078
 
10079
+ class SoftplusExt(Primitive):
10080
+ r"""
10081
+ .. code-block::
10082
+
10083
+ prim = ops.SoftplusExt()
10084
+ out = prim(input, beta, threshold)
10085
+
10086
+ is equivalent to
10087
+
10088
+ .. code-block::
10089
+
10090
+ ops.softplus_ext(input, beta, threshold)
10091
+
10092
+ Refer to :func:`mindspore.ops.softplus_ext` for more details.
10093
+ """
10094
+ __mindspore_signature__ = (
10095
+ sig.make_sig('input'),
10096
+ sig.make_sig('beta', default=1),
10097
+ sig.make_sig('threshold', default=20),
10098
+ )
10099
+
10100
+ @prim_arg_register
10101
+ def __init__(self):
10102
+ pass
10103
+
10104
+ def __call__(self, input, beta=1, threshold=20):
10105
+ return _convert_stub(pyboost_softplus_ext(self, [input, beta, threshold]))
10106
+
10107
+ softplus_ext_op=SoftplusExt()
10108
+
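A small numeric sketch of the default behaviour, softplus(x) = (1/beta) * log(1 + exp(beta * x)) (import path assumed as in the other sketches):

.. code-block::

    import numpy as np
    from mindspore import Tensor, ops

    softplus = ops.auto_generate.SoftplusExt()
    x = Tensor(np.array([-1., 0., 1.], dtype=np.float32))
    # With beta=1, threshold=20 the result is roughly [0.3133 0.6931 1.3133]
    print(softplus(x))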
10109
+
10110
+ class SoftplusGradExt(Primitive):
10111
+ r"""
10112
+
10113
+ """
10114
+ __mindspore_signature__ = (
10115
+ sig.make_sig('dout'),
10116
+ sig.make_sig('x'),
10117
+ sig.make_sig('beta', default=1),
10118
+ sig.make_sig('threshold', default=20),
10119
+ )
10120
+
10121
+ @prim_arg_register
10122
+ def __init__(self):
10123
+ pass
10124
+
10125
+ def __call__(self, dout, x, beta=1, threshold=20):
10126
+ return _convert_stub(pyboost_softplus_grad_ext(self, [dout, x, beta, threshold]))
10127
+
10128
+ softplus_grad_ext_op=SoftplusGradExt()
10129
+
10130
+
7818
10131
  class SolveTriangular(Primitive):
7819
10132
  r"""
7820
10133
  .. code-block::
@@ -7908,6 +10221,46 @@ class Split(Primitive):
7908
10221
  return super().__call__(input_x, self.axis, self.output_num)
7909
10222
 
7910
10223
 
10224
+ class SplitTensor(Primitive):
10225
+ r"""
10226
+
10227
+ """
10228
+ __mindspore_signature__ = (
10229
+ sig.make_sig('input_x'),
10230
+ sig.make_sig('split_int'),
10231
+ sig.make_sig('axis', default=0),
10232
+ )
10233
+
10234
+ @prim_arg_register
10235
+ def __init__(self):
10236
+ pass
10237
+
10238
+ def __call__(self, input_x, split_int, axis=0):
10239
+ return _convert_stub(pyboost_split_tensor(self, [input_x, split_int, axis]))
10240
+
10241
+ split_tensor_op=SplitTensor()
10242
+
10243
+
10244
+ class SplitWithSize(Primitive):
10245
+ r"""
10246
+
10247
+ """
10248
+ __mindspore_signature__ = (
10249
+ sig.make_sig('input_x'),
10250
+ sig.make_sig('split_sections'),
10251
+ sig.make_sig('axis', default=0),
10252
+ )
10253
+
10254
+ @prim_arg_register
10255
+ def __init__(self):
10256
+ pass
10257
+
10258
+ def __call__(self, input_x, split_sections, axis=0):
10259
+ return _convert_stub(pyboost_split_with_size(self, [input_x, split_sections, axis]))
10260
+
10261
+ split_with_size_op=SplitWithSize()
10262
+
10263
+
7911
10264
  class SqrtGrad(Primitive):
7912
10265
  r"""
7913
10266
  Performs grad of Sqrt operation.
@@ -7973,6 +10326,28 @@ class Square(Primitive):
7973
10326
  square_op=Square()
7974
10327
 
7975
10328
 
10329
+ class StackExt(Primitive):
10330
+ r"""
10331
+ .. code-block::
10332
+
10333
+ prim = ops.StackExt(dim)
10334
+ out = prim(tensors)
10335
+
10336
+ is equivalent to
10337
+
10338
+ .. code-block::
10339
+
10340
+ ops.stack_ext(tensors, dim)
10341
+
10342
+ Refer to :func:`mindspore.ops.stack_ext` for more details.
10343
+ """
10344
+ @prim_arg_register
10345
+ def __init__(self, dim=0):
10346
+ self._set_prim_arg("dim", dim)
10347
+
10348
+ def __call__(self, tensors):
10349
+ return _convert_stub(pyboost_stack_ext(self, [tensors, self.dim]))
10350
+
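A quick shape check for StackExt (import path assumed as in the other sketches in this file):

.. code-block::

    import numpy as np
    from mindspore import Tensor, ops

    stack = ops.auto_generate.StackExt(dim=0)
    a = Tensor(np.array([1., 2.], dtype=np.float32))
    b = Tensor(np.array([3., 4.], dtype=np.float32))
    print(stack([a, b]).shape)  # two (2,) tensors stacked along a new axis -> (2, 2)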
7976
10351
  class StridedSlice(Primitive):
7977
10352
  r"""
7978
10353
  .. code-block::
@@ -8059,6 +10434,123 @@ class Sub(Primitive):
8059
10434
  sub_op=Sub()
8060
10435
 
8061
10436
 
10437
+ class SumExt(Primitive):
10438
+ r"""
10439
+ Calculate sum of Tensor elements over a given dim.
10440
+
10441
+ Note:
10442
+ The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
10443
+
10444
+ Args:
10445
+ input (Tensor): The input tensor.
10446
+ dim (Union[None, int, tuple(int), list(int), Tensor]): Dimensions along which a sum is performed.
10447
+ If ``None`` , sum all the elements of the input tensor.
10448
+ If the `dim` is a tuple or list of ints, a sum is performed on all the dimensions specified in the tuple.
10449
+ Must be in the range :math:`[-input.ndim, input.ndim)` . Default: ``None`` .
10450
+ keepdim (bool): Whether the output tensor has `dim` retained or not.
10451
+ If ``True`` , keep these reduced dimensions and the length is 1.
10452
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
10453
+ dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
10454
+
10455
+ Returns:
10456
+ A Tensor, sum of elements over a given `dim` in `input`.
10457
+
10458
+ Raises:
10459
+ TypeError: If `input` is not a Tensor.
10460
+ TypeError: If `dim` is not an int, tuple(int), list(int), Tensor or None.
10461
+ ValueError: If `dim` is not in the range :math:`[-input.ndim, input.ndim)` .
10462
+ TypeError: If `keepdim` is not a bool.
10463
+
10464
+ Supported Platforms:
10465
+ ``Ascend`` ``GPU`` ``CPU``
10466
+
10467
+ Examples:
10468
+ >>> import mindspore
10469
+ >>> import numpy as np
10470
+ >>> from mindspore import Tensor, ops
10471
+ >>> from mindspore import dtype as mstype
10472
+ >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
10473
+ ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
10474
+ ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mstype.float32)
10475
+ >>> out = ops.sum(x)
10476
+ >>> print(out)
10477
+ 270.0
10478
+ >>> out = ops.sum(x, dim=2)
10479
+ >>> print(out)
10480
+ [[ 6. 12. 18.]
10481
+ [24. 30. 36.]
10482
+ [42. 48. 54.]]
10483
+ >>> out = ops.sum(x, dim=2, keepdim=True)
10484
+ >>> print(out)
10485
+ [[[ 6.]
10486
+ [12.]
10487
+ [18.]]
10488
+ [[24.]
10489
+ [30.]
10490
+ [36.]]
10491
+ [[42.]
10492
+ [48.]
10493
+ [54.]]]
10494
+ """
10495
+ __mindspore_signature__ = (
10496
+ sig.make_sig('input'),
10497
+ sig.make_sig('dim', default=None),
10498
+ sig.make_sig('keepdim', default=False),
10499
+ sig.make_sig('dtype', default=None),
10500
+ )
10501
+
10502
+ @prim_arg_register
10503
+ def __init__(self):
10504
+ pass
10505
+
10506
+ def __call__(self, input, dim=None, keepdim=False, dtype=None):
10507
+ return _convert_stub(pyboost_sum_ext(self, [input, dim, keepdim, dtype if dtype is None else dtype_to_type_id('SumExt', 'dtype', dtype)]))
10508
+
10509
+ sum_ext_op=SumExt()
10510
+
10511
+
10512
+ class TanhGrad(Primitive):
10513
+ r"""
10514
+ Computes TanhGrad of input element-wise.
10515
+
10516
+ Returns:
10517
+ Tensor, has the same type as input.
10518
+ """
10519
+ @prim_arg_register
10520
+ def __init__(self):
10521
+ pass
10522
+
10523
+ def __call__(self, y, dy):
10524
+ return _convert_stub(pyboost_tanh_grad(self, [y, dy]))
10525
+
10526
+ tanh_grad_op=TanhGrad()
10527
+
10528
+
10529
+ class Tanh(Primitive):
10530
+ r"""
10531
+ .. code-block::
10532
+
10533
+ prim = ops.Tanh()
10534
+ out = prim(input)
10535
+
10536
+ is equivalent to
10537
+
10538
+ .. code-block::
10539
+
10540
+ ops.tanh(input)
10541
+
10542
+ Refer to :func:`mindspore.ops.tanh` for more details.
10543
+ """
10544
+ @prim_arg_register
10545
+ def __init__(self):
10546
+ pass
10547
+
10548
+ def __call__(self, input):
10549
+ return _convert_stub(pyboost_tanh(self, [input]))
10550
+
10551
+ tanh_op=Tanh()
10552
+
10553
+
8062
10554
  class TensorCopySlices(Primitive):
8063
10555
  r"""
8064
10556
  Copy continues memory.
@@ -8129,6 +10621,39 @@ class TensorShape(Primitive):
8129
10621
  tensor_shape_op=TensorShape()
8130
10622
 
8131
10623
 
10624
+ class TopkExt(Primitive):
10625
+ r"""
10626
+ .. code-block::
10627
+
10628
+ prim = ops.TopkExt()
10629
+ out = prim(input, k, dim, largest, sorted)
10630
+
10631
+ is equivalent to
10632
+
10633
+ .. code-block::
10634
+
10635
+ ops.topk_ext(input, k, dim, largest, sorted)
10636
+
10637
+ Refer to :func:`mindspore.ops.topk_ext` for more details.
10638
+ """
10639
+ __mindspore_signature__ = (
10640
+ sig.make_sig('input'),
10641
+ sig.make_sig('k'),
10642
+ sig.make_sig('dim', default=-1),
10643
+ sig.make_sig('largest', default=True),
10644
+ sig.make_sig('sorted', default=True),
10645
+ )
10646
+
10647
+ @prim_arg_register
10648
+ def __init__(self):
10649
+ pass
10650
+
10651
+ def __call__(self, input, k, dim=-1, largest=True, sorted=True):
10652
+ return _convert_stub(pyboost_topk_ext(self, [input, k, dim, largest, sorted]))
10653
+
10654
+ topk_ext_op=TopkExt()
10655
+
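A short sketch of the default largest/sorted behaviour (import path assumed as in the other sketches):

.. code-block::

    import numpy as np
    from mindspore import Tensor, ops

    topk = ops.auto_generate.TopkExt()
    x = Tensor(np.array([1., 3., 2., 5.], dtype=np.float32))
    values, indices = topk(x, 2)  # two largest values along the last dim
    print(values)   # [5. 3.]
    print(indices)  # [3 1]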
10656
+
8132
10657
  class Trace(Primitive):
8133
10658
  r"""
8134
10659
  .. code-block::
@@ -8180,6 +10705,50 @@ class Transpose(Primitive):
8180
10705
  transpose_op=Transpose()
8181
10706
 
8182
10707
 
10708
+ class Tril(Primitive):
10709
+ r"""
10710
+ .. code-block::
10711
+
10712
+ prim = ops.Tril(diagonal)
10713
+ out = prim(input)
10714
+
10715
+ is equivalent to
10716
+
10717
+ .. code-block::
10718
+
10719
+ ops.tril(input, diagonal)
10720
+
10721
+ Refer to :func:`mindspore.ops.tril` for more details.
10722
+ """
10723
+ @prim_arg_register
10724
+ def __init__(self, diagonal=0):
10725
+ self._set_prim_arg("diagonal", diagonal)
10726
+
10727
+ def __call__(self, input):
10728
+ return _convert_stub(pyboost_tril(self, [input, self.diagonal]))
10729
+
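For completeness, a sketch using the public `ops.Tril` primitive defined above:

.. code-block::

    import numpy as np
    from mindspore import Tensor, ops

    tril = ops.Tril()  # keep the lower triangle, zero out the rest
    x = Tensor(np.ones((3, 3), dtype=np.float32))
    print(tril(x))
    # [[1. 0. 0.]
    #  [1. 1. 0.]
    #  [1. 1. 1.]]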
10730
+ class Triu(Primitive):
10731
+ r"""
10732
+ .. code-block::
10733
+
10734
+ prim = ops.Triu(diagonal)
10735
+ out = prim(input)
10736
+
10737
+ is equivalent to
10738
+
10739
+ .. code-block::
10740
+
10741
+ ops.triu(input, diagonal)
10742
+
10743
+ Refer to :func:`mindspore.ops.triu` for more details.
10744
+ """
10745
+ @prim_arg_register
10746
+ def __init__(self, diagonal=0):
10747
+ self._set_prim_arg("diagonal", diagonal)
10748
+
10749
+ def __call__(self, input):
10750
+ return _convert_stub(pyboost_triu(self, [input, self.diagonal]))
10751
+
8183
10752
  class TupleToTensor(Primitive):
8184
10753
  r"""
8185
10754
  .. code-block::
@@ -8211,6 +10780,48 @@ class TupleToTensor(Primitive):
8211
10780
  tuple_to_tensor_op=TupleToTensor()
8212
10781
 
8213
10782
 
10783
+ class UniformExt(Primitive):
10784
+ r"""
10785
+ Generates random numbers according to the Uniform random number distribution.
10786
+
10787
+ Inputs:
10788
+ - **tensor** (Tensor) - The input tensor whose shape and type determine the generated random values.
10789
+ - **a** (float) - Lower bound of the random numbers. Default: 0.0.
10790
+ - **b** (float) - Upper bound of the random numbers. Default: 0.0.
10791
+ - **seed** (int) - Seed for random number generation. Default: 0.
10792
+ - **offset** (int) - Positional offset in the tensor to start filling with random numbers. Default: 0.
10793
+
10794
+ Raises:
10795
+ TypeError: If `a` or `b` is not a float.
10796
+ TypeError: If `tensor` is not a Tensor.
10797
+ ValueError: If `a` is larger than `b`.
10798
+
10799
+ Outputs:
10800
+ - **output** (Tensor) - With the same type and shape as the 'tensor'.
10801
+
10802
+ Supported Platforms:
10803
+ ``Ascend``
10804
+
10805
+ Examples:
10806
+ >>> import numpy as np
10807
+ >>> from mindspore import Tensor, dtype as mstype
10808
+ >>> from mindspore.ops.operations.random_ops import UniformExt
10809
+ >>> x = Tensor(np.random.randn(3,4), mstype.float64)
10810
+ >>> uniform = UniformExt()
10811
+ >>> y = uniform(x, a=1.0, b=2.0, seed=10, offset=5)
10812
+ >>> print(y.shape)
10813
+ (3, 4)
10814
+ """
10815
+ @prim_arg_register
10816
+ def __init__(self):
10817
+ pass
10818
+
10819
+ def __call__(self, tensor, a, b, seed, offset):
10820
+ return _convert_stub(pyboost_uniform_ext(self, [tensor, a, b, seed, offset]))
10821
+
10822
+ uniform_ext_op=UniformExt()
10823
+
10824
+
8214
10825
  class UnsortedSegmentSum(Primitive):
8215
10826
  r"""
8216
10827
  .. code-block::
@@ -8237,18 +10848,666 @@ class UnsortedSegmentSum(Primitive):
8237
10848
  unsorted_segment_sum_op=UnsortedSegmentSum()
8238
10849
 
8239
10850
 
8240
- class View(Primitive):
10851
+ class UpsampleBilinear2DGrad(Primitive):
8241
10852
  r"""
8242
- .. code-block::
8243
-
8244
- prim = ops.View()
8245
- out = prim(input, shape)
8246
-
8247
- is equivalent to
8248
-
8249
- .. code-block::
8250
-
8251
- ops.view(input, shape)
10853
+ Upsample the 2-D gradient data with bilinear interpolation algorithm.
10854
+
10855
+ Note:
10856
+ One of 'scales' and 'output_size' must be specified. And it is an error if both are specified.
10857
+
10858
+ Inputs:
10859
+ - **dy** (Tensor) - Tensor of shape [N, C, H, W]. Must be one of the following types:
10860
+ float16, float32, float64.
10861
+ - **input_size** (Union[tuple[int], list[int]]): A required tuple[int] which contains 4 elements:
10862
+ [batch, channels, height, width]. Must:
10863
+ input_size[0] == dy.shape[0]
10864
+ input_size[1] == dy.shape[1].
10865
+ - **output_size** (Union[tuple[int], list[int]]): An optional tuple[int]. Default: ``None``.
10866
+ It contains 2 elements: height, width, which should be consistent with the shape of `dy`. Must:
10867
+ dy.shape[2] == output_size[0],
10868
+ dy.shape[3] == output_size[1].
10869
+ - **scales** (Union[tuple[float], list[float]]): An optional tuple[float]. Default: ``None``.
10870
+ The scale array along each dimension, contains 2 elements: scale_height, scale_width. Must:
+ dy.shape[2] == floor(input_size[2] * scales[0]),
+ dy.shape[3] == floor(input_size[3] * scales[1]).
10873
+ - **align_corners** (bool): An optional bool. Default: ``False``.
10874
+
10875
+ Outputs:
10876
+ - **dx** (Tensor) - A Tensor with shape depending on `input_size`, and its dtype is the same as `dy`.
10877
+ """
10878
+ __mindspore_signature__ = (
10879
+ sig.make_sig('dy'),
10880
+ sig.make_sig('input_size'),
10881
+ sig.make_sig('output_size', default=None),
10882
+ sig.make_sig('scales', default=None),
10883
+ sig.make_sig('align_corners', default=False),
10884
+ )
10885
+
10886
+ @prim_arg_register
10887
+ def __init__(self):
10888
+ pass
10889
+
10890
+ def __call__(self, dy, input_size, output_size=None, scales=None, align_corners=False):
10891
+ return _convert_stub(pyboost_upsample_bilinear2d_grad(self, [dy, input_size, output_size, scales, align_corners]))
10892
+
10893
+ upsample_bilinear2d_grad_op=UpsampleBilinear2DGrad()
10894
+
10895
+
10896
+ class UpsampleBilinear2D(Primitive):
10897
+ r"""
10898
+ Performs upsampling with bilinear interpolation across 2 dims for a 4-dim input Tensor.
10899
+
10900
+ This operator scales up the input with the specified `output_size` or `scales` factors,
+ using the bilinear upscaling algorithm.
10902
+
10903
+ Note:
10904
+ One of `scales` and `output_size` must be specified. And it is an error if both are specified.
10905
+
10906
+ Inputs:
10907
+ - **x** (Tensor) - 4D tensor of shape :math:`(N, C, H_{in}, W_{in})`. Supporting types:
10908
+ float16, float32, float64.
10909
+ - **output_size** (Union[tuple[int], list[int]]): A tuple or list of 2 int elements
10910
+ :math:`(output\_height, output\_width)`. Default: ``None``.
10911
+ - **scales** (Union[tuple[float], list[float]]): A tuple or list of 2 float
10912
+ elements :math:`(scale\_height, scale\_width)`. Default: ``None``.
10913
+ - **align_corners** (bool, optional): An optional bool. Default: ``False``.
10914
+ If ``True``, the input and output tensors are aligned by the center points of their corner pixels,
10915
+ preserving the values at the corner pixels.
10916
+ If ``False`` , the input and output tensors are aligned by the corner points of their corner pixels,
10917
+ and the interpolation use edge value padding for out of boundary values.
10918
+
10919
+ Outputs:
10920
+ - **y** (Tensor) - Upsampled output with the same data type as `x`, whose shape is
10921
+ :math:`(N, C, H_{out}, W_{out})`.
10922
+
10923
+ Raises:
10924
+ TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int].
10925
+ TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float].
10926
+ TypeError: If dtype of `x` is not in [float16, float32, float64].
10927
+ TypeError: If type of `align_corners` is not bool.
10928
+ ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``.
10929
+ ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``.
10930
+ ValueError: If shape of `x` is not 4D.
10931
+ ValueError: If none of `scales` and `output_size` is specified or both specified.
10932
+ ValueError: If size of `scales` is not equal 2 when `scales` is specified.
10933
+ ValueError: If size of `output_size` is not equal 2 when `output_size` is specified.
10934
+
10935
+ Supported Platforms:
10936
+ ``Ascend``
10937
+
10938
+ Examples:
10939
+ >>> import numpy as np
10940
+ >>> from mindspore import Tensor, ops
10941
+ >>> net = ops.auto_generate.UpsampleBilinear2D()
+ >>> in_x = Tensor(np.random.randn(2, 3, 512, 256).astype(np.float32))
+ >>> output_size=[64, 48]
+ >>> out = net(in_x, output_size, None)
+ >>> print(out.shape)
+ (2, 3, 64, 48)
+ >>>
10948
+ >>> net = ops.auto_generate.UpsampleBilinear2D()
10949
+ >>> in_x = Tensor(np.array([[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], [[0.7, 0.8, 0.9], [1.0, 1.1, 1.2]]]]).astype(np.float32))
10950
+ >>> output_size=[4, 5]
10951
+ >>> out = net(in_x, output_size, None, True)
10952
+ >>> print(out)
10953
+ [[[[0.1000, 0.1500, 0.2000, 0.2500, 0.3000],
10954
+ [0.2000, 0.2500, 0.3000, 0.3500, 0.4000],
10955
+ [0.3000, 0.3500, 0.4000, 0.4500, 0.5000],
10956
+ [0.4000, 0.4500, 0.5000, 0.5500, 0.6000]],
10957
+ [[0.7000, 0.7500, 0.8000, 0.8500, 0.9000],
10958
+ [0.8000, 0.8500, 0.9000, 0.9500, 1.0000],
10959
+ [0.9000, 0.9500, 1.0000, 1.0500, 1.1000],
10960
+ [1.0000, 1.0500, 1.1000, 1.1500, 1.2000]]]]
10961
+ """
10962
+ __mindspore_signature__ = (
10963
+ sig.make_sig('x'),
10964
+ sig.make_sig('output_size', default=None),
10965
+ sig.make_sig('scales', default=None),
10966
+ sig.make_sig('align_corners', default=False),
10967
+ )
10968
+
10969
+ @prim_arg_register
10970
+ def __init__(self):
10971
+ pass
10972
+
10973
+ def __call__(self, x, output_size=None, scales=None, align_corners=False):
10974
+ return _convert_stub(pyboost_upsample_bilinear2d(self, [x, output_size, scales, align_corners]))
10975
+
10976
+ upsample_bilinear2d_op=UpsampleBilinear2D()
10977
+
10978
+
10979
+ class UpsampleLinear1DGrad(Primitive):
10980
+ r"""
10981
+ Upsample the 1-D gradient data with linear interpolation algorithm.
10982
+
10983
+ Note:
10984
+ One of 'scales' and 'output_size' must be specified. And it is an error if both are specified.
10985
+
10986
+ Inputs:
10987
+ - **dy** (Tensor) - Tensor of shape [N, C, L]. Must be one of the following types:
10988
+ float16, float32, float64.
10989
+ - **input_size** (Union[tuple[int], list[int]]): A required tuple[int] which contains 3 elements:
10990
+ [batch, channels, length]. Must:
10991
+ input_size[0] == dy.shape[0]
10992
+ input_size[1] == dy.shape[1].
10993
+ - **output_size** (Union[tuple[int], list[int]]): An optional tuple[int]. Default: ``None``.
10994
+ It contains 1 element: length, which should be consistent with `dy`. Must:
10995
+ dy.shape[2] == output_size[0].
10996
+ - **scales** (Union[tuple[float], list[float]]): An optional tuple[float]. Default: ``None``.
10997
+ The scale array along each dimension, contains 1 element: scale_length. Must:
+ dy.shape[2] == floor(input_size[2] * scales[0]).
10999
+ - **align_corners** (bool): An optional bool. Default: ``False``.
11000
+
11001
+ Outputs:
11002
+ - **dx** (Tensor) - A Tensor whose shape depends on `input_size` and whose dtype is the same as `dy`.
11003
+ """
11004
+ __mindspore_signature__ = (
11005
+ sig.make_sig('dy'),
11006
+ sig.make_sig('input_size'),
11007
+ sig.make_sig('output_size', default=None),
11008
+ sig.make_sig('scales', default=None),
11009
+ sig.make_sig('align_corners', default=False),
11010
+ )
11011
+
11012
+ @prim_arg_register
11013
+ def __init__(self):
11014
+ pass
11015
+
11016
+ def __call__(self, dy, input_size, output_size=None, scales=None, align_corners=False):
11017
+ return _convert_stub(pyboost_upsample_linear1d_grad(self, [dy, input_size, output_size, scales, align_corners]))
11018
+
11019
+ upsample_linear1d_grad_op=UpsampleLinear1DGrad()
11020
+
11021
+
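Since `UpsampleLinear1DGrad` documents only shape constraints, a hedged shape-level sketch may help; it assumes the module-level `upsample_linear1d_grad_op` instance defined above is importable and an Ascend device is available:

>>> import numpy as np
>>> from mindspore import Tensor
>>> dy = Tensor(np.ones((2, 3, 8)).astype(np.float32))  # gradient w.r.t. an upsampled output of length 8
>>> dx = upsample_linear1d_grad_op(dy, (2, 3, 4), (8,), None, False)
>>> # dx is expected to have shape (2, 3, 4), i.e. the original input_size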
11022
+ class UpsampleLinear1D(Primitive):
11023
+ r"""
11024
+ Upsample the 1-D input data with the linear interpolation algorithm.
11025
+ """
11026
+ __mindspore_signature__ = (
11027
+ sig.make_sig('x'),
11028
+ sig.make_sig('output_size', default=None),
11029
+ sig.make_sig('scales', default=None),
11030
+ sig.make_sig('align_corners', default=False),
11031
+ )
11032
+
11033
+ @prim_arg_register
11034
+ def __init__(self):
11035
+ pass
11036
+
11037
+ def __call__(self, x, output_size=None, scales=None, align_corners=False):
11038
+ return _convert_stub(pyboost_upsample_linear1d(self, [x, output_size, scales, align_corners]))
11039
+
11040
+ upsample_linear1d_op=UpsampleLinear1D()
11041
+
11042
+
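A hedged usage sketch for `UpsampleLinear1D` (shape-level only, assuming the module-level `upsample_linear1d_op` instance above is importable and an Ascend device is available):

>>> import numpy as np
>>> from mindspore import Tensor
>>> x = Tensor(np.arange(8, dtype=np.float32).reshape(1, 2, 4))
>>> y = upsample_linear1d_op(x, [8], None, False)  # output_size given, scales left as None
>>> # y is expected to have shape (1, 2, 8)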
11043
+ class UpsampleNearest1DGrad(Primitive):
11044
+ r"""
11045
+ Upsample the 1-D gradient data with the nearest neighbor interpolation algorithm.
11046
+
11047
+ Note:
11048
+ Only one of 'scales' and 'output_size' can be specified, and it is an error if both are specified.
11049
+
11050
+ Inputs:
11051
+ - **dy** (Tensor) - Tensor of shape [N, C, L]. Must be one of the following types:
11052
+ float16, float32, float64.
11053
+ - **input_size** (tuple[int]): A required tuple[int], which contains 3 elements:
11054
+ [min_batch, channels, length].
11055
+ Must: input_size[0] == dy.shape[0], input_size[1] == dy.shape[1].
11056
+ - **output_size** (tuple[int]): An optional tuple[int]. Default: ``None``.
11057
+ It contains 1 element: length, which should match the corresponding dimension of `dy`. Must:
11058
+ dy.shape[2] == output_size[0].
11059
+ - **scales** (tuple[float]): An optional tuple[float]. Default: ``None``.
11060
+ The scale array along each dimension, containing 1 element: scale_length. Must:
11061
+ dy.shape[2] == floor(input_size[2] * scales[0]).
11062
+
11063
+ Outputs:
11064
+ - **dx** (Tensor) - A 3-D tensor. Has the same type as `dy`, shape depends on `input_size`.
11065
+ """
11066
+ __mindspore_signature__ = (
11067
+ sig.make_sig('dy'),
11068
+ sig.make_sig('input_size'),
11069
+ sig.make_sig('output_size', default=None),
11070
+ sig.make_sig('scales', default=None),
11071
+ )
11072
+
11073
+ @prim_arg_register
11074
+ def __init__(self):
11075
+ pass
11076
+
11077
+ def __call__(self, dy, input_size, output_size=None, scales=None):
11078
+ return _convert_stub(pyboost_upsample_nearest1d_grad(self, [dy, input_size, output_size, scales]))
11079
+
11080
+ upsample_nearest1d_grad_op=UpsampleNearest1DGrad()
11081
+
11082
+
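The `scales` constraint just described, dy.shape[2] == floor(input_size[2] * scales[0]), can be exercised with a hedged shape-level sketch, assuming the module-level `upsample_nearest1d_grad_op` instance above is importable:

>>> import numpy as np
>>> from mindspore import Tensor
>>> dy = Tensor(np.ones((1, 2, 6)).astype(np.float32))
>>> dx = upsample_nearest1d_grad_op(dy, (1, 2, 4), None, [1.5])  # floor(4 * 1.5) == 6 == dy.shape[2]
>>> # dx is expected to have shape (1, 2, 4)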
11083
+ class UpsampleNearest1D(Primitive):
11084
+ r"""
11085
+ Performs nearest neighbor upsampling operation.
11086
+
11087
+ This operator scales up the input with the specified `output_size` or `scales` factors, using the nearest
11088
+ neighbor algorithm.
11089
+
11090
+ One of `output_size` or `scales` must be given, and they cannot both be specified at the same time.
11091
+
11092
+ Inputs:
11093
+ - **x** (Tensor) - 3D tensor of shape :math:`(N, C, L_{in})`.
11094
+ Supporting types: [uint8, float16, float32, float64].
11095
+ - **output_size** (Union[tuple[int], list[int]]): A tuple or list of int specifying the output volumetric size.
11096
+ Default: ``None``.
11097
+ - **scales** (Union[tuple[float], list[float]]): A tuple or list of float specifying the upsampling factors.
11098
+ Default: ``None``.
11099
+
11100
+ Outputs:
11101
+ - **y** (Tensor) - Upsampled output with the same type as `x` , whose shape is
11102
+ :math:`(N, C, L_{out})`.
11103
+
11104
+ Raises:
11105
+ TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int].
11106
+ TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float].
11107
+ TypeError: If dtype of `x` is not in [uint8, float16, float32, float64].
11108
+ ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``.
11109
+ ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``.
11110
+ ValueError: If shape of `x` is not 3D.
11111
+ ValueError: If neither `scales` nor `output_size` is specified, or if both are specified.
11112
+ ValueError: If size of `scales` is not equal to 1 when `scales` is specified.
11113
+ ValueError: If size of `output_size` is not equal to 1 when `output_size` is specified.
11114
+
11115
+ Supported Platforms:
11116
+ ``Ascend``
11117
+
11118
+ Examples:
11119
+ >>> import numpy as np
11120
+ >>> from mindspore import Tensor, ops
11121
+ >>> from mindspore import dtype as mstype
11122
+ >>> x = Tensor(np.arange(10).reshape(1, 2, 5), mstype.float32)
11123
+ >>> output_size = [8,]
11124
+ >>> net = ops.auto_generate.UpsampleNearest1D()
11125
+ >>> output = net(x, output_size, None)
11126
+ >>> print(output)
11127
+ [[[0., 0., 1., 1., 2., 3., 3., 4.],
11128
+ [5., 5., 6., 6., 7., 8., 8., 9.]]]
11129
+ """
11130
+ __mindspore_signature__ = (
11131
+ sig.make_sig('x'),
11132
+ sig.make_sig('output_size', default=None),
11133
+ sig.make_sig('scales', default=None),
11134
+ )
11135
+
11136
+ @prim_arg_register
11137
+ def __init__(self):
11138
+ pass
11139
+
11140
+ def __call__(self, x, output_size=None, scales=None):
11141
+ return _convert_stub(pyboost_upsample_nearest1d(self, [x, output_size, scales]))
11142
+
11143
+ upsample_nearest1d_op=UpsampleNearest1D()
11144
+
11145
+
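The docstring example above uses `output_size`; below is a hedged sketch of the `scales` path for `UpsampleNearest1D`, assuming the module-level `upsample_nearest1d_op` instance above is importable and an Ascend device is available:

>>> import numpy as np
>>> from mindspore import Tensor
>>> x = Tensor(np.arange(10, dtype=np.float32).reshape(1, 2, 5))
>>> y = upsample_nearest1d_op(x, None, [2.0])
>>> # y is expected to have shape (1, 2, 10), with every input element repeated twice along L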
11146
+ class UpsampleNearest2DGrad(Primitive):
11147
+ r"""
11148
+ Upsample the 2-D gradient data with the nearest neighbor interpolation algorithm.
11149
+
11150
+ Note:
11151
+ Only one of 'scales' and 'output_size' can be specified, and it is an error if both are specified.
11152
+
11153
+ Inputs:
11154
+ - **dy** (Tensor) - Tensor of shape [N, C, H, W]. Must be one of the following types:
11155
+ float16, float32, float64.
11156
+ - **input_size** (tuple[int]): A required tuple[int], which contains 4 elements:
11157
+ [min_batch, channels, height, width].
11158
+ Must: input_size[0] == dy.shape[0], input_size[1] == dy.shape[1].
11159
+ - **output_size** (tuple[int]): An optional tuple[int]. Default: ``None``.
11160
+ It contains 2 elements: height, width, which should match the corresponding dimensions of `dy`. Must:
11161
+ dy.shape[2] == output_size[0],
11162
+ dy.shape[3] == output_size[1].
11163
+ - **scales** (tuple[float]): An optional tuple[float]. Default: ``None``.
11164
+ The scale array along each dimension, containing 2 elements: scale_height, scale_width. Must:
11165
+ dy.shape[2] == floor(input_size[2] * scales[0]),
11166
+ dy.shape[3] == floor(input_size[3] * scales[1]).
11167
+
11168
+ Outputs:
11169
+ - **dx** (Tensor) - A 4-D tensor. Has the same type as `dy`, shape depends on `input_size`.
11170
+ """
11171
+ __mindspore_signature__ = (
11172
+ sig.make_sig('dy'),
11173
+ sig.make_sig('input_size'),
11174
+ sig.make_sig('output_size', default=None),
11175
+ sig.make_sig('scales', default=None),
11176
+ )
11177
+
11178
+ @prim_arg_register
11179
+ def __init__(self):
11180
+ pass
11181
+
11182
+ def __call__(self, dy, input_size, output_size=None, scales=None):
11183
+ return _convert_stub(pyboost_upsample_nearest2d_grad(self, [dy, input_size, output_size, scales]))
11184
+
11185
+ upsample_nearest2d_grad_op=UpsampleNearest2DGrad()
11186
+
11187
+
11188
+ class UpsampleNearest2D(Primitive):
11189
+ r"""
11190
+ Performs nearest neighbor upsampling operation.
11191
+
11192
+ This operator scales up the input with the specified `output_size` or `scales` factors, using the nearest
11193
+ neighbor algorithm.
11194
+
11195
+ One of `output_size` or `scales` must be given, and they cannot both be specified at the same time.
11196
+
11197
+ Inputs:
11198
+ - **x** (Tensor) - 4D tensor of shape :math:`(N, C, H_{in}, W_{in})`.
11199
+ Supporting types: [uint8, float16, float32, float64].
11200
+ - **output_size** (Union[tuple[int], list[int]]): A tuple or list of int specifying the output volumetric size.
11201
+ Default: ``None``.
11202
+ - **scales** (Union[tuple[float], list[float]]): A tuple or list of float specifying the upsampling factors.
11203
+ Default: ``None``.
11204
+
11205
+ Outputs:
11206
+ - **y** (Tensor) - Upsampled output with the same type as `x` , whose shape is
11207
+ :math:`(N, C, H_{out}, W_{out})`.
11208
+
11209
+ Raises:
11210
+ TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int].
11211
+ TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float].
11212
+ TypeError: If dtype of `x` is not in [uint8, float16, float32, float64].
11213
+ ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``.
11214
+ ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``.
11215
+ ValueError: If shape of `x` is not 4D.
11216
+ ValueError: If neither `scales` nor `output_size` is specified, or if both are specified.
11217
+ ValueError: If size of `scales` is not equal to 2 when `scales` is specified.
11218
+ ValueError: If size of `output_size` is not equal to 2 when `output_size` is specified.
11219
+
11220
+ Supported Platforms:
11221
+ ``Ascend``
11222
+
11223
+ Examples:
11224
+ >>> import numpy as np
11225
+ >>> from mindspore import Tensor, ops
11226
+ >>> from mindspore import dtype as mstype
11227
+ >>> x = Tensor(np.arange(12).astype(np.float32).reshape(1, 2, 2, 3))
11228
+ >>> output_size = [4, 4]
11229
+ >>> net = ops.auto_generate.UpsampleNearest2D()
11230
+ >>> output = net(x, output_size, None)
11231
+ >>> print(output)
11232
+ [[[[0., 0., 1., 2.],
11233
+ [0., 0., 1., 2.],
11234
+ [3., 3., 4., 5.],
11235
+ [3., 3., 4., 5.]],
11236
+ [[6., 6., 7., 8.],
11237
+ [6., 6., 7., 8.],
11238
+ [9., 9., 10., 11.],
11239
+ [9., 9., 10., 11.]]]]
11240
+ """
11241
+ __mindspore_signature__ = (
11242
+ sig.make_sig('x'),
11243
+ sig.make_sig('output_size', default=None),
11244
+ sig.make_sig('scales', default=None),
11245
+ )
11246
+
11247
+ @prim_arg_register
11248
+ def __init__(self):
11249
+ pass
11250
+
11251
+ def __call__(self, x, output_size=None, scales=None):
11252
+ return _convert_stub(pyboost_upsample_nearest2d(self, [x, output_size, scales]))
11253
+
11254
+ upsample_nearest2d_op=UpsampleNearest2D()
11255
+
11256
+
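As with the 1-D variant, `UpsampleNearest2D` can also be driven by `scales` instead of `output_size`; a hedged shape-level sketch, assuming the module-level `upsample_nearest2d_op` instance above is importable and an Ascend device is available:

>>> import numpy as np
>>> from mindspore import Tensor
>>> x = Tensor(np.arange(12, dtype=np.float32).reshape(1, 2, 2, 3))
>>> y = upsample_nearest2d_op(x, None, [2.0, 2.0])
>>> # y is expected to have shape (1, 2, 4, 6): floor(2 * 2.0) = 4, floor(3 * 2.0) = 6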
11257
+ class UpsampleNearest3DGrad(Primitive):
11258
+ r"""
11259
+ Upsample the 3-D gradient data with the nearest neighbor interpolation algorithm.
11260
+
11261
+ Note:
11262
+ Only one of 'scales' and 'output_size' can be specified, and it is an error if both are specified.
11263
+
11264
+ Inputs:
11265
+ - **dy** (Tensor) - Tensor of shape [N, C, D, H, W]. Must be one of the following types:
11266
+ float16, float32, float64.
11267
+ - **input_size** (tuple[int]): A required tuple[int], which contains 5 elements:
11268
+ [min_batch, channels, depth, height, width].
11269
+ Must: input_size[0] == dy.shape[0], input_size[1] == dy.shape[1].
11270
+ - **output_size** (tuple[int]): An optional tuple[int]. Default: ``None``.
11271
+ It contains 3 elements: depth, height, width, which should match the corresponding dimensions of `dy`. Must:
11272
+ dy.shape[2] == output_size[0],
11273
+ dy.shape[3] == output_size[1],
11274
+ dy.shape[4] == output_size[2].
11275
+ - **scales** (tuple[float]): An optional tuple[float]. Default: ``None``.
11276
+ The scale array along each dimension, containing 3 elements: scale_depth, scale_height, scale_width. Must:
11277
+ dy.shape[2] == floor(input_size[2] * scales[0]),
11278
+ dy.shape[3] == floor(input_size[3] * scales[1]),
11279
+ dy.shape[4] == floor(input_size[4] * scales[2]).
11280
+
11281
+ Outputs:
11282
+ - **dx** (Tensor) - A 5-D tensor. Has the same type as `dy`, shape depends on `input_size`.
11283
+ """
11284
+ __mindspore_signature__ = (
11285
+ sig.make_sig('dy'),
11286
+ sig.make_sig('input_size'),
11287
+ sig.make_sig('output_size', default=None),
11288
+ sig.make_sig('scales', default=None),
11289
+ )
11290
+
11291
+ @prim_arg_register
11292
+ def __init__(self):
11293
+ pass
11294
+
11295
+ def __call__(self, dy, input_size, output_size=None, scales=None):
11296
+ return _convert_stub(pyboost_upsample_nearest3d_grad(self, [dy, input_size, output_size, scales]))
11297
+
11298
+ upsample_nearest3d_grad_op=UpsampleNearest3DGrad()
11299
+
11300
+
11301
+ class UpsampleNearest3D(Primitive):
11302
+ r"""
11303
+ Performs nearest neighbor upsampling operation.
11304
+
11305
+ This operator scales up the volumetric input with the specified `output_size` or `scales` factors, using the nearest
11306
+ neighbor algorithm.
11307
+
11308
+ One of `output_size` or `scales` must be given, and they cannot both be specified at the same time.
11309
+
11310
+ Inputs:
11311
+ - **x** (Tensor) - 5D tensor of shape :math:`(N, C, D_{in}, H_{in}, W_{in})`.
11312
+ Supporting types: [float16, float32, float64].
11313
+ - **output_size** (Union[tuple[int], list[int]]): A tuple or list of int specifying the output volumetric size.
11314
+ Default: ``None``.
11315
+ - **scales** (Union[tuple[float], list[float]]): A tuple or list of float specifying the upsampling factors.
11316
+ Default: ``None``.
11317
+
11318
+ Outputs:
11319
+ - **y** (Tensor) - Upsampled output with the same type as `x` , whose shape is
11320
+ :math:`(N, C, D_{out}, H_{out}, W_{out})`.
11321
+
11322
+ Raises:
11323
+ TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int].
11324
+ TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float].
11325
+ TypeError: If dtype of `x` is not in [float16, float32, float64].
11326
+ ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``.
11327
+ ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``.
11328
+ ValueError: If shape of `x` is not 5D.
11329
+ ValueError: If neither `scales` nor `output_size` is specified, or if both are specified.
11330
+ ValueError: If size of `scales` is not equal to 3 when `scales` is specified.
11331
+ ValueError: If size of `output_size` is not equal to 3 when `output_size` is specified.
11332
+
11333
+ Supported Platforms:
11334
+ ``Ascend`` ``GPU`` ``CPU``
11335
+
11336
+ Examples:
11337
+ >>> import numpy as np
11338
+ >>> from mindspore import Tensor, ops
11339
+ >>> from mindspore import dtype as mstype
11340
+ >>> x = Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
11341
+ ... .reshape([1, 1, 2, 2, 4]), mstype.float32)
11342
+ >>> output_size = [3, 4, 5]
11343
+ >>> net = ops.UpsampleNearest3D()
11344
+ >>> output = net(x, output_size, None)
11345
+ >>> print(output)
11346
+ [[[[[ 1. 1. 2. 3. 4.]
11347
+ [ 1. 1. 2. 3. 4.]
11348
+ [ 5. 5. 6. 7. 8.]
11349
+ [ 5. 5. 6. 7. 8.]]
11350
+ [[ 1. 1. 2. 3. 4.]
11351
+ [ 1. 1. 2. 3. 4.]
11352
+ [ 5. 5. 6. 7. 8.]
11353
+ [ 5. 5. 6. 7. 8.]]
11354
+ [[ 9. 9. 10. 11. 12.]
11355
+ [ 9. 9. 10. 11. 12.]
11356
+ [13. 13. 14. 15. 16.]
11357
+ [13. 13. 14. 15. 16.]]]]]
11358
+ """
11359
+ __mindspore_signature__ = (
11360
+ sig.make_sig('x'),
11361
+ sig.make_sig('output_size', default=None),
11362
+ sig.make_sig('scales', default=None),
11363
+ )
11364
+
11365
+ @prim_arg_register
11366
+ def __init__(self):
11367
+ pass
11368
+
11369
+ def __call__(self, x, output_size=None, scales=None):
11370
+ return _convert_stub(pyboost_upsample_nearest3d(self, [x, output_size, scales]))
11371
+
11372
+ upsample_nearest3d_op=UpsampleNearest3D()
11373
+
11374
+
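A hedged `scales`-driven sketch for `UpsampleNearest3D` (shape-level only, assuming the module-level `upsample_nearest3d_op` instance above is importable and a supported device is available):

>>> import numpy as np
>>> from mindspore import Tensor
>>> x = Tensor(np.ones((1, 1, 2, 2, 2)).astype(np.float32))
>>> y = upsample_nearest3d_op(x, None, [2.0, 2.0, 2.0])
>>> # y is expected to have shape (1, 1, 4, 4, 4)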
11375
+ class UpsampleTrilinear3DGrad(Primitive):
11376
+ r"""
11377
+ Upsample the 3-D gradient data with trilinear interpolation algorithm.
11378
+
11379
+ Note:
11380
+ One of 'scales' and 'output_size' must be specified, and it is an error if both are specified.
11381
+
11382
+ Inputs:
11383
+ - **dy** (Tensor) - Tensor of shape [N, C, D, H, W]. Must be one of the following types:
11384
+ float16, float32, float64.
11385
+ - **input_size** (Union[tuple[int], list[int]]): A required tuple[int] which contains 5 elements:
11386
+ [batch, channels, depth, height, width]. Must:
11387
+ input_size[0] == dy.shape[0]
11388
+ input_size[1] == dy.shape[1].
11389
+ - **output_size** (Union[tuple[int], list[int]]): An optional tuple[int]. Default: ``None``.
11390
+ It contains 3 elements: depth, height, width, which should match the corresponding dimensions of `dy`. Must:
11391
+ dy.shape[2] == output_size[0],
11392
+ dy.shape[3] == output_size[1],
11393
+ dy.shape[4] == output_size[2].
11394
+ - **scales** (Union[tuple[float], list[float]]): An optional tuple[float]. Default: ``None``.
11395
+ The scale array along each dimension, containing 3 elements: scale_depth, scale_height, scale_width. Must:
11396
+ dy.shape[2] == floor(input_size[2] * scales[0]),
11397
+ dy.shape[3] == floor(input_size[3] * scales[1]),
11398
+ dy.shape[4] == floor(input_size[4] * scales[2]).
11399
+ - **align_corners** (bool): An optional bool. Default: ``False``.
11400
+
11401
+ Outputs:
11402
+ - **dx** (Tensor) - A Tensor whose shape depends on `input_size` and whose dtype is the same as `dy`.
11403
+ """
11404
+ __mindspore_signature__ = (
11405
+ sig.make_sig('dy'),
11406
+ sig.make_sig('input_size'),
11407
+ sig.make_sig('output_size', default=None),
11408
+ sig.make_sig('scales', default=None),
11409
+ )
11410
+
11411
+ @prim_arg_register
11412
+ def __init__(self, align_corners=False):
11413
+ self._set_prim_arg("align_corners", align_corners)
11414
+
11415
+ def __call__(self, dy, input_size, output_size=None, scales=None):
11416
+ return _convert_stub(pyboost_upsample_trilinear3d_grad(self, [dy, input_size, output_size, scales, self.align_corners]))
11417
+
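Note that `UpsampleTrilinear3DGrad` takes `align_corners` as a constructor argument rather than a call argument, and no module-level instance is created for it here. A hedged shape-level sketch, assuming the class defined above is importable from this generated module:

>>> import numpy as np
>>> from mindspore import Tensor
>>> grad_op = UpsampleTrilinear3DGrad(align_corners=False)
>>> dy = Tensor(np.ones((1, 1, 4, 4, 4)).astype(np.float32))
>>> dx = grad_op(dy, (1, 1, 2, 2, 2), (4, 4, 4), None)
>>> # dx is expected to have shape (1, 1, 2, 2, 2), matching input_size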
11418
+ class UpsampleTrilinear3D(Primitive):
11419
+ r"""
11420
+ Performs upsampling with trilinear interpolation across three dimensions for a 5-D input Tensor.
11421
+
11422
+ This operator scales up the volumetric input with the specified `output_size` or `scales` factors,
11423
+ using the trilinear upscaling algorithm.
11424
+
11425
+ Note:
11426
+ One of `scales` and `output_size` must be specified, and it is an error if both are specified.
11427
+
11428
+ Args:
11429
+ align_corners (bool, optional): An optional bool. Default: ``False``.
11430
+ If ``True``, the input and output tensors are aligned by the center points of their corner pixels,
11431
+ preserving the values at the corner pixels.
11432
+ If ``False`` , the input and output tensors are aligned by the corner points of their corner pixels,
11433
+ and the interpolation uses edge value padding for out-of-boundary values.
11434
+
11435
+ Inputs:
11436
+ - **x** (Tensor) - 5D tensor of shape :math:`(N, C, D_{in}, H_{in}, W_{in})`. Supporting types:
11437
+ [float16, float32, float64].
11438
+ - **output_size** (Union[tuple[int], list[int]]): A tuple or list of 3 int elements
11439
+ :math:`(output\_depth, output\_height, output\_width)`. Default: ``None``.
11440
+ - **scales** (Union[tuple[float], list[float]]): A tuple or list of 3 float
11441
+ elements :math:`(scale\_depth, scale\_height, scale\_width)`. Default: ``None``.
11442
+
11443
+ Outputs:
11444
+ - **y** (Tensor) - Upsampled output with the same data type as `x`, whose shape is
11445
+ :math:`(N, C, D_{out}, H_{out}, W_{out})`.
11446
+
11447
+ Raises:
11448
+ TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int].
11449
+ TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float].
11450
+ TypeError: If dtype of `x` is not in [float16, float32, float64].
11451
+ TypeError: If type of `align_corners` is not bool.
11452
+ ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``.
11453
+ ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``.
11454
+ ValueError: If shape of `x` is not 5D.
11455
+ ValueError: If neither `scales` nor `output_size` is specified, or if both are specified.
11456
+ ValueError: If size of `scales` is not equal to 3 when `scales` is specified.
11457
+ ValueError: If size of `output_size` is not equal to 3 when `output_size` is specified.
11458
+
11459
+ Supported Platforms:
11460
+ ``Ascend`` ``GPU`` ``CPU``
11461
+
11462
+ Examples:
11463
+ >>> import numpy as np
11464
+ >>> from mindspore import Tensor, ops
11465
+ >>> net = ops.UpsampleTrilinear3D()
11466
+ >>> in_x = Tensor(input_data=np.random.randn(2, 3, 4, 512, 256))
11467
+ >>> output_size=[4, 64, 48]
11468
+ >>> out = net(in_x, output_size, None)
11469
+ >>> print(out.shape)
11470
+ (2, 3, 4, 64, 48)
11471
+ >>>
11472
+ >>> net = ops.UpsampleTrilinear3D()
11473
+ >>> in_x = Tensor(np.arange(1, 5, dtype=np.float32).reshape((1, 1, 1, 2, 2)))
11474
+ >>> output_size=[2, 4, 4]
11475
+ >>> out = net(in_x, output_size, None)
11476
+ >>> print(out)
11477
+ [[[[[1. 1.25 1.75 2. ]
11478
+ [1.5 1.75 2.25 2.5 ]
11479
+ [2.5 2.75 3.25 3.5 ]
11480
+ [3. 3.25 3.75 4. ]]
11481
+ [[1. 1.25 1.75 2. ]
11482
+ [1.5 1.75 2.25 2.5 ]
11483
+ [2.5 2.75 3.25 3.5 ]
11484
+ [3. 3.25 3.75 4. ]]]]]
11485
+ """
11486
+ __mindspore_signature__ = (
11487
+ sig.make_sig('x'),
11488
+ sig.make_sig('output_size', default=None),
11489
+ sig.make_sig('scales', default=None),
11490
+ )
11491
+
11492
+ @prim_arg_register
11493
+ def __init__(self, align_corners=False):
11494
+ self._set_prim_arg("align_corners", align_corners)
11495
+
11496
+ def __call__(self, x, output_size=None, scales=None):
11497
+ return _convert_stub(pyboost_upsample_trilinear3d(self, [x, output_size, scales, self.align_corners]))
11498
+
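The docstring examples above rely on the default `align_corners=False`; below is a hedged sketch of the corner-aligned call, shape-level only, using the public `ops.UpsampleTrilinear3D` alias shown in those examples:

>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> net = ops.UpsampleTrilinear3D(align_corners=True)
>>> x = Tensor(np.arange(1, 9, dtype=np.float32).reshape(1, 1, 2, 2, 2))
>>> y = net(x, [4, 4, 4], None)
>>> # y is expected to have shape (1, 1, 4, 4, 4), with the corner values of x preserved exactly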
11499
+ class View(Primitive):
11500
+ r"""
11501
+ .. code-block::
11502
+
11503
+ prim = ops.View()
11504
+ out = prim(input, shape)
11505
+
11506
+ is equivalent to
11507
+
11508
+ .. code-block::
11509
+
11510
+ ops.view(input, shape)
8252
11511
 
8253
11512
  Refer to :func:`mindspore.ops.view` for more details.
8254
11513
  """
@@ -8295,7 +11554,36 @@ class WeightQuantBatchMatmul(Primitive):
8295
11554
  self._set_prim_arg("antiquant_group_size", antiquant_group_size)
8296
11555
 
8297
11556
  def __call__(self, x, weight, antiquant_scale, antiquant_offset=None, quant_scale=None, quant_offset=None, bias=None):
8298
- return super().__call__(x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, self.transpose_x, self.transpose_weight, self.antiquant_group_size)
11557
+ return _convert_stub(pyboost_weight_quant_batch_matmul(self, [x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, self.transpose_x, self.transpose_weight, self.antiquant_group_size]))
11558
+
11559
+ class ZerosLikeExt(Primitive):
11560
+ r"""
11561
+ Returns a Tensor filled with the value 0, whose shape and data type are the same as the input.
11562
+
11563
+ Refer to :func:`mindspore.ops.zeros_like` for more details.
11564
+
11565
+ Args:
11566
+ - **input** (Tensor) - Tensor of any dimension.
+ - **dtype** (:class:`mindspore.dtype`, optional) - The desired data type of the returned Tensor. If ``None``, the data type of `input` is used. Default: ``None``.
11567
+
11568
+ Returns:
11569
+ Tensor, has the same shape as `input` but filled with zeros. Its data type is `dtype` if specified, otherwise the data type of `input`.
11570
+
11571
+ Supported Platforms:
11572
+ ``Ascend``
11573
+ """
11574
+ __mindspore_signature__ = (
11575
+ sig.make_sig('input'),
11576
+ sig.make_sig('dtype', default=None),
11577
+ )
11578
+
11579
+ @prim_arg_register
11580
+ def __init__(self):
11581
+ pass
11582
+
11583
+ def __call__(self, input, dtype=None):
11584
+ return _convert_stub(pyboost_zeros_like_ext(self, [input, dtype if dtype is None else dtype_to_type_id('ZerosLikeExt', 'dtype', dtype)]))
11585
+
11586
+ zeros_like_ext_op=ZerosLikeExt()
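A hedged sketch of the optional `dtype` override for `ZerosLikeExt` (assuming the module-level `zeros_like_ext_op` instance above is importable and an Ascend device is available):

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> x = Tensor(np.ones((2, 3)).astype(np.float32))
>>> y = zeros_like_ext_op(x, ms.float16)
>>> # y is expected to be a (2, 3) tensor of zeros with dtype float16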
8299
11587
 
8300
11588
 
8301
11589
  class ZerosLike(Primitive):