mindspore 2.3.0__cp39-none-any.whl → 2.3.0rc2__cp39-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (423)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Third_Party_Open_Source_Software_Notice +0 -1512
  3. mindspore/__init__.py +1 -2
  4. mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
  5. mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
  6. mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
  7. mindspore/_checkparam.py +25 -5
  8. mindspore/_extends/graph_kernel/model/graph_parallel.py +1 -1
  9. mindspore/_extends/parse/__init__.py +2 -2
  10. mindspore/_extends/parse/compile_config.py +0 -29
  11. mindspore/_extends/parse/namespace.py +2 -2
  12. mindspore/_extends/parse/parser.py +5 -21
  13. mindspore/_extends/parse/resources.py +7 -5
  14. mindspore/_extends/parse/standard_method.py +59 -40
  15. mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
  16. mindspore/amp.py +5 -26
  17. mindspore/bin/cache_admin +0 -0
  18. mindspore/bin/cache_server +0 -0
  19. mindspore/boost/adasum.py +1 -1
  20. mindspore/boost/base.py +1 -1
  21. mindspore/boost/boost_cell_wrapper.py +1 -1
  22. mindspore/boost/grad_freeze.py +2 -2
  23. mindspore/boost/less_batch_normalization.py +6 -9
  24. mindspore/common/__init__.py +1 -8
  25. mindspore/common/_register_for_tensor.py +9 -8
  26. mindspore/common/api.py +65 -275
  27. mindspore/common/dtype.py +4 -8
  28. mindspore/common/dump.py +5 -2
  29. mindspore/common/jit_config.py +1 -1
  30. mindspore/common/lazy_inline.py +2 -14
  31. mindspore/common/parameter.py +15 -14
  32. mindspore/common/recompute.py +5 -20
  33. mindspore/common/sparse_tensor.py +6 -21
  34. mindspore/common/tensor.py +52 -100
  35. mindspore/communication/__init__.py +11 -6
  36. mindspore/communication/management.py +94 -92
  37. mindspore/context.py +18 -180
  38. mindspore/dataset/engine/datasets.py +46 -69
  39. mindspore/dataset/engine/datasets_user_defined.py +53 -72
  40. mindspore/dataset/engine/datasets_vision.py +2 -2
  41. mindspore/dataset/engine/queue.py +38 -56
  42. mindspore/dataset/engine/validators.py +5 -11
  43. mindspore/dataset/vision/__init__.py +5 -5
  44. mindspore/dataset/vision/c_transforms.py +5 -5
  45. mindspore/dataset/vision/py_transforms_util.py +1 -1
  46. mindspore/dataset/vision/transforms.py +46 -591
  47. mindspore/dataset/vision/utils.py +1 -121
  48. mindspore/dataset/vision/validators.py +3 -9
  49. mindspore/hal/__init__.py +1 -7
  50. mindspore/hal/device.py +1 -1
  51. mindspore/include/api/model.h +0 -3
  52. mindspore/include/dataset/vision.h +2 -54
  53. mindspore/include/mindapi/base/types.h +0 -1
  54. mindspore/lib/libdnnl.so.2 +0 -0
  55. mindspore/lib/libmindspore.so +0 -0
  56. mindspore/lib/libmindspore_backend.so +0 -0
  57. mindspore/lib/libmindspore_common.so +0 -0
  58. mindspore/lib/libmindspore_core.so +0 -0
  59. mindspore/lib/libmindspore_glog.so.0 +0 -0
  60. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  61. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  62. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  63. mindspore/lib/libmindspore_shared_lib.so +0 -0
  64. mindspore/lib/libmpi_adapter.so +0 -0
  65. mindspore/lib/libmpi_collective.so +0 -0
  66. mindspore/lib/libnnacl.so +0 -0
  67. mindspore/lib/libopencv_core.so.4.5 +0 -0
  68. mindspore/lib/libps_cache.so +0 -0
  69. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -35
  70. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  71. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  72. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  73. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +0 -72
  74. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  75. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/{aclnn_all_finite.h → aclnn_add_custom.h} +11 -9
  76. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +1 -1
  77. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +1 -1
  78. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
  79. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +12 -184
  80. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +15 -7
  81. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +15 -7
  82. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
  83. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
  84. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +31 -77
  85. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +31 -77
  86. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
  87. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  88. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +5 -4
  89. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
  90. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  91. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  92. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  93. mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
  94. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  95. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
  96. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
  97. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
  98. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +286 -275
  99. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
  100. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
  101. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
  102. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
  103. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -1
  104. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -1
  105. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -3
  106. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -5
  107. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_tiling.h +45 -1
  108. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -1
  109. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +4 -8
  110. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_tiling.h +4 -11
  111. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/kernel/flash_attention_score_mix_hwsync.h +0 -18
  112. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -6
  113. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +75 -1
  114. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/kernel/matmul.h +5 -5
  115. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +3 -18
  116. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_common_tiling.h +5 -5
  117. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_info.h +2 -2
  118. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_data.h +3 -36
  119. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/kernel/matmul_stridedslice_fusion.h +2 -2
  120. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +4 -22
  121. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +2 -16
  122. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +3 -1
  123. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +4 -5
  124. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +4 -9
  125. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +2 -5
  126. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -1
  127. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +4 -10
  128. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +12 -0
  129. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -1
  130. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -1
  131. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +1 -1
  132. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +2 -10
  133. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +1 -5
  134. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -1
  135. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -17
  136. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +7 -2
  137. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
  138. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
  139. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
  140. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
  141. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
  142. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
  143. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
  144. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
  145. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
  146. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
  147. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
  148. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
  149. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
  150. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
  151. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
  152. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
  153. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
  154. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
  155. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
  156. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
  157. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
  158. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
  159. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
  160. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
  161. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
  162. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
  163. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
  164. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  165. mindspore/mindrecord/filewriter.py +2 -2
  166. mindspore/mint/__init__.py +40 -720
  167. mindspore/mint/nn/__init__.py +7 -89
  168. mindspore/mint/nn/functional.py +16 -165
  169. mindspore/mint/optim/adamw.py +16 -15
  170. mindspore/nn/__init__.py +2 -0
  171. mindspore/nn/cell.py +98 -97
  172. mindspore/nn/extend/basic.py +2 -2
  173. mindspore/nn/extend/embedding.py +1 -1
  174. mindspore/nn/extend/layer/normalization.py +5 -7
  175. mindspore/nn/generator.py +297 -0
  176. mindspore/nn/layer/activation.py +3 -4
  177. mindspore/nn/layer/basic.py +16 -79
  178. mindspore/nn/layer/conv.py +8 -17
  179. mindspore/nn/layer/embedding.py +4 -1
  180. mindspore/nn/layer/math.py +1 -1
  181. mindspore/nn/layer/normalization.py +1 -1
  182. mindspore/nn/layer/pooling.py +0 -5
  183. mindspore/nn/layer/rnn_cells.py +2 -2
  184. mindspore/nn/loss/loss.py +19 -19
  185. mindspore/nn/optim/adasum.py +1 -1
  186. mindspore/nn/optim/sgd.py +2 -3
  187. mindspore/nn/probability/distribution/exponential.py +1 -1
  188. mindspore/nn/probability/distribution/geometric.py +1 -1
  189. mindspore/nn/probability/distribution/logistic.py +1 -1
  190. mindspore/nn/wrap/cell_wrapper.py +1 -25
  191. mindspore/nn/wrap/loss_scale.py +1 -24
  192. mindspore/numpy/array_ops.py +1 -5
  193. mindspore/numpy/dtypes.py +3 -3
  194. mindspore/numpy/math_ops.py +8 -8
  195. mindspore/ops/__init__.py +1 -1
  196. mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -75
  197. mindspore/ops/_vmap/vmap_array_ops.py +0 -27
  198. mindspore/ops/_vmap/vmap_math_ops.py +1 -29
  199. mindspore/ops/_vmap/vmap_nn_ops.py +18 -19
  200. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +8 -34
  201. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +9 -2
  202. mindspore/ops/auto_generate/gen_arg_handler.py +0 -26
  203. mindspore/ops/auto_generate/gen_extend_func.py +27 -603
  204. mindspore/ops/auto_generate/gen_ops_def.py +203 -993
  205. mindspore/ops/auto_generate/gen_ops_prim.py +402 -1946
  206. mindspore/ops/auto_generate/pyboost_inner_prim.py +20 -90
  207. mindspore/ops/composite/base.py +6 -3
  208. mindspore/ops/composite/math_ops.py +1 -1
  209. mindspore/ops/composite/multitype_ops/_compile_utils.py +17 -24
  210. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  211. mindspore/ops/extend/__init__.py +3 -2
  212. mindspore/ops/extend/array_func.py +51 -10
  213. mindspore/ops/extend/nn_func.py +78 -2
  214. mindspore/ops/function/__init__.py +13 -8
  215. mindspore/ops/function/array_func.py +179 -455
  216. mindspore/ops/function/clip_func.py +1 -1
  217. mindspore/ops/function/grad/grad_func.py +3 -3
  218. mindspore/ops/function/math_func.py +103 -117
  219. mindspore/ops/function/nn_func.py +163 -275
  220. mindspore/ops/function/other_func.py +2 -2
  221. mindspore/ops/function/random_func.py +69 -202
  222. mindspore/ops/function/sparse_func.py +4 -4
  223. mindspore/ops/functional.py +327 -332
  224. mindspore/ops/operations/__init__.py +3 -13
  225. mindspore/ops/operations/_grad_ops.py +27 -3
  226. mindspore/ops/operations/_inner_ops.py +356 -53
  227. mindspore/ops/operations/_rl_inner_ops.py +2 -2
  228. mindspore/ops/operations/_tensor_array.py +8 -8
  229. mindspore/ops/operations/array_ops.py +65 -82
  230. mindspore/ops/operations/comm_ops.py +93 -784
  231. mindspore/ops/operations/custom_ops.py +28 -51
  232. mindspore/ops/operations/debug_ops.py +4 -4
  233. mindspore/ops/operations/inner_ops.py +2 -2
  234. mindspore/ops/operations/manually_defined/ops_def.py +4 -304
  235. mindspore/ops/operations/math_ops.py +50 -3
  236. mindspore/ops/operations/nn_ops.py +247 -14
  237. mindspore/ops/operations/other_ops.py +3 -3
  238. mindspore/ops/operations/random_ops.py +1 -1
  239. mindspore/ops/operations/sparse_ops.py +1 -1
  240. mindspore/ops/primitive.py +8 -9
  241. mindspore/ops/silent_check.py +5 -5
  242. mindspore/ops_generate/arg_dtype_cast.py +9 -2
  243. mindspore/ops_generate/arg_handler.py +0 -26
  244. mindspore/ops_generate/gen_aclnn_implement.py +4 -1
  245. mindspore/ops_generate/gen_ops.py +4 -26
  246. mindspore/ops_generate/gen_pyboost_func.py +12 -41
  247. mindspore/ops_generate/gen_utils.py +0 -21
  248. mindspore/ops_generate/pyboost_utils.py +2 -7
  249. mindspore/ops_generate/template.py +0 -1
  250. mindspore/parallel/_auto_parallel_context.py +1 -21
  251. mindspore/parallel/_tensor.py +5 -0
  252. mindspore/parallel/_transformer/transformer.py +1 -1
  253. mindspore/parallel/_utils.py +1 -15
  254. mindspore/parallel/algo_parameter_config.py +3 -1
  255. mindspore/parallel/checkpoint_transform.py +9 -12
  256. mindspore/parallel/cluster/process_entity/_api.py +29 -28
  257. mindspore/parallel/cluster/process_entity/_utils.py +3 -13
  258. mindspore/parallel/cluster/run.py +16 -13
  259. mindspore/parallel/parameter_broadcast.py +2 -2
  260. mindspore/parallel/shard.py +17 -31
  261. mindspore/profiler/__init__.py +2 -3
  262. mindspore/profiler/common/util.py +2 -107
  263. mindspore/profiler/envprofiling.py +1 -1
  264. mindspore/profiler/parser/ascend_analysis/constant.py +21 -8
  265. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -82
  266. mindspore/profiler/parser/ascend_analysis/function_event.py +28 -43
  267. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +27 -49
  268. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +10 -15
  269. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +20 -25
  270. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +5 -5
  271. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +1 -10
  272. mindspore/profiler/parser/ascend_hccl_generator.py +1 -4
  273. mindspore/profiler/parser/ascend_msprof_exporter.py +22 -43
  274. mindspore/profiler/parser/ascend_timeline_generator.py +5 -7
  275. mindspore/profiler/parser/minddata_parser.py +3 -72
  276. mindspore/profiler/profiling.py +59 -176
  277. mindspore/rewrite/api/node.py +1 -1
  278. mindspore/rewrite/common/namespace.py +5 -5
  279. mindspore/rewrite/parsers/assign_parser.py +0 -2
  280. mindspore/rewrite/parsers/class_def_parser.py +4 -8
  281. mindspore/run_check/_check_version.py +1 -1
  282. mindspore/scipy/fft.py +3 -1
  283. mindspore/scipy/linalg.py +3 -2
  284. mindspore/scipy/ops.py +3 -5
  285. mindspore/scipy/optimize/__init__.py +2 -2
  286. mindspore/train/__init__.py +4 -4
  287. mindspore/train/anf_ir_pb2.py +2 -8
  288. mindspore/train/callback/__init__.py +2 -5
  289. mindspore/train/callback/_backup_and_restore.py +2 -2
  290. mindspore/train/callback/_checkpoint.py +16 -104
  291. mindspore/train/callback/_landscape.py +1 -1
  292. mindspore/train/callback/_time_monitor.py +1 -1
  293. mindspore/train/data_sink.py +4 -5
  294. mindspore/train/dataset_helper.py +20 -45
  295. mindspore/train/model.py +38 -266
  296. mindspore/train/serialization.py +105 -256
  297. mindspore/train/summary/_summary_adapter.py +1 -1
  298. mindspore/version.py +1 -1
  299. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
  300. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +303 -420
  301. mindspore/_extends/pijit/__init__.py +0 -23
  302. mindspore/_extends/pijit/pijit_func_white_list.py +0 -343
  303. mindspore/common/file_system.py +0 -48
  304. mindspore/common/generator.py +0 -260
  305. mindspore/common/no_inline.py +0 -54
  306. mindspore/common/np_dtype.py +0 -25
  307. mindspore/communication/comm_func.py +0 -1140
  308. mindspore/hal/memory.py +0 -326
  309. mindspore/lib/libavcodec.so.59 +0 -0
  310. mindspore/lib/libavdevice.so.59 +0 -0
  311. mindspore/lib/libavfilter.so.8 +0 -0
  312. mindspore/lib/libavformat.so.59 +0 -0
  313. mindspore/lib/libavutil.so.57 +0 -0
  314. mindspore/lib/libmindspore_np_dtype.so +0 -0
  315. mindspore/lib/libswresample.so.4 +0 -0
  316. mindspore/lib/libswscale.so.6 +0 -0
  317. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.cpp +0 -326
  318. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.py +0 -180
  319. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
  320. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o +0 -0
  321. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
  322. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o +0 -0
  323. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
  324. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o +0 -0
  325. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
  326. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
  327. mindspore/lib/plugin/ascend/custom_compiler/OWNERS +0 -12
  328. mindspore/lib/plugin/ascend/custom_compiler/setup.py +0 -255
  329. mindspore/lib/plugin/ascend/custom_compiler/start.sh +0 -26
  330. mindspore/lib/plugin/ascend/custom_compiler/template.json +0 -40
  331. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
  332. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -69
  333. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/base_type.h +0 -133
  334. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -32
  335. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -35
  336. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/tiling_info.h +0 -60
  337. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -37
  338. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
  339. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
  340. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_op.h +0 -34
  341. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_backoff_base.h +0 -62
  342. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -33
  343. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_ops.h +0 -88
  344. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_pa_op.h +0 -45
  345. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
  346. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -95
  347. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -84
  348. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -61
  349. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -224
  350. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
  351. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
  352. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
  353. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
  354. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
  355. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
  356. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
  357. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
  358. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
  359. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
  360. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
  361. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
  362. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
  363. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
  364. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
  365. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
  366. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
  367. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
  368. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
  369. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
  370. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
  371. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
  372. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
  373. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
  374. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
  375. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
  376. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
  377. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
  378. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
  379. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
  380. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
  381. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
  382. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
  383. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
  384. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
  385. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
  386. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_impl.h +0 -45
  387. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_tiling.h +0 -187
  388. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul.h +0 -245
  389. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_interface.h +0 -24
  390. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_utils.h +0 -111
  391. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/tiling_data.h +0 -54
  392. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
  393. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
  394. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
  395. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -364
  396. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -69
  397. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
  398. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -114
  399. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -98
  400. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.json +0 -19
  401. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.o +0 -0
  402. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aic_0.o +0 -0
  403. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aiv_0.o +0 -0
  404. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.json +0 -19
  405. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.o +0 -0
  406. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aic_0.o +0 -0
  407. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  408. mindspore/mint/linalg/__init__.py +0 -22
  409. mindspore/nn/layer/embedding_service.py +0 -531
  410. mindspore/nn/layer/embedding_service_layer.py +0 -393
  411. mindspore/ops/function/reshard_func.py +0 -102
  412. mindspore/ops/operations/_infer_ops.py +0 -19
  413. mindspore/ops/operations/reshard_ops.py +0 -53
  414. mindspore/profiler/common/process_pool.py +0 -41
  415. mindspore/profiler/common/singleton.py +0 -28
  416. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  417. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  418. mindspore/train/callback/_cluster_monitor.py +0 -201
  419. mindspore/train/callback/_flops_collector.py +0 -238
  420. mindspore/train/callback/_mindio_ttp.py +0 -443
  421. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
  422. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
  423. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
@@ -25,25 +25,17 @@ from mindspore.ops.auto_generate.gen_arg_handler import *
  from mindspore._c_expression import OpDtype
  from mindspore.common._stub_tensor import _convert_stub
  from mindspore._c_expression import pyboost_abs
- from mindspore._c_expression import pyboost_adamw
+ from mindspore._c_expression import pyboost_adam_weight_decay_ext
  from mindspore._c_expression import pyboost_add_ext
- from mindspore._c_expression import pyboost_add_layernorm_v2
- from mindspore._c_expression import pyboost_add
- from mindspore._c_expression import pyboost_addmm
  from mindspore._c_expression import pyboost_arange
  from mindspore._c_expression import pyboost_argmax_ext
  from mindspore._c_expression import pyboost_argmax_with_value
  from mindspore._c_expression import pyboost_argmin_with_value
- from mindspore._c_expression import pyboost_atan2_ext
  from mindspore._c_expression import pyboost_avg_pool2d_grad
  from mindspore._c_expression import pyboost_avg_pool2d
  from mindspore._c_expression import pyboost_batch_mat_mul
  from mindspore._c_expression import pyboost_batch_norm_ext
  from mindspore._c_expression import pyboost_batch_norm_grad_ext
- from mindspore._c_expression import pyboost_binary_cross_entropy_grad
- from mindspore._c_expression import pyboost_binary_cross_entropy
- from mindspore._c_expression import pyboost_binary_cross_entropy_with_logits_backward
- from mindspore._c_expression import pyboost_binary_cross_entropy_with_logits
  from mindspore._c_expression import pyboost_bmm_ext
  from mindspore._c_expression import pyboost_broadcast_to
  from mindspore._c_expression import pyboost_cast
@@ -51,8 +43,6 @@ from mindspore._c_expression import pyboost_ceil
  from mindspore._c_expression import pyboost_chunk
  from mindspore._c_expression import pyboost_clamp_scalar
  from mindspore._c_expression import pyboost_clamp_tensor
- from mindspore._c_expression import pyboost_col2im_ext
- from mindspore._c_expression import pyboost_col2im_grad
  from mindspore._c_expression import pyboost_concat
  from mindspore._c_expression import pyboost_constant_pad_nd
  from mindspore._c_expression import pyboost_contiguous
@@ -60,7 +50,6 @@ from mindspore._c_expression import pyboost_convolution_grad
  from mindspore._c_expression import pyboost_convolution
  from mindspore._c_expression import pyboost_copy
  from mindspore._c_expression import pyboost_cos
- from mindspore._c_expression import pyboost_cumsum_ext
  from mindspore._c_expression import pyboost_dense
  from mindspore._c_expression import pyboost_div
  from mindspore._c_expression import pyboost_divmod
@@ -69,27 +58,22 @@ from mindspore._c_expression import pyboost_dropout_do_mask_ext
  from mindspore._c_expression import pyboost_dropout_ext
  from mindspore._c_expression import pyboost_dropout_gen_mask_ext
  from mindspore._c_expression import pyboost_dropout_grad_ext
- from mindspore._c_expression import pyboost_elu_ext
- from mindspore._c_expression import pyboost_elu_grad_ext
  from mindspore._c_expression import pyboost_embedding_dense_backward
  from mindspore._c_expression import pyboost_embedding
  from mindspore._c_expression import pyboost_equal
  from mindspore._c_expression import pyboost_erf
  from mindspore._c_expression import pyboost_erfinv
  from mindspore._c_expression import pyboost_exp
- from mindspore._c_expression import pyboost_eye
  from mindspore._c_expression import pyboost_ffn_ext
  from mindspore._c_expression import pyboost_fill_scalar
  from mindspore._c_expression import pyboost_fill_tensor
  from mindspore._c_expression import pyboost_flash_attention_score_grad
  from mindspore._c_expression import pyboost_flash_attention_score
  from mindspore._c_expression import pyboost_flatten_ext
- from mindspore._c_expression import pyboost_floor
  from mindspore._c_expression import pyboost_gather_d_grad_v2
  from mindspore._c_expression import pyboost_gather_d
  from mindspore._c_expression import pyboost_gelu_grad
  from mindspore._c_expression import pyboost_gelu
- from mindspore._c_expression import pyboost_generator
  from mindspore._c_expression import pyboost_greater_equal
  from mindspore._c_expression import pyboost_greater
  from mindspore._c_expression import pyboost_grid_sampler_2d_grad
@@ -98,10 +82,6 @@ from mindspore._c_expression import pyboost_grid_sampler_3d_grad
  from mindspore._c_expression import pyboost_grid_sampler_3d
  from mindspore._c_expression import pyboost_group_norm_grad
  from mindspore._c_expression import pyboost_group_norm
- from mindspore._c_expression import pyboost_im2col_ext
- from mindspore._c_expression import pyboost_index_add_ext
- from mindspore._c_expression import pyboost_index_select
- from mindspore._c_expression import pyboost_isclose
  from mindspore._c_expression import pyboost_isfinite
  from mindspore._c_expression import pyboost_layer_norm_ext
  from mindspore._c_expression import pyboost_layer_norm_grad_ext
@@ -117,7 +97,6 @@ from mindspore._c_expression import pyboost_logical_or
  from mindspore._c_expression import pyboost_masked_fill
  from mindspore._c_expression import pyboost_matmul_ext
  from mindspore._c_expression import pyboost_matmul
- from mindspore._c_expression import pyboost_matrix_inverse_ext
  from mindspore._c_expression import pyboost_max
  from mindspore._c_expression import pyboost_max_pool_grad_with_indices
  from mindspore._c_expression import pyboost_max_pool_grad_with_mask
@@ -130,21 +109,15 @@ from mindspore._c_expression import pyboost_minimum
  from mindspore._c_expression import pyboost_mul
  from mindspore._c_expression import pyboost_mv
  from mindspore._c_expression import pyboost_neg
- from mindspore._c_expression import pyboost_non_zero_ext
- from mindspore._c_expression import pyboost_non_zero
  from mindspore._c_expression import pyboost_norm
- from mindspore._c_expression import pyboost_normal_float_float
- from mindspore._c_expression import pyboost_normal_float_tensor
- from mindspore._c_expression import pyboost_normal_tensor_float
- from mindspore._c_expression import pyboost_normal_tensor_tensor
+ from mindspore._c_expression import pyboost_normal_ext
  from mindspore._c_expression import pyboost_not_equal
  from mindspore._c_expression import pyboost_one_hot_ext
  from mindspore._c_expression import pyboost_ones_like_ext
  from mindspore._c_expression import pyboost_ones
  from mindspore._c_expression import pyboost_pow
  from mindspore._c_expression import pyboost_prod_ext
- from mindspore._c_expression import pyboost_rand_ext
- from mindspore._c_expression import pyboost_rand_like_ext
+ from mindspore._c_expression import pyboost_quant_batch_matmul
  from mindspore._c_expression import pyboost_reciprocal
  from mindspore._c_expression import pyboost_reduce_all
  from mindspore._c_expression import pyboost_reduce_any
@@ -156,27 +129,20 @@ from mindspore._c_expression import pyboost_reflection_pad_3d_grad
  from mindspore._c_expression import pyboost_reflection_pad_3d
  from mindspore._c_expression import pyboost_relu_grad
  from mindspore._c_expression import pyboost_relu
- from mindspore._c_expression import pyboost_repeat_interleave_grad
- from mindspore._c_expression import pyboost_repeat_interleave_int
- from mindspore._c_expression import pyboost_repeat_interleave_tensor
+ from mindspore._c_expression import pyboost_repeat_interleave
  from mindspore._c_expression import pyboost_replication_pad_1d_grad
  from mindspore._c_expression import pyboost_replication_pad_1d
  from mindspore._c_expression import pyboost_replication_pad_2d_grad
  from mindspore._c_expression import pyboost_replication_pad_2d
  from mindspore._c_expression import pyboost_replication_pad_3d_grad
  from mindspore._c_expression import pyboost_replication_pad_3d
- from mindspore._c_expression import pyboost_reshape
  from mindspore._c_expression import pyboost_reverse_v2
- from mindspore._c_expression import pyboost_rms_norm_grad
- from mindspore._c_expression import pyboost_rms_norm
  from mindspore._c_expression import pyboost_rsqrt
  from mindspore._c_expression import pyboost_scatter_add_ext
  from mindspore._c_expression import pyboost_scatter
- from mindspore._c_expression import pyboost_searchsorted
  from mindspore._c_expression import pyboost_select
  from mindspore._c_expression import pyboost_sigmoid_grad
  from mindspore._c_expression import pyboost_sigmoid
- from mindspore._c_expression import pyboost_sign
  from mindspore._c_expression import pyboost_silu_grad
  from mindspore._c_expression import pyboost_silu
  from mindspore._c_expression import pyboost_sin
@@ -185,24 +151,21 @@ from mindspore._c_expression import pyboost_softmax_backward
  from mindspore._c_expression import pyboost_softmax
  from mindspore._c_expression import pyboost_softplus_ext
  from mindspore._c_expression import pyboost_softplus_grad_ext
- from mindspore._c_expression import pyboost_sort_ext
  from mindspore._c_expression import pyboost_split_tensor
  from mindspore._c_expression import pyboost_split_with_size
  from mindspore._c_expression import pyboost_sqrt
  from mindspore._c_expression import pyboost_square
  from mindspore._c_expression import pyboost_stack_ext
  from mindspore._c_expression import pyboost_sub_ext
- from mindspore._c_expression import pyboost_sub
  from mindspore._c_expression import pyboost_sum_ext
  from mindspore._c_expression import pyboost_tanh_grad
  from mindspore._c_expression import pyboost_tanh
  from mindspore._c_expression import pyboost_tile
  from mindspore._c_expression import pyboost_topk_ext
  from mindspore._c_expression import pyboost_transpose
+ from mindspore._c_expression import pyboost_tril
  from mindspore._c_expression import pyboost_triu
  from mindspore._c_expression import pyboost_uniform_ext
- from mindspore._c_expression import pyboost_unique2
- from mindspore._c_expression import pyboost_unique_dim
  from mindspore._c_expression import pyboost_upsample_bilinear2d_grad
  from mindspore._c_expression import pyboost_upsample_bilinear2d
  from mindspore._c_expression import pyboost_upsample_linear1d_grad
@@ -215,14 +178,9 @@ from mindspore._c_expression import pyboost_upsample_nearest3d_grad
  from mindspore._c_expression import pyboost_upsample_nearest3d
  from mindspore._c_expression import pyboost_upsample_trilinear3d_grad
  from mindspore._c_expression import pyboost_upsample_trilinear3d
+ from mindspore._c_expression import pyboost_weight_quant_batch_matmul
  from mindspore._c_expression import pyboost_zeros_like_ext
  from mindspore._c_expression import pyboost_zeros
- from mindspore._c_expression import pyboost_dynamic_quant_ext
- from mindspore._c_expression import pyboost_grouped_matmul
- from mindspore._c_expression import pyboost_moe_finalize_routing
- from mindspore._c_expression import pyboost_quant_batch_matmul
- from mindspore._c_expression import pyboost_quant_v2
- from mindspore._c_expression import pyboost_weight_quant_batch_matmul


  class ACosGrad(Primitive):
@@ -350,6 +308,106 @@ class Acosh(Primitive):
  acosh_op=Acosh()


+ class AdamWeightDecayExt(Primitive):
+ r"""
+ Implements Adam Weight Decay algorithm.
+
+ .. math::
+ \begin{aligned}
+ &\textbf{input} : \gamma \text{(lr)}, \: \beta_1, \beta_2
+ \text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)},
+ \: \epsilon \text{ (epsilon)} \\
+ &\hspace{13mm} \lambda \text{(weight decay)}, \: \textit{amsgrad},
+ \: \textit{maximize} \\
+ &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0
+ \text{ ( second moment)}, \: \widehat{v_0}^{max}\leftarrow 0 \\[-1.ex]
+ &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
+ &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\
+ &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\
+ &\hspace{5mm}\textbf{else} \\
+ &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
+ &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\
+ &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
+ &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\
+ &\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\
+ &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\
+ &\hspace{5mm}\textbf{if} \: amsgrad \\
+ &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
+ \widehat{v_t}) \\
+ &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
+ \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\
+ &\hspace{5mm}\textbf{else} \\
+ &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
+ \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\
+ &\bf{return} \: \theta_t \\[-1.ex]
+ \end{aligned}
+
+ .. warning::
+ This is an experimental optimizer API that is subject to change.
+ This module must be used with lr scheduler module in `LRScheduler Class
+ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.experimental.html#lrscheduler-class>`_ .
+
+ Inputs:
+ - **var** (Parameter) - Weights to be updated. The shape is :math:`(N, *)` where :math:`*` means,
+ any number of additional dimensions. The data type can be float16 or float32.
+ - **m** (Parameter) - The 1st moment vector in the updating formula,
+ it should have the the shape as `var`. The data type can be float16 or float32.
+ - **v** (Parameter) - The 2nd moment vector in the updating formula,
+ it should have the same shape as `m`.
+ - **max_v** (Parameter) - The 2nd moment vector in the updating formula,
+ it should have the same shape as `m`.
+ - **gradient** (Tensor) - Gradient, has the same shape as `var`
+ - **step** (float, int) - step
+ - **lr** (float) - :math:`lr` in the updating formula. The paper suggested value is :math:`10^{-8}`,
+ the data type should be float32.
+ - **beta1** (float) - The exponential decay rate for the 1st moment estimations,
+ the data type should be float32. The paper suggested value is :math:`0.9`
+ - **beta2** (float) - The exponential decay rate for the 2nd moment estimations,
+ the data type should be float32. The paper suggested value is :math:`0.999`
+ - **decay** (float) - weight decay (L2 penalty), must be a scalar tensor with float32 data type.
+ - **eps** (float) - Term added to the denominator to improve numerical stability,
+ the data type should be float32.
+ - **amsgrad** (bool) - whether to use the AMSGrad algorithm. Default: ``False``.
+ - **maximize** (bool) - maximize the params based on the objective, instead of minimizing.
+ Default: ``False``.
+ .
+
+ Outputs:
+ Tuple of 3 Tensor, the updated parameters.
+
+ - **var** (Tensor) - The same shape and data type as `var`.
+ - **m** (Tensor) - The same shape and data type as `m`.
+ - **v** (Tensor) - The same shape and data type as `v`.
+
+ Supported Platforms:
+ ``Ascend``
+ """
+ __mindspore_signature__ = (
+ sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
+ sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
+ sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
+ sig.make_sig('max_v', dtype=sig.sig_dtype.T1),
+ sig.make_sig('gradient', dtype=sig.sig_dtype.T),
+ sig.make_sig('step', dtype=sig.sig_dtype.T2),
+ sig.make_sig('lr', dtype=sig.sig_dtype.T3),
+ sig.make_sig('beta1', dtype=sig.sig_dtype.T3),
+ sig.make_sig('beta2', dtype=sig.sig_dtype.T3),
+ sig.make_sig('decay', dtype=sig.sig_dtype.T3),
+ sig.make_sig('eps', dtype=sig.sig_dtype.T3),
+ sig.make_sig('amsgrad', dtype=sig.sig_dtype.T4, default=False),
+ sig.make_sig('maximize', dtype=sig.sig_dtype.T5, default=False),
+ )
+
+ @prim_arg_register
+ def __init__(self):
+ self.add_prim_attr("side_effect_mem", True)
+
+ def __call__(self, var, m, v, max_v, gradient, step, lr, beta1, beta2, decay, eps, amsgrad=False, maximize=False):
+ return _convert_stub(pyboost_adam_weight_decay_ext(self, [var, m, v, max_v, gradient, step, lr, beta1, beta2, decay, eps, amsgrad, maximize]))
+
+ adam_weight_decay_ext_op=AdamWeightDecayExt()
+
+
  class AdamWeightDecay(Primitive):
  r"""
  Updates gradients by the Adaptive Moment Estimation algorithm with weight decay (AdamWeightDecay).
@@ -466,106 +524,6 @@ class AdamWeightDecay(Primitive):
466
524
  return super().__call__(var, m, v, lr, beta1, beta2, epsilon, decay, gradient, self.use_locking)
467
525
 
468
526
 
469
- class AdamW(Primitive):
470
- r"""
471
- Implements Adam Weight Decay algorithm.
472
-
473
- .. math::
474
- \begin{aligned}
475
- &\textbf{input} : \gamma \text{(lr)}, \: \beta_1, \beta_2
476
- \text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)},
477
- \: \epsilon \text{ (epsilon)} \\
478
- &\hspace{13mm} \lambda \text{(weight decay)}, \: \textit{amsgrad},
479
- \: \textit{maximize} \\
480
- &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0
481
- \text{ ( second moment)}, \: \widehat{v_0}^{max}\leftarrow 0 \\[-1.ex]
482
- &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
483
- &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\
484
- &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\
485
- &\hspace{5mm}\textbf{else} \\
486
- &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
487
- &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\
488
- &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
489
- &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\
490
- &\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\
491
- &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\
492
- &\hspace{5mm}\textbf{if} \: amsgrad \\
493
- &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
494
- \widehat{v_t}) \\
495
- &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
496
- \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\
497
- &\hspace{5mm}\textbf{else} \\
498
- &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
499
- \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\
500
- &\bf{return} \: \theta_t \\[-1.ex]
501
- \end{aligned}
502
-
503
- .. warning::
504
- This is an experimental optimizer API that is subject to change.
505
- This module must be used with lr scheduler module in `LRScheduler Class
506
- <https://www.mindspore.cn/docs/en/master/api_python/mindspore.experimental.html#lrscheduler-class>`_ .
507
-
508
- Inputs:
509
- - **var** (Parameter) - Weights to be updated. The shape is :math:`(N, *)` where :math:`*` means,
510
- any number of additional dimensions. The data type can be float16 or float32.
511
- - **m** (Parameter) - The 1st moment vector in the updating formula,
512
- it should have the the shape as `var`. The data type can be float16 or float32.
513
- - **v** (Parameter) - The 2nd moment vector in the updating formula,
514
- it should have the same shape as `m`.
515
- - **max_v** (Parameter) - The 2nd moment vector in the updating formula,
516
- it should have the same shape as `m`.
517
- - **gradient** (Tensor) - Gradient, has the same shape as `var`
518
- - **step** (Tensor) - step
519
- - **lr** (float) - :math:`lr` in the updating formula. The paper suggested value is :math:`10^{-8}`,
520
- the data type should be float.
521
- - **beta1** (float) - The exponential decay rate for the 1st moment estimations,
522
- the data type should be float. The paper suggested value is :math:`0.9`
523
- - **beta2** (float) - The exponential decay rate for the 2nd moment estimations,
524
- the data type should be float. The paper suggested value is :math:`0.999`
525
- - **decay** (float) - weight decay (L2 penalty), must be a scalar tensor with float data type.
526
- - **eps** (float) - Term added to the denominator to improve numerical stability,
527
- the data type should be float.
528
- - **amsgrad** (bool) - whether to use the AMSGrad algorithm. Default: ``False``.
529
- - **maximize** (bool) - maximize the params based on the objective, instead of minimizing.
530
- Default: ``False``.
531
- .
532
-
533
- Outputs:
534
- Tuple of 3 Tensor, the updated parameters.
535
-
536
- - **var** (Tensor) - The same shape and data type as `var`.
537
- - **m** (Tensor) - The same shape and data type as `m`.
538
- - **v** (Tensor) - The same shape and data type as `v`.
539
-
540
- Supported Platforms:
541
- ``Ascend``
542
- """
543
- __mindspore_signature__ = (
544
- sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
545
- sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
546
- sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
547
- sig.make_sig('max_v', dtype=sig.sig_dtype.T1),
548
- sig.make_sig('gradient', dtype=sig.sig_dtype.T),
549
- sig.make_sig('step', dtype=sig.sig_dtype.T2),
550
- sig.make_sig('lr', dtype=sig.sig_dtype.T3),
551
- sig.make_sig('beta1', dtype=sig.sig_dtype.T3),
552
- sig.make_sig('beta2', dtype=sig.sig_dtype.T3),
553
- sig.make_sig('decay', dtype=sig.sig_dtype.T3),
554
- sig.make_sig('eps', dtype=sig.sig_dtype.T3),
555
- sig.make_sig('amsgrad', dtype=sig.sig_dtype.T4, default=False),
556
- sig.make_sig('maximize', dtype=sig.sig_dtype.T5, default=False),
557
- )
558
-
559
- @prim_arg_register
560
- def __init__(self):
561
- self.add_prim_attr("side_effect_mem", True)
562
-
563
- def __call__(self, var, m, v, max_v, gradient, step, lr, beta1, beta2, decay, eps, amsgrad=False, maximize=False):
564
- return _convert_stub(pyboost_adamw(self, [var, m, v, max_v, gradient, step, lr, beta1, beta2, decay, eps, amsgrad, maximize]))
565
-
566
- adamw_op=AdamW()
567
-
568
-
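For orientation, a minimal call sketch for the AdamW primitive whose signature is listed above. The import path, shapes and hyper-parameter values are illustrative assumptions, not taken from this diff:

.. code-block:: python

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, Parameter
    # Assumed import path for the generated primitive; adjust to wherever AdamW is actually exposed.
    from mindspore.ops.auto_generate import AdamW

    adamw = AdamW()
    var = Parameter(Tensor(np.ones((2, 2)), ms.float32), name="var")
    m = Parameter(Tensor(np.zeros((2, 2)), ms.float32), name="m")
    v = Parameter(Tensor(np.zeros((2, 2)), ms.float32), name="v")
    max_v = Parameter(Tensor(np.zeros((2, 2)), ms.float32), name="max_v")
    grad = Tensor(np.ones((2, 2)), ms.float32)
    step = Tensor(1, ms.int64)
    # Positional order follows the signature shown above: var, m, v, max_v, gradient, step,
    # lr, beta1, beta2, decay, eps, amsgrad, maximize.
    out_var, out_m, out_v = adamw(var, m, v, max_v, grad, step,
                                  1e-3, 0.9, 0.999, 1e-2, 1e-8, False, False)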
569
527
  class AddExt(Primitive):
570
528
  r"""
571
529
  .. code-block::
@@ -619,7 +577,8 @@ class Add(Primitive):
619
577
  pass
620
578
 
621
579
  def __call__(self, input, other):
622
- return _convert_stub(pyboost_add(self, [input, other]))
580
+ return super().__call__(input, other)
581
+
623
582
 
624
583
  add_op=Add()
625
584
 
@@ -728,31 +687,6 @@ class Addcmul(Primitive):
728
687
  addcmul_op=Addcmul()
729
688
 
730
689
 
731
- class Addmm(Primitive):
732
- r"""
733
- .. code-block::
734
-
735
- prim = ops.Addmm()
736
- out = prim(input, mat1, mat2, beta, alpha)
737
-
738
- is equivalent to
739
-
740
- .. code-block::
741
-
742
- ops.addmm(input, mat1, mat2, beta, alpha)
743
-
744
- Refer to :func:`mindspore.ops.addmm` for more details.
745
- """
746
- @prim_arg_register
747
- def __init__(self):
748
- pass
749
-
750
- def __call__(self, input, mat1, mat2, beta, alpha):
751
- return _convert_stub(pyboost_addmm(self, [input, mat1, mat2, beta, alpha]))
752
-
753
- addmm_op=Addmm()
754
-
755
-
756
690
  class AddN(Primitive):
757
691
  r"""
758
692
  .. code-block::
@@ -1108,56 +1042,16 @@ class ApplyRotaryPosEmb(Primitive):
1108
1042
 
1109
1043
  class Arange(Primitive):
1110
1044
  r"""
1111
- Creates a sequence of numbers that begins at `start` and extends by increments of
1112
- `step` up to but not including `end`.
1113
-
1114
- Inputs:
1115
- start (number): The first number in the sequence.
1116
- Must have type: int32, int64, float32, or float64.
1117
- end (number): Upper end of the sequence, exclusive.
1118
- Must have type: int32, int64, float32, or float64.
1119
- step (number): Number that increments `start`.
1120
- Must have type: int32, int64, float32, or float64.
1121
- dtype (mindspore.dtype, optional): Specified dtype of the result tensor. Default: ``None`` .
1122
- Supported values are: int32, int64, float32, float64, and bfloat16.
1123
-
1124
- Outputs:
1125
- A 1-D Tensor with the required dtype. When dtype is ``None``, then:
1126
- If `start`, `end` and `step` are all integers, the Tensor dtype is int64.
1127
- If at least one of `start`, `end` and `step` is a floating-point number, the Tensor dtype is float32.
1128
-
1129
- Raises:
1130
- TypeError: If the datatype of `start`, `end` or `step` is not supported.
1131
- ValueError: If `step` = 0.
1132
- ValueError: If `start` >= `end` when `step` > 0.
1133
- ValueError: If `start` <= `end` when `step` < 0.
1134
-
1135
- Supported Platforms:
1136
- `Ascend`
1137
-
1138
- Examples:
1139
- >>> from mindspore import ops
1140
- >>> start = 0
1141
- >>> end = 10
1142
- >>> step = 4
1143
- >>> net = ops.Arange()
1144
- >>> output = net(start, end, step)
1145
- >>> print(output)
1146
- [0 4 8]
1045
+
1147
1046
  """
1148
- __mindspore_signature__ = (
1149
- sig.make_sig('start', dtype=sig.sig_dtype.T),
1150
- sig.make_sig('end', dtype=sig.sig_dtype.T),
1151
- sig.make_sig('step', dtype=sig.sig_dtype.T),
1152
- sig.make_sig('dtype', dtype=sig.sig_dtype.T1, default=None),
1153
- )
1047
+ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T, sig.sig_dtype.T)
1154
1048
 
1155
1049
  @prim_arg_register
1156
1050
  def __init__(self):
1157
1051
  pass
1158
1052
 
1159
- def __call__(self, start, end, step, dtype=None):
1160
- return _convert_stub(pyboost_arange(self, [start, end, step, dtype if dtype is None else dtype_to_type_id('Arange', 'dtype', dtype)]))
1053
+ def __call__(self, start, end, step):
1054
+ return _convert_stub(pyboost_arange(self, [start, end, step]))
1161
1055
 
1162
1056
  arange_op=Arange()
1163
1057
 
@@ -1173,9 +1067,9 @@ class ArgMaxExt(Primitive):
1173
1067
 
1174
1068
  .. code-block::
1175
1069
 
1176
- ops.argmax_ext(input, dim, keepdim)
1070
+ ops.argmax(input, dim, keepdim)
1177
1071
 
1178
- Refer to :func:`mindspore.ops.argmax_ext` for more details.
1072
+ Refer to :func:`mindspore.ops.argmax` for more details.
1179
1073
  """
1180
1074
  __mindspore_signature__ = (
1181
1075
  sig.make_sig('input'),
@@ -1543,33 +1437,6 @@ class Assign(Primitive):
1543
1437
  assign_op=Assign()
1544
1438
 
1545
1439
 
1546
- class Atan2Ext(Primitive):
1547
- r"""
1548
- .. code-block::
1549
-
1550
- prim = ops.Atan2Ext()
1551
- out = prim(input, other)
1552
-
1553
- is equivalent to
1554
-
1555
- .. code-block::
1556
-
1557
- ops.atan2_ext(input, other)
1558
-
1559
- Refer to :func:`mindspore.ops.atan2_ext` for more details.
1560
- """
1561
- __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
1562
-
1563
- @prim_arg_register
1564
- def __init__(self):
1565
- pass
1566
-
1567
- def __call__(self, input, other):
1568
- return _convert_stub(pyboost_atan2_ext(self, [input, other]))
1569
-
1570
- atan2_ext_op=Atan2Ext()
1571
-
1572
-
1573
1440
  class Atan2(Primitive):
1574
1441
  r"""
1575
1442
  .. code-block::
@@ -1929,74 +1796,18 @@ class BatchMatMul(Primitive):
1929
1796
 
1930
1797
  class BatchNormExt(Primitive):
1931
1798
  r"""
1932
- Batch Normalization for input data and updated parameters.
1933
-
1934
- Batch Normalization is widely used in convolutional neural networks. This operation
1935
- applies Batch Normalization over inputs to avoid internal covariate shift as described
1936
- in the paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
1937
- Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
1938
- features using a mini-batch of data and the learned parameters can be described
1939
- in the following formula,
1940
-
1941
- .. math::
1942
-
1943
- y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
1944
-
1945
- where :math:`\gamma` is weight, :math:`\beta` is bias, :math:`\epsilon` is epsilon,
1946
- :math:`mean` is the mean of :math:`x`,
1947
- :math:`variance` is the variance of :math:`x`.
1948
-
1949
- Args:
1950
- input (Tensor): Tensor of shape :math:`(N, C, *)`, where :math:`*` means, any number of additional
1951
- dimensions, with bfloat16, float16 or float32 data type. For Atlas training products, the shape must be
1952
- 2-4 dimensions currently.
1953
- weight (Tensor): Tensor of shape :math:`(C,)`, with bfloat16, float16 or float32 data type.
1954
- bias (Tensor): Tensor of shape :math:`(C,)`, with bfloat16, float16 or float32 data type.
1955
- running_mean (Tensor): Tensor of shape :math:`(C,)`, with bfloat16, float16 or float32 data type.
1956
- running_var (Tensor): Tensor of shape :math:`(C,)`, with bfloat16, float16 or float32 data type.
1957
- training (bool, optional): If `training` is ``True`` , `mean` and `variance` are computed during
1958
- training. If `training` is ``False`` , they're loaded from checkpoint during inference. Default: ``False`` .
1959
- momentum (float, optional): The hyper parameter to compute moving average for running_mean and
1960
- running_var (e.g. :math:`new\_running\_mean = (1 - momentum) * running\_mean + momentum * current\_mean`).
1961
- Default: ``0.1``
1962
- epsilon (float, optional): A small value added for numerical stability. Default: ``1e-5``.
1963
-
1964
- Returns:
1965
- Tensor, the normalized inputs, has the same shape and dtype as `input`.
1966
-
1967
- Raises:
1968
- TypeError: If `training` is not a bool.
1969
- TypeError: If dtype of `epsilon` or `momentum` is not float.
1970
- TypeError: If `input`, `weight`, `bias`, `running_mean` or `running_var` is not a Tensor.
1971
- TypeError: If dtype of `input`, `weight` is not bfloat16, float16 or float32.
1972
-
1973
- Supported Platforms:
1974
- ``Ascend``
1975
-
1976
- Examples:
1977
- >>> import mindspore
1978
- >>> import numpy as np
1979
- >>> from mindspore import Tensor, ops
1980
- >>> input_x = Tensor(np.ones([2, 2]), mindspore.float32)
1981
- >>> weight = Tensor(np.ones([2]), mindspore.float32)
1982
- >>> bias = Tensor(np.ones([2]), mindspore.float32)
1983
- >>> running_mean = Tensor(np.ones([2]), mindspore.float32)
1984
- >>> running_var = Tensor(np.ones([2]), mindspore.float32)
1985
- >>> output = ops.batch_norm_ext(input_x, weight, bias, running_mean, running_var)
1986
- >>> print(output)
1987
- [[1. 1.]
1988
- [1. 1.]]
1989
- """
1990
- __mindspore_signature__ = (
1991
- sig.make_sig('input'),
1992
- sig.make_sig('weight'),
1993
- sig.make_sig('bias'),
1994
- sig.make_sig('running_mean'),
1995
- sig.make_sig('runnning_var'),
1996
- sig.make_sig('training', default=False),
1997
- sig.make_sig('momentum', default=0.1),
1998
- sig.make_sig('epsilon', default=1e-5),
1999
- )
1799
+
1800
+ """
1801
+ __mindspore_signature__ = (
1802
+ sig.make_sig('input'),
1803
+ sig.make_sig('weight'),
1804
+ sig.make_sig('bias'),
1805
+ sig.make_sig('running_mean'),
1806
+ sig.make_sig('runnning_var'),
1807
+ sig.make_sig('training', default=False),
1808
+ sig.make_sig('momentum', default=0.1),
1809
+ sig.make_sig('epsilon', default=1e-5),
1810
+ )
2000
1811
 
2001
1812
  @prim_arg_register
2002
1813
  def __init__(self):
@@ -2176,239 +1987,6 @@ class BiasAdd(Primitive):
2176
1987
  return super().__call__(input_x, bias, self.data_format)
2177
1988
 
2178
1989
 
2179
- class BinaryCrossEntropyGrad(Primitive):
2180
- r"""
2181
-
2182
- """
2183
- __mindspore_signature__ = (
2184
- sig.make_sig('input'),
2185
- sig.make_sig('target'),
2186
- sig.make_sig('grad_output'),
2187
- sig.make_sig('weight', default=None),
2188
- )
2189
-
2190
- @prim_arg_register
2191
- def __init__(self, reduction='mean'):
2192
- self._set_prim_arg_with_handler("reduction", reduction, str_to_enum)
2193
-
2194
- def __call__(self, input, target, grad_output, weight=None):
2195
- return _convert_stub(pyboost_binary_cross_entropy_grad(self, [input, target, grad_output, weight, self.reduction]))
2196
-
2197
- class BinaryCrossEntropy(Primitive):
2198
- r"""
2199
- Computes the binary cross entropy between the logits and the labels.
2200
-
2201
- Sets logits as :math:`x`, labels as :math:`y`, output as :math:`\ell(x, y)`.
2202
- Let,
2203
-
2204
- .. math::
2205
- L = \{l_1,\dots,l_N\}^\top, \quad
2206
- l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right]
2207
-
2208
- In which, :math:`L` indicates the loss of all batch_sizes, :math:`l` indicates the loss of one batch_size,
2209
- and n indicates one batch_size in the 1-N range, :math:`w_n` indicates the
2210
- weight of :math:`n`-th batch of binary cross entropy. Then,
2211
-
2212
- .. math::
2213
- \ell(x, y) = \begin{cases}
2214
- L, & \text{if reduction} = \text{'none';}\\
2215
- \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\
2216
- \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.}
2217
- \end{cases}
2218
-
2219
- .. warning::
2220
- - The value of :math:`x` must range from 0 to 1.
2221
-
2222
- Args:
2223
- reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
2224
- ``'sum'`` . Default: ``'mean'`` .
2225
-
2226
- - ``'none'``: no reduction will be applied.
2227
- - ``'mean'``: compute and return the weighted mean of elements in the output.
2228
- - ``'sum'``: the output elements will be summed.
2229
-
2230
- Inputs:
2231
- - **logits** (Tensor) - The predictive value whose data type must be float16 or float32,
2232
- The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
2233
- - **labels** (Tensor) - The target value which has the same shape and data type as `logits`. And the data type is float16 or float32.
2234
- - **weight** (Tensor, optional) - A rescaling weight applied to the loss of each batch element.
2235
- And it must have the same shape and data type as `logits`. Default: ``None`` .
2236
-
2237
- Outputs:
2238
- Tensor or Scalar. Returns Tensor that has the same dtype and shape as `logits` if `reduction` is 'none'.
2239
- Otherwise, returns a scalar Tensor.
2240
-
2241
- Raises:
2242
- TypeError: If dtype of `logits`, `labels` or `weight` (if given) is neither float16 nor float32.
2243
- ValueError: If `reduction` is not one of ``'none'``, ``'mean'`` or ``'sum'``.
2244
- ValueError: If shape of `labels` is not the same as `logits` or `weight` (if given).
2245
- TypeError: If `logits`, `labels` or `weight` is not a Tensor.
2246
-
2247
- Supported Platforms:
2248
- ``Ascend`` ``GPU`` ``CPU``
2249
-
2250
- Examples:
2251
- >>> import mindspore
2252
- >>> import numpy as np
2253
- >>> from mindspore import Tensor, nn, ops
2254
- >>> class Net(nn.Cell):
2255
- ... def __init__(self):
2256
- ... super(Net, self).__init__()
2257
- ... self.binary_cross_entropy = ops.BinaryCrossEntropy()
2258
- ... def construct(self, logits, labels, weight):
2259
- ... result = self.binary_cross_entropy(logits, labels, weight)
2260
- ... return result
2261
- ...
2262
- >>> net = Net()
2263
- >>> logits = Tensor(np.array([0.2, 0.7, 0.1]), mindspore.float32)
2264
- >>> labels = Tensor(np.array([0., 1., 0.]), mindspore.float32)
2265
- >>> weight = Tensor(np.array([1, 2, 2]), mindspore.float32)
2266
- >>> output = net(logits, labels, weight)
2267
- >>> print(output)
2268
- 0.38240486
2269
- """
2270
- __mindspore_signature__ = (
2271
- sig.make_sig('input'),
2272
- sig.make_sig('target'),
2273
- sig.make_sig('weight', default=None),
2274
- )
2275
-
2276
- @prim_arg_register
2277
- def __init__(self, reduction='mean'):
2278
- self._set_prim_arg_with_handler("reduction", reduction, str_to_enum)
2279
-
2280
- def __call__(self, input, target, weight=None):
2281
- return _convert_stub(pyboost_binary_cross_entropy(self, [input, target, weight, self.reduction]))
2282
-
2283
- class BinaryCrossEntropyWithLogitsBackward(Primitive):
2284
- r"""
2285
-
2286
- """
2287
- __mindspore_signature__ = (
2288
- sig.make_sig('grad_output'),
2289
- sig.make_sig('input'),
2290
- sig.make_sig('target'),
2291
- sig.make_sig('weight', default=None),
2292
- sig.make_sig('posWeight', default=None),
2293
- sig.make_sig('reduction', default='mean'),
2294
- )
2295
-
2296
- @prim_arg_register
2297
- def __init__(self):
2298
- pass
2299
-
2300
- def __call__(self, grad_output, input, target, weight=None, posWeight=None, reduction='mean'):
2301
- return _convert_stub(pyboost_binary_cross_entropy_with_logits_backward(self, [grad_output, input, target, weight, posWeight, str_to_enum('BinaryCrossEntropyWithLogitsBackward', 'reduction', reduction)]))
2302
-
2303
- binary_cross_entropy_with_logits_backward_op=BinaryCrossEntropyWithLogitsBackward()
2304
-
2305
-
2306
- class BCEWithLogitsLoss(Primitive):
2307
- r"""
2308
- Adds sigmoid activation function to `input` as logits, and uses the given logits to compute binary cross entropy
2309
- between the logits and the target.
2310
-
2311
- Sets input `input` as :math:`X`, input `target` as :math:`Y`, input weight as :math:`W`, output as :math:`L`.
2312
- Then,
2313
-
2314
- .. math::
2315
-
2316
- \begin{array}{ll} \\
2317
- p_{ij} = sigmoid(X_{ij}) = \frac{1}{1 + e^{-X_{ij}}} \\
2318
- L_{ij} = -[Y_{ij}log(p_{ij}) + (1 - Y_{ij})log(1 - p_{ij})]
2319
- \end{array}
2320
-
2321
- :math:`i` indicates the :math:`i^{th}` sample, :math:`j` indicates the category. Then,
2322
-
2323
- .. math::
2324
- \ell(x, y) = \begin{cases}
2325
- L, & \text{if reduction} = \text{'none';}\\
2326
- \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\
2327
- \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.}
2328
- \end{cases}
2329
-
2330
- :math:`\ell` indicates the method of calculating the loss. There are three methods:
2331
- the first method is to provide the loss value directly,
2332
- the second method is to calculate the average value of all losses,
2333
- and the third method is to calculate the sum of all losses.
2334
-
2335
- This operator will multiply the output by the corresponding weight.
2336
- The tensor `weight` assigns different weights to each piece of data in the batch,
2337
- and the tensor `pos_weight` adds corresponding weights to the positive examples of each category.
2338
-
2339
- In addition, it can trade off recall and precision by adding weights to positive examples.
2340
- In the case of multi-label classification the loss can be described as:
2341
-
2342
- .. math::
2343
- \begin{array}{ll} \\
2344
- p_{ij,c} = sigmoid(X_{ij,c}) = \frac{1}{1 + e^{-X_{ij,c}}} \\
2345
- L_{ij,c} = -[P_{c}Y_{ij,c} * log(p_{ij,c}) + (1 - Y_{ij,c})log(1 - p_{ij,c})]
2346
- \end{array}
2347
-
2348
- where c is the class number (c>1 for multi-label binary classification, c=1 for single-label binary classification),
2349
- n is the number of the sample in the batch and :math:`P_c` is the weight of the positive answer for the class c.
2350
- :math:`P_c>1` increases the recall, :math:`P_c<1` increases the precision.
2351
-
2352
- Args:
2353
- reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
2354
- ``'sum'`` . Default: ``'mean'`` .
2355
-
2356
- - ``'none'``: no reduction will be applied.
2357
- - ``'mean'``: compute and return the weighted mean of elements in the output.
2358
- - ``'sum'``: the output elements will be summed.
2359
-
2360
- Inputs:
2361
- - **input** (Tensor) - Input `input`. Data type must be float16, float32 or bfloat16(only Atlas A2 series products are supported).
2362
- Tensor of shape :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
2363
- - **target** (Tensor) - Ground truth label, has the same shape as `input`.
2364
- Data type must be float16, float32 or bfloat16(only Atlas A2 series products are supported).
2365
- - **weight** (Tensor) - A rescaling weight applied to the loss of each batch element. It can be
2366
- broadcast to a tensor with shape of `input`. Data type must be float16, float32 or bfloat16(only Atlas A2 series products are supported).
2367
- - **pos_weight** (Tensor) - A weight of positive examples. Must be a vector with length equal to the
2368
- number of classes. It can be broadcast to a tensor with shape of `input`.
2369
- Data type must be float16, float32 or bfloat16(only Atlas A2 series products are supported).
2370
-
2371
- Outputs:
2372
- Tensor or Scalar, if `reduction` is ``'none'``, it's a tensor with the same shape and type as input `input`.
2373
- Otherwise, the output is a scalar.
2374
-
2375
- Raises:
2376
- TypeError: If any input is not Tensor.
2377
- TypeError: If data type of any input is not float16, float32 or bfloat16.
2378
- TypeError: If data type of `reduction` is not string.
2379
- ValueError: If `weight` or `pos_weight` can not be broadcast to a tensor with shape of `input`.
2380
- ValueError: If `reduction` is not one of ``'none'``, ``'mean'`` or ``'sum'``.
2381
-
2382
- Supported Platforms:
2383
- ``Ascend`` ``GPU`` ``CPU``
2384
-
2385
- Examples:
2386
- >>> import mindspore
2387
- >>> import numpy as np
2388
- >>> from mindspore import Tensor, ops
2389
- >>> input = Tensor(np.array([[-0.8, 1.2, 0.7], [-0.1, -0.4, 0.7]]), mindspore.float32)
2390
- >>> target = Tensor(np.array([[0.3, 0.8, 1.2], [-0.6, 0.1, 2.2]]), mindspore.float32)
2391
- >>> weight = Tensor(np.array([1.0, 1.0, 1.0]), mindspore.float32)
2392
- >>> pos_weight = Tensor(np.array([1.0, 1.0, 1.0]), mindspore.float32)
2393
- >>> loss = ops.BCEWithLogitsLoss()
2394
- >>> output = loss(input, target, weight, pos_weight)
2395
- >>> print(output)
2396
- 0.3463612
2397
- """
2398
- __mindspore_signature__ = (
2399
- sig.make_sig('input'),
2400
- sig.make_sig('target'),
2401
- sig.make_sig('weight', default=None),
2402
- sig.make_sig('posWeight', default=None),
2403
- )
2404
-
2405
- @prim_arg_register
2406
- def __init__(self, reduction='mean'):
2407
- self._set_prim_arg_with_handler("reduction", reduction, str_to_enum)
2408
-
2409
- def __call__(self, input, target, weight=None, posWeight=None):
2410
- return _convert_stub(pyboost_binary_cross_entropy_with_logits(self, [input, target, weight, posWeight, self.reduction]))
2411
-
2412
1990
  class BatchMatMulExt(Primitive):
2413
1991
  r"""
2414
1992
  .. code-block::
@@ -2756,62 +2334,6 @@ class ClampTensor(Primitive):
2756
2334
  clamp_tensor_op=ClampTensor()
2757
2335
 
2758
2336
 
2759
- class Col2ImExt(Primitive):
2760
- r"""
2761
- .. code-block::
2762
-
2763
- prim = ops.Col2ImExt()
2764
- out = prim(input, output_size, kernel_size, dilation, padding, stride)
2765
-
2766
- is equivalent to
2767
-
2768
- .. code-block::
2769
-
2770
- ops.fold_ext(input, output_size, kernel_size, dilation, padding, stride)
2771
-
2772
- Refer to :func:`mindspore.ops.fold_ext` for more details.
2773
- """
2774
- __mindspore_signature__ = (
2775
- sig.make_sig('input'),
2776
- sig.make_sig('output_size'),
2777
- sig.make_sig('kernel_size'),
2778
- sig.make_sig('dilation', default=1),
2779
- sig.make_sig('padding', default=0),
2780
- sig.make_sig('stride', default=1),
2781
- )
2782
-
2783
- @prim_arg_register
2784
- def __init__(self):
2785
- pass
2786
-
2787
- def __call__(self, input, output_size, kernel_size, dilation=1, padding=0, stride=1):
2788
- return _convert_stub(pyboost_col2im_ext(self, [input, to_pair('Col2ImExt', 'output_size', output_size), to_pair('Col2ImExt', 'kernel_size', kernel_size), to_pair('Col2ImExt', 'dilation', dilation), to_pair('Col2ImExt', 'padding', padding), to_pair('Col2ImExt', 'stride', stride)]))
2789
-
2790
- col2im_ext_op=Col2ImExt()
2791
-
2792
-
2793
- class Col2ImGrad(Primitive):
2794
- r"""
2795
-
2796
- """
2797
- __mindspore_signature__ = (
2798
- sig.make_sig('input'),
2799
- sig.make_sig('kernel_size'),
2800
- sig.make_sig('dilation', default=1),
2801
- sig.make_sig('padding', default=0),
2802
- sig.make_sig('stride', default=1),
2803
- )
2804
-
2805
- @prim_arg_register
2806
- def __init__(self):
2807
- pass
2808
-
2809
- def __call__(self, input, kernel_size, dilation=1, padding=0, stride=1):
2810
- return _convert_stub(pyboost_col2im_grad(self, [input, to_pair('Col2ImGrad', 'kernel_size', kernel_size), to_pair('Col2ImGrad', 'dilation', dilation), to_pair('Col2ImGrad', 'padding', padding), to_pair('Col2ImGrad', 'stride', stride)]))
2811
-
2812
- col2im_grad_op=Col2ImGrad()
2813
-
2814
-
2815
2337
  class Complex(Primitive):
2816
2338
  r"""
2817
2339
  Returns a complex Tensor from the real part and the imag part.
@@ -3303,37 +2825,6 @@ class Cummin(Primitive):
3303
2825
  return super().__call__(input, self.axis)
3304
2826
 
3305
2827
 
3306
- class CumsumExt(Primitive):
3307
- r"""
3308
- .. code-block::
3309
-
3310
- prim = ops.CumsumExt()
3311
- out = prim(input, dim, dtype)
3312
-
3313
- is equivalent to
3314
-
3315
- .. code-block::
3316
-
3317
- ops.cumsum_ext(input, dim, dtype)
3318
-
3319
- Refer to :func:`mindspore.ops.cumsum_ext` for more details.
3320
- """
3321
- __mindspore_signature__ = (
3322
- sig.make_sig('input'),
3323
- sig.make_sig('dim'),
3324
- sig.make_sig('dtype', default=None),
3325
- )
3326
-
3327
- @prim_arg_register
3328
- def __init__(self):
3329
- pass
3330
-
3331
- def __call__(self, input, dim, dtype=None):
3332
- return _convert_stub(pyboost_cumsum_ext(self, [input, dim, dtype if dtype is None else dtype_to_type_id('CumsumExt', 'dtype', dtype)]))
3333
-
3334
- cumsum_ext_op=CumsumExt()
3335
-
3336
-
3337
2828
  class DCT(Primitive):
3338
2829
  r"""
3339
2830
 
@@ -3593,11 +3084,18 @@ class DropoutExt(Primitive):
3593
3084
  r"""
3594
3085
 
3595
3086
  """
3087
+ __mindspore_signature__ = (
3088
+ sig.make_sig('input'),
3089
+ sig.make_sig('p', default=0.5),
3090
+ sig.make_sig('seed', default=0),
3091
+ sig.make_sig('offset', default=0),
3092
+ )
3093
+
3596
3094
  @prim_arg_register
3597
3095
  def __init__(self):
3598
3096
  self.add_prim_attr("side_effect_hidden", True)
3599
3097
 
3600
- def __call__(self, input, p, seed, offset):
3098
+ def __call__(self, input, p=0.5, seed=0, offset=0):
3601
3099
  return _convert_stub(pyboost_dropout_ext(self, [input, p, seed, offset]))
3602
3100
 
3603
3101
  dropout_ext_op=DropoutExt()
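Since the hunk above gives `p`, `seed` and `offset` default values, the primitive can now be called with the input alone. A minimal sketch (the import path and input shape are assumptions, not taken from this diff):

.. code-block:: python

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor
    from mindspore.ops.auto_generate import DropoutExt  # assumed import path

    dropout = DropoutExt()
    x = Tensor(np.ones((2, 4)), ms.float32)
    out_default = dropout(x)            # uses the new defaults p=0.5, seed=0, offset=0
    out_custom = dropout(x, 0.2, 1, 0)  # explicit p, seed and offset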
@@ -3625,8 +3123,8 @@ class DropoutGradExt(Primitive):
3625
3123
  def __init__(self):
3626
3124
  pass
3627
3125
 
3628
- def __call__(self, input, mask, p):
3629
- return _convert_stub(pyboost_dropout_grad_ext(self, [input, mask, p]))
3126
+ def __call__(self, x, mask, p):
3127
+ return _convert_stub(pyboost_dropout_grad_ext(self, [x, mask, p]))
3630
3128
 
3631
3129
  dropout_grad_ext_op=DropoutGradExt()
3632
3130
 
@@ -3736,48 +3234,6 @@ class Eig(Primitive):
3736
3234
  return super().__call__(x, self.compute_v)
3737
3235
 
3738
3236
 
3739
- class EluExt(Primitive):
3740
- r"""
3741
- .. code-block::
3742
-
3743
- prim = ops.EluExt(alpha)
3744
- out = prim(input)
3745
-
3746
- is equivalent to
3747
-
3748
- .. code-block::
3749
-
3750
- ops.elu_ext(input, alpha)
3751
-
3752
- Refer to :func:`mindspore.ops.elu_ext` for more details.
3753
- """
3754
- @prim_arg_register
3755
- def __init__(self, alpha=1.0):
3756
- self._set_prim_arg("alpha", alpha)
3757
-
3758
- def __call__(self, input):
3759
- return _convert_stub(pyboost_elu_ext(self, [input, self.alpha]))
3760
-
3761
- class EluGradExt(Primitive):
3762
- r"""
3763
- Gradients of EluExt operation.
3764
- """
3765
- __mindspore_signature__ = (
3766
- sig.make_sig('dout'),
3767
- sig.make_sig('x'),
3768
- sig.make_sig('alpha', default=1.0),
3769
- )
3770
-
3771
- @prim_arg_register
3772
- def __init__(self):
3773
- pass
3774
-
3775
- def __call__(self, dout, x, alpha=1.0):
3776
- return _convert_stub(pyboost_elu_grad_ext(self, [dout, x, alpha]))
3777
-
3778
- elu_grad_ext_op=EluGradExt()
3779
-
3780
-
3781
3237
  class EluGrad(Primitive):
3782
3238
  r"""
3783
3239
  Gradients of Elu operation.
@@ -4152,7 +3608,8 @@ class Eye(Primitive):
4152
3608
  pass
4153
3609
 
4154
3610
  def __call__(self, n, m, dtype):
4155
- return _convert_stub(pyboost_eye(self, [n, m, dtype_to_type_id('Eye', 'dtype', dtype)]))
3611
+ return super().__call__(n, m, dtype_to_type_id('Eye', 'dtype', dtype))
3612
+
4156
3613
 
4157
3614
  eye_op=Eye()
4158
3615
 
@@ -4932,7 +4389,8 @@ class Floor(Primitive):
4932
4389
  pass
4933
4390
 
4934
4391
  def __call__(self, input):
4935
- return _convert_stub(pyboost_floor(self, [input]))
4392
+ return super().__call__(input)
4393
+
4936
4394
 
4937
4395
  floor_op=Floor()
4938
4396
 
@@ -5115,20 +4573,6 @@ class GeLU(Primitive):
5115
4573
  gelu_op=GeLU()
5116
4574
 
5117
4575
 
5118
- class Generator(Primitive):
5119
- r"""
5120
-
5121
- """
5122
- @prim_arg_register
5123
- def __init__(self):
5124
- self.add_prim_attr("side_effect_mem", True)
5125
-
5126
- def __call__(self, cmd, inputs):
5127
- return _convert_stub(pyboost_generator(self, [cmd, inputs]))
5128
-
5129
- generator_op=Generator()
5130
-
5131
-
5132
4576
  class Geqrf(Primitive):
5133
4577
  r"""
5134
4578
  .. code-block::
@@ -5537,11 +4981,11 @@ class GroupNorm(Primitive):
5537
4981
  [0. 0. 0. 0.]]]]
5538
4982
  """
5539
4983
  __mindspore_signature__ = (
5540
- sig.make_sig('input'),
5541
- sig.make_sig('num_groups'),
5542
- sig.make_sig('weight', default=None),
5543
- sig.make_sig('bias', default=None),
5544
- sig.make_sig('eps', default=1e-5),
4984
+ sig.make_sig('input', dtype=sig.sig_dtype.T),
4985
+ sig.make_sig('num_groups', dtype=sig.sig_dtype.T1),
4986
+ sig.make_sig('weight', dtype=sig.sig_dtype.T, default=None),
4987
+ sig.make_sig('bias', dtype=sig.sig_dtype.T, default=None),
4988
+ sig.make_sig('eps', dtype=sig.sig_dtype.T2, default=1e-5),
5545
4989
  )
5546
4990
 
5547
4991
  @prim_arg_register
@@ -5881,86 +5325,6 @@ class IFFTShift(Primitive):
5881
5325
  ifftshift_op=IFFTShift()
5882
5326
 
5883
5327
 
5884
- class Im2ColExt(Primitive):
5885
- r"""
5886
- .. code-block::
5887
-
5888
- prim = ops.Im2ColExt()
5889
- out = prim(input, kernel_size, dilation, padding, stride)
5890
-
5891
- is equivalent to
5892
-
5893
- .. code-block::
5894
-
5895
- ops.unfold_ext(input, kernel_size, dilation, padding, stride)
5896
-
5897
- Refer to :func:`mindspore.ops.unfold_ext` for more details.
5898
- """
5899
- __mindspore_signature__ = (
5900
- sig.make_sig('input'),
5901
- sig.make_sig('kernel_size'),
5902
- sig.make_sig('dilation', default=1),
5903
- sig.make_sig('padding', default=0),
5904
- sig.make_sig('stride', default=1),
5905
- )
5906
-
5907
- @prim_arg_register
5908
- def __init__(self):
5909
- pass
5910
-
5911
- def __call__(self, input, kernel_size, dilation=1, padding=0, stride=1):
5912
- return _convert_stub(pyboost_im2col_ext(self, [input, to_pair('Im2ColExt', 'kernel_size', kernel_size), to_pair('Im2ColExt', 'dilation', dilation), to_pair('Im2ColExt', 'padding', padding), to_pair('Im2ColExt', 'stride', stride)]))
5913
-
5914
- im2col_ext_op=Im2ColExt()
5915
-
5916
-
5917
- class IndexAddExt(Primitive):
5918
- r"""
5919
-
5920
- """
5921
- __mindspore_signature__ = (
5922
- sig.make_sig('input', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
5923
- sig.make_sig('index', dtype=sig.sig_dtype.T1),
5924
- sig.make_sig('source', dtype=sig.sig_dtype.T),
5925
- sig.make_sig('axis', dtype=sig.sig_dtype.T2),
5926
- sig.make_sig('alpha', dtype=sig.sig_dtype.T3, default=1),
5927
- )
5928
-
5929
- @prim_arg_register
5930
- def __init__(self):
5931
- pass
5932
-
5933
- def __call__(self, input, index, source, axis, alpha=1):
5934
- return _convert_stub(pyboost_index_add_ext(self, [input, index, source, axis, alpha]))
5935
-
5936
- index_add_ext_op=IndexAddExt()
5937
-
5938
-
5939
- class IndexSelect(Primitive):
5940
- r"""
5941
- .. code-block::
5942
-
5943
- prim = ops.IndexSelect()
5944
- out = prim(input, dim, index)
5945
-
5946
- is equivalent to
5947
-
5948
- .. code-block::
5949
-
5950
- ops.index_select_ext(input, dim, index)
5951
-
5952
- Refer to :func:`mindspore.ops.index_select_ext` for more details.
5953
- """
5954
- @prim_arg_register
5955
- def __init__(self):
5956
- pass
5957
-
5958
- def __call__(self, input, dim, index):
5959
- return _convert_stub(pyboost_index_select(self, [input, dim, index]))
5960
-
5961
- index_select_op=IndexSelect()
5962
-
5963
-
5964
5328
  class IRFFTGrad(Primitive):
5965
5329
  r"""
5966
5330
 
@@ -6017,52 +5381,6 @@ class IRFFT(Primitive):
6017
5381
  irfft_op=IRFFT()
6018
5382
 
6019
5383
 
6020
- class IsClose(Primitive):
6021
- r"""
6022
- Returns a tensor of Boolean values indicating whether each element of `input`
6023
- is "close" to the corresponding element of `other`. Closeness is defined as:
6024
-
6025
- .. math::
6026
- |input-other| <= atol + rtol * |other|
6027
-
6028
- Refer to :func:`mindspore.ops.isclose` for more details.
6029
-
6030
- Args:
6031
- rtol(float, optional): Relative tolerance. Default: ``1e-05`` .
6032
- atol(float, optional): Absolute tolerance. Default: ``1e-08`` .
6033
- equal_nan(bool, optional): If ``True`` , then two NaNs will be considered equal. Default: ``True`` .
6034
-
6035
- Inputs:
6036
- - **input** (Tensor) - First tensor to compare.
6037
- - **other** (Tensor) - Second tensor to compare.
6038
-
6039
- Outputs:
6040
- Tensor, with the same shape as `input` and `other` after broadcasting, its dtype is bool.
6041
-
6042
- Supported Platforms:
6043
- ``Ascend`` ``GPU`` ``CPU``
6044
-
6045
- Examples:
6046
- >>> import mindspore
6047
- >>> import numpy as np
6048
- >>> from mindspore import Tensor
6049
- >>> from mindspore.ops import IsClose
6050
- >>> input = Tensor(np.array([1.3, 2.1, 3.2, 4.1, 5.1]), mindspore.float16)
6051
- >>> other = Tensor(np.array([1.3, 3.3, 2.3, 3.1, 5.1]), mindspore.float16)
6052
- >>> isclose = IsClose()
6053
- >>> output = isclose(input, other)
6054
- >>> print(output)
6055
- [ True False False False True]
6056
- """
6057
- @prim_arg_register
6058
- def __init__(self, rtol=1e-05, atol=1e-08, equal_nan=True):
6059
- self._set_prim_arg("rtol", type_it('IsClose', 'rtol', rtol, (OpDtype.DT_BOOL, OpDtype.DT_INT), OpDtype.DT_FLOAT))
6060
- self._set_prim_arg("atol", type_it('IsClose', 'atol', atol, (OpDtype.DT_BOOL, OpDtype.DT_INT), OpDtype.DT_FLOAT))
6061
- self._set_prim_arg("equal_nan", equal_nan)
6062
-
6063
- def __call__(self, input, other):
6064
- return _convert_stub(pyboost_isclose(self, [input, other, self.rtol, self.atol, self.equal_nan]))
6065
-
6066
5384
  class IsFinite(Primitive):
6067
5385
  r"""
6068
5386
  .. code-block::
@@ -7159,31 +6477,6 @@ class MatrixExp(Primitive):
7159
6477
  matrix_exp_op=MatrixExp()
7160
6478
 
7161
6479
 
7162
- class MatrixInverseExt(Primitive):
7163
- r"""
7164
- .. code-block::
7165
-
7166
- prim = ops.MatrixInverseExt()
7167
- out = prim(input)
7168
-
7169
- is equivalent to
7170
-
7171
- .. code-block::
7172
-
7173
- ops.matrix_inverse_ext(input)
7174
-
7175
- Refer to :func:`mindspore.ops.matrix_inverse_ext` for more details.
7176
- """
7177
- @prim_arg_register
7178
- def __init__(self):
7179
- pass
7180
-
7181
- def __call__(self, input):
7182
- return _convert_stub(pyboost_matrix_inverse_ext(self, [input]))
7183
-
7184
- matrix_inverse_ext_op=MatrixInverseExt()
7185
-
7186
-
7187
6480
  class Max(Primitive):
7188
6481
  r"""
7189
6482
  .. code-block::
@@ -7543,6 +6836,42 @@ class Minimum(Primitive):
7543
6836
  minimum_op=Minimum()
7544
6837
 
7545
6838
 
6839
+ class MoeFinalizeRouting(Primitive):
6840
+ r"""
6841
+ .. code-block::
6842
+
6843
+ prim = ops.MoeFinalizeRouting()
6844
+ out = prim(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
6845
+
6846
+ is equivalent to
6847
+
6848
+ .. code-block::
6849
+
6850
+ ops.moe_finalize_routing(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
6851
+
6852
+ Refer to :func:`mindspore.ops.moe_finalize_routing` for more details.
6853
+ """
6854
+ __mindspore_signature__ = (
6855
+ sig.make_sig('expanded_x'),
6856
+ sig.make_sig('x1'),
6857
+ sig.make_sig('x2', default=None),
6858
+ sig.make_sig('bias', default=None),
6859
+ sig.make_sig('scales', default=None),
6860
+ sig.make_sig('expanded_row_idx', default=None),
6861
+ sig.make_sig('expanded_expert_idx', default=None),
6862
+ )
6863
+
6864
+ @prim_arg_register
6865
+ def __init__(self):
6866
+ pass
6867
+
6868
+ def __call__(self, expanded_x, x1, x2=None, bias=None, scales=None, expanded_row_idx=None, expanded_expert_idx=None):
6869
+ return super().__call__(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
6870
+
6871
+
6872
+ moe_finalize_routing_op=MoeFinalizeRouting()
6873
+
6874
+
7546
6875
  class Mul(Primitive):
7547
6876
  r"""
7548
6877
  .. code-block::
@@ -7798,54 +7127,28 @@ class NLLLoss(Primitive):
7798
7127
  return super().__call__(logits, labels, weight, self.reduction, self.ignore_index)
7799
7128
 
7800
7129
 
7801
- class NonZeroExt(Primitive):
7130
+ class NonZero(Primitive):
7802
7131
  r"""
7132
+ .. code-block::
7133
+
7134
+ prim = ops.NonZero()
7135
+ out = prim(input)
7136
+
7137
+ is equivalent to
7138
+
7139
+ .. code-block::
7803
7140
 
7141
+ ops.nonzero(input)
7142
+
7143
+ Refer to :func:`mindspore.ops.nonzero` for more details.
7804
7144
  """
7805
7145
  @prim_arg_register
7806
7146
  def __init__(self):
7807
7147
  pass
7808
7148
 
7809
7149
  def __call__(self, input):
7810
- return _convert_stub(pyboost_non_zero_ext(self, [input]))
7811
-
7812
- non_zero_ext_op=NonZeroExt()
7813
-
7814
-
7815
- class NonZero(Primitive):
7816
- r"""
7817
- Return a Tensor of the positions of all non-zero values.
7818
-
7819
- Inputs:
7820
- - **input** (Tensor) - The input Tensor, its rank should be greater than or equal to 1.
7821
-
7822
- Outputs:
7823
- Tensor, a 2-D Tensor whose data type is int64, containing the positions of all non-zero values of the input.
7824
-
7825
- Raises:
7826
- TypeError: If `input` is not Tensor.
7827
- ValueError: If dim of `input` equals to 0.
7828
-
7829
- Supported Platforms:
7830
- ``Ascend`` ``GPU`` ``CPU``
7831
-
7832
- Examples:
7833
- >>> import mindspore
7834
- >>> import numpy as np
7835
- >>> from mindspore import Tensor, ops
7836
- >>> input = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
7837
- >>> output = ops.NonZero()(input)
7838
- >>> print(output)
7839
- [[0]
7840
- [2]
7841
- [4]]
7842
- """
7843
- @prim_arg_register
7844
- def __init__(self):
7845
- pass
7150
+ return super().__call__(input)
7846
7151
 
7847
- def __call__(self, input):
7848
- return _convert_stub(pyboost_non_zero(self, [input]))
7849
7152
 
7850
7153
  non_zero_op=NonZero()
7851
7154
 
@@ -7872,60 +7175,32 @@ class Norm(Primitive):
7872
7175
  norm_op=Norm()
7873
7176
 
7874
7177
 
7875
- class NormalFloatFloat(Primitive):
7876
- r"""
7877
-
7878
- """
7879
- @prim_arg_register
7880
- def __init__(self):
7881
- pass
7882
-
7883
- def __call__(self, mean, std, size, seed, offset):
7884
- return _convert_stub(pyboost_normal_float_float(self, [mean, std, size, seed, offset]))
7885
-
7886
- normal_float_float_op=NormalFloatFloat()
7887
-
7888
-
7889
- class NormalFloatTensor(Primitive):
7890
- r"""
7891
-
7892
- """
7893
- @prim_arg_register
7894
- def __init__(self):
7895
- pass
7896
-
7897
- def __call__(self, mean, std, seed, offset):
7898
- return _convert_stub(pyboost_normal_float_tensor(self, [mean, std, seed, offset]))
7899
-
7900
- normal_float_tensor_op=NormalFloatTensor()
7901
-
7902
-
7903
- class NormalTensorFloat(Primitive):
7178
+ class NormalExt(Primitive):
7904
7179
  r"""
7905
-
7906
- """
7907
- @prim_arg_register
7908
- def __init__(self):
7909
- pass
7180
+ Generates random numbers according to the Normal random number distribution.
7910
7181
 
7911
- def __call__(self, mean, std, seed, offset):
7912
- return _convert_stub(pyboost_normal_tensor_float(self, [mean, std, seed, offset]))
7182
+ Inputs:
7183
+ - **mean** (Union[float, Tensor]) - The mean of each output element's normal distribution.
7184
+ - **std** (Union[float, Tensor]) - The tensor of per-element standard deviations.
7185
+ - **generator** (Generator, optional) - MindSpore random number generator.
7913
7186
 
7914
- normal_tensor_float_op=NormalTensorFloat()
7187
+ Outputs:
7188
+ - **output** (Tensor) - A Tensor with the same type and shape as `mean`.
7915
7189
 
7190
+ Raises:
7191
+ TypeError: If `mean` or `std` is not Union[float, Tensor].
7916
7192
 
7917
- class NormalTensorTensor(Primitive):
7918
- r"""
7919
-
7193
+ Supported Platforms:
7194
+ ``Ascend``
7920
7195
  """
7921
7196
  @prim_arg_register
7922
7197
  def __init__(self):
7923
7198
  pass
7924
7199
 
7925
7200
  def __call__(self, mean, std, seed, offset):
7926
- return _convert_stub(pyboost_normal_tensor_tensor(self, [mean, std, seed, offset]))
7201
+ return _convert_stub(pyboost_normal_ext(self, [mean, std, seed, offset]))
7927
7202
 
7928
- normal_tensor_tensor_op=NormalTensorTensor()
7203
+ normal_ext_op=NormalExt()
7929
7204
 
7930
7205
 
7931
7206
  class NotEqual(Primitive):
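The three removed Normal* variants are folded into the single NormalExt primitive added above. A minimal call sketch based on the signature shown (the import path and the literal seed/offset values are assumptions, not taken from this diff):

.. code-block:: python

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor
    from mindspore.ops.auto_generate import NormalExt  # assumed import path

    normal = NormalExt()
    mean = Tensor(np.zeros((2, 3)), ms.float32)
    std = Tensor(np.ones((2, 3)), ms.float32)
    # seed/offset are assumed to be the random-state values normally supplied by a Generator.
    out = normal(mean, std, 0, 0)  # same type and shape as `mean`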
@@ -8155,9 +7430,9 @@ class OneHotExt(Primitive):
8155
7430
  >>> num_classes = 3
8156
7431
  >>> output = ops.extend.one_hot(tensor, num_classes)
8157
7432
  >>> print(output)
8158
- [[1 0 0]
8159
- [0 1 0]
8160
- [0 0 1]]
7433
+ [[1. 0. 0.]
7434
+ [0. 1. 0.]
7435
+ [0. 0. 1.]]
8161
7436
  """
8162
7437
  @prim_arg_register
8163
7438
  def __init__(self, axis=-1):
@@ -8426,38 +7701,98 @@ prelu_op=PReLU()
8426
7701
 
8427
7702
  class ProdExt(Primitive):
8428
7703
  r"""
8429
- .. code-block::
8430
-
8431
- prim = ops.ProdExt()
8432
- out = prim(input, axis, keep_dims, dtype)
8433
-
8434
- is equivalent to
8435
-
8436
- .. code-block::
8437
-
8438
- ops.prod_ext(input, axis, keep_dims, dtype)
8439
-
8440
- Refer to :func:`mindspore.ops.prod_ext` for more details.
8441
- """
8442
- __mindspore_signature__ = (
8443
- sig.make_sig('input'),
8444
- sig.make_sig('axis', default=None),
8445
- sig.make_sig('keep_dims', default=False),
8446
- sig.make_sig('dtype', default=None),
8447
- )
7704
+ Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. It can also
7705
+ reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
7706
+ same by controlling `keep_dims`.
8448
7707
 
8449
- @prim_arg_register
8450
- def __init__(self):
8451
- pass
7708
+ Args:
7709
+ input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
7710
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
7711
+ axis (int): The dimensions to reduce. Default: ``None`` , reduce all dimensions.
7712
+ Only constant value is allowed. Assume the rank of `input` is r, and the value range is [-r,r).
7713
+ keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
7714
+ If ``False`` , don't keep these dimensions. Default: ``False`` .
7715
+ dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
8452
7716
 
8453
- def __call__(self, input, axis=None, keep_dims=False, dtype=None):
8454
- return _convert_stub(pyboost_prod_ext(self, [input, axis, keep_dims, dtype if dtype is None else dtype_to_type_id('ProdExt', 'dtype', dtype)]))
7717
+ Returns:
7718
+ Tensor, has the same data type as input tensor.
8455
7719
 
8456
- prod_ext_op=ProdExt()
7720
+ - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
7721
+ the output is a 0-D tensor representing the product of all elements in the input tensor.
7722
+ - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
7723
+ the shape of output is :math:`(input_0, input_2, ..., input_R)`.
8457
7724
 
7725
+ Raises:
7726
+ TypeError: If `input` is not a Tensor.
7727
+ TypeError: If `axis` is not one of the following: int or None.
7728
+ TypeError: If `keep_dims` is not a bool.
7729
+ ValueError: If `axis` is out of range.
8458
7730
 
8459
- class PromptKVCache(Primitive):
8460
- r"""
7731
+ Supported Platforms:
7732
+ ``Ascend`` ``GPU`` ``CPU``
7733
+
7734
+ Examples:
7735
+ >>> import mindspore
7736
+ >>> import numpy as np
7737
+ >>> from mindspore import Tensor, ops
7738
+ >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
7739
+ >>> output = ops.ProdExt()(x, 1, keep_dims=True)
7740
+ >>> result = output.shape
7741
+ >>> print(result)
7742
+ (3, 1, 5, 6)
7743
+ >>> # case 1: Reduces a dimension by multiplying all elements in the dimension.
7744
+ >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
7745
+ ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
7746
+ ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
7747
+ >>> output = ops.ProdExt()(x)
7748
+ >>> print(output)
7749
+ 2.2833798e+33
7750
+ >>> print(output.shape)
7751
+ ()
7752
+ >>> # case 2: Reduces a dimension along axis 0.
7753
+ >>> output = ops.ProdExt()(x, 0, True)
7754
+ >>> print(output)
7755
+ [[[ 28. 28. 28. 28. 28. 28.]
7756
+ [ 80. 80. 80. 80. 80. 80.]
7757
+ [162. 162. 162. 162. 162. 162.]]]
7758
+ >>> # case 3: Reduces a dimension along axis 1.
7759
+ >>> output = ops.ProdExt()(x, 1, True)
7760
+ >>> print(output)
7761
+ [[[ 6. 6. 6. 6. 6. 6.]]
7762
+ [[120. 120. 120. 120. 120. 120.]]
7763
+ [[504. 504. 504. 504. 504. 504.]]]
7764
+ >>> # case 4: Reduces a dimension along axis 2.
7765
+ >>> output = ops.ProdExt()(x, 2, True)
7766
+ >>> print(output)
7767
+ [[[1.00000e+00]
7768
+ [6.40000e+01]
7769
+ [7.29000e+02]]
7770
+ [[4.09600e+03]
7771
+ [1.56250e+04]
7772
+ [4.66560e+04]]
7773
+ [[1.17649e+05]
7774
+ [2.62144e+05]
7775
+ [5.31441e+05]]]
7776
+ """
7777
+ __mindspore_signature__ = (
7778
+ sig.make_sig('input'),
7779
+ sig.make_sig('axis', default=None),
7780
+ sig.make_sig('keep_dims', default=False),
7781
+ sig.make_sig('dtype', default=None),
7782
+ )
7783
+
7784
+ @prim_arg_register
7785
+ def __init__(self):
7786
+ pass
7787
+
7788
+ def __call__(self, input, axis=None, keep_dims=False, dtype=None):
7789
+ return _convert_stub(pyboost_prod_ext(self, [input, axis, keep_dims, dtype if dtype is None else dtype_to_type_id('ProdExt', 'dtype', dtype)]))
7790
+
7791
+ prod_ext_op=ProdExt()
7792
+
7793
+
7794
+ class PromptKVCache(Primitive):
7795
+ r"""
8461
7796
  .. code-block::
8462
7797
 
8463
7798
  prim = ops.PromptKVCache(align_mode)
@@ -8533,47 +7868,37 @@ class Qr(Primitive):
8533
7868
  return super().__call__(x, self.full_matrices)
8534
7869
 
8535
7870
 
8536
- class RandExt(Primitive):
7871
+ class QuantBatchMatmul(Primitive):
8537
7872
  r"""
7873
+ .. code-block::
7874
+
7875
+ prim = ops.QuantBatchMatmul(transpose_x1, transpose_x2, dtype)
7876
+ out = prim(x1, x2, scale, offset, bias)
7877
+
7878
+ is equivalent to
8538
7879
 
8539
- """
8540
- __mindspore_signature__ = (
8541
- sig.make_sig('shape'),
8542
- sig.make_sig('seed'),
8543
- sig.make_sig('offset'),
8544
- sig.make_sig('dtype', default=None),
8545
- )
8546
-
8547
- @prim_arg_register
8548
- def __init__(self):
8549
- pass
8550
-
8551
- def __call__(self, shape, seed, offset, dtype=None):
8552
- return _convert_stub(pyboost_rand_ext(self, [shape, seed, offset, dtype if dtype is None else dtype_to_type_id('RandExt', 'dtype', dtype)]))
8553
-
8554
- rand_ext_op=RandExt()
8555
-
8556
-
8557
- class RandLikeExt(Primitive):
8558
- r"""
7880
+ .. code-block::
8559
7881
 
7882
+ ops.quant_batch_matmul(x1, x2, scale, offset, bias, transpose_x1, transpose_x2, dtype)
7883
+
7884
+ Refer to :func:`mindspore.ops.quant_batch_matmul` for more details.
8560
7885
  """
8561
7886
  __mindspore_signature__ = (
8562
- sig.make_sig('tensor'),
8563
- sig.make_sig('seed'),
8564
- sig.make_sig('offset'),
8565
- sig.make_sig('dtype', default=None),
7887
+ sig.make_sig('x1', dtype=sig.sig_dtype.T),
7888
+ sig.make_sig('x2', dtype=sig.sig_dtype.T),
7889
+ sig.make_sig('scale', dtype=sig.sig_dtype.T1),
7890
+ sig.make_sig('offset', dtype=sig.sig_dtype.T2, default=None),
7891
+ sig.make_sig('bias', dtype=sig.sig_dtype.T3, default=None),
8566
7892
  )
8567
7893
 
8568
7894
  @prim_arg_register
8569
- def __init__(self):
8570
- pass
8571
-
8572
- def __call__(self, tensor, seed, offset, dtype=None):
8573
- return _convert_stub(pyboost_rand_like_ext(self, [tensor, seed, offset, dtype if dtype is None else dtype_to_type_id('RandLikeExt', 'dtype', dtype)]))
8574
-
8575
- rand_like_ext_op=RandLikeExt()
7895
+ def __init__(self, transpose_x1=False, transpose_x2=False, dtype=mstype.float16):
7896
+ self._set_prim_arg("transpose_x1", transpose_x1)
7897
+ self._set_prim_arg("transpose_x2", transpose_x2)
7898
+ self._set_prim_arg_with_handler("dtype", dtype, dtype_to_type_id)
8576
7899
 
7900
+ def __call__(self, x1, x2, scale, offset=None, bias=None):
7901
+ return _convert_stub(pyboost_quant_batch_matmul(self, [x1, x2, scale, offset, bias, self.transpose_x1, self.transpose_x2, self.dtype]))
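A minimal call sketch for the QuantBatchMatmul primitive added above, following its signature; the int8 activations/weights and the per-channel float32 scale are assumptions about typical quantized matmul usage, not taken from this diff:

.. code-block:: python

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    prim = ops.QuantBatchMatmul(transpose_x1=False, transpose_x2=False, dtype=ms.float16)
    x1 = Tensor(np.random.randint(-128, 127, (2, 16, 32)), ms.int8)  # assumed int8 activations
    x2 = Tensor(np.random.randint(-128, 127, (2, 32, 64)), ms.int8)  # assumed int8 weights
    scale = Tensor(np.ones(64), ms.float32)                          # assumed per-channel dequant scale
    out = prim(x1, x2, scale)  # offset and bias default to None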
8577
7902
 
8578
7903
  class RandpermV2(Primitive):
8579
7904
  r"""
@@ -9575,83 +8900,25 @@ class ReLU(Primitive):
9575
8900
  relu_op=ReLU()
9576
8901
 
9577
8902
 
9578
- class RepeatInterleaveGrad(Primitive):
9579
- r"""
9580
- Gradients of RepeatInterleave operation.
9581
- """
9582
- @prim_arg_register
9583
- def __init__(self):
9584
- pass
9585
-
9586
- def __call__(self, input, repeats, dim):
9587
- return _convert_stub(pyboost_repeat_interleave_grad(self, [input, repeats, dim]))
9588
-
9589
- repeat_interleave_grad_op=RepeatInterleaveGrad()
9590
-
9591
-
9592
- class RepeatInterleaveInt(Primitive):
9593
- r"""
9594
- Repeat elements of a tensor along an axis, like `numpy.repeat`.
9595
-
9596
- Args:
9597
- input (Tensor): The tensor to repeat values for. Must be of type: float16,
9598
- float32, int8, uint8, int16, int32, or int64.
9599
- repeats (int): The number of times to repeat, must be positive.
9600
- dim (int, optional): The dim along which to repeat. Default: ``None``. If `dim` is None,
9601
- the input Tensor will be flattened and the output will also be flattened.
9602
- output_size (int, optional): Total output size for the given axis (e.g. sum of repeats),
9603
- Default: ``None``.
9604
-
9605
- Returns:
9606
- One tensor with values repeated along the specified dim. If input has shape
9607
- :math:`(s1, s2, ..., sn)` and dim is i, the output will have shape :math:`(s1, s2, ...,
9608
- si * repeats, ..., sn)`. The output type will be the same as the type of `input`.
9609
-
9610
- Supported Platforms:
9611
- ``Ascend``
9612
- """
9613
- __mindspore_signature__ = (
9614
- sig.make_sig('input'),
9615
- sig.make_sig('repeats'),
9616
- sig.make_sig('dim', default=None),
9617
- sig.make_sig('output_size', default=None),
9618
- )
9619
-
9620
- @prim_arg_register
9621
- def __init__(self):
9622
- pass
9623
-
9624
- def __call__(self, input, repeats, dim=None, output_size=None):
9625
- return _convert_stub(pyboost_repeat_interleave_int(self, [input, repeats, dim, output_size]))
9626
-
9627
- repeat_interleave_int_op=RepeatInterleaveInt()
9628
-
9629
-
9630
- class RepeatInterleaveTensor(Primitive):
8903
+ class RepeatInterleave(Primitive):
9631
8904
  r"""
9632
- Repeat elements of a tensor along an axis, like `numpy.repeat`.
9633
-
9634
- Args:
9635
- input (Tensor): The tensor to repeat values for. Must be of type: float16,
9636
- float32, int8, uint8, int16, int32, or int64.
9637
- repeats (Union[tuple, list, Tensor]): The number of times to repeat, must be positive.
9638
- dim (int, optional): The dim along which to repeat. Default: ``None``. If `dim` is None,
9639
- the input Tensor will be flattened and the output will also be flattened.
9640
- output_size (int, optional): Total output size for the given axis (e.g. sum of repeats),
9641
- Default: ``None``.
9642
-
9643
- Returns:
9644
- One tensor with values repeated along the specified dim. If input has shape
9645
- :math:`(s1, s2, ..., sn)` and dim is i, the output will have shape :math:`(s1, s2, ...,
9646
- si * repeats, ..., sn)`. The output type will be the same as the type of `input`.
9647
-
9648
- Supported Platforms:
9649
- ``Ascend``
8905
+ .. code-block::
8906
+
8907
+ prim = ops.RepeatInterleave()
8908
+ out = prim(input, repeats, axis, output_size)
8909
+
8910
+ is equivalent to
8911
+
8912
+ .. code-block::
8913
+
8914
+ ops.repeat_interleave(input, repeats, axis, output_size)
8915
+
8916
+ Refer to :func:`mindspore.ops.repeat_interleave` for more details.
9650
8917
  """
9651
8918
  __mindspore_signature__ = (
9652
8919
  sig.make_sig('input'),
9653
8920
  sig.make_sig('repeats'),
9654
- sig.make_sig('dim', default=None),
8921
+ sig.make_sig('axis', default=None),
9655
8922
  sig.make_sig('output_size', default=None),
9656
8923
  )
9657
8924
 
@@ -9659,10 +8926,10 @@ class RepeatInterleaveTensor(Primitive):
9659
8926
  def __init__(self):
9660
8927
  pass
9661
8928
 
9662
- def __call__(self, input, repeats, dim=None, output_size=None):
9663
- return _convert_stub(pyboost_repeat_interleave_tensor(self, [input, repeats, dim, output_size]))
8929
+ def __call__(self, input, repeats, axis=None, output_size=None):
8930
+ return _convert_stub(pyboost_repeat_interleave(self, [input, repeats, axis, output_size]))
9664
8931
 
9665
- repeat_interleave_tensor_op=RepeatInterleaveTensor()
8932
+ repeat_interleave_op=RepeatInterleave()
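The consolidated RepeatInterleave primitive above maps to the functional form named in its docstring; a short illustrative sketch (input values are assumptions):

.. code-block:: python

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.array([[0, 1], [2, 3]]), ms.int32)
    y = ops.repeat_interleave(x, 2, axis=0)  # each row repeated twice -> shape (4, 2)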
9666
8933
 
9667
8934
 
9668
8935
  class ReplicationPad1DGrad(Primitive):
@@ -9803,7 +9070,8 @@ class Reshape(Primitive):
9803
9070
  pass
9804
9071
 
9805
9072
  def __call__(self, input, shape):
9806
- return _convert_stub(pyboost_reshape(self, [input, shape]))
9073
+ return super().__call__(input, shape)
9074
+
9807
9075
 
9808
9076
  reshape_op=Reshape()
9809
9077
 
@@ -10339,58 +9607,6 @@ class RightShift(Primitive):
10339
9607
  right_shift_op=RightShift()
10340
9608
 
10341
9609
 
10342
- class RmsNormGrad(Primitive):
10343
- r"""
10344
- Calculates the gradient of RmsNorm operation.
10345
-
10346
- .. warning::
10347
- This is an experimental API that is subject to change or deletion.
10348
-
10349
- Inputs:
10350
- - **dy** (Tensor) - The grad of previous operator, support data type: float16, float32, bfloat16.
10351
- - **x** (Tensor) - Input data of RmsNorm, support data type: float16, float32, bfloat16.
10352
- - **rstd** (Tensor) - The second output of RmsNorm, support data type: float16, float32, bfloat16.
10353
- - **gamma** (Tensor) - Support data type: float16, float32, bfloat16.
10354
-
10355
- Returns:
10356
- - **dx** (Tensor) - Has the same type and shape as `dy`.
10357
- - **dgamma** (Tensor) - A float32 Tensor with the same shape as `gamma`.
10358
-
10359
- Supported Platforms:
10360
- ``Ascend``
10361
- """
10362
- @prim_arg_register
10363
- def __init__(self):
10364
- pass
10365
-
10366
- def __call__(self, dy, x, rstd, gamma):
10367
- return _convert_stub(pyboost_rms_norm_grad(self, [dy, x, rstd, gamma]))
10368
-
10369
- rms_norm_grad_op=RmsNormGrad()
10370
-
10371
-
10372
- class RmsNorm(Primitive):
10373
- r"""
10374
- .. code-block::
10375
-
10376
- prim = ops.RmsNorm(epsilon)
10377
- out = prim(x, gamma)
10378
-
10379
- is equivalent to
10380
-
10381
- .. code-block::
10382
-
10383
- ops.rms_norm(x, gamma, epsilon)
10384
-
10385
- Refer to :func:`mindspore.ops.rms_norm` for more details.
10386
- """
10387
- @prim_arg_register
10388
- def __init__(self, epsilon=1e-6):
10389
- self._set_prim_arg("epsilon", epsilon)
10390
-
10391
- def __call__(self, x, gamma):
10392
- return _convert_stub(pyboost_rms_norm(self, [x, gamma, self.epsilon]))
10393
-
10394
9610
  class Roll(Primitive):
10395
9611
  r"""
10396
9612
  Rolls the elements of a tensor along an axis.
@@ -10535,74 +9751,7 @@ scalar_cast_op=ScalarCast()
10535
9751
 
10536
9752
  class ScatterAddExt(Primitive):
10537
9753
  r"""
10538
- Add all elements in `src` to the index specified by `index` to `input` along dimension specified by `dim`.
10539
-
10540
- It takes three inputs `input`, `src` and `index` of the same rank r >= 1.
10541
-
10542
- For a 3-D tensor, the operation updates input as follows:
10543
-
10544
- .. code-block::
10545
-
10546
- input[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
10547
-
10548
- input[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
10549
-
10550
- input[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
10551
-
10552
- Inputs:
10553
- - **input** (Tensor) - The target tensor. The rank must be at least 1.
10554
- - **dim** (int) - Which dim to scatter. Accepted range is [-r, r) where r = rank(`input`). Default: ``0``.
10555
- - **index** (Tensor) - The index of `input` to do scatter operation whose data type must be mindspore.int32 or
10556
- mindspore.int64. Same rank as `input`. Except for the dimension specified by `dim`,
10557
- the size of each dimension of `index` must be less than or equal to the size of
10558
- the corresponding dimension of `input`.
10559
- - **src** (Tensor) - The tensor doing the scatter operation with `input`, has the same type as `input` and
10560
- the size of each dimension must be greater than or equal to that of `index`.
10561
-
10562
- Outputs:
10563
- Tensor, has the same shape and type as `input`.
10564
-
10565
- Raises:
10566
- TypeError: If `index` is neither int32 nor int64.
10567
- ValueError: If anyone of the rank among `input`, `index` and `src` less than 1.
10568
- ValueError: If the rank of `input`, `index` and `src` is not the same.
10569
- ValueError: If, outside dimension `dim`, the size of any dimension of `index` is greater than the size of
10570
- the corresponding dimension of `input` .
10571
- ValueError: If the size of any dimension of `src` is less than that of `index`.
10572
-
10573
- Supported Platforms:
10574
- ``Ascend``
10575
-
10576
- Examples:
10577
- >>> import numpy as np
10578
- >>> import mindspore as ms
10579
- >>> from mindspore import Tensor, ops
10580
- >>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
10581
- >>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
10582
- >>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
10583
- >>> out = ops.ScatterAddExt()(input=input, dim=1, index=index, src=src)
10584
- >>> print(out)
10585
- [[1. 2. 11. 4. 13.]]
10586
- >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
10587
- >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
10588
- >>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64)
10589
- >>> out = ops.ScatterAddExt()(input=input, dim=0, index=index, src=src)
10590
- >>> print(out)
10591
- [[1. 2. 3. 0. 0.]
10592
- [0. 0. 0. 0. 0.]
10593
- [4. 5. 6. 0. 0.]
10594
- [0. 0. 0. 0. 0.]
10595
- [7. 8. 9. 0. 0.]]
10596
- >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
10597
- >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
10598
- >>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64)
10599
- >>> out = ops.ScatterAddExt()(input=input, dim=1, index=index, src=src)
10600
- >>> print(out)
10601
- [[1. 0. 2. 0. 3.]
10602
- [4. 0. 5. 0. 6.]
10603
- [7. 0. 8. 0. 9.]
10604
- [0. 0. 0. 0. 0.]
10605
- [0. 0. 0. 0. 0.]]
9754
+
10606
9755
  """
10607
9756
  @prim_arg_register
10608
9757
  def __init__(self):
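The scatter-add rule in the removed ScatterAddExt docstring can be checked with a short NumPy loop; this is only a sketch of the semantics for the dim == 1 case from the first example above, not the operator's implementation:

import numpy as np

# Reference check of the ScatterAddExt rule for dim == 1:
#     input[i][index[i][j]] += src[i][j]
inp = np.array([[1., 2., 3., 4., 5.]], dtype=np.float32)
src = np.array([[8., 8.]], dtype=np.float32)
index = np.array([[2, 4]], dtype=np.int64)

out = inp.copy()
for i in range(index.shape[0]):
    for j in range(index.shape[1]):
        out[i, index[i, j]] += src[i, j]

print(out)  # [[ 1.  2. 11.  4. 13.]] -- matches the first example above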
@@ -10654,68 +9803,6 @@ class Scatter(Primitive):
10654
9803
  scatter_op=Scatter()
10655
9804
 
10656
9805
 
10657
- class SearchSorted(Primitive):
10658
- r"""
10659
-    Return the position indices such that, after inserting the values into `sorted_sequence`, the order of the innermost
10660
-    dimension of `sorted_sequence` remains unchanged.
10661
-
10662
- .. warning::
10663
- This is an experimental API that is subject to change or deletion.
10664
-
10665
- Refer to :func:`mindspore.ops.searchsorted` for more details.
10666
-
10667
- Args:
10668
- dtype (mindspore.dtype, optional): The specified type of output tensor. Optional values are: ``mstype.int32`` and
10669
- ``mstype.int64``. Default value: ``mstype.int64``.
10670
- right (bool, optional): Search Strategy. If ``True`` , return the last suitable index found;
10671
- if ``False`` , return the first such index. Default: ``False`` .
10672
-
10673
- Inputs:
10674
- - **sorted_sequence** (Tensor) - The input tensor. It must contain a monotonically increasing sequence on the innermost dimension.
10675
- - **values** (Tensor) - The value that should be inserted.
10676
-        - **sorter** (Tensor, optional) - If provided, a tensor matching the shape of the unsorted `sorted_sequence`,
10677
-          containing a sequence of indices that sorts it in ascending order along the innermost dimension; its data type must be int64. Default: ``None`` .
10678
-
10679
- Outputs:
10680
- Tensor containing the indices from the innermost dimension of `sorted_sequence` such that,
10681
- if insert the corresponding value in the `values` Tensor, the order of `sorted_sequence` would be preserved,
10682
- whose datatype is int32 if out_int32 is ``True`` , otherwise int64, and shape is the same as the shape of
10683
- `values`.
10684
-
10685
- Raises:
10686
-        ValueError: If `sorted_sequence` is not one-dimensional and the dimensions of `sorted_sequence` and `values`, other than the last one, differ.
10687
-        ValueError: If `sorted_sequence` is a scalar.
10688
- ValueError: If `values` is a scalar when `sorted_sequence` dimension is not 1.
10689
-
10690
- Supported Platforms:
10691
- ``Ascend`` ``GPU`` ``CPU``
10692
-
10693
- Examples:
10694
- >>> import mindspore
10695
- >>> import numpy as np
10696
- >>> from mindspore import Tensor, ops
10697
- >>> searchsorted = ops.SearchSorted()
10698
- >>> sorted_sequence = Tensor(np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]]), mindspore.float32)
10699
- >>> values = Tensor(np.array([[3, 6, 9], [3, 6, 9]]), mindspore.float32)
10700
- >>> output = searchsorted(sorted_sequence, values)
10701
- >>> print(output)
10702
- [[2 4 5]
10703
- [1 2 4]]
10704
- """
10705
- __mindspore_signature__ = (
10706
- sig.make_sig('sorted_sequence'),
10707
- sig.make_sig('values'),
10708
- sig.make_sig('sorter', default=None),
10709
- )
10710
-
10711
- @prim_arg_register
10712
- def __init__(self, dtype=mstype.int64, right=False):
10713
- self._set_prim_arg_with_handler("dtype", dtype, dtype_to_type_id)
10714
- self._set_prim_arg("right", right)
10715
-
10716
- def __call__(self, sorted_sequence, values, sorter=None):
10717
- return _convert_stub(pyboost_searchsorted(self, [sorted_sequence, values, sorter, self.dtype, self.right]))
10718
-
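The insertion-index semantics of the removed SearchSorted primitive can be illustrated with Python's bisect module; a sketch only, with `right=False` corresponding to the leftmost insertion point:

import bisect

# Leftmost insertion points per row, mirroring SearchSorted with right=False.
sorted_sequence = [[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]]
values = [[3, 6, 9], [3, 6, 9]]

indices = [[bisect.bisect_left(row, v) for v in vals]
           for row, vals in zip(sorted_sequence, values)]
print(indices)  # [[2, 4, 5], [1, 2, 4]] -- matches the docstring example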
10719
9806
  class Select(Primitive):
10720
9807
  r"""
10721
9808
  .. code-block::
@@ -10803,31 +9890,6 @@ class Sigmoid(Primitive):
10803
9890
  sigmoid_op=Sigmoid()
10804
9891
 
10805
9892
 
10806
- class Sign(Primitive):
10807
- r"""
10808
- .. code-block::
10809
-
10810
- prim = ops.Sign()
10811
- out = prim(input)
10812
-
10813
- is equivalent to
10814
-
10815
- .. code-block::
10816
-
10817
- ops.sign(input)
10818
-
10819
- Refer to :func:`mindspore.ops.sign` for more details.
10820
- """
10821
- @prim_arg_register
10822
- def __init__(self):
10823
- pass
10824
-
10825
- def __call__(self, input):
10826
- return _convert_stub(pyboost_sign(self, [input]))
10827
-
10828
- sign_op=Sign()
10829
-
10830
-
10831
9893
  class SiLUGrad(Primitive):
10832
9894
  r"""
10833
9895
  Performs grad of SiLU operation.
@@ -10946,41 +10008,7 @@ sinh_op=Sinh()
10946
10008
 
10947
10009
  class SliceExt(Primitive):
10948
10010
  r"""
10949
-    Returns a slice of the input tensor along dimension `dim`, taking the elements
10950
-    from index `start` up to (but not including) index `end`, with stride `step`.
10951
-
10952
-    Args:
10953
-        input (Tensor): the tensor to slice.
10954
-        dim (int): the dimension along which to slice.
10955
-        start (int): the index at which the slice starts.
10956
-        end (int): the index at which the slice ends (exclusive).
10957
-        step (int): the slice step size.
10958
-
10959
- Returns:
10960
- Tensor.
10961
-
10962
- Raises:
10963
- ValueError: If dim is out of range [-input.ndim, input.ndim).
10964
- ValueError: If start is out of range [-input.shape[dim], input.shape[dim]].
10965
-        ValueError: If end is out of range [start, input.shape[dim]].
10966
-
10967
- Supported Platforms:
10968
- ``Ascend``
10969
-
10970
- Examples:
10971
- >>> import mindspore
10972
- >>> from mindspore import ops
10973
- >>> from mindspore import Tensor
10974
- >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mindspore.int32)
10975
- >>> output = ops.SliceExt()(x, 0, 0, 2, 1)
10976
- >>> print(output)
10977
- [[ 1 2 3]
10978
- [ 4 5 6]]
10979
- >>> output = ops.SliceExt()(x, 1, 1, 3, 1)
10980
- >>> print(output)
10981
- [[ 2 3]
10982
- [ 5 6]
10983
- [ 8 9]]
10011
+
10984
10012
  """
10985
10013
  @prim_arg_register
10986
10014
  def __init__(self):
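Assuming `end` is exclusive, as the removed examples above suggest, SliceExt behaves like an ordinary strided slice along a single dimension; a minimal NumPy sketch:

import numpy as np

# SliceExt(input, dim, start, end, step) ~ a plain slice along `dim`.
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)

print(x[0:2:1, :])   # rows 0..1  -> same result as SliceExt()(x, 0, 0, 2, 1)
print(x[:, 1:3:1])   # cols 1..2  -> same result as SliceExt()(x, 1, 1, 3, 1)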
@@ -11134,38 +10162,6 @@ class SolveTriangular(Primitive):
11134
10162
  solve_triangular_op=SolveTriangular()
11135
10163
 
11136
10164
 
11137
- class SortExt(Primitive):
11138
- r"""
11139
- .. code-block::
11140
-
11141
- prim = ops.SortExt()
11142
- out = prim(input, dim, descending, stable)
11143
-
11144
- is equivalent to
11145
-
11146
- .. code-block::
11147
-
11148
- ops.sort_ext(input, dim, descending, stable)
11149
-
11150
- Refer to :func:`mindspore.ops.sort_ext` for more details.
11151
- """
11152
- __mindspore_signature__ = (
11153
- sig.make_sig('input'),
11154
- sig.make_sig('dim', default=-1),
11155
- sig.make_sig('descending', default=False),
11156
- sig.make_sig('stable', default=False),
11157
- )
11158
-
11159
- @prim_arg_register
11160
- def __init__(self):
11161
- pass
11162
-
11163
- def __call__(self, input, dim=-1, descending=False, stable=False):
11164
- return _convert_stub(pyboost_sort_ext(self, [input, dim, descending, stable]))
11165
-
11166
- sort_ext_op=SortExt()
11167
-
11168
-
11169
10165
  class Split(Primitive):
11170
10166
  r"""
11171
10167
  Splits the input tensor into output_num of tensors along the given axis and output numbers.
@@ -11432,7 +10428,8 @@ class Sub(Primitive):
11432
10428
  pass
11433
10429
 
11434
10430
  def __call__(self, input, other):
11435
- return _convert_stub(pyboost_sub(self, [input, other]))
10431
+ return super().__call__(input, other)
10432
+
11436
10433
 
11437
10434
  sub_op=Sub()
11438
10435
 
@@ -11657,32 +10654,6 @@ class TopkExt(Primitive):
11657
10654
  topk_ext_op=TopkExt()
11658
10655
 
11659
10656
 
11660
- class TopKRouter(Primitive):
11661
- r"""
11662
- .. code-block::
11663
-
11664
- prim = ops.TopKRouter()
11665
- out = prim(input, capacity, expert_num)
11666
-
11667
- is equivalent to
11668
-
11669
- .. code-block::
11670
-
11671
- ops.topkrouter(input, capacity, expert_num)
11672
-
11673
- Refer to :func:`mindspore.ops.topkrouter` for more details.
11674
- """
11675
- @prim_arg_register
11676
- def __init__(self):
11677
- pass
11678
-
11679
- def __call__(self, input, capacity, expert_num):
11680
- return super().__call__(input, capacity, expert_num)
11681
-
11682
-
11683
- topkrouter_op=TopKRouter()
11684
-
11685
-
11686
10657
  class Trace(Primitive):
11687
10658
  r"""
11688
10659
  .. code-block::
@@ -11734,6 +10705,28 @@ class Transpose(Primitive):
11734
10705
  transpose_op=Transpose()
11735
10706
 
11736
10707
 
10708
+ class Tril(Primitive):
10709
+ r"""
10710
+ .. code-block::
10711
+
10712
+ prim = ops.Tril(diagonal)
10713
+ out = prim(input)
10714
+
10715
+ is equivalent to
10716
+
10717
+ .. code-block::
10718
+
10719
+ ops.tril(input, diagonal)
10720
+
10721
+ Refer to :func:`mindspore.ops.tril` for more details.
10722
+ """
10723
+ @prim_arg_register
10724
+ def __init__(self, diagonal=0):
10725
+ self._set_prim_arg("diagonal", diagonal)
10726
+
10727
+ def __call__(self, input):
10728
+ return _convert_stub(pyboost_tril(self, [input, self.diagonal]))
10729
+
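The `diagonal` argument of the newly added Tril primitive follows the usual lower-triangle convention; NumPy's tril shows the effect (a sketch of the convention only, not the Ascend kernel):

import numpy as np

# diagonal=0 keeps the main diagonal, a positive value additionally keeps that
# many super-diagonals, a negative value drops sub-diagonals.
x = np.arange(1, 10).reshape(3, 3)
print(np.tril(x, k=0))
print(np.tril(x, k=1))
print(np.tril(x, k=-1))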
11737
10730
  class Triu(Primitive):
11738
10731
  r"""
11739
10732
  .. code-block::
@@ -11793,8 +10786,8 @@ class UniformExt(Primitive):
11793
10786
 
11794
10787
  Inputs:
11795
10788
         - **tensor** (Tensor) - The input tensor, which determines the shape and data type of the random values to be generated.
11796
- - **a** (number) - Lower bound of the random numbers. Default: 0.0.
11797
- - **b** (number) - Upper bound of the random numbers. Default: 0.0.
10789
+ - **a** (float) - Lower bound of the random numbers. Default: 0.0.
10790
+ - **b** (float) - Upper bound of the random numbers. Default: 0.0.
11798
10791
  - **seed** (int) - Seed for random number generation. Default: 0.
11799
10792
  - **offset** (int) - Positional offset in the tensor to start filling with random numbers. Default: 0.
11800
10793
 
@@ -11829,135 +10822,6 @@ class UniformExt(Primitive):
11829
10822
  uniform_ext_op=UniformExt()
11830
10823
 
11831
10824
 
11832
- class Unique2(Primitive):
11833
- r"""
11834
- Returns the unique elements of input tensor.
11835
-
11836
- when `return_inverse=True`, also return a tensor containing the index of each value of input
11837
- tensor corresponding to the output unique tensor.
11838
- when `return_counts=True`, also return a tensor containing the number of occurrences for each
11839
- unique value or tensor
11840
-
11841
- Inputs:
11842
- - **input**(Tensor) - The input tensor.
11843
- - **sorted**(bool) - Whether to sort the unique elements in ascending order before returning as output.
11844
- Default: ``True`` .
11845
- - **return_inverse**(bool) - Whether to also return the indices for where elements in the original input ended up in
11846
- the returned unique list. Default: ``False`` .
11847
- - **return_counts**(bool) - Whether to also return the counts for each unique element. Default: ``False`` .
11848
-
11849
-
11850
- Returns:
11851
-        A tensor, or a tuple of tensors, containing some of the following: `output`, `inverse_indices` and `counts`.
11852
-
11853
- - **output**(Tensor) - the output list of unique scalar elements.
11854
-        - **inverse_indices**(Tensor) - Returned when ``return_inverse`` is True. It contains the indices of where the
11855
-          elements in the original input map to in the output; its shape is the same as the shape of `input`.
11856
-        - **counts**(Tensor) - Returned when ``return_counts`` is True. It contains the number of occurrences of each
11857
-          unique value; its shape is the same as the shape of `output`.
11858
-
11859
-
11860
- Raises:
11861
- TypeError: If `input` is not a Tensor.
11862
-
11863
- Supported Platforms:
11864
- ``Ascend``
11865
-
11866
- Examples:
11867
- >>> import mindspore
11868
- >>> import numpy as np
11869
- >>> from mindspore import Tensor, nn
11870
- >>> from mindspore import ops
11871
- >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
11872
- >>> unique = ops.auto_generate.Unique2()
11873
- >>> output = unique(x, return_inverse=True)
11874
- >>> print(output)
11875
- (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))
11876
- >>> y = output[0]
11877
- >>> print(y)
11878
- [1 2 5]
11879
- >>> idx = output[1]
11880
- >>> print(idx)
11881
- [0 1 2 1]
11882
- """
11883
- __mindspore_signature__ = (
11884
- sig.make_sig('input'),
11885
- sig.make_sig('sorted', default=True),
11886
- sig.make_sig('return_inverse', default=False),
11887
- sig.make_sig('return_counts', default=False),
11888
- )
11889
-
11890
- @prim_arg_register
11891
- def __init__(self):
11892
- pass
11893
-
11894
- def __call__(self, input, sorted=True, return_inverse=False, return_counts=False):
11895
- return _convert_stub(pyboost_unique2(self, [input, sorted, return_inverse, return_counts]))
11896
-
11897
- unique2_op=Unique2()
11898
-
11899
-
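A NumPy analogue of the removed Unique2 behaviour on a flat tensor, shown only to clarify what `return_inverse` and `return_counts` add to the output:

import numpy as np

x = np.array([1, 2, 5, 2], dtype=np.int32)
uniq, inverse, counts = np.unique(x, return_inverse=True, return_counts=True)
print(uniq)     # [1 2 5]   -> unique, sorted values
print(inverse)  # [0 1 2 1] -> index of each input element in `uniq`
print(counts)   # [1 2 1]   -> occurrences of each unique value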
11900
- class UniqueDim(Primitive):
11901
- r"""
11902
- Returns the unique elements of input tensor.
11903
-
11904
- when `return_inverse=True`, also return a tensor containing the index of each value of input
11905
- tensor corresponding to the output unique tensor.
11906
-
11907
- Inputs:
11908
- - **input**(Tensor) - The input tensor.
11909
- - **sorted**(bool) - Whether to sort the unique elements in ascending order before returning as output.
11910
- - **return_inverse**(bool) - Whether to also return the indices for where elements in the original input ended up in
11911
- the returned unique list.
11912
- - **dim**(int) - the dimension to operate upon.
11913
-
11914
-
11915
- Returns:
11916
-        A tensor, or a tuple of tensors, containing some of the following: `output`, `inverse_indices` and `counts`.
11917
-
11918
- - **output**(Tensor) - the output list of unique scalar elements.
11919
- - **inverse_indices**(Tensor) - Return when ``return_inverse`` is True. It represents the indices for where
11920
- elements in the original input map to in the output; The shape is input.shape[dim].
11921
- - **counts**(Tensor) - Return the number of occurrences for each unique value or tensor. The shape is
11922
- output.shape(dim).
11923
-
11924
-
11925
- Raises:
11926
- TypeError: If `input` is not a Tensor.
11927
-
11928
- Supported Platforms:
11929
- ``Ascend``
11930
-
11931
- Examples:
11932
- >>> import mindspore
11933
- >>> import numpy as np
11934
- >>> from mindspore import Tensor, nn
11935
- >>> from mindspore import ops
11936
- >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
11937
- >>> unique = ops.auto_generate.UniqueDim()
11938
- >>> output = unique(x, sorted=True, return_inverse=True, dim=0)
11939
- >>> print(output)
11940
- (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))
11941
- >>> y = output[0]
11942
- >>> print(y)
11943
- [1 2 5]
11944
- >>> idx = output[1]
11945
- >>> print(idx)
11946
- [0 1 2 1]
11947
-        >>> counts = output[2]
11948
- >>> print(counts)
11949
- [1 2 1]
11950
- """
11951
- @prim_arg_register
11952
- def __init__(self):
11953
- pass
11954
-
11955
- def __call__(self, input, sorted, return_inverse, dim):
11956
- return _convert_stub(pyboost_unique_dim(self, [input, sorted, return_inverse, dim]))
11957
-
11958
- unique_dim_op=UniqueDim()
11959
-
11960
-
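For the removed UniqueDim, the closest NumPy analogue is np.unique with an axis argument, which de-duplicates whole slices along `dim`; a small sketch:

import numpy as np

x = np.array([[1, 3], [2, 3], [1, 3]], dtype=np.int32)
uniq, inverse, counts = np.unique(x, axis=0, return_inverse=True,
                                  return_counts=True)
print(uniq)     # unique rows: [[1 3] [2 3]]
print(inverse)  # [0 1 0] (exact shape may vary slightly across NumPy versions)
print(counts)   # [2 1]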
11961
10825
  class UnsortedSegmentSum(Primitive):
11962
10826
  r"""
11963
10827
  .. code-block::
@@ -12658,6 +11522,40 @@ class View(Primitive):
12658
11522
  view_op=View()
12659
11523
 
12660
11524
 
11525
+ class WeightQuantBatchMatmul(Primitive):
11526
+ r"""
11527
+ .. code-block::
11528
+
11529
+ prim = ops.WeightQuantBatchMatmul(transpose_x, transpose_weight, antiquant_group_size)
11530
+ out = prim(x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias)
11531
+
11532
+ is equivalent to
11533
+
11534
+ .. code-block::
11535
+
11536
+ ops.weight_quant_batch_matmul(x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, transpose_x, transpose_weight, antiquant_group_size)
11537
+
11538
+ Refer to :func:`mindspore.ops.weight_quant_batch_matmul` for more details.
11539
+ """
11540
+ __mindspore_signature__ = (
11541
+ sig.make_sig('x'),
11542
+ sig.make_sig('weight'),
11543
+ sig.make_sig('antiquant_scale'),
11544
+ sig.make_sig('antiquant_offset', default=None),
11545
+ sig.make_sig('quant_scale', default=None),
11546
+ sig.make_sig('quant_offset', default=None),
11547
+ sig.make_sig('bias', default=None),
11548
+ )
11549
+
11550
+ @prim_arg_register
11551
+ def __init__(self, transpose_x=False, transpose_weight=False, antiquant_group_size=0):
11552
+ self._set_prim_arg("transpose_x", transpose_x)
11553
+ self._set_prim_arg("transpose_weight", transpose_weight)
11554
+ self._set_prim_arg("antiquant_group_size", antiquant_group_size)
11555
+
11556
+ def __call__(self, x, weight, antiquant_scale, antiquant_offset=None, quant_scale=None, quant_offset=None, bias=None):
11557
+ return _convert_stub(pyboost_weight_quant_batch_matmul(self, [x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, self.transpose_x, self.transpose_weight, self.antiquant_group_size]))
11558
+
12661
11559
  class ZerosLikeExt(Primitive):
12662
11560
  r"""
12663
11561
  Returns a Tensor with a value of 0 and its shape and data type is the same as the input.
@@ -12668,7 +11566,7 @@ class ZerosLikeExt(Primitive):
12668
11566
  - **input_x** (Tensor) - Tensor of any dimension.
12669
11567
 
12670
11568
  Returns:
12671
- Return a tensor filled with the value 0, with the same size as input.
11569
+        Tensor, has the same shape and type as `input_x` but filled with zeros.
12672
11570
 
12673
11571
  Supported Platforms:
12674
11572
  ``Ascend``
@@ -12723,445 +11621,3 @@ class ZerosLike(Primitive):
12723
11621
 
12724
11622
 
12725
11623
  zeros_like_op=ZerosLike()
12726
-
12727
-
12728
- class DynamicQuantExt(Primitive):
12729
- r"""
12730
- Performs dynamic quantization on the input tensor.
12731
-
12732
- Note:
12733
- - Dynamic quantization is performed by adjusting the scale of the input tensor dynamically.
12734
- - The `smooth_scales` tensor provides a mechanism to smooth out the scaling factors to avoid sudden changes.
12735
- - The input tensor `x` must be at least 1-dimensional, with shape :math:`(batches, n)`.
12736
- - The `smooth_scales` tensor must have shape `(n)`.
12737
- - The output `scale` tensor has shape `(batches)`.
12738
-
12739
- .. math::
12740
- \begin{array}{ll} \\
12741
- \text{scale} = \frac{\max(\left| x \right|, \text{axis}=-1)}{127} \\
12742
- \text{y} = \text{round}\left(\frac{x}{\text{scale}}\right) \\
12743
- \end{array}
12744
-
12745
- Inputs:
12746
- x (Tensor): The first input is a tensor of data type float16 or bfloat16.
12747
- It contains the data to be quantized.
12748
- smooth_scales (Tensor): The second input is a tensor of data type float16 or bfloat16.
12749
- It contains the scaling factors used for dynamic quantization.
12750
-
12751
- Outputs:
12752
- tuple[Tensor], tuple of 2 tensors, representing the quantized values and the scales used.
12753
-
12754
- - **y** (Tensor) - The quantized tensor.
12755
- - **scale** (Tensor) - The scales used for quantization.
12756
-
12757
- Raises:
12758
- ValueError: If the rank of `x` is not at least 1.
12759
-
12760
- Supported Platforms:
12761
- ``Ascend``
12762
-
12763
- Examples:
12764
- >>> import mindspore
12765
- >>> import numpy as np
12766
- >>> from mindspore import Tensor, ops
12767
- >>> input = Tensor(np.random.rand(2, 3), mindspore.float16)
12768
- >>> smooth_scales = Tensor(np.random.rand(3), mindspore.float16)
12769
- >>> output = ops.auto_generate.DynamicQuantExt()(input, smooth_scales)
12770
-        >>> print(output[0].shape)
12771
- (2, 3)
12772
- """
12773
- __mindspore_signature__ = (
12774
- sig.make_sig('x'),
12775
- sig.make_sig('smooth_scales', default=None),
12776
- )
12777
-
12778
- @prim_arg_register
12779
- def __init__(self):
12780
- pass
12781
-
12782
- def __call__(self, x, smooth_scales=None):
12783
- return _convert_stub(pyboost_dynamic_quant_ext(self, [x, smooth_scales]))
12784
-
12785
- dynamic_quant_ext_op=DynamicQuantExt()
12786
-
12787
-
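The per-row formula in the removed DynamicQuantExt docstring (scale = max(|x|, axis=-1) / 127, y = round(x / scale)) can be written out directly in NumPy; the sketch below ignores `smooth_scales`:

import numpy as np

x = np.array([[0.5, -1.0, 2.0],
              [4.0, -8.0, 1.0]], dtype=np.float32)

scale = np.abs(x).max(axis=-1) / 127.0            # one scale per row (batch)
y = np.round(x / scale[:, None]).astype(np.int8)  # dynamically quantized values
print(y)
print(scale)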
12788
- class FusedInferAttentionScore(Primitive):
12789
- r"""
12790
- The interface for fully inference.
12791
-
12792
- .. warning::
12793
- This is an experimental API that is subject to change or deletion.
12794
-
12795
- Args:
12796
- num_heads (int): The number of heads, equal to `N` when input_layout is `BNSD`.
12797
- scale_value (float): The scale value indicating the scale coefficient, which is used as the scalar of Muls in the calculation.
12798
- Generally, the value is 1.0 / (D ** 0.5). Default: ``1.0``.
12799
- pre_tokens (int): Parameter for sparse computation, represents how many tokens are counted forward.
12800
-            Default: ``2147483647``. Invalid when Q_S is 1.
12801
-        next_tokens (int): Parameter for sparse computation, represents how many tokens are counted backward.
12802
-            Default: ``2147483647``. Invalid when Q_S is 1.
12803
- input_layout (str): Specifies the layout of input `query`, key and value. "BSH", "BNSD" or "BSND" is supported.
12804
- Default "BSH".
12805
- num_key_value_heads (int): head numbers of key/value which are used in GQA algorithm. Default: ``0``.
12806
- The value 0 indicates if the key and value have the same head nums, num_heads must be divisible by num_key_value_heads.
12807
- sparse_mode (int): Indicates sparse mode. Default ``0``.
12808
-
12809
- - 0: Indicates the defaultMask mode. If attn_mask is not passed, the mask operation is not performed,
12810
- and preTokens and nextTokens(internally assigned as INT_MAX) are ignored. If passed in, the full attn_mask
12811
- matrix (S1 * S2) needs to be passed in, indicating that the part between preTokens and nextTokens needs to
12812
- be calculated.
12813
- - 1: Represents allMask, that is, passing in the complete attn_mask matrix.
12814
- - 2: Representing the leftUpCausal mode corresponds to the lower triangle scenario divided by the left
12815
- vertex, and the optimized attn_mask matrix (2048*2048) is required.
12816
- - 3: Representing the rightDownCausal model corresponds to the lower triangle scene divided by the lower
12817
- right vertex, and the optimized attn_mask matrix (2048*2048) is required.
12818
- - 4: Represents the band scenario, that is, the part between counting preTokens and nextTokens, and the
12819
- optimized attn_mask matrix (2048*2048) is required.
12820
- - 5: Represents the prefix scenario, not implemented yet.
12821
- - 6: Represents the global scenario, not implemented yet.
12822
- - 7: Represents the dilated scenario, not implemented yet.
12823
- - 8: Represents the block_local scenario, not implemented yet.
12824
-
12825
- inner_precise (int): There are four modes: 0, 1, 2, and 3. Only support 0 and 1 when Q_S is 1. Default: ``1``.
12826
- - 0: Enable high-precise mode, without row invalid correction.
12827
- - 1: High-performance mode, without row invalid correction.
12828
- - 2: Enable high-precise mode, with row invalid correction.
12829
- - 3: High-performance mode, with row invalid correction.
12830
-
12831
- block_size (int): Maximum number of tokens per block in the KV cache block for PageAttention. Default: ``0``.
12832
- antiquant_mode (int): Pseudo-quantization mode, 0: per-channel, 1: per-token. This parameter is invalid when Q_S greater than or equal to 2. Default: ``0``.
12833
- softmax_lse_flag (bool): Whether to output softmax_lse. Default: ``False``.
12834
-
12835
- Inputs:
12836
- - **query** (Tensor) - The query tensor with data type of Int8, float16 or BFloat16.
12837
- Input tensor of shape :math:`(B, S, H)`, :math:`(B, N, S, D)`, or :math:`(B, S, N, D)`.
12838
- - **key** (TensorList) - The key tensor with data type of float16 or BFloat16.
12839
- Input tensor of shape :math:`(B, S, H)`, :math:`(B, N, S, D)`, or :math:`(B, S, N, D)`.
12840
- - **value** (TensorList) - The value tensor with data type of float16 or BFloat16.
12841
- Input tensor of shape :math:`(B, S, H)`, :math:`(B, N, S, D)`, or :math:`(B, S, N, D)`.
12842
- - **pse_shift** (Tensor) - The padding mask tensor with data type of float16 or BFloat16. Default: ``None``.
12843
- - **attn_mask** (Tensor) - The attention mask tensor with data type of int8, uint8 or bool. For each element, 0 indicates retention and 1 indicates discard.
12844
- Default: ``None``.
12845
- - **actual_seq_lengths** (Tensor) - Describe actual sequence length of each input with data type of int64.
12846
- Default: ``None``.
12847
- - **actual_seq_lengths_kv** (Tensor) - Describe actual sequence length of each input with data type of int64.
12848
- Default: ``None``.
12849
- - **dequant_scale1** (Tensor) - Quantization factor for inverse quantization after BMM1 with data type of uint64.
12850
- Default: ``None``.
12851
- - **quant_scale1** (Tensor) - Quantization factors for quantization before BMM2 with data type of float32.
12852
- Default: ``None``.
12853
- - **dequant_scale2** (Tensor) - Quantization factors for quantification after BMM2 with data type of uint64.
12854
- Default: ``None``.
12855
- - **quant_scale2** (Tensor) - Quantization factors for output quantization with data type of float32, BFloat16.
12856
- Default: ``None``.
12857
- - **quant_offset2** (Tensor) - Quantization offset for output quantization with data type of float32, BFloat16.
12858
- Default: ``None``.
12859
- - **antiquant_scale** (Tensor) - Inverse quantization factor with data type of float16, float32, BFloat16. Only support float16 when Q_S greater than or equal to 2.
12860
- Default: ``None``.
12861
- - **antiquant_offset** (Tensor) - Inverse quantization offset with data type of float16, float32, BFloat16. Only support float16 when Q_S greater than or equal to 2.
12862
- Default: ``None``.
12863
- - **block_table** (Tensor) - Block mapping table in KV cache for PageAttention.
12864
- Default: ``None``.
12865
- - **query_padding_size** (Tensor) - Whether each batch of data in the Query is right-aligned. If yes, the number of alignment times is provided. Reserved parameter, not supported yet.
12866
- Default: ``None``.
12867
- - **kv_padding_size** (Tensor) - Whether each batch of data in the Key/Value is right-aligned. If yes, the number of alignment times is provided. Valid only when Q_S is 1.
12868
- Default: ``None``.
12869
-
12870
- Outputs:
12871
- - **attention_out** (Tensor) - Input tensor, and the shape is :math:`(B, S, H)`, :math:`(B, N, S, D)`, or :math:`(B, S, N, D)`.
12872
- - **softmas_lse** (Tensor[Float32]) - Shape is `(B, N, Q_S, 1)`. The softmas_lse is calculated only if softmax_lse_flag is 1.
12873
-
12874
- Supported Platforms:
12875
- ``Ascend``
12876
-
12877
- Examples:
12878
- >>> from mindspore.ops.operations import _infer_ops as infer_ops
12879
- >>> from mindspore import Tensor
12880
- >>> import numpy as np
12881
- >>> B = 1
12882
- >>> N = 16
12883
- >>> S = 256
12884
- >>> D = 16
12885
- >>> query = Tensor(np.ones((B, N, S, D), dtype=np.float16))
12886
- >>> key = [Tensor(np.ones((B, N, S, D), dtype=np.float16))]
12887
- >>> value = [Tensor(np.ones((B, N, S, D), dtype=np.float16))]
12888
- >>> fias = infer_ops.FusedInferAttentionScore(num_heads=N, input_layout='BNSD')
12889
- >>> out = fias(query, key, value, None, None,
12890
- ... None, None, None, None, None,
12891
- ... None, None, None, None, None,
12892
- ... None, None)
12893
- >>> print(out[0].shape)
12894
- (1, 16, 256, 16)
12895
- """
12896
- __mindspore_signature__ = (
12897
- sig.make_sig('query'),
12898
- sig.make_sig('key'),
12899
- sig.make_sig('value'),
12900
- sig.make_sig('pse_shift', default=None),
12901
- sig.make_sig('attn_mask', default=None),
12902
- sig.make_sig('actual_seq_lengths', default=None),
12903
- sig.make_sig('actual_seq_lengths_kv', default=None),
12904
- sig.make_sig('dequant_scale1', default=None),
12905
- sig.make_sig('quant_scale1', default=None),
12906
- sig.make_sig('dequant_scale2', default=None),
12907
- sig.make_sig('quant_scale2', default=None),
12908
- sig.make_sig('quant_offset2', default=None),
12909
- sig.make_sig('antiquant_scale', default=None),
12910
- sig.make_sig('antiquant_offset', default=None),
12911
- sig.make_sig('block_table', default=None),
12912
- sig.make_sig('query_padding_size', default=None),
12913
- sig.make_sig('kv_padding_size', default=None),
12914
- )
12915
-
12916
- @prim_arg_register
12917
- def __init__(self, num_heads, scale_value=1.0, pre_tokens=2147483647, next_tokens=2147483647, input_layout='BSH', num_key_value_heads=0, sparse_mode=0, inner_precise=1, block_size=0, antiquant_mode=0, softmax_lse_flag=False):
12918
- self._set_prim_arg("num_heads", num_heads)
12919
- self._set_prim_arg("scale_value", scale_value)
12920
- self._set_prim_arg("pre_tokens", pre_tokens)
12921
- self._set_prim_arg("next_tokens", next_tokens)
12922
- self._set_prim_arg_with_handler("input_layout", input_layout, str_to_enum)
12923
- self._set_prim_arg("num_key_value_heads", num_key_value_heads)
12924
- self._set_prim_arg("sparse_mode", sparse_mode)
12925
- self._set_prim_arg("inner_precise", inner_precise)
12926
- self._set_prim_arg("block_size", block_size)
12927
- self._set_prim_arg("antiquant_mode", antiquant_mode)
12928
- self._set_prim_arg("softmax_lse_flag", softmax_lse_flag)
12929
-
12930
- def __call__(self, query, key, value, pse_shift=None, attn_mask=None, actual_seq_lengths=None, actual_seq_lengths_kv=None, dequant_scale1=None, quant_scale1=None, dequant_scale2=None, quant_scale2=None, quant_offset2=None, antiquant_scale=None, antiquant_offset=None, block_table=None, query_padding_size=None, kv_padding_size=None):
12931
- return super().__call__(query, key, value, pse_shift, attn_mask, actual_seq_lengths, actual_seq_lengths_kv, dequant_scale1, quant_scale1, dequant_scale2, quant_scale2, quant_offset2, antiquant_scale, antiquant_offset, block_table, query_padding_size, kv_padding_size, self.num_heads, self.scale_value, self.pre_tokens, self.next_tokens, self.input_layout, self.num_key_value_heads, self.sparse_mode, self.inner_precise, self.block_size, self.antiquant_mode, self.softmax_lse_flag)
12932
-
12933
-
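As a rough, unfused reference for the removed FusedInferAttentionScore, the core computation in BNSD layout is scaled dot-product attention; the sketch below ignores masks, quantization and PageAttention, and only illustrates where `scale_value` enters:

import numpy as np

B, N, S, D = 1, 16, 256, 16
q = np.ones((B, N, S, D), dtype=np.float32)
k = np.ones((B, N, S, D), dtype=np.float32)
v = np.ones((B, N, S, D), dtype=np.float32)
scale_value = 1.0 / (D ** 0.5)

scores = np.einsum('bnsd,bntd->bnst', q, k) * scale_value
scores -= scores.max(axis=-1, keepdims=True)          # numerical stability
weights = np.exp(scores)
weights /= weights.sum(axis=-1, keepdims=True)
out = np.einsum('bnst,bntd->bnsd', weights, v)
print(out.shape)  # (1, 16, 256, 16), same shape as in the docstring example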
12934
- class GroupedMatmul(Primitive):
12935
- r"""
12936
- .. code-block::
12937
-
12938
- prim = ops.GroupedMatmul(split_item, group_type)
12939
- out = prim(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list)
12940
-
12941
- is equivalent to
12942
-
12943
- .. code-block::
12944
-
12945
- ops.grouped_matmul(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list, split_item, group_type)
12946
-
12947
- Refer to :func:`mindspore.ops.grouped_matmul` for more details.
12948
- """
12949
- __mindspore_signature__ = (
12950
- sig.make_sig('x'),
12951
- sig.make_sig('weight'),
12952
- sig.make_sig('bias', default=None),
12953
- sig.make_sig('scale', default=None),
12954
- sig.make_sig('offset', default=None),
12955
- sig.make_sig('antiquant_scale', default=None),
12956
- sig.make_sig('antiquant_offset', default=None),
12957
- sig.make_sig('group_list', default=None),
12958
- )
12959
-
12960
- @prim_arg_register
12961
- def __init__(self, split_item=0, group_type=-1):
12962
- self._set_prim_arg("split_item", split_item)
12963
- self._set_prim_arg("group_type", group_type)
12964
-
12965
- def __call__(self, x, weight, bias=None, scale=None, offset=None, antiquant_scale=None, antiquant_offset=None, group_list=None):
12966
- return _convert_stub(pyboost_grouped_matmul(self, [x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list, self.split_item, self.group_type]))
12967
-
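A sketch of what a grouped matmul computes, under the assumption (for illustration only) that `group_list` holds cumulative row counts splitting `x` into consecutive groups, each multiplied by its own weight; refer to :func:`mindspore.ops.grouped_matmul` for the exact contract:

import numpy as np

x = np.random.rand(6, 4).astype(np.float32)             # 6 tokens, hidden dim 4
weights = [np.random.rand(4, 8).astype(np.float32) for _ in range(3)]
group_list = [2, 5, 6]                                   # assumed cumulative splits: rows 0-1, 2-4, 5

outputs, start = [], 0
for w, end in zip(weights, group_list):
    outputs.append(x[start:end] @ w)                     # per-group matmul
    start = end
y = np.concatenate(outputs, axis=0)
print(y.shape)  # (6, 8)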
12968
- class KVCacheScatterUpdate(Primitive):
12969
- r"""
12970
- .. code-block::
12971
-
12972
- prim = ops.KVCacheScatterUpdate()
12973
- out = prim(var, indices, updates, axis, reduce)
12974
-
12975
- is equivalent to
12976
-
12977
- .. code-block::
12978
-
12979
- ops.kv_cache_scatter_update(var, indices, updates, axis, reduce)
12980
-
12981
- Refer to :func:`mindspore.ops.kv_cache_scatter_update` for more details.
12982
- """
12983
- __mindspore_signature__ = (
12984
- sig.make_sig('var'),
12985
- sig.make_sig('indices'),
12986
- sig.make_sig('updates'),
12987
- sig.make_sig('axis'),
12988
- sig.make_sig('reduce', default='none'),
12989
- )
12990
-
12991
- @prim_arg_register
12992
- def __init__(self):
12993
- pass
12994
-
12995
- def __call__(self, var, indices, updates, axis, reduce='none'):
12996
- return super().__call__(var, indices, updates, axis, str_to_enum('KVCacheScatterUpdate', 'reduce', reduce))
12997
-
12998
-
12999
- kv_cache_scatter_update_op=KVCacheScatterUpdate()
13000
-
13001
-
13002
- class MoeFinalizeRouting(Primitive):
13003
- r"""
13004
- .. code-block::
13005
-
13006
- prim = ops.MoeFinalizeRouting()
13007
- out = prim(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
13008
-
13009
- is equivalent to
13010
-
13011
- .. code-block::
13012
-
13013
- ops.moe_finalize_routing(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
13014
-
13015
- Refer to :func:`mindspore.ops.moe_finalize_routing` for more details.
13016
- """
13017
- __mindspore_signature__ = (
13018
- sig.make_sig('expanded_x'),
13019
- sig.make_sig('x1'),
13020
- sig.make_sig('x2', default=None),
13021
- sig.make_sig('bias', default=None),
13022
- sig.make_sig('scales', default=None),
13023
- sig.make_sig('expanded_row_idx', default=None),
13024
- sig.make_sig('expanded_expert_idx', default=None),
13025
- )
13026
-
13027
- @prim_arg_register
13028
- def __init__(self):
13029
- pass
13030
-
13031
- def __call__(self, expanded_x, x1, x2=None, bias=None, scales=None, expanded_row_idx=None, expanded_expert_idx=None):
13032
- return _convert_stub(pyboost_moe_finalize_routing(self, [expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx]))
13033
-
13034
- moe_finalize_routing_op=MoeFinalizeRouting()
13035
-
13036
-
13037
- class QuantBatchMatmul(Primitive):
13038
- r"""
13039
- .. code-block::
13040
-
13041
- prim = ops.QuantBatchMatmul(transpose_x1, transpose_x2, dtype)
13042
- out = prim(x1, x2, scale, offset, bias)
13043
-
13044
- is equivalent to
13045
-
13046
- .. code-block::
13047
-
13048
- ops.quant_batch_matmul(x1, x2, scale, offset, bias, transpose_x1, transpose_x2, dtype)
13049
-
13050
- Refer to :func:`mindspore.ops.quant_batch_matmul` for more details.
13051
- """
13052
- __mindspore_signature__ = (
13053
- sig.make_sig('x1', dtype=sig.sig_dtype.T),
13054
- sig.make_sig('x2', dtype=sig.sig_dtype.T),
13055
- sig.make_sig('scale', dtype=sig.sig_dtype.T1),
13056
- sig.make_sig('offset', dtype=sig.sig_dtype.T2, default=None),
13057
- sig.make_sig('bias', dtype=sig.sig_dtype.T3, default=None),
13058
- )
13059
-
13060
- @prim_arg_register
13061
- def __init__(self, transpose_x1=False, transpose_x2=False, dtype=mstype.float16):
13062
- self._set_prim_arg("transpose_x1", transpose_x1)
13063
- self._set_prim_arg("transpose_x2", transpose_x2)
13064
- self._set_prim_arg_with_handler("dtype", dtype, dtype_to_type_id)
13065
-
13066
- def __call__(self, x1, x2, scale, offset=None, bias=None):
13067
- return _convert_stub(pyboost_quant_batch_matmul(self, [x1, x2, scale, offset, bias, self.transpose_x1, self.transpose_x2, self.dtype]))
13068
-
13069
- class QuantV2(Primitive):
13070
- r"""
13071
- Returns the quantized value of input x.
13072
-
13073
- If `sqrt_mode` is False:
13074
-
13075
- .. math::
13076
- y = round(scale * x + offset)
13077
-
13078
- If `sqrt_mode` is True:
13079
-
13080
- .. math::
13081
- y = round(scale * x * scale + offset)
13082
-
13083
- Inputs:
13084
- x (Tensor) : Input tensor.
13085
-            Its data type must be mindspore.float16, mindspore.float32 or mindspore.bfloat16.
13086
- scale (Tensor) : Scaling ratio tensor in quantization. Its type is the same as x.
13087
- Its shape in the last axis must equal to the shape of x in the last axis,
13088
- and shapes of other dimensions must be 1.
13089
- offset (Tensor) : Offset tensor in quantization. Its type is the same as x.
13090
- Its shape in the last axis must equal to the shape of x in the last axis,
13091
- and shapes of other dimensions must be 1.
13092
- sqrt_mode (bool) : Specifies whether to perform square root on `scale`. Only support: ``False``.
13093
- rounding_mode (str) : Specifies the way to round. Only support: "ROUND".
13094
- dst_type (Type) : Specifies the output type. Only support ``int8``.
13095
-
13096
- Returns:
13097
- Tensor, the quantized output tensor of type mindspore.int8. Its shape is the same as x.
13098
-
13099
- Raises:
13100
- TypeError: If input, scale or offset is not a Tensor.
13101
- ValueError: The shape of scale or offset in the last axis is different from the shape of x in the last axis.
13102
-
13103
- Supported Platforms:
13104
- ``Ascend``
13105
-
13106
- Examples:
13107
- >>> from mindspore import Tensor
13108
- >>> from mindspore.ops.operations import _infer_ops as infer_ops
13109
- >>> x = Tensor([100.0, 150.0], mstype.float32)
13110
- >>> scale = Tensor([80.0, 40.0], mstype.float32)
13111
- >>> offset = Tensor([0.0, 2.0], mstype.float32)
13112
- >>> quant = infer_ops.QuantV2()
13113
- >>> y = quant(x, scale, offset, False, "Round", mstype.int8)
13114
- [127 127]
13115
- """
13116
- __mindspore_signature__ = (
13117
- sig.make_sig('x'),
13118
- sig.make_sig('scale'),
13119
- sig.make_sig('offset'),
13120
- sig.make_sig('sqrt_mode', default=False),
13121
- sig.make_sig('rounding_mode', default='ROUND'),
13122
- sig.make_sig('dst_type', default=mstype.int8),
13123
- )
13124
-
13125
- @prim_arg_register
13126
- def __init__(self):
13127
- pass
13128
-
13129
- def __call__(self, x, scale, offset, sqrt_mode=False, rounding_mode='ROUND', dst_type=mstype.int8):
13130
- return _convert_stub(pyboost_quant_v2(self, [x, scale, offset, sqrt_mode, str_to_enum('QuantV2', 'rounding_mode', rounding_mode), dtype_to_type_id('QuantV2', 'dst_type', dst_type)]))
13131
-
13132
- quant_v2_op=QuantV2()
13133
-
13134
-
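The sqrt_mode=False formula in the removed QuantV2 docstring, y = round(scale * x + offset) saturated to int8, reproduces the docstring example; a minimal NumPy sketch:

import numpy as np

x = np.array([100.0, 150.0], dtype=np.float32)
scale = np.array([80.0, 40.0], dtype=np.float32)
offset = np.array([0.0, 2.0], dtype=np.float32)

y = np.clip(np.round(scale * x + offset), -128, 127).astype(np.int8)
print(y)  # [127 127], matching the docstring example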
13135
- class WeightQuantBatchMatmul(Primitive):
13136
- r"""
13137
- .. code-block::
13138
-
13139
- prim = ops.WeightQuantBatchMatmul(transpose_x, transpose_weight, antiquant_group_size)
13140
- out = prim(x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias)
13141
-
13142
- is equivalent to
13143
-
13144
- .. code-block::
13145
-
13146
- ops.weight_quant_batch_matmul(x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, transpose_x, transpose_weight, antiquant_group_size)
13147
-
13148
- Refer to :func:`mindspore.ops.weight_quant_batch_matmul` for more details.
13149
- """
13150
- __mindspore_signature__ = (
13151
- sig.make_sig('x'),
13152
- sig.make_sig('weight'),
13153
- sig.make_sig('antiquant_scale'),
13154
- sig.make_sig('antiquant_offset', default=None),
13155
- sig.make_sig('quant_scale', default=None),
13156
- sig.make_sig('quant_offset', default=None),
13157
- sig.make_sig('bias', default=None),
13158
- )
13159
-
13160
- @prim_arg_register
13161
- def __init__(self, transpose_x=False, transpose_weight=False, antiquant_group_size=0):
13162
- self._set_prim_arg("transpose_x", transpose_x)
13163
- self._set_prim_arg("transpose_weight", transpose_weight)
13164
- self._set_prim_arg("antiquant_group_size", antiquant_group_size)
13165
-
13166
- def __call__(self, x, weight, antiquant_scale, antiquant_offset=None, quant_scale=None, quant_offset=None, bias=None):
13167
- return _convert_stub(pyboost_weight_quant_batch_matmul(self, [x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, self.transpose_x, self.transpose_weight, self.antiquant_group_size]))
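A rough sketch of the weight-only pseudo-quantized matmul, assuming (for illustration) that the low-bit weight is anti-quantized as (weight + antiquant_offset) * antiquant_scale before the matmul; the fused Ascend kernel's exact numerics may differ:

import numpy as np

x = np.random.rand(2, 4).astype(np.float16)
weight = np.random.randint(-128, 127, size=(4, 8), dtype=np.int8)
antiquant_scale = np.full((8,), 0.01, dtype=np.float16)
antiquant_offset = np.zeros((8,), dtype=np.float16)

# Assumed anti-quantization step (illustrative, not the kernel's exact math).
w_fp = (weight.astype(np.float16) + antiquant_offset) * antiquant_scale
y = x @ w_fp
print(y.shape)  # (2, 8)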