mindspore-2.7.0-cp310-cp310-win_amd64.whl → mindspore-2.7.0rc1-cp310-cp310-win_amd64.whl

This diff shows the content of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: the registry has flagged this version of mindspore; see the advisory on the package page for details.

Files changed (196)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +1 -1
  3. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  6. mindspore/_checkparam.py +2 -2
  7. mindspore/_extends/builtin_operations.py +3 -3
  8. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  9. mindspore/_extends/parse/__init__.py +3 -3
  10. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -0
  11. mindspore/_extends/parse/parser.py +22 -28
  12. mindspore/_extends/parse/standard_method.py +1 -15
  13. mindspore/_extends/pijit/pijit_func_white_list.py +5 -2
  14. mindspore/_extends/remote/kernel_build_server_ascend.py +75 -0
  15. mindspore/amp.py +18 -0
  16. mindspore/avcodec-59.dll +0 -0
  17. mindspore/avdevice-59.dll +0 -0
  18. mindspore/avfilter-8.dll +0 -0
  19. mindspore/avformat-59.dll +0 -0
  20. mindspore/avutil-57.dll +0 -0
  21. mindspore/common/__init__.py +12 -18
  22. mindspore/common/_tensor_cpp_method.py +1 -1
  23. mindspore/common/_tensor_docs.py +38 -102
  24. mindspore/common/_utils.py +1 -9
  25. mindspore/common/api.py +106 -155
  26. mindspore/common/{dynamic_shape/auto_dynamic_shape.py → auto_dynamic_shape.py} +23 -17
  27. mindspore/common/dtype.py +57 -98
  28. mindspore/common/dump.py +1 -1
  29. mindspore/common/file_system.py +9 -59
  30. mindspore/common/hook_handle.py +3 -22
  31. mindspore/common/np_dtype.py +3 -3
  32. mindspore/common/parameter.py +20 -4
  33. mindspore/common/recompute.py +4 -2
  34. mindspore/common/tensor.py +52 -38
  35. mindspore/communication/_hccl_management.py +297 -0
  36. mindspore/context.py +21 -15
  37. mindspore/dataset/__init__.py +1 -1
  38. mindspore/dataset/audio/transforms.py +1 -1
  39. mindspore/dataset/core/config.py +1 -35
  40. mindspore/dataset/engine/datasets.py +315 -330
  41. mindspore/dataset/engine/datasets_user_defined.py +22 -38
  42. mindspore/dataset/transforms/c_transforms.py +2 -2
  43. mindspore/dataset/transforms/transforms.py +3 -3
  44. mindspore/dataset/vision/__init__.py +1 -1
  45. mindspore/dataset/vision/py_transforms.py +8 -8
  46. mindspore/dataset/vision/transforms.py +5 -17
  47. mindspore/dataset/vision/utils.py +21 -632
  48. mindspore/device_context/ascend/op_tuning.py +1 -35
  49. mindspore/dnnl.dll +0 -0
  50. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -3
  51. mindspore/include/api/cell.h +4 -28
  52. mindspore/include/api/cfg.h +7 -24
  53. mindspore/include/api/context.h +0 -1
  54. mindspore/include/api/delegate.h +2 -0
  55. mindspore/include/api/dual_abi_helper.h +19 -100
  56. mindspore/include/api/graph.h +1 -14
  57. mindspore/include/api/kernel.h +3 -16
  58. mindspore/include/api/kernel_api.h +1 -9
  59. mindspore/include/api/metrics/accuracy.h +0 -9
  60. mindspore/include/api/model.h +1 -5
  61. mindspore/include/api/model_group.h +0 -4
  62. mindspore/include/api/model_parallel_runner.h +0 -2
  63. mindspore/include/api/status.h +10 -48
  64. mindspore/include/api/types.h +1 -6
  65. mindspore/include/dataset/constants.h +0 -9
  66. mindspore/jpeg62.dll +0 -0
  67. mindspore/mindrecord/tools/cifar10.py +2 -3
  68. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -5
  69. mindspore/mindspore_backend_common.dll +0 -0
  70. mindspore/mindspore_backend_manager.dll +0 -0
  71. mindspore/mindspore_common.dll +0 -0
  72. mindspore/mindspore_core.dll +0 -0
  73. mindspore/mindspore_cpu_res_manager.dll +0 -0
  74. mindspore/mindspore_dump.dll +0 -0
  75. mindspore/mindspore_frontend.dll +0 -0
  76. mindspore/mindspore_glog.dll +0 -0
  77. mindspore/mindspore_memory_pool.dll +0 -0
  78. mindspore/mindspore_ms_backend.dll +0 -0
  79. mindspore/mindspore_ops.dll +0 -0
  80. mindspore/mindspore_ops_host.dll +0 -0
  81. mindspore/mindspore_ops_kernel_common.dll +0 -0
  82. mindspore/mindspore_profiler.dll +0 -0
  83. mindspore/mindspore_pyboost.dll +0 -0
  84. mindspore/mindspore_pynative.dll +0 -0
  85. mindspore/mindspore_res_manager.dll +0 -0
  86. mindspore/mindspore_runtime_pipeline.dll +0 -0
  87. mindspore/mint/distributed/__init__.py +0 -4
  88. mindspore/mint/distributed/distributed.py +14 -217
  89. mindspore/mint/nn/layer/_functions.py +2 -1
  90. mindspore/mint/nn/layer/conv.py +6 -6
  91. mindspore/mint/nn/layer/normalization.py +3 -3
  92. mindspore/nn/cell.py +174 -216
  93. mindspore/nn/layer/activation.py +2 -4
  94. mindspore/nn/layer/basic.py +13 -7
  95. mindspore/nn/layer/image.py +1 -1
  96. mindspore/nn/optim/adam.py +3 -1
  97. mindspore/nn/optim/lamb.py +3 -1
  98. mindspore/nn/optim/tft_wrapper.py +3 -2
  99. mindspore/nn/probability/distribution/_utils/utils.py +2 -2
  100. mindspore/nn/wrap/cell_wrapper.py +5 -39
  101. mindspore/nn/wrap/grad_reducer.py +15 -0
  102. mindspore/numpy/array_creations.py +2 -2
  103. mindspore/numpy/utils_const.py +1 -1
  104. mindspore/opencv_core452.dll +0 -0
  105. mindspore/opencv_imgcodecs452.dll +0 -0
  106. mindspore/opencv_imgproc452.dll +0 -0
  107. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  108. mindspore/ops/_op_impl/cpu/__init__.py +0 -1
  109. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +2 -12
  110. mindspore/ops/auto_generate/gen_extend_func.py +4 -4
  111. mindspore/ops/auto_generate/gen_ops_def.py +16 -290
  112. mindspore/ops/auto_generate/gen_ops_prim.py +76 -563
  113. mindspore/ops/composite/base.py +1 -1
  114. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  115. mindspore/ops/function/__init__.py +0 -1
  116. mindspore/ops/function/array_func.py +6 -10
  117. mindspore/ops/function/debug_func.py +2 -4
  118. mindspore/ops/function/grad/grad_func.py +12 -4
  119. mindspore/ops/function/math_func.py +32 -44
  120. mindspore/ops/function/nn_func.py +20 -18
  121. mindspore/ops/functional.py +1 -2
  122. mindspore/ops/functional_overload.py +12 -23
  123. mindspore/ops/operations/_inner_ops.py +12 -11
  124. mindspore/ops/operations/array_ops.py +50 -4
  125. mindspore/ops/operations/comm_ops.py +15 -1
  126. mindspore/ops/operations/custom_ops.py +4 -10
  127. mindspore/ops/operations/debug_ops.py +6 -6
  128. mindspore/ops/operations/manually_defined/ops_def.py +12 -12
  129. mindspore/ops/operations/math_ops.py +5 -5
  130. mindspore/ops/operations/nn_ops.py +1 -1
  131. mindspore/ops/primitive.py +10 -3
  132. mindspore/ops/tensor_method.py +7 -16
  133. mindspore/ops_generate/pyboost/gen_pyboost_func.py +16 -0
  134. mindspore/parallel/_auto_parallel_context.py +15 -5
  135. mindspore/parallel/_parallel_serialization.py +2 -3
  136. mindspore/parallel/_ps_context.py +2 -2
  137. mindspore/parallel/_transformer/transformer.py +4 -4
  138. mindspore/parallel/_utils.py +11 -5
  139. mindspore/parallel/auto_parallel.py +9 -23
  140. mindspore/parallel/checkpoint_transform.py +0 -2
  141. mindspore/parallel/cluster/process_entity/_api.py +1 -4
  142. mindspore/parallel/cluster/run.py +3 -5
  143. mindspore/parallel/function/reshard_func.py +5 -6
  144. mindspore/parallel/nn/parallel_cell_wrapper.py +3 -40
  145. mindspore/parallel/nn/parallel_grad_reducer.py +8 -0
  146. mindspore/parallel/shard.py +21 -7
  147. mindspore/parallel/transform_safetensors.py +4 -10
  148. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +9 -10
  149. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +1 -1
  150. mindspore/profiler/common/msprof_cmd_tool.py +2 -2
  151. mindspore/profiler/common/path_manager.py +0 -9
  152. mindspore/profiler/common/profiler_context.py +2 -25
  153. mindspore/profiler/common/profiler_meta_data.py +0 -1
  154. mindspore/profiler/common/profiler_op_analyse.py +6 -10
  155. mindspore/{ops/_op_impl/cpu/joinedstr_op.py → profiler/common/validator/__init__.py} +1 -15
  156. mindspore/profiler/common/validator/validate_path.py +84 -0
  157. mindspore/profiler/dynamic_profiler.py +46 -91
  158. mindspore/profiler/envprofiler.py +5 -30
  159. mindspore/profiler/experimental_config.py +1 -16
  160. mindspore/profiler/platform/cpu_profiler.py +4 -10
  161. mindspore/profiler/platform/npu_profiler.py +1 -1
  162. mindspore/profiler/profiler.py +145 -193
  163. mindspore/profiler/profiler_action_controller.py +1 -1
  164. mindspore/profiler/profiler_interface.py +2 -2
  165. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  166. mindspore/runtime/__init__.py +4 -6
  167. mindspore/runtime/executor.py +0 -27
  168. mindspore/runtime/memory.py +0 -1
  169. mindspore/runtime/thread_bind_core.py +1 -1
  170. mindspore/swresample-4.dll +0 -0
  171. mindspore/swscale-6.dll +0 -0
  172. mindspore/tinyxml2.dll +0 -0
  173. mindspore/train/_utils.py +3 -3
  174. mindspore/train/amp.py +3 -0
  175. mindspore/train/callback/_callback.py +1 -2
  176. mindspore/train/callback/_checkpoint.py +8 -1
  177. mindspore/train/callback/_flops_collector.py +6 -10
  178. mindspore/train/callback/_train_fault_tolerance.py +7 -3
  179. mindspore/train/data_sink.py +4 -4
  180. mindspore/train/dataset_helper.py +5 -5
  181. mindspore/train/model.py +20 -4
  182. mindspore/train/serialization.py +15 -35
  183. mindspore/train/train_thor/model_thor.py +2 -2
  184. mindspore/turbojpeg.dll +0 -0
  185. mindspore/utils/hooks.py +81 -0
  186. mindspore/utils/utils.py +8 -8
  187. mindspore/version.py +1 -1
  188. {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +1 -1
  189. {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +193 -192
  190. mindspore/_extends/parallel_compile/akg_compiler/custom.py +0 -1109
  191. mindspore/common/dynamic_shape/__init__.py +0 -0
  192. mindspore/common/dynamic_shape/enable_dynamic.py +0 -197
  193. /mindspore/common/{dynamic_shape/_auto_dynamic.py → _auto_dynamic.py} +0 -0
  194. {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
  195. {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
  196. {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
@@ -119,8 +119,6 @@ from mindspore._c_expression import pyboost_copy
  from mindspore._c_expression import pyboost_cosh
  from mindspore._c_expression import pyboost_cos
  from mindspore._c_expression import pyboost_count_nonzero
- from mindspore._c_expression import pyboost_cross_entropy_loss_grad
- from mindspore._c_expression import pyboost_cross_entropy_loss
  from mindspore._c_expression import pyboost_cross
  from mindspore._c_expression import pyboost_cummax
  from mindspore._c_expression import pyboost_cummin_ext
@@ -129,7 +127,6 @@ from mindspore._c_expression import pyboost_dense
  from mindspore._c_expression import pyboost_diagonal_view
  from mindspore._c_expression import pyboost_diag_ext
  from mindspore._c_expression import pyboost_dist_comm_all_gather_into_tensor
- from mindspore._c_expression import pyboost_dist_comm_all_gather_into_tensor_uneven
  from mindspore._c_expression import pyboost_dist_comm_all_gather
  from mindspore._c_expression import pyboost_dist_comm_all_reduce
  from mindspore._c_expression import pyboost_dist_comm_all_to_all_v
@@ -144,7 +141,6 @@ from mindspore._c_expression import pyboost_dist_comm_isend
  from mindspore._c_expression import pyboost_dist_comm_reduce
  from mindspore._c_expression import pyboost_dist_comm_reduce_scatter
  from mindspore._c_expression import pyboost_dist_comm_reduce_scatter_tensor
- from mindspore._c_expression import pyboost_dist_comm_reduce_scatter_tensor_uneven
  from mindspore._c_expression import pyboost_dist_comm_scatter
  from mindspore._c_expression import pyboost_dist_comm_scatter_tensor
  from mindspore._c_expression import pyboost_divmods
@@ -235,8 +231,6 @@ from mindspore._c_expression import pyboost_inner_comm_reduce_scatter
  from mindspore._c_expression import pyboost_inplace_addmm
  from mindspore._c_expression import pyboost_inplace_adds_ext
  from mindspore._c_expression import pyboost_inplace_add_ext
- from mindspore._c_expression import pyboost_inplace_bernoulli_scalar
- from mindspore._c_expression import pyboost_inplace_bernoulli_tensor
  from mindspore._c_expression import pyboost_inplace_clamp_scalar
  from mindspore._c_expression import pyboost_inplace_clamp_tensor
  from mindspore._c_expression import pyboost_inplace_copy
@@ -267,8 +261,6 @@ from mindspore._c_expression import pyboost_inplace_normal
  from mindspore._c_expression import pyboost_inplace_put
  from mindspore._c_expression import pyboost_inplace_random
  from mindspore._c_expression import pyboost_inplace_relu
- from mindspore._c_expression import pyboost_inplace_remainder_tensor_scalar
- from mindspore._c_expression import pyboost_inplace_remainder_tensor_tensor
  from mindspore._c_expression import pyboost_inplace_scatter_add
  from mindspore._c_expression import pyboost_inplace_scatter_src
  from mindspore._c_expression import pyboost_inplace_scatter_src_reduce
@@ -319,7 +311,6 @@ from mindspore._c_expression import pyboost_log_softmax_ext
  from mindspore._c_expression import pyboost_log_softmax_grad
  from mindspore._c_expression import pyboost_log_softmax
  from mindspore._c_expression import pyboost_masked_fill
- from mindspore._c_expression import pyboost_masked_scatter
  from mindspore._c_expression import pyboost_masked_select_grad
  from mindspore._c_expression import pyboost_masked_select
  from mindspore._c_expression import pyboost_matmul_ext
@@ -343,7 +334,6 @@ from mindspore._c_expression import pyboost_min_dim
  from mindspore._c_expression import pyboost_min
  from mindspore._c_expression import pyboost_mish_ext
  from mindspore._c_expression import pyboost_mish_grad_ext
- from mindspore._c_expression import pyboost_mla
  from mindspore._c_expression import pyboost_mm_ext
  from mindspore._c_expression import pyboost_moe_distribute_combine
  from mindspore._c_expression import pyboost_moe_distribute_dispatch
@@ -429,7 +419,6 @@ from mindspore._c_expression import pyboost_replication_pad_3d
  from mindspore._c_expression import pyboost_reshape_and_cache
  from mindspore._c_expression import pyboost_reshape
  from mindspore._c_expression import pyboost_reverse_v2
- from mindspore._c_expression import pyboost_ring_attention_update
  from mindspore._c_expression import pyboost_rms_norm_grad
  from mindspore._c_expression import pyboost_rms_norm
  from mindspore._c_expression import pyboost_roll
@@ -555,7 +544,6 @@ from mindspore._c_expression import pyboost_weight_quant_batch_matmul
  from mindspore._c_expression import pyboost_any_ext
  from mindspore._c_expression import pyboost_any
  from mindspore._c_expression import pyboost_einsum_ext
- from mindspore._c_expression import pyboost_func_dropout_ext
  from mindspore._c_expression import pyboost_func_max_pool2d
  from mindspore._c_expression import pyboost_gmm_backward_fusion
  from mindspore._c_expression import pyboost_gmm_backward
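
Note: every `pyboost_*` name in the hunks above is a C++ kernel binding exposed by `mindspore._c_expression`, and each generated primitive in this file forwards to its binding the same way. The pattern, lifted from the class bodies shown further down (the `cos` names are just one concrete instance):

    from mindspore._c_expression import pyboost_cos

    # Inside the generated Cos primitive, __call__ reduces to:
    #     res = pyboost_cos(self, [input])
    # i.e. the binding receives the primitive instance plus a flat list of
    # its arguments; removing an import therefore removes the op's only
    # dispatch path.
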
@@ -5651,96 +5639,6 @@ class CountNonZero(Primitive):
  count_nonzero_op=CountNonZero()


- class CrossEntropyLossGrad(Primitive):
-     r"""
-     .. code-block::
-
-         prim = ops.CrossEntropyLossGrad()
-         out = prim(grad_loss, log_prob, target, weight, grad_zloss, lse_for_zloss, reduction, ignore_index, label_smoothing, lse_square_scale_for_zloss)
-
-     is equivalent to
-
-     .. code-block::
-
-         ops.cross_entropy_loss_grad(grad_loss, log_prob, target, weight, grad_zloss, lse_for_zloss, reduction, ignore_index, label_smoothing, lse_square_scale_for_zloss)
-
-     Refer to :func:`mindspore.ops.cross_entropy_loss_grad` for more details.
-     """
-     __mindspore_signature__ = (
-         sig.make_sig('grad_loss'),
-         sig.make_sig('log_prob'),
-         sig.make_sig('target'),
-         sig.make_sig('weight', default=None),
-         sig.make_sig('grad_zloss', default=None),
-         sig.make_sig('lse_for_zloss', default=None),
-         sig.make_sig('reduction', default='mean'),
-         sig.make_sig('ignore_index', default=-100),
-         sig.make_sig('label_smoothing', default=0.0),
-         sig.make_sig('lse_square_scale_for_zloss', default=0.0),
-     )
-
-     @prim_arg_register
-     def __init__(self):
-         pass
-
-     def __call__(self, grad_loss, log_prob, target, weight=None, grad_zloss=None, lse_for_zloss=None, reduction='mean', ignore_index=-100, label_smoothing=0.0, lse_square_scale_for_zloss=0.0):
-         # Add for jit context.
-         if jit_context() and jit_context().compiled:
-             return jit_context().default_output()
-         res = pyboost_cross_entropy_loss_grad(self, [grad_loss, log_prob, target, weight, grad_zloss, lse_for_zloss, str_to_enum('CrossEntropyLossGrad', 'reduction', reduction), ignore_index, label_smoothing, lse_square_scale_for_zloss])
-         # Add for jit context.
-         if jit_context():
-             return jit_context().run_op(self, res, grad_loss, log_prob, target, weight, grad_zloss, lse_for_zloss, str_to_enum('CrossEntropyLossGrad', 'reduction', reduction), ignore_index, label_smoothing, lse_square_scale_for_zloss)
-         return res
-
-
- cross_entropy_loss_grad_op=CrossEntropyLossGrad()
-
-
- class CrossEntropyLoss(Primitive):
-     r"""
-     .. code-block::
-
-         prim = ops.CrossEntropyLoss()
-         out = prim(input, target, weight, reduction, ignore_index, label_smoothing, lse_square_scale_for_zloss, return_zloss)
-
-     is equivalent to
-
-     .. code-block::
-
-         ops.cross_entropy_loss(input, target, weight, reduction, ignore_index, label_smoothing, lse_square_scale_for_zloss, return_zloss)
-
-     Refer to :func:`mindspore.ops.cross_entropy_loss` for more details.
-     """
-     __mindspore_signature__ = (
-         sig.make_sig('input'),
-         sig.make_sig('target'),
-         sig.make_sig('weight', default=None),
-         sig.make_sig('reduction', default='mean'),
-         sig.make_sig('ignore_index', default=-100),
-         sig.make_sig('label_smoothing', default=0.0),
-         sig.make_sig('lse_square_scale_for_zloss', default=0.0),
-         sig.make_sig('return_zloss', default=False),
-     )
-
-     @prim_arg_register
-     def __init__(self):
-         pass
-
-     def __call__(self, input, target, weight=None, reduction='mean', ignore_index=-100, label_smoothing=0.0, lse_square_scale_for_zloss=0.0, return_zloss=False):
-         # Add for jit context.
-         if jit_context() and jit_context().compiled:
-             return jit_context().default_output()
-         res = pyboost_cross_entropy_loss(self, [input, target, weight, str_to_enum('CrossEntropyLoss', 'reduction', reduction), ignore_index, label_smoothing, lse_square_scale_for_zloss, return_zloss])
-         # Add for jit context.
-         if jit_context():
-             return jit_context().run_op(self, res, input, target, weight, str_to_enum('CrossEntropyLoss', 'reduction', reduction), ignore_index, label_smoothing, lse_square_scale_for_zloss, return_zloss)
-         return res
-
-
- cross_entropy_loss_op=CrossEntropyLoss()
-
-
  class Cross(Primitive):
      r"""
      Returns the cross product of vectors in dimension `dim` of input and other.
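
Note: the two classes removed above state a primitive/functional equivalence in their docstrings. A minimal sketch of the documented 2.7.0 call pattern (placeholder tensors; only meaningful on a build that still ships these ops):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    logits = Tensor(np.random.randn(4, 10).astype(np.float32))
    target = Tensor(np.array([0, 1, 2, 3], dtype=np.int64))

    prim = ops.CrossEntropyLoss()
    out = prim(logits, target)  # reduction='mean', ignore_index=-100 defaults
    # documented as equivalent to the functional form:
    # out = ops.cross_entropy_loss(logits, target)
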
@@ -6299,28 +6197,6 @@ class DistCommAllGatherIntoTensor(Primitive):
  dist_comm_all_gather_into_tensor_op=DistCommAllGatherIntoTensor()


- class DistCommAllGatherIntoTensorUneven(Primitive):
-     r"""
-
-     """
-     @prim_arg_register
-     def __init__(self):
-         pass
-
-     def __call__(self, other, input, output_split_sizes, rank_size, group):
-         # Add for jit context.
-         if jit_context() and jit_context().compiled:
-             return jit_context().default_output()
-         res = pyboost_dist_comm_all_gather_into_tensor_uneven(self, [other, input, output_split_sizes, rank_size, group])
-         # Add for jit context.
-         if jit_context():
-             return jit_context().run_op(self, res, other, input, output_split_sizes, rank_size, group)
-         return res
-
-
- dist_comm_all_gather_into_tensor_uneven_op=DistCommAllGatherIntoTensorUneven()
-
-
  class DistCommAllGather(Primitive):
      r"""

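Note: `DistCommAllGatherIntoTensorUneven` shipped with an empty docstring, so the call shape below comes from the removed `__call__` signature alone, and the per-argument comments are assumptions based on the names (an all-gather whose per-rank chunks differ in size):

    from mindspore.ops.auto_generate.gen_ops_prim import (
        dist_comm_all_gather_into_tensor_uneven_op)

    # Requires an initialized communication group; arguments by position:
    # dist_comm_all_gather_into_tensor_uneven_op(
    #     other,               # preallocated output tensor, written in place
    #     input,               # this rank's local shard
    #     output_split_sizes,  # per-rank chunk sizes of the gathered output
    #     rank_size,           # number of ranks in the group
    #     group)               # communication group name
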
@@ -6642,28 +6518,6 @@ class DistCommReduceScatterTensor(Primitive):
  dist_comm_reduce_scatter_tensor_op=DistCommReduceScatterTensor()


- class DistCommReduceScatterTensorUneven(Primitive):
-     r"""
-
-     """
-     @prim_arg_register
-     def __init__(self):
-         pass
-
-     def __call__(self, other, input, input_split_size, rank_size, op_type, group):
-         # Add for jit context.
-         if jit_context() and jit_context().compiled:
-             return jit_context().default_output()
-         res = pyboost_dist_comm_reduce_scatter_tensor_uneven(self, [other, input, input_split_size, rank_size, op_type, group])
-         # Add for jit context.
-         if jit_context():
-             return jit_context().run_op(self, res, other, input, input_split_size, rank_size, op_type, group)
-         return res
-
-
- dist_comm_reduce_scatter_tensor_uneven_op=DistCommReduceScatterTensorUneven()
-
-
  class DistCommScatter(Primitive):
      r"""

@@ -6807,7 +6661,7 @@ class Div(Primitive):
          - One of the two inputs must be a Tensor, when the two inputs have different shapes,
            they must be able to broadcast to a common shape.
          - The two inputs can not be bool type at the same time,
-           [True, Tensor(True), Tensor(np.array([True]))] are all considered bool type.
+           [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
          - The two inputs comply with the implicit type conversion rules to make the data types
            consistent.

@@ -6815,11 +6669,11 @@ class Div(Primitive):
          - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
            a bool or a tensor whose data type is
            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-           `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+           `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
          - **y** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or
            a bool or a tensor whose data type is
            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-           `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+           `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.

      Outputs:
          Tensor, the shape is the same as the one of the input `x` , `y` after broadcasting,
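
Note: the `bool` → `bool_` edits here and in the hunks below fix the dtype's actual name: MindSpore exposes its boolean dtype as `mindspore.bool_`, mirroring NumPy's `np.bool_`. All three values the note above lists count as "bool type" in that sense:

    import numpy as np
    import mindspore
    from mindspore import Tensor

    a = True                                       # Python bool
    b = Tensor(True, mindspore.bool_)              # 0-d bool tensor
    c = Tensor(np.array([True]), mindspore.bool_)  # 1-element bool tensor
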
@@ -8731,7 +8585,7 @@ class FillScalar(Primitive):
          fill_value (number.Number): Value to fill the returned tensor. Complex numbers are not supported for now.

      Keyword Args:
-         dtype (mindspore.dtype): The specified type of output tensor. `bool` and `number` are supported, for
+         dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for
              details, please refer to :class:`mindspore.dtype` . Default: ``None`` .

      Returns:
@@ -8778,7 +8632,7 @@ class FillTensor(Primitive):
          scalar Tensor or 1-D Tensor with shape of [1].

      Keyword Args:
-         dtype (mindspore.dtype): The specified type of output tensor. `bool` and `number` are supported, for
+         dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for
              details, please refer to :class:`mindspore.dtype` . Default: ``None`` .

      Returns:
@@ -11876,86 +11730,6 @@ class InplaceAddExt(Primitive):
  inplace_add_ext_op=InplaceAddExt()


- class InplaceBernoulliScalar(Primitive):
-     r"""
-     .. code-block::
-
-         prim = ops.InplaceBernoulliScalar()
-         out = prim(input, p, seed, offset)
-
-     is equivalent to
-
-     .. code-block::
-
-         ops.inplace_bernoulli_scalar(input, p, seed, offset)
-
-     Refer to :func:`mindspore.ops.inplace_bernoulli_scalar` for more details.
-     """
-     __mindspore_signature__ = (
-         sig.make_sig('input', sig.sig_rw.RW_WRITE),
-         sig.make_sig('p'),
-         sig.make_sig('seed'),
-         sig.make_sig('offset'),
-     )
-
-     @prim_arg_register
-     def __init__(self):
-         self.add_prim_attr("side_effect_mem", True)
-
-     def __call__(self, input, p, seed, offset):
-         # Add for jit context.
-         if jit_context() and jit_context().compiled:
-             return jit_context().default_output()
-         res = pyboost_inplace_bernoulli_scalar(self, [input, p, seed, offset])
-         # Add for jit context.
-         if jit_context():
-             return jit_context().run_op(self, res, input, p, seed, offset)
-         return res
-
-
- inplace_bernoulli_scalar_op=InplaceBernoulliScalar()
-
-
- class InplaceBernoulliTensor(Primitive):
-     r"""
-     .. code-block::
-
-         prim = ops.InplaceBernoulliTensor()
-         out = prim(input, p, seed, offset)
-
-     is equivalent to
-
-     .. code-block::
-
-         ops.inplace_bernoulli_tensor(input, p, seed, offset)
-
-     Refer to :func:`mindspore.ops.inplace_bernoulli_tensor` for more details.
-     """
-     __mindspore_signature__ = (
-         sig.make_sig('input', sig.sig_rw.RW_WRITE),
-         sig.make_sig('p'),
-         sig.make_sig('seed'),
-         sig.make_sig('offset'),
-     )
-
-     @prim_arg_register
-     def __init__(self):
-         self.add_prim_attr("side_effect_mem", True)
-
-     def __call__(self, input, p, seed, offset):
-         # Add for jit context.
-         if jit_context() and jit_context().compiled:
-             return jit_context().default_output()
-         res = pyboost_inplace_bernoulli_tensor(self, [input, p, seed, offset])
-         # Add for jit context.
-         if jit_context():
-             return jit_context().run_op(self, res, input, p, seed, offset)
-         return res
-
-
- inplace_bernoulli_tensor_op=InplaceBernoulliTensor()
-
-
  class InplaceClampScalar(Primitive):
      r"""
      .. code-block::
@@ -12039,34 +11813,33 @@ class InplaceCopy(Primitive):
      .. code-block::

          prim = ops.InplaceCopy()
-         out = prim(input, src, non_blocking)
+         out = prim(input, src)

      is equivalent to

      .. code-block::

-         ops.inplace_copy(input, src, non_blocking)
+         ops.inplace_copy(input, src)

      Refer to :func:`mindspore.ops.inplace_copy` for more details.
      """
      __mindspore_signature__ = (
          sig.make_sig('input', sig.sig_rw.RW_WRITE),
          sig.make_sig('src'),
-         sig.make_sig('non_blocking', default=False),
      )

      @prim_arg_register
      def __init__(self):
          self.add_prim_attr("side_effect_mem", True)

-     def __call__(self, input, src, non_blocking=False):
+     def __call__(self, input, src):
          # Add for jit context.
          if jit_context() and jit_context().compiled:
              return jit_context().default_output()
-         res = pyboost_inplace_copy(self, [input, src, non_blocking])
+         res = pyboost_inplace_copy(self, [input, src])
          # Add for jit context.
          if jit_context():
-             return jit_context().run_op(self, res, input, src, non_blocking)
+             return jit_context().run_op(self, res, input, src)
          return res

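Note: the rc1 side of this hunk drops the `non_blocking` parameter from `InplaceCopy` end to end (docstring, signature, and pyboost call). A sketch of the difference, assuming the op is reachable as `ops.InplaceCopy` the way its docstring shows:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.zeros(3), mindspore.float32)
    src = Tensor(np.ones(3), mindspore.float32)

    prim = ops.InplaceCopy()
    prim(x, src)                       # valid in both 2.7.0 and 2.7.0rc1
    # prim(x, src, non_blocking=True)  # accepted by 2.7.0 only
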
@@ -13055,60 +12828,6 @@ class InplaceReLU(Primitive):
  inplace_relu_op=InplaceReLU()


- class InplaceRemainderTensorScalar(Primitive):
-     r"""
-
-     """
-     __mindspore_signature__ = (
-         sig.make_sig('input', sig.sig_rw.RW_WRITE),
-         sig.make_sig('other'),
-     )
-
-     @prim_arg_register
-     def __init__(self):
-         self.add_prim_attr("side_effect_mem", True)
-
-     def __call__(self, input, other):
-         # Add for jit context.
-         if jit_context() and jit_context().compiled:
-             return jit_context().default_output()
-         res = pyboost_inplace_remainder_tensor_scalar(self, [input, other])
-         # Add for jit context.
-         if jit_context():
-             return jit_context().run_op(self, res, input, other)
-         return res
-
-
- inplace_remainder_tensor_scalar_op=InplaceRemainderTensorScalar()
-
-
- class InplaceRemainderTensorTensor(Primitive):
-     r"""
-
-     """
-     __mindspore_signature__ = (
-         sig.make_sig('input', sig.sig_rw.RW_WRITE),
-         sig.make_sig('other'),
-     )
-
-     @prim_arg_register
-     def __init__(self):
-         self.add_prim_attr("side_effect_mem", True)
-
-     def __call__(self, input, other):
-         # Add for jit context.
-         if jit_context() and jit_context().compiled:
-             return jit_context().default_output()
-         res = pyboost_inplace_remainder_tensor_tensor(self, [input, other])
-         # Add for jit context.
-         if jit_context():
-             return jit_context().run_op(self, res, input, other)
-         return res
-
-
- inplace_remainder_tensor_tensor_op=InplaceRemainderTensorTensor()
-
-
  class InplaceScatterAdd(Primitive):
      r"""
      .. code-block::
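
Note: both removed remainder primitives declare `input` as `sig_rw.RW_WRITE` and set `side_effect_mem`, which marks them as mutating `input` rather than allocating a result. The in-place `%` reading below is inferred from the signature, not from any documented contract (2.7.0 only):

    import numpy as np
    import mindspore
    from mindspore import Tensor
    from mindspore.ops.auto_generate.gen_ops_prim import (
        inplace_remainder_tensor_scalar_op)

    x = Tensor(np.array([5.0, 6.0, 7.0]), mindspore.float32)
    inplace_remainder_tensor_scalar_op(x, 3)  # x is updated in place
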
@@ -14029,40 +13748,6 @@ class Kthvalue(Primitive):
  kthvalue_op=Kthvalue()


- class KvScaleCache(Primitive):
-     r"""
-     .. code-block::
-
-         prim = ops.KvScaleCache()
-         out = prim(key_scale, value_scale, key_value_scale_cache, batch_valid_length, cache_mode)
-
-     is equivalent to
-
-     .. code-block::
-
-         ops.kv_scale_cache(key_scale, value_scale, key_value_scale_cache, batch_valid_length, cache_mode)
-
-     Refer to :func:`mindspore.ops.kv_scale_cache` for more details.
-     """
-     __mindspore_signature__ = (
-         sig.make_sig('key_scale', dtype=sig.sig_dtype.T),
-         sig.make_sig('value_scale', dtype=sig.sig_dtype.T),
-         sig.make_sig('key_value_scale_cache', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
-         sig.make_sig('batch_valid_length', dtype=sig.sig_dtype.T2),
-         sig.make_sig('cache_mode', dtype=sig.sig_dtype.T3),
-     )
-
-     @prim_arg_register
-     def __init__(self):
-         self.add_prim_attr("side_effect_mem", True)
-
-     def __call__(self, key_scale, value_scale, key_value_scale_cache, batch_valid_length, cache_mode):
-         return super().__call__(key_scale, value_scale, key_value_scale_cache, batch_valid_length, cache_mode)
-
-
- kv_scale_cache_op=KvScaleCache()
-
-
  class L1LossBackwardExt(Primitive):
      r"""

@@ -15085,24 +14770,24 @@ class LogicalAnd(Primitive):
          >>> import mindspore
          >>> import numpy as np
          >>> from mindspore import Tensor, ops
-         >>> x = Tensor(np.array([True, False, True]), mindspore.bool)
-         >>> y = Tensor(np.array([True, True, False]), mindspore.bool)
+         >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
+         >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
          >>> logical_and = ops.LogicalAnd()
          >>> output = logical_and(x, y)
          >>> print(output)
          [ True False False]
-         >>> x = Tensor(1, mindspore.bool)
-         >>> y = Tensor(0, mindspore.bool)
+         >>> x = Tensor(1, mindspore.bool_)
+         >>> y = Tensor(0, mindspore.bool_)
          >>> output = ops.LogicalAnd()(x, y)
          >>> print(output)
          False
          >>> x = True
-         >>> y = Tensor(0, mindspore.bool)
+         >>> y = Tensor(0, mindspore.bool_)
          >>> output = ops.LogicalAnd()(x, y)
          >>> print(output)
          False
          >>> x = True
-         >>> y = Tensor(np.array([True, False]), mindspore.bool)
+         >>> y = Tensor(np.array([True, False]), mindspore.bool_)
          >>> output = ops.LogicalAnd()(x, y)
          >>> print(output)
          [True False]
@@ -15146,7 +14831,7 @@ class LogicalNot(Primitive):
          >>> import mindspore
          >>> import numpy as np
          >>> from mindspore import Tensor, ops
-         >>> x = Tensor(np.array([True, False, True]), mindspore.bool)
+         >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
          >>> logical_not = ops.LogicalNot()
          >>> output = logical_not(x)
          >>> print(output)
@@ -15192,24 +14877,24 @@ class LogicalOr(Primitive):
          >>> import mindspore
          >>> import numpy as np
          >>> from mindspore import Tensor, ops
-         >>> x = Tensor(np.array([True, False, True]), mindspore.bool)
-         >>> y = Tensor(np.array([True, True, False]), mindspore.bool)
+         >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
+         >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
          >>> logical_or = ops.LogicalOr()
          >>> output = logical_or(x, y)
          >>> print(output)
          [ True True True]
-         >>> x = Tensor(1, mindspore.bool)
-         >>> y = Tensor(0, mindspore.bool)
+         >>> x = Tensor(1, mindspore.bool_)
+         >>> y = Tensor(0, mindspore.bool_)
          >>> output = ops.LogicalOr()(x, y)
          >>> print(output)
          True
          >>> x = True
-         >>> y = Tensor(0, mindspore.bool)
+         >>> y = Tensor(0, mindspore.bool_)
          >>> output = ops.LogicalOr()(x, y)
          >>> print(output)
          True
          >>> x = True
-         >>> y = Tensor(np.array([True, False]), mindspore.bool)
+         >>> y = Tensor(np.array([True, False]), mindspore.bool_)
          >>> output = ops.LogicalOr()(x, y)
          >>> print(output)
          [True True]
@@ -15259,24 +14944,24 @@ class LogicalXor(Primitive):
          >>> import mindspore
          >>> import numpy as np
          >>> from mindspore import Tensor, ops
-         >>> x = Tensor(np.array([True, False, True]), mindspore.bool)
-         >>> y = Tensor(np.array([True, True, False]), mindspore.bool)
+         >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
+         >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
          >>> logical_xor = ops.LogicalXor()
          >>> output = logical_xor(x, y)
          >>> print(output)
          [ False True True]
-         >>> x = Tensor(1, mindspore.bool)
-         >>> y = Tensor(0, mindspore.bool)
+         >>> x = Tensor(1, mindspore.bool_)
+         >>> y = Tensor(0, mindspore.bool_)
          >>> output = ops.LogicalXor()(x, y)
          >>> print(output)
          True
          >>> x = True
-         >>> y = Tensor(0, mindspore.bool)
+         >>> y = Tensor(0, mindspore.bool_)
          >>> output = ops.LogicalXor()(x, y)
          >>> print(output)
          True
          >>> x = True
-         >>> y = Tensor(np.array([True, False]), mindspore.bool)
+         >>> y = Tensor(np.array([True, False]), mindspore.bool_)
          >>> output = ops.LogicalXor()(x, y)
          >>> print(output)
          [False True]
@@ -15749,39 +15434,6 @@ class MaskedFill(Primitive):
  masked_fill_op=MaskedFill()


- class MaskedScatter(Primitive):
-     r"""
-     .. code-block::
-
-         prim = ops.MaskedScatter()
-         out = prim(input, mask, source)
-
-     is equivalent to
-
-     .. code-block::
-
-         ops.masked_scatter(input, mask, source)
-
-     Refer to :func:`mindspore.ops.masked_scatter` for more details.
-     """
-     @prim_arg_register
-     def __init__(self):
-         pass
-
-     def __call__(self, input, mask, source):
-         # Add for jit context.
-         if jit_context() and jit_context().compiled:
-             return jit_context().default_output()
-         res = pyboost_masked_scatter(self, [input, mask, source])
-         # Add for jit context.
-         if jit_context():
-             return jit_context().run_op(self, res, input, mask, source)
-         return res
-
-
- masked_scatter_op=MaskedScatter()
-
-
  class MaskedSelectGrad(Primitive):
      r"""

@@ -16861,113 +16513,6 @@ class MishGradExt(Primitive):
  mish_grad_ext_op=MishGradExt()


- class Mla(Primitive):
-     r"""
-     .. code-block::
-
-         prim = ops.Mla()
-         out = prim(query, q_rope, kv_cache, k_rope, block_tables, attn_mask, deq_scale_qk, deq_scale_pv, q_seq_lens, context_lens, head_num, scale_value, kv_head_num, mask_mode, is_ring)
-
-     is equivalent to
-
-     .. code-block::
-
-         ops.mla(query, q_rope, kv_cache, k_rope, block_tables, attn_mask, deq_scale_qk, deq_scale_pv, q_seq_lens, context_lens, head_num, scale_value, kv_head_num, mask_mode, is_ring)
-
-     Refer to :func:`mindspore.ops.mla` for more details.
-     """
-     __mindspore_signature__ = (
-         sig.make_sig('query'),
-         sig.make_sig('q_rope'),
-         sig.make_sig('kv_cache'),
-         sig.make_sig('k_rope'),
-         sig.make_sig('block_tables'),
-         sig.make_sig('attn_mask', default=None),
-         sig.make_sig('deq_scale_qk', default=None),
-         sig.make_sig('deq_scale_pv', default=None),
-         sig.make_sig('q_seq_lens', default=None),
-         sig.make_sig('context_lens', default=None),
-         sig.make_sig('head_num', default=32),
-         sig.make_sig('scale_value', default=0.0),
-         sig.make_sig('kv_head_num', default=1),
-         sig.make_sig('mask_mode', default='MASK_NONE'),
-         sig.make_sig('is_ring', default=0),
-     )
-
-     @prim_arg_register
-     def __init__(self):
-         pass
-
-     def __call__(self, query, q_rope, kv_cache, k_rope, block_tables, attn_mask=None, deq_scale_qk=None, deq_scale_pv=None, q_seq_lens=None, context_lens=None, head_num=32, scale_value=0.0, kv_head_num=1, mask_mode='MASK_NONE', is_ring=0):
-         # Add for jit context.
-         if jit_context() and jit_context().compiled:
-             return jit_context().default_output()
-         res = pyboost_mla(self, [query, q_rope, kv_cache, k_rope, block_tables, attn_mask, deq_scale_qk, deq_scale_pv, q_seq_lens, context_lens, head_num, scale_value, kv_head_num, str_to_enum('Mla', 'mask_mode', mask_mode), is_ring])
-         # Add for jit context.
-         if jit_context():
-             return jit_context().run_op(self, res, query, q_rope, kv_cache, k_rope, block_tables, attn_mask, deq_scale_qk, deq_scale_pv, q_seq_lens, context_lens, head_num, scale_value, kv_head_num, str_to_enum('Mla', 'mask_mode', mask_mode), is_ring)
-         return res
-
-
- mla_op=Mla()
-
-
- class MlaPreprocess(Primitive):
-     r"""
-     .. code-block::
-
-         prim = ops.MlaPreprocess()
-         out = prim(input1, gamma1, beta1, quant_scale1, quant_offset1, wdqkv, bias1, gamma2, beta2, quant_scale2, quant_offset2, gamma3, sin1, cos1, sin2, cos2, key_cache, slot_mapping, wuq, bias2, slot_wuk, de_scale1, de_scale2, ctkv_scale, qnope_scale, krope_cache, param_cache_mode)
-
-     is equivalent to
-
-     .. code-block::
-
-         ops.mla_preprocess(input1, gamma1, beta1, quant_scale1, quant_offset1, wdqkv, bias1, gamma2, beta2, quant_scale2, quant_offset2, gamma3, sin1, cos1, sin2, cos2, key_cache, slot_mapping, wuq, bias2, slot_wuk, de_scale1, de_scale2, ctkv_scale, qnope_scale, krope_cache, param_cache_mode)
-
-     Refer to :func:`mindspore.ops.mla_preprocess` for more details.
-     """
-     __mindspore_signature__ = (
-         sig.make_sig('input1'),
-         sig.make_sig('gamma1'),
-         sig.make_sig('beta1'),
-         sig.make_sig('quant_scale1'),
-         sig.make_sig('quant_offset1'),
-         sig.make_sig('wdqkv'),
-         sig.make_sig('bias1'),
-         sig.make_sig('gamma2'),
-         sig.make_sig('beta2'),
-         sig.make_sig('quant_scale2'),
-         sig.make_sig('quant_offset2'),
-         sig.make_sig('gamma3'),
-         sig.make_sig('sin1'),
-         sig.make_sig('cos1'),
-         sig.make_sig('sin2'),
-         sig.make_sig('cos2'),
-         sig.make_sig('key_cache', sig.sig_rw.RW_WRITE),
-         sig.make_sig('slot_mapping'),
-         sig.make_sig('wuq'),
-         sig.make_sig('bias2'),
-         sig.make_sig('slot_wuk'),
-         sig.make_sig('de_scale1'),
-         sig.make_sig('de_scale2'),
-         sig.make_sig('ctkv_scale'),
-         sig.make_sig('qnope_scale'),
-         sig.make_sig('krope_cache', sig.sig_rw.RW_WRITE),
-         sig.make_sig('param_cache_mode', default=0),
-     )
-
-     @prim_arg_register
-     def __init__(self):
-         self.add_prim_attr("side_effect_mem", True)
-
-     def __call__(self, input1, gamma1, beta1, quant_scale1, quant_offset1, wdqkv, bias1, gamma2, beta2, quant_scale2, quant_offset2, gamma3, sin1, cos1, sin2, cos2, key_cache, slot_mapping, wuq, bias2, slot_wuk, de_scale1, de_scale2, ctkv_scale, qnope_scale, krope_cache, param_cache_mode=0):
-         return super().__call__(input1, gamma1, beta1, quant_scale1, quant_offset1, wdqkv, bias1, gamma2, beta2, quant_scale2, quant_offset2, gamma3, sin1, cos1, sin2, cos2, key_cache, slot_mapping, wuq, bias2, slot_wuk, de_scale1, de_scale2, ctkv_scale, qnope_scale, krope_cache, param_cache_mode)
-
-
- mla_preprocess_op=MlaPreprocess()
-
-
  class Mm(Primitive):
      r"""
      .. code-block::
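
Note: `Mla` and `MlaPreprocess` disappear together with the `pyboost_mla` import removed near the top of this diff. Going only by the removed signature, a defaulted `Mla` call has the shape below; the tensor arguments are placeholders and the attention reading of the names (paged KV cache, rope parts, block tables) is an assumption:

    from mindspore.ops.auto_generate.gen_ops_prim import mla_op

    # Call shape with the signature defaults spelled out (2.7.0 only):
    # out = mla_op(query, q_rope, kv_cache, k_rope, block_tables,
    #              attn_mask=None, deq_scale_qk=None, deq_scale_pv=None,
    #              q_seq_lens=None, context_lens=None,
    #              head_num=32, scale_value=0.0, kv_head_num=1,
    #              mask_mode='MASK_NONE', is_ring=0)
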
@@ -17983,7 +17528,7 @@ class NLLLoss(Primitive):

      .. warning::
          This is an experimental API that is subject to change or deletion.
- 
+
      Args:
          reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
              ``'sum'`` . Default: ``'mean'`` .
@@ -18940,7 +18485,7 @@ class PowScalarTensor(Primitive):
          input (Number): The first input is a Number.
          exponent (Tensor): The second input is a tensor whose data type is
              `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-             `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+             `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.

      Returns:
          Tensor, the shape is the same as the one after broadcasting,
@@ -18993,7 +18538,7 @@ class PowTensorScalar(Primitive):
      Args:
          input (Tensor): The first input is a tensor whose data type is
              `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-             `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+             `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
          exponent (Number): The second input is a Number.

      Returns:
@@ -21753,50 +21298,6 @@ class RightShift(Primitive):
  right_shift_op=RightShift()


- class RingAttentionUpdate(Primitive):
-     r"""
-     .. code-block::
-
-         prim = ops.RingAttentionUpdate()
-         out = prim(prev_attn_out, prev_softmax_max, prev_softmax_sum, cur_attn_out, cur_softmax_max, cur_softmax_sum, actual_seq_qlen, layout)
-
-     is equivalent to
-
-     .. code-block::
-
-         ops.ring_attention_update(prev_attn_out, prev_softmax_max, prev_softmax_sum, cur_attn_out, cur_softmax_max, cur_softmax_sum, actual_seq_qlen, layout)
-
-     Refer to :func:`mindspore.ops.ring_attention_update` for more details.
-     """
-     __mindspore_signature__ = (
-         sig.make_sig('prev_attn_out'),
-         sig.make_sig('prev_softmax_max'),
-         sig.make_sig('prev_softmax_sum'),
-         sig.make_sig('cur_attn_out'),
-         sig.make_sig('cur_softmax_max'),
-         sig.make_sig('cur_softmax_sum'),
-         sig.make_sig('actual_seq_qlen', default=None),
-         sig.make_sig('layout', default='SBH'),
-     )
-
-     @prim_arg_register
-     def __init__(self):
-         pass
-
-     def __call__(self, prev_attn_out, prev_softmax_max, prev_softmax_sum, cur_attn_out, cur_softmax_max, cur_softmax_sum, actual_seq_qlen=None, layout='SBH'):
-         # Add for jit context.
-         if jit_context() and jit_context().compiled:
-             return jit_context().default_output()
-         res = pyboost_ring_attention_update(self, [prev_attn_out, prev_softmax_max, prev_softmax_sum, cur_attn_out, cur_softmax_max, cur_softmax_sum, actual_seq_qlen, str_to_enum('RingAttentionUpdate', 'layout', layout)])
-         # Add for jit context.
-         if jit_context():
-             return jit_context().run_op(self, res, prev_attn_out, prev_softmax_max, prev_softmax_sum, cur_attn_out, cur_softmax_max, cur_softmax_sum, actual_seq_qlen, str_to_enum('RingAttentionUpdate', 'layout', layout))
-         return res
-
-
- ring_attention_update_op=RingAttentionUpdate()
-
-
  class RmsNormGrad(Primitive):
      r"""
      Calculates the gradient of RmsNorm operation.
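
Note: judging purely by its parameter names, `RingAttentionUpdate` merges a previous and a current partial attention result using their softmax statistics; that reading is an assumption, since only the signature survives here. The removed defaults give this call shape (2.7.0 only):

    from mindspore.ops.auto_generate.gen_ops_prim import ring_attention_update_op

    # out = ring_attention_update_op(
    #     prev_attn_out, prev_softmax_max, prev_softmax_sum,
    #     cur_attn_out, cur_softmax_max, cur_softmax_sum,
    #     actual_seq_qlen=None, layout='SBH')
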
@@ -23137,18 +22638,52 @@ class SmoothL1LossGrad(Primitive):

  class SmoothL1Loss(Primitive):
      r"""
-     .. code-block::
+     Calculate the smooth L1 loss, and the L1 loss function has robustness.

-         prim = ops.SmoothL1Loss(beta, reduction)
-         out = prim(prediction, target)
+     Refer to :func:`mindspore.ops.smooth_l1_loss` for more details.

-     is equivalent to
+     .. warning::
+         This API has poor performance on CPU and it is recommended to run it on the Ascend/GPU.

-     .. code-block::
+     Args:
+         beta (number, optional): A parameter used to control the point where the function will change between
+             L1 to L2 loss. Default: ``1.0`` .

-         ops.smooth_l1_loss(prediction, target, beta, reduction)
+             - Ascend: The value should be equal to or greater than zero.
+             - CPU/GPU: The value should be greater than zero.
+         reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
+             ``'sum'`` . Default: ``'none'`` .

-     Refer to :func:`mindspore.ops.smooth_l1_loss` for more details.
+         - ``'none'``: no reduction will be applied.
+         - ``'mean'``: compute and return the mean of elements in the output.
+         - ``'sum'``: the output elements will be summed.
+
+     Inputs:
+         - **logits** (Tensor) - Input Tensor of any dimension. Supported dtypes:
+
+           - Ascend: float16, float32, bfloat16.
+           - CPU/GPU: float16, float32, float64.
+         - **labels** (Tensor) - Ground truth data.
+
+           - CPU/Ascend: has the same shape as the `logits`, `logits` and `labels` comply with the implicit type
+             conversion rules to make the data types consistent.
+           - GPU: has the same shape and dtype as the `logits`.
+
+     Outputs:
+         Tensor, if `reduction` is ``'none'``, then output is a tensor with the same shape as `logits`.
+         Otherwise the shape of output tensor is :math:`()`.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, ops
+         >>> loss = ops.SmoothL1Loss()
+         >>> logits = Tensor(np.array([1, 2, 3]), mindspore.float32)
+         >>> labels = Tensor(np.array([1, 2, 2]), mindspore.float32)
+         >>> output = loss(logits, labels)
+         >>> print(output)
+         [0. 0. 0.5]
      """
      @prim_arg_register
      def __init__(self, beta=1.0, reduction='none'):
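
Note: the rewritten docstring documents the two constructor arguments that were already present in `__init__`. A short usage sketch with both set explicitly:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    loss = ops.SmoothL1Loss(beta=1.0, reduction='mean')
    logits = Tensor(np.array([1, 2, 3]), mindspore.float32)
    labels = Tensor(np.array([1, 2, 2]), mindspore.float32)
    out = loss(logits, labels)  # mean of the elementwise [0., 0., 0.5]
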
@@ -26541,10 +26076,10 @@ class Xlogy(Primitive):
      Inputs:
          - **input** (Tensor) - The first input is a tensor whose data
            type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-           `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+           `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
          - **other** (Tensor) - The second input is a tensor whose data
            type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-           `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+           `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.

      Outputs:
          - **y** (Tensor) - the shape is the broadcast of `input` and `other`,
@@ -27677,7 +27212,7 @@ class MoeGatingTopKSoftmax(Primitive):
          ... [0.2, 0.2, 0.4, 0.2],
          ... [0.3, 0.3, 0.1, 0.3],
          ... [0.1, 0.7, 0.1, 0.1]]), ms.float16)
-         >>> finished = Tensor(np.array([True, True, True, True]), ms.bool)
+         >>> finished = Tensor(np.array([True, True, True, True]), ms.bool_)
          >>> net = _infer_ops.MoeGatingTopKSoftmax()
          >>> output = net(x, finished, k=4)
          >>> print(output[0])
@@ -28178,28 +27713,6 @@ class EinsumExt(Primitive):
  einsum_ext_op=EinsumExt()


- class FuncDropoutExt(Primitive):
-     r"""
-
-     """
-     @prim_arg_register
-     def __init__(self):
-         pass
-
-     def __call__(self, input, p, training, inplace, seed, offset):
-         # Add for jit context.
-         if jit_context() and jit_context().compiled:
-             return jit_context().default_output()
-         res = pyboost_func_dropout_ext(self, [input, p, training, inplace, seed, offset])
-         # Add for jit context.
-         if jit_context():
-             return jit_context().run_op(self, res, input, p, training, inplace, seed, offset)
-         return res
-
-
- func_dropout_ext_op=FuncDropoutExt()
-
-
  class FuncMaxPool2D(Primitive):
      r"""
      .. code-block::