mindspore-2.4.1-cp311-cp311-manylinux1_x86_64.whl → mindspore-2.5.0-cp311-cp311-manylinux1_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (866)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Third_Party_Open_Source_Software_Notice +39 -0
  3. mindspore/__init__.py +8 -3
  4. mindspore/_akg/akg/composite/build_module.py +6 -2
  5. mindspore/_akg/akg/utils/kernel_exec.py +2 -2
  6. mindspore/_c_dataengine.cpython-311-x86_64-linux-gnu.so +0 -0
  7. mindspore/_c_expression.cpython-311-x86_64-linux-gnu.so +0 -0
  8. mindspore/_c_mindrecord.cpython-311-x86_64-linux-gnu.so +0 -0
  9. mindspore/_checkparam.py +0 -5
  10. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  11. mindspore/_extends/parse/compile_config.py +64 -0
  12. mindspore/_extends/parse/deprecated/__init__.py +0 -0
  13. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +375 -0
  14. mindspore/_extends/parse/parser.py +23 -5
  15. mindspore/_extends/parse/standard_method.py +123 -27
  16. mindspore/_extends/pijit/pijit_func_white_list.py +1 -1
  17. mindspore/amp.py +7 -1
  18. mindspore/boost/boost_cell_wrapper.py +136 -41
  19. mindspore/common/__init__.py +3 -1
  20. mindspore/common/_register_for_tensor.py +0 -1
  21. mindspore/common/_stub_tensor.py +25 -4
  22. mindspore/common/_tensor_cpp_method.py +17 -0
  23. mindspore/common/_tensor_docs.py +6132 -0
  24. mindspore/common/api.py +99 -25
  25. mindspore/common/dtype.py +34 -34
  26. mindspore/common/dump.py +2 -1
  27. mindspore/common/file_system.py +8 -1
  28. mindspore/common/generator.py +2 -0
  29. mindspore/common/hook_handle.py +3 -1
  30. mindspore/common/initializer.py +3 -4
  31. mindspore/common/lazy_inline.py +8 -2
  32. mindspore/common/mindir_util.py +10 -2
  33. mindspore/common/parameter.py +30 -27
  34. mindspore/common/tensor.py +713 -1337
  35. mindspore/communication/__init__.py +1 -1
  36. mindspore/communication/_comm_helper.py +10 -0
  37. mindspore/communication/comm_func.py +215 -173
  38. mindspore/communication/management.py +23 -20
  39. mindspore/context.py +292 -193
  40. mindspore/dataset/__init__.py +23 -19
  41. mindspore/dataset/callback/ds_callback.py +2 -1
  42. mindspore/dataset/core/config.py +84 -3
  43. mindspore/dataset/engine/cache_admin.py +3 -3
  44. mindspore/dataset/engine/cache_client.py +5 -4
  45. mindspore/dataset/engine/datasets.py +192 -149
  46. mindspore/dataset/engine/datasets_audio.py +14 -0
  47. mindspore/dataset/engine/datasets_standard_format.py +28 -11
  48. mindspore/dataset/engine/datasets_text.py +38 -1
  49. mindspore/dataset/engine/datasets_user_defined.py +125 -65
  50. mindspore/dataset/engine/datasets_vision.py +81 -8
  51. mindspore/dataset/engine/iterators.py +281 -63
  52. mindspore/dataset/engine/obs/util.py +8 -0
  53. mindspore/dataset/engine/queue.py +40 -0
  54. mindspore/dataset/engine/samplers.py +26 -2
  55. mindspore/dataset/engine/serializer_deserializer.py +1 -1
  56. mindspore/dataset/engine/validators.py +43 -11
  57. mindspore/dataset/transforms/py_transforms_util.py +17 -0
  58. mindspore/dataset/transforms/transforms.py +29 -12
  59. mindspore/dataset/vision/validators.py +1 -2
  60. mindspore/device_context/__init__.py +21 -0
  61. mindspore/device_context/ascend/__init__.py +25 -0
  62. mindspore/device_context/ascend/device.py +72 -0
  63. mindspore/device_context/ascend/op_debug.py +94 -0
  64. mindspore/device_context/ascend/op_precision.py +193 -0
  65. mindspore/device_context/ascend/op_tuning.py +127 -0
  66. mindspore/device_context/cpu/__init__.py +25 -0
  67. mindspore/device_context/cpu/device.py +62 -0
  68. mindspore/device_context/cpu/op_tuning.py +43 -0
  69. mindspore/device_context/gpu/__init__.py +21 -0
  70. mindspore/device_context/gpu/device.py +70 -0
  71. mindspore/device_context/gpu/op_precision.py +67 -0
  72. mindspore/device_context/gpu/op_tuning.py +175 -0
  73. mindspore/device_manager.py +134 -0
  74. mindspore/experimental/llm_boost/__init__.py +3 -2
  75. mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
  76. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
  77. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
  78. mindspore/experimental/llm_boost/atb/boost_base.py +239 -64
  79. mindspore/experimental/llm_boost/atb/llama_boost.py +52 -30
  80. mindspore/experimental/llm_boost/atb/qwen_boost.py +47 -24
  81. mindspore/experimental/llm_boost/register.py +1 -0
  82. mindspore/experimental/optim/adadelta.py +26 -22
  83. mindspore/experimental/optim/adam.py +3 -0
  84. mindspore/experimental/optim/lr_scheduler.py +33 -24
  85. mindspore/experimental/optim/radam.py +33 -30
  86. mindspore/hal/device.py +28 -0
  87. mindspore/hal/event.py +17 -0
  88. mindspore/hal/memory.py +94 -3
  89. mindspore/hal/stream.py +91 -6
  90. mindspore/include/api/context.h +1 -2
  91. mindspore/include/dataset/constants.h +2 -2
  92. mindspore/lib/libavcodec.so.59 +0 -0
  93. mindspore/lib/libavdevice.so.59 +0 -0
  94. mindspore/lib/libavfilter.so.8 +0 -0
  95. mindspore/lib/libavformat.so.59 +0 -0
  96. mindspore/lib/libavutil.so.57 +0 -0
  97. mindspore/lib/libdnnl.so.2 +0 -0
  98. mindspore/lib/libmindspore_backend.so +0 -0
  99. mindspore/lib/libmindspore_common.so +0 -0
  100. mindspore/lib/libmindspore_core.so +0 -0
  101. mindspore/lib/libmindspore_glog.so.0 +0 -0
  102. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  103. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  104. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  105. mindspore/lib/libmindspore_ops.so +0 -0
  106. mindspore/lib/libmpi_adapter.so +0 -0
  107. mindspore/lib/libmpi_collective.so +0 -0
  108. mindspore/lib/libnnacl.so +0 -0
  109. mindspore/lib/libopencv_core.so.4.5 +0 -0
  110. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  111. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  112. mindspore/lib/libps_cache.so +0 -0
  113. mindspore/lib/libswresample.so.4 +0 -0
  114. mindspore/lib/libswscale.so.6 +0 -0
  115. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910_93/aic-ascend910_93-ops-info.json +2048 -0
  116. mindspore/lib/plugin/ascend/custom_aicore_ops/op_proto/libop_proto.so +0 -0
  117. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  118. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  119. mindspore/lib/plugin/ascend/custom_ascendc_910/framework/npu_supported_ops.json +10 -0
  120. mindspore/lib/plugin/ascend/custom_ascendc_910/op_api/lib/libcust_opapi.so +0 -0
  121. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +182 -0
  122. mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl}/dynamic/decoder_kv_cache.py +51 -16
  123. mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl}/dynamic/prompt_kv_cache.py +51 -16
  124. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.json +158 -0
  125. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.o +0 -0
  126. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.json +158 -0
  127. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.o +0 -0
  128. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.json +158 -0
  129. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.o +0 -0
  130. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.json +158 -0
  131. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.o +0 -0
  132. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.json +158 -0
  133. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.o +0 -0
  134. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.json +158 -0
  135. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.o +0 -0
  136. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.json +158 -0
  137. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.o +0 -0
  138. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.json +158 -0
  139. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.o +0 -0
  140. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.json +167 -0
  141. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.o +0 -0
  142. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.json +167 -0
  143. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.o +0 -0
  144. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.json +167 -0
  145. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.o +0 -0
  146. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.json +167 -0
  147. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.o +0 -0
  148. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.json +167 -0
  149. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.o +0 -0
  150. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.json +167 -0
  151. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.o +0 -0
  152. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.json +167 -0
  153. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.o +0 -0
  154. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.json +167 -0
  155. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.o +0 -0
  156. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/config/ascend910/binary_info_config.json +302 -0
  157. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/config/ascend910/decoder_kv_cache.json +892 -0
  158. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/config/ascend910/prompt_kv_cache.json +892 -0
  159. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
  160. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  161. mindspore/lib/plugin/ascend/custom_ascendc_910/op_proto/inc/op_proto.h +33 -0
  162. mindspore/lib/plugin/ascend/custom_ascendc_910/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
  163. mindspore/lib/plugin/ascend/custom_ascendc_910/version.info +1 -0
  164. mindspore/lib/plugin/ascend/custom_ascendc_910b/framework/npu_supported_ops.json +14 -0
  165. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_api/include/aclnn_decoder_kv_cache.h +59 -0
  166. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_api/include/aclnn_prompt_kv_cache.h +59 -0
  167. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_api/lib/libcust_opapi.so +0 -0
  168. mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl}/dynamic/all_finite.py +51 -16
  169. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/decoder_kv_cache.cpp +192 -0
  170. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/decoder_kv_cache.py +215 -0
  171. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/prompt_kv_cache.cpp +274 -0
  172. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/prompt_kv_cache.py +215 -0
  173. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.json +80 -0
  174. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.o +0 -0
  175. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.json +80 -0
  176. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.o +0 -0
  177. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.json +80 -0
  178. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.o +0 -0
  179. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.json +158 -0
  180. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.o +0 -0
  181. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.json +158 -0
  182. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.o +0 -0
  183. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.json +158 -0
  184. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.o +0 -0
  185. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.json +158 -0
  186. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.o +0 -0
  187. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.json +158 -0
  188. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.o +0 -0
  189. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.json +158 -0
  190. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.o +0 -0
  191. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.json +158 -0
  192. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.o +0 -0
  193. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.json +158 -0
  194. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.o +0 -0
  195. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.json +167 -0
  196. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.o +0 -0
  197. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.json +167 -0
  198. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.o +0 -0
  199. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.json +167 -0
  200. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.o +0 -0
  201. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.json +167 -0
  202. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.o +0 -0
  203. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.json +167 -0
  204. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.o +0 -0
  205. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.json +167 -0
  206. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.o +0 -0
  207. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.json +167 -0
  208. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.o +0 -0
  209. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.json +167 -0
  210. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.o +0 -0
  211. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.json +78 -0
  212. mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o → custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.o} +0 -0
  213. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.json +78 -0
  214. mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o → custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.o} +0 -0
  215. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.json +78 -0
  216. mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o → custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.o} +0 -0
  217. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.json +156 -0
  218. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.o +0 -0
  219. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.json +156 -0
  220. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.o +0 -0
  221. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.json +156 -0
  222. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.o +0 -0
  223. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.json +156 -0
  224. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.o +0 -0
  225. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.json +156 -0
  226. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.o +0 -0
  227. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.json +156 -0
  228. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.o +0 -0
  229. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.json +156 -0
  230. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.o +0 -0
  231. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.json +156 -0
  232. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.o +0 -0
  233. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.json +165 -0
  234. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.o +0 -0
  235. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.json +165 -0
  236. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.o +0 -0
  237. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.json +165 -0
  238. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.o +0 -0
  239. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.json +165 -0
  240. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.o +0 -0
  241. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.json +165 -0
  242. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.o +0 -0
  243. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.json +165 -0
  244. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.o +0 -0
  245. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.json +165 -0
  246. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.o +0 -0
  247. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.json +165 -0
  248. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.o +0 -0
  249. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.json +78 -0
  250. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.o +0 -0
  251. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.json +78 -0
  252. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.o +0 -0
  253. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.json +78 -0
  254. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.o +0 -0
  255. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.json +156 -0
  256. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.o +0 -0
  257. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.json +156 -0
  258. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.o +0 -0
  259. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.json +156 -0
  260. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.o +0 -0
  261. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.json +156 -0
  262. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.o +0 -0
  263. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.json +156 -0
  264. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.o +0 -0
  265. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.json +156 -0
  266. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.o +0 -0
  267. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.json +156 -0
  268. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.o +0 -0
  269. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.json +156 -0
  270. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.o +0 -0
  271. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.json +165 -0
  272. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.o +0 -0
  273. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.json +165 -0
  274. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.o +0 -0
  275. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.json +165 -0
  276. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.o +0 -0
  277. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.json +165 -0
  278. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.o +0 -0
  279. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.json +165 -0
  280. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.o +0 -0
  281. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.json +165 -0
  282. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.o +0 -0
  283. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.json +165 -0
  284. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.o +0 -0
  285. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.json +165 -0
  286. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.o +0 -0
  287. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend310p/all_finite.json +139 -0
  288. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend310p/binary_info_config.json +361 -0
  289. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend310p/decoder_kv_cache.json +892 -0
  290. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend310p/prompt_kv_cache.json +892 -0
  291. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/all_finite.json +139 -0
  292. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/binary_info_config.json +361 -0
  293. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/decoder_kv_cache.json +892 -0
  294. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/prompt_kv_cache.json +892 -0
  295. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +139 -0
  296. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +361 -0
  297. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910b/decoder_kv_cache.json +892 -0
  298. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910b/prompt_kv_cache.json +892 -0
  299. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
  300. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  301. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
  302. mindspore/lib/plugin/ascend/custom_ascendc_910b/version.info +1 -0
  303. mindspore/lib/plugin/ascend/custom_compiler/setup.py +1 -1
  304. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  305. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  306. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  307. mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
  308. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  309. mindspore/lib/plugin/ascend/libmindspore_internal_kernels.so +0 -0
  310. mindspore/lib/plugin/ascend/libms_ascend_native_boost.so +0 -0
  311. mindspore/lib/plugin/ascend/libms_atb_boost.so +0 -0
  312. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
  313. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
  314. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +960 -958
  315. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
  316. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
  317. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
  318. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
  319. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{acme/include/base_type.h → base_type.h} +25 -20
  320. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{cast/cast_tiling.h → internal.h} +6 -4
  321. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_op.h +114 -0
  322. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/boost_kernel.h +70 -0
  323. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/llama_impl.h +85 -0
  324. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/model_interface.h +52 -0
  325. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/tensor.h +81 -0
  326. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_creator.h +123 -0
  327. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +155 -110
  328. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{acme/include/tiling_info.h → tiling_info.h} +12 -9
  329. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tiling_utils.h +178 -0
  330. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layer_norm_op.so +0 -0
  331. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_op.so +0 -0
  332. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_quant_op.so +0 -0
  333. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_310p_op.so +0 -0
  334. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_op.so +0 -0
  335. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_op.so +0 -0
  336. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcompare_op.so +0 -0
  337. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_op.so +0 -0
  338. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libllama_op.so +0 -0
  339. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_op.so +0 -0
  340. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
  341. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_optiling.so +0 -0
  342. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmulti_weight_matmul_kernel_op.so +0 -0
  343. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_op.so +0 -0
  344. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_op.so +0 -0
  345. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_op.so +0 -0
  346. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_f16_nz/internal_pp_matmul_f16_nz.o +0 -0
  347. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_f16_nz/internal_pp_matmul_f16_nz_0.o +0 -0
  348. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_i8_nz_compress/internal_pp_matmul_i8_nz_compress.o +0 -0
  349. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_i8_nz_compress/internal_pp_matmul_i8_nz_compress_0.o +0 -0
  350. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_int8_nz/internal_pp_matmul_int8_nz.o +0 -0
  351. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_int8_nz/internal_pp_matmul_int8_nz_0.o +0 -0
  352. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libadd_rms_norm_quant_ascend310p.so +0 -0
  353. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libapply_rotary_pos_emb_310p_impl.so → op_kernels/ascend310p/so_kernels/libapply_rotary_pos_emb_310p_ascend310p.so} +0 -0
  354. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libcast_ascend310p.so +0 -0
  355. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libcompare_ascend310p.so +0 -0
  356. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libgelu_ascend310p.so +0 -0
  357. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libmatmul_ascend310p.so +0 -0
  358. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libreshape_and_cache_nz_ascend310p.so +0 -0
  359. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_4b60f88cdc28b25a36bad2d8b0a88092.json +163 -0
  360. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_4b60f88cdc28b25a36bad2d8b0a88092.o +0 -0
  361. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_cde61da2bd6fededcb1ba310a6ad16ee.json +163 -0
  362. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_cde61da2bd6fededcb1ba310a6ad16ee.o +0 -0
  363. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
  364. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
  365. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bsh_full_mix.o +0 -0
  366. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
  367. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
  368. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
  369. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bsh_full_mix.o +0 -0
  370. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
  371. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix.o +0 -0
  372. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix_mix_aic_0.o +0 -0
  373. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  374. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix.o +0 -0
  375. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix_mix_aic_0.o +0 -0
  376. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  377. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/matmul_add_rmsnorm/matmul_add_rmsnorm_bf16_bf16.o +0 -0
  378. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/matmul_add_rmsnorm/matmul_add_rmsnorm_bf16_fp16.o +0 -0
  379. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/matmul_add_rmsnorm/matmul_add_rmsnorm_bf16_fp32.o +0 -0
  380. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/matmul_add_rmsnorm/matmul_add_rmsnorm_fp16_bf16.o +0 -0
  381. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/matmul_add_rmsnorm/matmul_add_rmsnorm_fp16_fp16.o +0 -0
  382. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/matmul_add_rmsnorm/matmul_add_rmsnorm_fp16_fp32.o +0 -0
  383. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2.o +0 -0
  384. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2_mix_aic_0.o +0 -0
  385. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2_mix_aiv_0.o +0 -0
  386. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/so_kernels/libadd_layer_norm_ascend910b.so +0 -0
  387. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libadd_rms_norm_impl.so → op_kernels/ascend910b/so_kernels/libadd_rms_norm_ascend910b.so} +0 -0
  388. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/so_kernels/libadd_rms_norm_quant_ascend910b.so +0 -0
  389. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libapply_rotary_pos_emb_impl.so → op_kernels/ascend910b/so_kernels/libapply_rotary_pos_emb_ascend910b.so} +0 -0
  390. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libcast_impl.so → op_kernels/ascend910b/so_kernels/libcast_ascend910b.so} +0 -0
  391. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libnot_equal_impl.so → op_kernels/ascend910b/so_kernels/libcompare_ascend910b.so} +0 -0
  392. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libgelu_impl.so → op_kernels/ascend910b/so_kernels/libgelu_ascend910b.so} +0 -0
  393. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/so_kernels/libllama_ascend910b.so +0 -0
  394. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libmatmul_impl.so → op_kernels/ascend910b/so_kernels/libmatmul_ascend910b.so} +0 -0
  395. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libmulti_weight_matmul_kernel_impl.so → op_kernels/ascend910b/so_kernels/libmulti_weight_matmul_kernel_ascend910b.so} +0 -0
  396. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libreshape_and_cache_impl.so → op_kernels/ascend910b/so_kernels/libreshape_and_cache_ascend910b.so} +0 -0
  397. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/librms_norm_impl.so → op_kernels/ascend910b/so_kernels/librms_norm_ascend910b.so} +0 -0
  398. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
  399. mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
  400. mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
  401. mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
  402. mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
  403. mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
  404. mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
  405. mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
  406. mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
  407. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  408. mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
  409. mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
  410. mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
  411. mindspore/log.py +12 -0
  412. mindspore/mindrecord/__init__.py +1 -1
  413. mindspore/mindrecord/config.py +17 -316
  414. mindspore/mindrecord/filereader.py +1 -9
  415. mindspore/mindrecord/filewriter.py +5 -15
  416. mindspore/mindrecord/mindpage.py +1 -9
  417. mindspore/mint/__init__.py +824 -218
  418. mindspore/mint/distributed/__init__.py +66 -4
  419. mindspore/mint/distributed/distributed.py +2594 -44
  420. mindspore/mint/linalg/__init__.py +6 -0
  421. mindspore/mint/nn/__init__.py +473 -14
  422. mindspore/mint/nn/functional.py +486 -11
  423. mindspore/mint/nn/layer/__init__.py +17 -4
  424. mindspore/mint/nn/layer/_functions.py +330 -0
  425. mindspore/mint/nn/layer/activation.py +169 -1
  426. mindspore/mint/nn/layer/basic.py +123 -0
  427. mindspore/mint/nn/layer/conv.py +727 -0
  428. mindspore/mint/nn/layer/normalization.py +215 -19
  429. mindspore/mint/nn/layer/padding.py +797 -0
  430. mindspore/mint/nn/layer/pooling.py +170 -0
  431. mindspore/mint/optim/__init__.py +2 -1
  432. mindspore/mint/optim/adam.py +223 -0
  433. mindspore/mint/optim/adamw.py +26 -19
  434. mindspore/mint/special/__init__.py +2 -1
  435. mindspore/multiprocessing/__init__.py +5 -0
  436. mindspore/nn/__init__.py +2 -0
  437. mindspore/nn/cell.py +142 -21
  438. mindspore/nn/dynamic_lr.py +2 -1
  439. mindspore/nn/layer/activation.py +6 -6
  440. mindspore/nn/layer/basic.py +35 -25
  441. mindspore/nn/layer/channel_shuffle.py +3 -3
  442. mindspore/nn/layer/conv.py +3 -0
  443. mindspore/nn/layer/embedding.py +3 -3
  444. mindspore/nn/layer/normalization.py +8 -7
  445. mindspore/nn/layer/padding.py +4 -3
  446. mindspore/nn/layer/pooling.py +55 -23
  447. mindspore/nn/layer/rnn_cells.py +1 -1
  448. mindspore/nn/layer/rnns.py +2 -1
  449. mindspore/nn/layer/timedistributed.py +5 -5
  450. mindspore/nn/layer/transformer.py +48 -26
  451. mindspore/nn/learning_rate_schedule.py +5 -3
  452. mindspore/nn/loss/loss.py +31 -36
  453. mindspore/nn/optim/ada_grad.py +1 -0
  454. mindspore/nn/optim/adadelta.py +2 -2
  455. mindspore/nn/optim/adam.py +1 -1
  456. mindspore/nn/optim/lars.py +1 -4
  457. mindspore/nn/optim/optimizer.py +1 -1
  458. mindspore/nn/optim/rprop.py +2 -2
  459. mindspore/nn/optim/thor.py +2 -1
  460. mindspore/nn/utils/__init__.py +22 -0
  461. mindspore/nn/utils/init.py +73 -0
  462. mindspore/nn/wrap/cell_wrapper.py +4 -6
  463. mindspore/nn/wrap/loss_scale.py +3 -4
  464. mindspore/numpy/array_creations.py +60 -62
  465. mindspore/numpy/array_ops.py +148 -143
  466. mindspore/numpy/logic_ops.py +41 -42
  467. mindspore/numpy/math_ops.py +361 -359
  468. mindspore/numpy/utils.py +16 -16
  469. mindspore/numpy/utils_const.py +4 -4
  470. mindspore/ops/__init__.py +2 -1
  471. mindspore/ops/_grad_experimental/grad_comm_ops.py +107 -8
  472. mindspore/ops/_grad_experimental/grad_debug_ops.py +6 -1
  473. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  474. mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
  475. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  476. mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
  477. mindspore/ops/_vmap/vmap_array_ops.py +20 -19
  478. mindspore/ops/_vmap/vmap_base.py +0 -2
  479. mindspore/ops/_vmap/vmap_grad_nn_ops.py +19 -13
  480. mindspore/ops/_vmap/vmap_math_ops.py +11 -9
  481. mindspore/ops/_vmap/vmap_nn_ops.py +20 -34
  482. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +149 -12
  483. mindspore/ops/auto_generate/gen_arg_handler.py +0 -61
  484. mindspore/ops/auto_generate/gen_extend_func.py +554 -60
  485. mindspore/ops/auto_generate/gen_ops_def.py +1621 -115
  486. mindspore/ops/auto_generate/gen_ops_prim.py +8027 -3411
  487. mindspore/ops/auto_generate/pyboost_inner_prim.py +183 -79
  488. mindspore/ops/composite/base.py +1 -1
  489. mindspore/ops/composite/multitype_ops/_compile_utils.py +229 -30
  490. mindspore/ops/composite/multitype_ops/pow_impl.py +0 -29
  491. mindspore/ops/function/__init__.py +12 -0
  492. mindspore/ops/function/array_func.py +561 -159
  493. mindspore/ops/function/clip_func.py +64 -0
  494. mindspore/ops/function/debug_func.py +28 -20
  495. mindspore/ops/function/image_func.py +1 -1
  496. mindspore/ops/function/linalg_func.py +5 -4
  497. mindspore/ops/function/math_func.py +1664 -294
  498. mindspore/ops/function/nn_func.py +988 -317
  499. mindspore/ops/function/parameter_func.py +3 -56
  500. mindspore/ops/function/random_func.py +243 -33
  501. mindspore/ops/function/sparse_unary_func.py +1 -1
  502. mindspore/ops/functional.py +18 -5
  503. mindspore/ops/functional_overload.py +897 -0
  504. mindspore/ops/operations/__init__.py +3 -2
  505. mindspore/ops/operations/_embedding_cache_ops.py +4 -4
  506. mindspore/ops/operations/_grad_ops.py +2 -34
  507. mindspore/ops/operations/_infer_ops.py +2 -1
  508. mindspore/ops/operations/_inner_ops.py +38 -8
  509. mindspore/ops/operations/array_ops.py +45 -303
  510. mindspore/ops/operations/comm_ops.py +23 -17
  511. mindspore/ops/operations/custom_ops.py +7 -49
  512. mindspore/ops/operations/debug_ops.py +42 -47
  513. mindspore/ops/operations/inner_ops.py +6 -4
  514. mindspore/ops/operations/linalg_ops.py +3 -2
  515. mindspore/ops/operations/manually_defined/ops_def.py +185 -104
  516. mindspore/ops/operations/math_ops.py +11 -216
  517. mindspore/ops/operations/nn_ops.py +153 -310
  518. mindspore/ops/primitive.py +23 -21
  519. mindspore/ops/tensor_method.py +1669 -0
  520. mindspore/ops_generate/aclnn_kernel_register_auto_cc_generator.py +110 -0
  521. mindspore/ops_generate/add_tensor_docs_generator.py +54 -0
  522. mindspore/ops_generate/arg_handler.py +0 -61
  523. mindspore/ops_generate/auto_grad_impl_cc_generator.py +135 -0
  524. mindspore/ops_generate/auto_grad_reg_cc_generator.py +93 -0
  525. mindspore/ops_generate/base_generator.py +11 -0
  526. mindspore/ops_generate/cpp_create_prim_instance_helper_generator.py +108 -0
  527. mindspore/ops_generate/functional_map_cpp_generator.py +491 -0
  528. mindspore/ops_generate/functional_overload_py_generator.py +110 -0
  529. mindspore/ops_generate/functions_cc_generator.py +233 -0
  530. mindspore/ops_generate/gen_aclnn_implement.py +110 -114
  531. mindspore/ops_generate/gen_constants.py +157 -3
  532. mindspore/ops_generate/gen_ops.py +245 -990
  533. mindspore/ops_generate/gen_pyboost_func.py +97 -998
  534. mindspore/ops_generate/gen_utils.py +119 -33
  535. mindspore/ops_generate/lite_ops_cpp_generator.py +155 -0
  536. mindspore/ops_generate/op_api_proto.py +206 -0
  537. mindspore/ops_generate/op_def_py_generator.py +131 -0
  538. mindspore/ops_generate/op_prim_py_generator.py +480 -0
  539. mindspore/ops_generate/op_proto.py +373 -108
  540. mindspore/ops_generate/op_template_parser.py +436 -0
  541. mindspore/ops_generate/ops_def_cc_generator.py +288 -0
  542. mindspore/ops_generate/ops_def_h_generator.py +74 -0
  543. mindspore/ops_generate/ops_name_h_generator.py +68 -0
  544. mindspore/ops_generate/ops_primitive_h_generator.py +81 -0
  545. mindspore/ops_generate/pyboost_functions_cpp_generator.py +370 -0
  546. mindspore/ops_generate/pyboost_functions_h_generator.py +68 -0
  547. mindspore/ops_generate/pyboost_functions_py_generator.py +148 -0
  548. mindspore/ops_generate/pyboost_grad_function_cpp_generator.py +154 -0
  549. mindspore/ops_generate/pyboost_inner_prim_generator.py +131 -0
  550. mindspore/ops_generate/pyboost_native_grad_functions_generator.py +268 -0
  551. mindspore/ops_generate/pyboost_op_cpp_code_generator.py +851 -0
  552. mindspore/ops_generate/pyboost_overload_functions_cpp_generator.py +344 -0
  553. mindspore/ops_generate/pyboost_utils.py +92 -33
  554. mindspore/ops_generate/template.py +294 -44
  555. mindspore/ops_generate/tensor_func_reg_cpp_generator.py +422 -0
  556. mindspore/parallel/__init__.py +3 -3
  557. mindspore/parallel/_auto_parallel_context.py +44 -34
  558. mindspore/parallel/_cell_wrapper.py +22 -3
  559. mindspore/parallel/_parallel_serialization.py +13 -2
  560. mindspore/parallel/_utils.py +4 -2
  561. mindspore/parallel/algo_parameter_config.py +1 -1
  562. mindspore/parallel/checkpoint_transform.py +44 -0
  563. mindspore/parallel/cluster/process_entity/_api.py +131 -37
  564. mindspore/parallel/cluster/process_entity/_utils.py +41 -6
  565. mindspore/parallel/cluster/run.py +20 -3
  566. mindspore/parallel/parameter_broadcast.py +1 -1
  567. mindspore/parallel/shard.py +3 -0
  568. mindspore/parallel/transform_safetensors.py +119 -253
  569. mindspore/profiler/__init__.py +17 -4
  570. mindspore/profiler/analysis/__init__.py +0 -0
  571. mindspore/profiler/analysis/parser/__init__.py +0 -0
  572. mindspore/profiler/analysis/parser/ascend_cann_parser.py +166 -0
  573. mindspore/profiler/analysis/parser/base_parser.py +158 -0
  574. mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
  575. mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
  576. mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
  577. mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
  578. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +261 -0
  579. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
  580. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +84 -0
  581. mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
  582. mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
  583. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
  584. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
  585. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
  586. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
  587. mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
  588. mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
  589. mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
  590. mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
  591. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +260 -0
  592. mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
  593. mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
  594. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
  595. mindspore/profiler/analysis/task_manager.py +131 -0
  596. mindspore/profiler/analysis/time_converter.py +84 -0
  597. mindspore/profiler/analysis/viewer/__init__.py +0 -0
  598. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +333 -0
  599. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
  600. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +252 -0
  601. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +313 -0
  602. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +322 -0
  603. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +265 -0
  604. mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
  605. mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
  606. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +97 -0
  607. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
  608. mindspore/profiler/analysis/work_flow.py +73 -0
  609. mindspore/profiler/common/ascend_msprof_exporter.py +138 -0
  610. mindspore/profiler/common/command_executor.py +90 -0
  611. mindspore/profiler/common/constant.py +174 -3
  612. mindspore/profiler/common/file_manager.py +208 -0
  613. mindspore/profiler/common/log.py +130 -0
  614. mindspore/profiler/common/msprof_cmd_tool.py +202 -0
  615. mindspore/profiler/common/path_manager.py +371 -0
  616. mindspore/profiler/common/process_bar.py +168 -0
  617. mindspore/profiler/common/process_pool.py +9 -3
  618. mindspore/profiler/common/profiler_context.py +476 -0
  619. mindspore/profiler/common/profiler_info.py +304 -0
  620. mindspore/profiler/common/profiler_output_path.py +284 -0
  621. mindspore/profiler/common/profiler_parameters.py +210 -0
  622. mindspore/profiler/common/profiler_path_manager.py +120 -0
  623. mindspore/profiler/common/record_function.py +76 -0
  624. mindspore/profiler/common/tlv_decoder.py +76 -0
  625. mindspore/profiler/common/util.py +75 -2
  626. mindspore/profiler/dynamic_profiler.py +270 -37
  627. mindspore/profiler/envprofiler.py +138 -0
  628. mindspore/profiler/mstx.py +199 -0
  629. mindspore/profiler/platform/__init__.py +21 -0
  630. mindspore/profiler/platform/base_profiler.py +40 -0
  631. mindspore/profiler/platform/cpu_profiler.py +124 -0
  632. mindspore/profiler/platform/gpu_profiler.py +74 -0
  633. mindspore/profiler/platform/npu_profiler.py +309 -0
  634. mindspore/profiler/profiler.py +580 -93
  635. mindspore/profiler/profiler_action_controller.py +187 -0
  636. mindspore/profiler/profiler_interface.py +114 -0
  637. mindspore/profiler/schedule.py +208 -0
  638. mindspore/rewrite/api/symbol_tree.py +1 -2
  639. mindspore/run_check/_check_version.py +18 -13
  640. mindspore/runtime/__init__.py +37 -0
  641. mindspore/runtime/device.py +27 -0
  642. mindspore/runtime/event.py +209 -0
  643. mindspore/runtime/executor.py +148 -0
  644. mindspore/runtime/memory.py +392 -0
  645. mindspore/runtime/stream.py +460 -0
  646. mindspore/runtime/thread_bind_core.py +401 -0
  647. mindspore/train/__init__.py +2 -2
  648. mindspore/train/_utils.py +53 -18
  649. mindspore/train/amp.py +8 -4
  650. mindspore/train/callback/_checkpoint.py +32 -18
  651. mindspore/train/callback/_early_stop.py +1 -1
  652. mindspore/train/callback/_flops_collector.py +105 -69
  653. mindspore/train/callback/_history.py +1 -1
  654. mindspore/train/callback/_summary_collector.py +44 -6
  655. mindspore/train/callback/_tft_register.py +37 -15
  656. mindspore/train/dataset_helper.py +11 -11
  657. mindspore/train/metrics/precision.py +4 -5
  658. mindspore/train/mind_ir_pb2.py +167 -46
  659. mindspore/train/model.py +13 -14
  660. mindspore/train/serialization.py +461 -72
  661. mindspore/train/summary/summary_record.py +1 -2
  662. mindspore/train/train_thor/model_thor.py +1 -1
  663. mindspore/utils/__init__.py +4 -2
  664. mindspore/utils/bin/dataset-cache +0 -0
  665. mindspore/utils/bin/dataset-cache-server +0 -0
  666. mindspore/utils/dryrun.py +138 -0
  667. mindspore/utils/runtime_execution_order_check.py +550 -0
  668. mindspore/version.py +1 -1
  669. {mindspore-2.4.1.dist-info → mindspore-2.5.0.dist-info}/METADATA +3 -4
  670. {mindspore-2.4.1.dist-info → mindspore-2.5.0.dist-info}/RECORD +683 -490
  671. {mindspore-2.4.1.dist-info → mindspore-2.5.0.dist-info}/entry_points.txt +1 -1
  672. mindspore/_data_dump.cpython-311-x86_64-linux-gnu.so +0 -0
  673. mindspore/bin/cache_admin +0 -0
  674. mindspore/bin/cache_server +0 -0
  675. mindspore/common/_tensor_overload.py +0 -139
  676. mindspore/lib/libmindspore_np_dtype.so +0 -0
  677. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
  678. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
  679. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
  680. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
  681. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
  682. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
  683. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
  684. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  685. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
  686. mindspore/lib/plugin/ascend/custom_ascendc_ops/version.info +0 -1
  687. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
  688. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -82
  689. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -113
  690. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -193
  691. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/dtype_registry.h +0 -90
  692. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -46
  693. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
  694. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
  695. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_layer_norm_op.h +0 -60
  696. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_rms_norm_op.h +0 -50
  697. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_rms_norm_quant_op.h +0 -50
  698. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/apply_rotary_pos_emb_nz_op.h +0 -42
  699. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/apply_rotary_pos_emb_op.h +0 -55
  700. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -34
  701. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_only_ops.h +0 -94
  702. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_op_base.h +0 -97
  703. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
  704. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/flash_attention_score_op.h +0 -92
  705. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/gelu_op.h +0 -44
  706. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_add_rmsnorm_op.h +0 -73
  707. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -108
  708. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/multi_impls_op.h +0 -64
  709. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/multi_weight_matmul_op.h +0 -91
  710. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/paged_attention_op.h +0 -99
  711. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/reshape_and_cache_nz_op.h +0 -44
  712. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/reshape_and_cache_op.h +0 -44
  713. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/rms_norm_op.h +0 -64
  714. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -179
  715. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -69
  716. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/profiling_util.h +0 -366
  717. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -56
  718. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/kernel/add.h +0 -21
  719. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/tiling/add_tiling.h +0 -43
  720. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -46
  721. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb.h +0 -23
  722. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_base.h +0 -456
  723. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_bf16.h +0 -217
  724. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp.h +0 -391
  725. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp16.h +0 -126
  726. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -230
  727. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_tiling.h +0 -43
  728. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_value.h +0 -27
  729. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/apply_rotary_pos_emb_nz_impl.h +0 -34
  730. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz.h +0 -23
  731. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_base.h +0 -460
  732. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_fp16.h +0 -116
  733. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_fp32.h +0 -230
  734. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_tiling.h +0 -43
  735. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_value.h +0 -27
  736. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -74
  737. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -74
  738. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_impl.h +0 -48
  739. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/kernel/cast_kernel.h +0 -21
  740. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -55
  741. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_tiling.h +0 -27
  742. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/kernel/compare_kernel.h +0 -23
  743. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
  744. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
  745. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
  746. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
  747. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
  748. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
  749. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
  750. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
  751. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
  752. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
  753. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
  754. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
  755. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
  756. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
  757. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
  758. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
  759. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
  760. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
  761. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
  762. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
  763. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
  764. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
  765. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
  766. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
  767. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
  768. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
  769. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
  770. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
  771. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
  772. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
  773. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
  774. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
  775. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
  776. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
  777. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
  778. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
  779. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +0 -68
  780. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -99
  781. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +0 -21
  782. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/lccl/lccl_wrapper.h +0 -58
  783. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/ms_int_types.h +0 -91
  784. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/ms_int_utils.h +0 -108
  785. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +0 -64
  786. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/add_param.h +0 -68
  787. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +0 -40
  788. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/cast_param.h +0 -30
  789. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
  790. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
  791. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
  792. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -38
  793. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +0 -42
  794. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +0 -33
  795. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -377
  796. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/kernel/reshape_and_cache_nz.h +0 -24
  797. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/reshape_and_cache_nz_impl.h +0 -42
  798. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/reshape_and_cache_nz_tiling.h +0 -27
  799. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -46
  800. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/kernel/sub_kernel.h +0 -20
  801. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -48
  802. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_tiling.h +0 -25
  803. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +0 -399
  804. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/utils.h +0 -41
  805. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +0 -45
  806. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_tiling.h +0 -29
  807. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +0 -30
  808. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -69
  809. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_core.h +0 -43
  810. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_entity.h +0 -38
  811. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_sink.h +0 -69
  812. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_stream.h +0 -41
  813. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -71
  814. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -165
  815. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +0 -20
  816. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
  817. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -121
  818. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -106
  819. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
  820. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
  821. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layer_norm_impl.so +0 -0
  822. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_quant_acme_impl.so +0 -0
  823. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_310p_old_impl.so +0 -0
  824. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_old_impl.so +0 -0
  825. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_impl.so +0 -0
  826. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_old_impl.so +0 -0
  827. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix.json +0 -19
  828. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix.o +0 -0
  829. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix_mix_aic_0.o +0 -0
  830. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  831. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix.json +0 -19
  832. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix.o +0 -0
  833. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix_mix_aic_0.o +0 -0
  834. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  835. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
  836. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
  837. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bsh_full_mix.o +0 -0
  838. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
  839. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
  840. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
  841. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bsh_full_mix.o +0 -0
  842. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
  843. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/matmul_add_rmsnorm/matmul_add_rmsnorm_bf16_bf16.o +0 -0
  844. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/matmul_add_rmsnorm/matmul_add_rmsnorm_bf16_fp16.o +0 -0
  845. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/matmul_add_rmsnorm/matmul_add_rmsnorm_bf16_fp32.o +0 -0
  846. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/matmul_add_rmsnorm/matmul_add_rmsnorm_fp16_bf16.o +0 -0
  847. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/matmul_add_rmsnorm/matmul_add_rmsnorm_fp16_fp16.o +0 -0
  848. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/matmul_add_rmsnorm/matmul_add_rmsnorm_fp16_fp32.o +0 -0
  849. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_bf16_bnsd_mix.o +0 -0
  850. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_bf16_bsh_mix.o +0 -0
  851. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_fp16_bnsd_mix.o +0 -0
  852. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_fp16_bsh_mix.o +0 -0
  853. mindspore/profiler/envprofiling.py +0 -254
  854. mindspore/profiler/profiling.py +0 -1926
  855. /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910}/op_api/include/aclnn_decoder_kv_cache.h +0 -0
  856. /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910}/op_api/include/aclnn_prompt_kv_cache.h +0 -0
  857. /mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl}/dynamic/decoder_kv_cache.cpp +0 -0
  858. /mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl}/dynamic/prompt_kv_cache.cpp +0 -0
  859. /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910b}/op_api/include/aclnn_all_finite.h +0 -0
  860. /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910b}/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -0
  861. /mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json → custom_ascendc_910b/op_impl/ai_core/tbe/config/ascend910_93/aic-ascend910_93-ops-info.json} +0 -0
  862. /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910b}/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +0 -0
  863. /mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl}/dynamic/all_finite.cpp +0 -0
  864. /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910b}/op_proto/inc/op_proto.h +0 -0
  865. {mindspore-2.4.1.dist-info → mindspore-2.5.0.dist-info}/WHEEL +0 -0
  866. {mindspore-2.4.1.dist-info → mindspore-2.5.0.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
- # Copyright 2020-2023 Huawei Technologies Co., Ltd
+ # Copyright 2020-2024 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -31,15 +31,18 @@ from mindspore.ops.primitive import PrimitiveWithInfer
  from mindspore.ops.primitive import PrimitiveWithCheck
  from mindspore.ops.primitive import prim_attr_register
  from mindspore.run_check._check_version import AscendEnvChecker
- from ..auto_generate import (CeLU, Flatten, LogSoftmax, LogSoftmaxExt, ReLU, ReLU6, Dense, Tanh,
+ from mindspore._c_expression import pyboost_all_finite
+ from mindspore.common._stub_tensor import _convert_stub
+ from ..auto_generate import (CeLU, Flatten, LogSoftmax, LogSoftmaxExt, GLU, ReLU, ReLU6, Dense, Tanh,
  Elu, Sigmoid, Softmax, SoftplusExt, HSwish, HSigmoid, AvgPool, BiasAdd,
  NLLLoss, OneHot, GeLU, FastGeLU, PReLU, RmsNorm, IncreFlashAttention, MSELossExt,
  GridSampler3D, GridSampler2D, LayerNorm, LayerNormExt, HShrink, AdamWeightDecay, Dropout,
  ApplyRotaryPosEmb, PagedAttention, PagedAttentionMask, ReshapeAndCache,
- FlashAttentionScore, Embedding, UpsampleNearest1D, UpsampleNearest2D,
+ FlashAttentionScore, PromptFlashAttention, Embedding, UpsampleNearest1D, UpsampleNearest2D,
  UpsampleNearest3D, UpsampleTrilinear3D,
  UpsampleBilinear2D, UpsampleLinear1D,
- BinaryCrossEntropy, BCEWithLogitsLoss, SoftShrink)
+ BinaryCrossEntropy, BCEWithLogitsLoss, SoftShrink,
+ SmoothL1Loss)
  from .manually_defined import BatchNorm

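Editor's note: besides the pyboost all-finite imports, this hunk routes GLU, PromptFlashAttention and SmoothL1Loss through the auto-generated primitives. A minimal sketch, assuming only that the public alias survives the relocation (the printed module path is a guess at 2.5.0 internals, not taken from the diff):

import mindspore.ops as ops

# SmoothL1Loss is still reachable through the usual namespace; only its
# defining module moves from nn_ops.py to the auto-generated package.
loss = ops.SmoothL1Loss(beta=1.0, reduction='none')
print(type(loss).__module__)  # expected to name an auto_generate module in 2.5.0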
@@ -612,12 +615,12 @@ class InstanceNorm(PrimitiveWithInfer):
  Inputs:
  - **input_x** (Tensor) - The input of InstanceNorm, Tensor of shape :math:`(N, C)`,
  data type: float16 or float32.
- - **gamma** (Parameter) - Scale, Tensor of shape :math:`(C,)`,
+ - **gamma** (Union[Parameter, Tensor]) - Scale, Tensor of shape :math:`(C,)`,
  data type: float32.
- - **beta** (Parameter) - Bias, Tensor of shape :math:`(C,)`,
+ - **beta** (Union[Parameter, Tensor]) - Bias, Tensor of shape :math:`(C,)`,
  data type: float32.
- - **mean** (Parameter) - Mean value, Tensor of shape :math:`(C,)`, data type: float32.
- - **variance** (Parameter) - Variance value, Tensor of shape :math:`(C,)`, data type: float32.
+ - **mean** (Union[Parameter, Tensor]) - Mean value, Tensor of shape :math:`(C,)`, data type: float32.
+ - **variance** (Union[Parameter, Tensor]) - Variance value, Tensor of shape :math:`(C,)`, data type: float32.

  Outputs:
  Tuple of 3 Tensors, the normalized input, the updated parameters.
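Editor's note: this hunk relaxes gamma, beta, mean and variance from Parameter to Union[Parameter, Tensor]. A minimal sketch of what that permits, assuming ops.InstanceNorm is exported as in 2.4.x and PyNative mode; names and shapes are illustrative, not from the diff:

import numpy as np
import mindspore
from mindspore import Tensor, ops

instance_norm = ops.InstanceNorm()
x = Tensor(np.random.randn(2, 3, 4, 4).astype(np.float32))
gamma = Tensor(np.ones(3, np.float32))   # plain Tensor; a Parameter wrapper was required before 2.5.0
beta = Tensor(np.zeros(3, np.float32))
mean = Tensor(np.zeros(3, np.float32))
variance = Tensor(np.ones(3, np.float32))
output, saved_mean, saved_variance = instance_norm(x, gamma, beta, mean, variance)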
@@ -1430,6 +1433,9 @@ class MaxPool3D(Primitive):
  \max_{l=0, \ldots, d_{ker}-1} \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
  \text{input}(N_i, C_j, s_0 \times d + l, s_1 \times h + m, s_2 \times w + n)

+ .. note::
+ For Atlas training series products, this primitive is not supported.
+
  Args:
  kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
  is an int number that represents depth, height and width of the kernel, or a tuple
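Editor's note: the new note only restricts Atlas training series products; elsewhere usage is unchanged. A short sketch of the documented semantics (kernel and stride values are illustrative):

import numpy as np
import mindspore
from mindspore import Tensor, ops

pool = ops.MaxPool3D(kernel_size=2, strides=2)
x = Tensor(np.random.randn(1, 2, 4, 4, 4).astype(np.float32))  # (N, C, D, H, W)
print(pool(x).shape)  # (1, 2, 2, 2, 2)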
@@ -2284,9 +2290,9 @@ class ApplyMomentum(Primitive):
  gradient_scale (float): The scale of the gradient. Default: ``1.0`` .

  Inputs:
- - **variable** (Parameter) - Weights to be updated. Data type must be float64, int64, float, float16,
- int16, int32, int8, uint16, uint32, uint64, uint8, complex64, complex128.
- - **accumulation** (Parameter) - Accumulated gradient value by moment weight,
+ - **variable** (Union[Parameter, Tensor]) - Weights to be updated. Data type must be float64, int64, float,
+ float16, int16, int32, int8, uint16, uint32, uint64, uint8, complex64, complex128.
+ - **accumulation** (Union[Parameter, Tensor]) - Accumulated gradient value by moment weight,
  has the same data type with `variable`.
  - **learning_rate** (Union[Number, Tensor]) - The learning rate value, must be a float64, int64, float,
  float16, int16, int32, int8, uint16, uint32, uint64, uint8, complex64, complex128 number or
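Editor's note: variable and accumulation become Union[Parameter, Tensor]. A hedged sketch of a call with plain Tensors, which this change appears to allow (values are illustrative):

import numpy as np
import mindspore
from mindspore import Tensor, ops

apply_momentum = ops.ApplyMomentum()
variable = Tensor(np.ones((2, 2)), mindspore.float32)       # Parameter-only before 2.5.0
accumulation = Tensor(np.zeros((2, 2)), mindspore.float32)
learning_rate = Tensor(0.01, mindspore.float32)
gradient = Tensor(np.full((2, 2), 0.1), mindspore.float32)
momentum = Tensor(0.9, mindspore.float32)
updated = apply_momentum(variable, accumulation, learning_rate, gradient, momentum)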
@@ -2303,7 +2309,7 @@ class ApplyMomentum(Primitive):

  Raises:
  TypeError: If the `use_locking` or `use_nesterov` is not a bool or `gradient_scale` is not a float.
- TypeError: If the data type of `var`, `accum` and `grad` conversion of Parameter is not supported.
+ TypeError: If the data type of `var`, `accum` and `grad` conversion is not supported.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -2351,55 +2357,6 @@ class ApplyMomentum(Primitive):
  self.add_prim_attr('side_effect_mem', True)


- class SmoothL1Loss(Primitive):
- r"""
- Calculate the smooth L1 loss, and the L1 loss function has robustness.
-
- Refer to :func:`mindspore.ops.smooth_l1_loss` for more details.
-
- Args:
- beta (float, optional): A parameter used to control the point where the function will change between
- L1 to L2 loss. The value should be greater than zero. Default: ``1.0`` .
- reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
- ``'sum'`` . Default: ``'none'`` .
-
- - ``'none'``: no reduction will be applied.
- - ``'mean'``: compute and return the mean of elements in the output.
- - ``'sum'``: the output elements will be summed.
-
- Inputs:
- - **logits** (Tensor) - Input Tensor of any dimension. Data type must be float16, float32 or float64.
- - **labels** (Tensor) - Ground truth data, has the same shape and dtype as the `logits`.
-
- Outputs:
- Tensor, loss float tensor, same shape and dtype as the `logits`.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> loss = ops.SmoothL1Loss()
- >>> logits = Tensor(np.array([1, 2, 3]), mindspore.float32)
- >>> labels = Tensor(np.array([1, 2, 2]), mindspore.float32)
- >>> output = loss(logits, labels)
- >>> print(output)
- [0. 0. 0.5]
- """
-
- @prim_attr_register
- def __init__(self, beta=1.0, reduction='none'):
- """Initialize SmoothL1Loss."""
- validator.check_value_type('beta', beta, [float], self.name)
- validator.check('beta', beta, '', 0, validator.GT, self.name)
- validator.check_string(
- reduction, ['none', 'sum', 'mean'], 'reduction', self.name)
- self.add_prim_attr('sigma', self.beta)
- self.init_prim_io_names(inputs=['prediction', 'target'], outputs=['output'])
-
-
  class MultiMarginLoss(Primitive):
  r"""
  Creates a loss function that minimizes the hinge loss
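Editor's note: only the hand-written class body is deleted here; combined with the import hunk above, ops.SmoothL1Loss is now served from ..auto_generate. The functional form reproduces the removed docstring's example:

import numpy as np
import mindspore
from mindspore import Tensor, ops

logits = Tensor(np.array([1, 2, 3]), mindspore.float32)
labels = Tensor(np.array([1, 2, 2]), mindspore.float32)
# smooth_l1_loss(x, y) = 0.5 * (x - y)^2 / beta if |x - y| < beta, else |x - y| - 0.5 * beta
print(ops.smooth_l1_loss(logits, labels, beta=1.0, reduction='none'))  # [0.  0.  0.5]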
@@ -3607,11 +3564,11 @@ class Adam(Primitive):
  If ``False`` , update the gradients without using NAG. Default: ``False`` .

  Inputs:
- - **var** (Parameter) - Weights to be updated. The shape is :math:`(N, *)` where :math:`*` means,
+ - **var** (Union[Parameter, Tensor]) - Weights to be updated. The shape is :math:`(N, *)` where :math:`*` means,
  any number of additional dimensions. The data type can be float16 or float32.
- - **m** (Parameter) - The 1st moment vector in the updating formula,
+ - **m** (Union[Parameter, Tensor]) - The 1st moment vector in the updating formula,
  the shape should be the same as `var`.
- - **v** (Parameter) - the 2nd moment vector in the updating formula,
+ - **v** (Union[Parameter, Tensor]) - the 2nd moment vector in the updating formula,
  the shape should be the same as `var`.
  - **beta1_power** (float) - :math:`beta_1^t(\beta_1^{t})` in the updating formula.
  - **beta2_power** (float) - :math:`beta_2^t(\beta_2^{t})` in the updating formula.
@@ -3782,8 +3739,8 @@ class AdamNoUpdateParam(Primitive):

  class FusedSparseAdam(Primitive):
  r"""
- Merges the duplicate value of the gradient and then updates parameters by the Adaptive Moment Estimation (Adam)
- algorithm. This operator is used when the gradient is sparse.
+ Merges the duplicate value of the gradient and then updates parameters or tensors by the Adaptive Moment Estimation
+ (Adam) algorithm. This operator is used when the gradient is sparse.

  The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.

@@ -3816,11 +3773,12 @@ class FusedSparseAdam(Primitive):
  If ``False`` , update the gradients without using NAG. Default: ``False`` .

  Inputs:
- - **var** (Parameter) - Parameters to be updated with float32 data type. The shape is :math:`(N, *)`
- where :math:`*` means, any number of additional dimensions.
- - **m** (Parameter) - The 1st moment vector in the updating formula, has the same shape and data type as `var`.
- - **v** (Parameter) - The 2nd moment vector in the updating formula, has the same shape and data type as `var`.
- Mean square gradients, has the same type as `var` with float32 data type.
+ - **var** (Union[Parameter, Tensor]) - Parameters or tensors to be updated with float32 data type. The shape is
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
+ - **m** (Union[Parameter, Tensor]) - The 1st moment vector in the updating formula, has the same shape and data
+ type as `var`.
+ - **v** (Union[Parameter, Tensor]) - The 2nd moment vector in the updating formula, has the same shape and data
+ type as `var`. Mean square gradients, has the same type as `var` with float32 data type.
  - **beta1_power** (Tensor) - :math:`beta_1^t` in the updating formula with float32 data type.
  The shape is :math:`(1, )`.
  - **beta2_power** (Tensor) - :math:`beta_2^t` in the updating formula with float32 data type.
@@ -3838,7 +3796,7 @@ class FusedSparseAdam(Primitive):
  - **indices** (Tensor) - Gradient indices with int32 data type and indices.shape[0] = gradient.shape[0].

  Outputs:
- Tuple of 3 Tensors, this operator will update the input parameters directly, the outputs are useless.
+ Tuple of 3 Tensors, this operator will update the input parameters or tensors directly, the outputs are useless.

  - **var** (Tensor) - A Tensor with shape :math:`(N, *)`.
  - **m** (Tensor) - A Tensor with shape :math:`(1, )`.
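Editor's note: a hedged sketch of FusedSparseAdam with the relaxed var/m/v types, mirroring the shapes in the docstring (grad.shape[1:] == var.shape[1:], indices.shape[0] == grad.shape[0]); that plain Tensors are updated in place outside a Cell is an assumption drawn from this diff, not verified here:

import numpy as np
import mindspore
from mindspore import Tensor, ops

op = ops.FusedSparseAdam()
var = Tensor(np.ones((3, 1, 2)), mindspore.float32)   # plain Tensors instead of Parameters
m = Tensor(np.ones((3, 1, 2)), mindspore.float32)
v = Tensor(np.ones((3, 1, 2)), mindspore.float32)
beta1_power = Tensor(0.9, mindspore.float32)
beta2_power = Tensor(0.999, mindspore.float32)
lr = Tensor(0.001, mindspore.float32)
beta1 = Tensor(0.9, mindspore.float32)
beta2 = Tensor(0.999, mindspore.float32)
epsilon = Tensor(1e-8, mindspore.float32)
grad = Tensor(np.random.rand(2, 1, 2).astype(np.float32))
indices = Tensor(np.array([0, 1]), mindspore.int32)
op(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices)
# var, m and v are updated in place; the returned tuple is documented as useless.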
@@ -3908,8 +3866,8 @@ class FusedSparseAdam(Primitive):

  class FusedSparseLazyAdam(Primitive):
  r"""
- Merges the duplicate value of the gradient and then updates parameters by the Adaptive Moment Estimation (Adam)
- algorithm. This operator is used when the gradient is sparse. The behavior is not equivalent to the
+ Merges the duplicate value of the gradient and then updates parameters or tensors by the Adaptive Moment Estimation
+ (Adam) algorithm. This operator is used when the gradient is sparse. The behavior is not equivalent to the
  original Adam algorithm, as only the current indices parameters will be updated.

  The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
@@ -3943,11 +3901,12 @@ class FusedSparseLazyAdam(Primitive):
  If ``False`` , update the gradients without using NAG. Default: ``False`` .

  Inputs:
- - **var** (Parameter) - Parameters to be updated with float32 data type. The shape is :math:`(N, *)`
- where :math:`*` means, any number of additional dimensions.
- - **m** (Parameter) - The 1st moment vector in the updating formula, has the same shape and data type as `var`.
- - **v** (Parameter) - The 2nd moment vector in the updating formula, has the same shape and data type as `var`.
- Mean square gradients, has the same type as `var` with float32 data type.
+ - **var** (Union[Parameter, Tensor]) - Parameters or tensors to be updated with float32 data type. The shape is
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
+ - **m** (Union[Parameter, Tensor]) - The 1st moment vector in the updating formula, has the same shape and data
+ type as `var`.
+ - **v** (Union[Parameter, Tensor]) - The 2nd moment vector in the updating formula, has the same shape and data
+ type as `var`. Mean square gradients, has the same type as `var` with float32 data type.
  - **beta1_power** (Tensor) - :math:`beta_1^t` in the updating formula with float32 data type.
  The shape is :math:`(1, )`.
  - **beta2_power** (Tensor) - :math:`beta_2^t` in the updating formula with float32 data type.
@@ -3965,7 +3924,7 @@ class FusedSparseLazyAdam(Primitive):
  - **indices** (Tensor) - Gradient indices with int32 data type and indices.shape[0] = gradient.shape[0].

  Outputs:
- Tuple of 3 Tensors, this operator will update the input parameters directly, the outputs are useless.
+ Tuple of 3 Tensors, this operator will update the input parameters or tensors directly, the outputs are useless.

  - **var** (Tensor) - A Tensor with shape :math:`(N, *)`.
  - **m** (Tensor) - A Tensor with shape :math:`(1, )`.
@@ -4051,17 +4010,18 @@ class FusedSparseFtrl(Primitive):
  use_locking (bool): Use locks for updating operation if True . Default: ``False`` .

  Inputs:
- - **var** (Parameter) - The variable to be updated. The data type must be float32. The shape is :math:`(N, *)`
- where :math:`*` means, any number of additional dimensions.
- - **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
- - **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
+ - **var** (Union[Parameter, Tensor]) - The variable to be updated. The data type must be float32. The shape is
+ :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
+ - **accum** (Union[Parameter, Tensor]) - The accumulation to be updated, must be same type and shape as `var`.
+ - **linear** (Union[Parameter, Tensor]) - the linear coefficient to be updated, must be same type and shape as
+ `var`.
  - **grad** (Tensor) - A tensor of the same type as `var` and
  grad.shape[1:] = var.shape[1:] if var.shape > 1.
  - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
  The type must be int32 and indices.shape[0] = grad.shape[0].

  Outputs:
- Tuple of 3 Tensor, this operator will update the input parameters directly, the outputs are useless.
+ Tuple of 3 Tensor, this operator will update the input parameters or tensors directly, the outputs are useless.

  - **var** (Tensor) - A Tensor with shape :math:`(N, *)`.
  - **accum** (Tensor) - A Tensor with shape :math:`(1, )`.
@@ -4148,9 +4108,10 @@ class FusedSparseProximalAdagrad(Primitive):
  Default: ``False`` .

  Inputs:
- - **var** (Parameter) - Variable tensor to be updated. The data type must be float32.
+ - **var** (Union[Parameter, Tensor]) - Variable tensor to be updated. The data type must be float32.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- - **accum** (Parameter) - Variable tensor to be updated, has the same shape and data type as `var`.
+ - **accum** (Union[Parameter, Tensor]) - Variable tensor to be updated, has the same shape and data type as
+ `var`.
  - **lr** (Tensor) - The learning rate value. The data type must be float32. The shape is :math:`(1, )`.
  - **l1** (Tensor) - l1 regularization strength. The data type must be float32. The shape is :math:`(1, )`.
  - **l2** (Tensor) - l2 regularization strength. The data type must be float32. The shape is :math:`(1, )`.
@@ -4160,7 +4121,7 @@ class FusedSparseProximalAdagrad(Primitive):
  The type must be int32 and indices.shape[0] = grad.shape[0].

  Outputs:
- Tuple of 2 Tensors, this operator will update the input parameters directly, the outputs are useless.
+ Tuple of 2 Tensors, this operator will update the input parameters or tensors directly, the outputs are useless.

  - **var** (Tensor) - A Tensor with shape :math:`(N, *)`.
  - **accum** (Tensor) - A Tensor with shape :math:`(1, )`.
@@ -4339,11 +4300,11 @@ class ApplyAdaMax(Primitive):
  the relatively highest priority data type.

  Inputs:
- - **var** (Parameter) - Variable to be updated. With float32 or float16 data type.
+ - **var** (Union[Parameter, Tensor]) - Variable to be updated. With float32 or float16 data type.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- - **m** (Parameter) - The 1st moment vector in the updating formula, has the same shape as `var`.
+ - **m** (Union[Parameter, Tensor]) - The 1st moment vector in the updating formula, has the same shape as `var`.
  With float32 or float16 data type.
- - **v** (Parameter) - The 2nd moment vector in the updating formula. Mean square gradients
+ - **v** (Union[Parameter, Tensor]) - The 2nd moment vector in the updating formula. Mean square gradients
  with the same shape as `var`. With float32 or float16 data type.
  - **beta1_power** (Union[Number, Tensor]) - :math:`beta_1^t` in the updating formula, must be a scalar.
  With float32 or float16 data type.
@@ -4359,7 +4320,7 @@ class ApplyAdaMax(Primitive):
  With float32 or float16 data type.

  Outputs:
- Tuple of 3 Tensor, the updated parameters.
+ Tuple of 3 Tensor, the updated parameters or tensors.

  - **var** (Tensor) - The same shape and data type as `var`.
  - **m** (Tensor) - The same shape and data type as `m`.
@@ -4453,10 +4414,11 @@ class ApplyAdadelta(Primitive):
  the relatively highest priority data type.

  Inputs:
- - **var** (Parameter) - Weights to be updated. With float32 or float16 data type.
+ - **var** (Union[Parameter, Tensor]) - Weights to be updated. With float32 or float16 data type.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- - **accum** (Parameter) - Accumulation to be updated, has the same shape and data type as `var`.
- - **accum_update** (Parameter) - Accum_update to be updated, has the same shape and data type as `var`.
+ - **accum** (Union[Parameter, Tensor]) - Accumulation to be updated, has the same shape and data type as `var`.
+ - **accum_update** (Union[Parameter, Tensor]) - Accum_update to be updated, has the same shape and data type as
+ `var`.
  - **lr** (Union[Number, Tensor]) - Learning rate, must be a scalar. With float32 or float16 data type.
  - **rho** (Union[Number, Tensor]) - Decay rate, must be a scalar. With float32 or float16 data type.
  - **epsilon** (Union[Number, Tensor]) - A small value added for numerical stability, must be a scalar.
@@ -4464,7 +4426,7 @@ class ApplyAdadelta(Primitive):
  - **grad** (Tensor) - Gradients, has the same shape and data type as `var`.

  Outputs:
- Tuple of 3 Tensor, the updated parameters.
+ Tuple of 3 Tensor, the updated parameters or tensors.

  - **var** (Tensor) - The same shape and data type as `var`.
  - **accum** (Tensor) - The same shape and data type as `accum`.
@@ -4555,14 +4517,14 @@ class ApplyAdagrad(Primitive):
  update_slots (bool): If ``True`` , `accum` will be updated. Default: ``True`` .

  Inputs:
- - **var** (Parameter) - Variable to be updated. With float or complex data type.
+ - **var** (Union[Parameter, Tensor]) - Variable to be updated. With float or complex data type.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- - **accum** (Parameter) - Accumulation to be updated. The shape must be the same as `var`.
+ - **accum** (Union[Parameter, Tensor]) - Accumulation to be updated. The shape must be the same as `var`.
  - **lr** (Union[Number, Tensor]) - The learning rate value, must be a scalar. With float or complex data type.
  - **grad** (Tensor) - A tensor for gradient. The shape must be the same as `var`.

  Outputs:
- Tuple of 2 Tensors, the updated parameters.
+ Tuple of 2 Tensors, the updated parameters or tensors.

  - **var** (Tensor) - The same shape and data type as `var`.
  - **accum** (Tensor) - The same shape and data type as `accum`.
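Editor's note: the "updated parameters or tensors" wording reflects in-place semantics: the op writes back into var/accum and the returned tensors mirror the new state. A sketch with values adapted from the official ApplyAdagrad example:

import numpy as np
import mindspore
from mindspore import Tensor, ops

apply_adagrad = ops.ApplyAdagrad()
var = Tensor(np.array([[0.6, 0.4]]), mindspore.float32)
accum = Tensor(np.array([[0.1, 0.1]]), mindspore.float32)
lr = Tensor(0.001, mindspore.float32)
grad = Tensor(np.array([[0.3, 0.7]]), mindspore.float32)
new_var, new_accum = apply_adagrad(var, accum, lr, grad)
# accum += grad**2; var -= lr * grad / sqrt(accum)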
@@ -4642,15 +4604,15 @@ class ApplyAdagradV2(Primitive):
  update_slots (bool): If ``True`` , `accum` will be updated. Default: ``True`` .

  Inputs:
- - **var** (Parameter) - Variable to be updated. With float16 or float32 data type.
+ - **var** (Union[Parameter, Tensor]) - Variable to be updated. With float16 or float32 data type.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- - **accum** (Parameter) - Accumulation to be updated. The shape must be the same as `var`.
+ - **accum** (Union[Parameter, Tensor]) - Accumulation to be updated. The shape must be the same as `var`.
  - **lr** (Union[Number, Tensor]) - The learning rate value, must be a float number or
  a scalar tensor with float16 or float32 data type.
  - **grad** (Tensor) - A tensor for gradient. The shape must be the same as `var`.

  Outputs:
- Tuple of 2 Tensors, the updated parameters.
+ Tuple of 2 Tensors, the updated parameters or tensors.

  - **var** (Tensor) - The same shape and data type as `var`.
  - **accum** (Tensor) - The same shape and data type as `accum`.
@@ -4753,16 +4715,17 @@ class SparseApplyAdagradV2(Primitive):
  update_slots (bool): If ``True`` , the computation logic will be different to `False`. Default: ``True`` .

  Inputs:
- - **var** (Parameter) - Variable to be updated. The data type must be float16 or float32.
+ - **var** (Union[Parameter, Tensor]) - Variable to be updated. The data type must be float16 or float32.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- - **accum** (Parameter) - Accumulation to be updated. The shape must be the same as `var`.
+ - **accum** (Union[Parameter, Tensor]) - Accumulation to be updated. The shape must be the same as `var`.
  - **grad** (Tensor) - Gradients has the same shape as `var` and
  :math:`grad.shape[1:] = var.shape[1:]` if var.shape > 1.
  - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
- The type must be int32 and :math:`indices.shape[0] = grad.shape[0]`.
+ The type must be int32 and :math:`indices.shape[0] = grad.shape[0]`. The value of indices
+ must be unique. Otherwise, the result is unpredictable.

  Outputs:
- Tuple of 2 tensors, the updated parameters.
+ Tuple of 2 tensors, the updated parameters or tensors.

  - **var** (Tensor) - The same shape and data type as `var`.
  - **accum** (Tensor) - The same shape and data type as `accum`.
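Editor's note: the added caveat makes duplicate indices undefined behavior. One hedged way to satisfy it is to merge duplicate gradient rows before invoking the primitive (pure-numpy preprocessing, not part of the MindSpore API):

import numpy as np

indices = np.array([0, 2, 0], dtype=np.int32)
grad = np.random.rand(3, 4).astype(np.float32)

uniq, inverse = np.unique(indices, return_inverse=True)
merged = np.zeros((uniq.size, grad.shape[1]), dtype=np.float32)
np.add.at(merged, inverse, grad)   # sum gradient rows that share an index
# feed Tensor(uniq) and Tensor(merged) to ops.SparseApplyAdagradV2(lr=..., epsilon=...)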
@@ -4842,9 +4805,10 @@ class ApplyProximalAdagrad(Primitive):
  Default: ``False`` .

  Inputs:
- - **var** (Parameter) - Variable to be updated. The data type must be float16 or float32.
+ - **var** (Union[Parameter, Tensor]) - Variable to be updated. The data type must be float16 or float32.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- - **accum** (Parameter) - Accumulation to be updated, must have the same shape and dtype as `var`.
+ - **accum** (Union[Parameter, Tensor]) - Accumulation to be updated, must have the same shape and dtype as
+ `var`.
  - **lr** (Union[Number, Tensor]) - The learning rate value, must be a scalar. The data type must be
  float16 or float32.
  - **l1** (Union[Number, Tensor]) - l1 regularization strength, must be a scalar. The data type must be
@@ -4854,7 +4818,7 @@ class ApplyProximalAdagrad(Primitive):
  - **grad** (Tensor) - Gradient with the same shape and dtype as `var`.
 
  Outputs:
- Tuple of 2 Tensors, the updated parameters.
+ Tuple of 2 Tensors, the updated parameters or tensors.
 
  - **var** (Tensor) - The same shape and data type as `var`.
  - **accum** (Tensor) - The same shape and data type as `accum`.
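A minimal ApplyProximalAdagrad sketch under the same relaxed typing (values are illustrative):

import numpy as np
from mindspore import Tensor, Parameter, ops

proximal_adagrad = ops.ApplyProximalAdagrad()
var = Parameter(Tensor(np.array([[0.6, 0.4], [0.1, 0.5]], np.float32)), name="var")
accum = Parameter(Tensor(np.array([[0.6, 0.5], [0.2, 0.6]], np.float32)), name="accum")
grad = Tensor(np.array([[0.3, 0.7], [0.1, 0.8]], np.float32))
# lr, l1 and l2 are scalar inputs, not constructor arguments.
var_out, accum_out = proximal_adagrad(var, accum, 0.01, 0.0, 0.0, grad)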
@@ -4939,9 +4903,9 @@ class SparseApplyProximalAdagrad(Primitive):
  Default: ``False`` .
 
  Inputs:
- - **var** (Parameter) - Variable tensor to be updated. The data type must be float16 or float32.
+ - **var** (Union[Parameter, Tensor]) - Variable tensor to be updated. The data type must be float16 or float32.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- - **accum** (Parameter) - Variable tensor to be updated, has the same shape as `var`.
+ - **accum** (Union[Parameter, Tensor]) - Variable tensor to be updated, has the same shape as `var`.
  - **lr** (Union[Number, Tensor]) - The learning rate value, must be a float number or
  a scalar tensor with float16 or float32 data type. It must be positive.
  - **l1** (Union[Number, Tensor]) - l1 regularization strength, must be a float number or
@@ -4955,7 +4919,7 @@ class SparseApplyProximalAdagrad(Primitive):
  following types: int32, int64 and :math:`indices.shape[0] = grad.shape[0]`.
 
  Outputs:
- Tuple of 2 tensors, the updated parameters.
+ Tuple of 2 tensors, the updated parameters or tensors.
 
  - **var** (Tensor) - The same shape and data type as `var`.
  - **accum** (Tensor) - The same shape and data type as `accum`.
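The sparse variant additionally takes `grad` and `indices`; a sketch with illustrative values:

import numpy as np
from mindspore import Tensor, Parameter, ops

sparse_proximal = ops.SparseApplyProximalAdagrad()
var = Parameter(Tensor(np.ones((3, 2), np.float32)), name="var")
accum = Parameter(Tensor(np.ones((3, 2), np.float32)), name="accum")
grad = Tensor(np.full((2, 2), 0.1, np.float32))
indices = Tensor(np.array([0, 2], np.int32))  # int32 or int64
var_out, accum_out = sparse_proximal(var, accum, 0.01, 0.0, 0.0, grad, indices)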
@@ -5041,9 +5005,9 @@ class ApplyAddSign(Primitive):
  the relatively highest priority data type.
 
  Inputs:
- - **var** (Parameter) - Variable tensor to be updated.
+ - **var** (Union[Parameter, Tensor]) - Variable tensor to be updated.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- - **m** (Parameter) - Variable tensor to be updated, has the same data type as `var`.
+ - **m** (Union[Parameter, Tensor]) - Variable tensor to be updated, has the same data type as `var`.
  - **lr** (Union[Number, Tensor]) - The learning rate value, must be a scalar.
  - **alpha** (Union[Number, Tensor]) - Must be a scalar.
  - **sign_decay** (Union[Number, Tensor]) - Must be a scalar.
@@ -5051,7 +5015,7 @@ class ApplyAddSign(Primitive):
  - **grad** (Tensor) - A tensor of the same shape as `var`, for the gradient.
 
  Outputs:
- Tuple of 2 Tensors, the updated parameters.
+ Tuple of 2 Tensors, the updated parameters or tensors.
 
  - **var** (Tensor) - The same shape and data type as `var`.
  - **m** (Tensor) - The same shape and data type as `m`.
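A minimal ApplyAddSign sketch (the scalar hyper-parameters are illustrative):

import numpy as np
from mindspore import Tensor, Parameter, ops

add_sign = ops.ApplyAddSign()
var = Parameter(Tensor(np.array([[0.6, 0.4], [0.1, 0.5]], np.float32)), name="var")
m = Parameter(Tensor(np.array([[0.6, 0.5], [0.2, 0.6]], np.float32)), name="m")
grad = Tensor(np.array([[0.3, 0.7], [0.1, 0.8]], np.float32))
# Input order: var, m, lr, alpha, sign_decay, beta, grad.
var_out, m_out = add_sign(var, m, 0.001, 1.0, 0.99, 0.9, grad)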
@@ -5140,10 +5104,10 @@ class ApplyPowerSign(Primitive):
  On Ascend, input data type of float64 is currently not supported.
 
  Inputs:
- - **var** (Parameter) - Variable tensor to be updated. With float64, float32 or float16 data type.
- If data type of `var` is float16, all inputs must have the same data type as `var`.
+ - **var** (Union[Parameter, Tensor]) - Variable tensor to be updated. With float64, float32 or float16 data
+ type. If data type of `var` is float16, all inputs must have the same data type as `var`.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- - **m** (Parameter) - Variable tensor to be updated, has the same shape as `var`.
+ - **m** (Union[Parameter, Tensor]) - Variable tensor to be updated, has the same shape as `var`.
  - **lr** (Union[Number, Tensor]) - The learning rate value, should be a scalar or Tensor
  with float64, float32 or float16 data type.
  - **logbase** (Union[Number, Tensor]) - Should be a scalar or Tensor with float64, float32 or float16 data type.
@@ -5154,7 +5118,7 @@ class ApplyPowerSign(Primitive):
  - **grad** (Tensor) - A tensor of the same shape as `var`, for the gradient.
 
  Outputs:
- Tuple of 2 Tensors, the updated parameters.
+ Tuple of 2 Tensors, the updated parameters or tensors.
 
  - **var** (Tensor) - The same shape and data type as `var`.
  - **m** (Tensor) - The same shape and data type as `m`.
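ApplyPowerSign follows the same calling pattern, with `logbase` in place of `alpha` (values are illustrative):

import numpy as np
from mindspore import Tensor, Parameter, ops

power_sign = ops.ApplyPowerSign()
var = Parameter(Tensor(np.array([[0.6, 0.4], [0.1, 0.5]], np.float32)), name="var")
m = Parameter(Tensor(np.array([[0.6, 0.5], [0.2, 0.6]], np.float32)), name="m")
grad = Tensor(np.array([[0.3, 0.7], [0.1, 0.8]], np.float32))
# Input order: var, m, lr, logbase, sign_decay, beta, grad.
var_out, m_out = power_sign(var, m, 0.001, np.e, 0.99, 0.9, grad)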
@@ -5231,7 +5195,7 @@ class ApplyGradientDescent(Primitive):
  the relatively highest priority data type.
 
  Inputs:
- - **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
+ - **var** (Union[Parameter, Tensor]) - Variable tensor to be updated. With float32 or float16 data type.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
  - **alpha** (Union[Number, Tensor]) - Scaling factor, must be a scalar. With float32 or float16 data type.
  - **delta** (Tensor) - A tensor for the change, has the same shape as `var`.
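The update is simply var <- var - alpha * delta; a minimal sketch:

import numpy as np
from mindspore import Tensor, Parameter, ops

apply_gd = ops.ApplyGradientDescent()
var = Parameter(Tensor(np.ones((2, 2), np.float32)), name="var")
delta = Tensor(np.full((2, 2), 0.1, np.float32))
var_out = apply_gd(var, 0.001, delta)  # var is updated in place and returned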
@@ -5300,7 +5264,7 @@ class ApplyProximalGradientDescent(Primitive):
  the relatively highest priority data type.
 
  Inputs:
- - **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
+ - **var** (Union[Parameter, Tensor]) - Variable tensor to be updated. With float32 or float16 data type.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
  - **alpha** (Union[Number, Tensor]) - Scaling factor, must be a scalar. With float32 or float16 data type.
  - **l1** (Union[Number, Tensor]) - l1 regularization strength, must be a scalar.
@@ -5444,10 +5408,10 @@ class ApplyFtrl(Primitive):
  use_locking (bool): Use locks for updating operation if ``True`` . Default: ``False`` .
 
  Inputs:
- - **var** (Parameter) - The variable to be updated. The data type must be float16 or float32.
+ - **var** (Union[Parameter, Tensor]) - The variable to be updated. The data type must be float16 or float32.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- - **accum** (Parameter) - The accumulation to be updated, must be same shape as `var`.
- - **linear** (Parameter) - The linear coefficient to be updated, must be same shape as `var`.
+ - **accum** (Union[Parameter, Tensor]) - The accumulation to be updated, must be same shape as `var`.
+ - **linear** (Union[Parameter, Tensor]) - The linear coefficient to be updated, must be same shape as `var`.
  - **grad** (Tensor) - Gradient. The data type must be float16 or float32.
  - **lr** (Union[Number, Tensor]) - The learning rate value, must be positive. Default: ``0.001`` .
  It must be a float number or a scalar tensor with float16 or float32 data type.
@@ -5460,16 +5424,16 @@ class ApplyFtrl(Primitive):
  Default: ``-0.5`` . It must be a float number or a scalar tensor with float16 or float32 data type.
 
  Outputs:
- - **var** (Tensor) - Represents the updated `var`. As the input parameters has been updated in-place, this
- value is always zero when the platform is GPU.
+ - **var** (Tensor) - Represents the updated `var`. As the input parameters or tensors have been updated in-place,
+ this value is always zero when the platform is GPU.
 
  Raises:
  TypeError: If `use_locking` is not a bool.
  TypeError: If dtype of `var`, `grad`, `lr`, `l1`, `l2` or `lr_power` is neither float16 nor float32.
  TypeError: If `lr`, `l1`, `l2` or `lr_power` is neither a Number nor a Tensor.
  TypeError: If `grad` is not a Tensor.
- TypeError: If the parameter types of `var`, `accum` and `linear` are inconsistent.
- TypeError: If the parameter types of `grad`, `lr`, `l1`, `l2`, `lr_power` are inconsistent with `var`
+ TypeError: If the parameter or tensor types of `var`, `accum` and `linear` are inconsistent.
+ TypeError: If the parameter or tensor types of `grad`, `lr`, `l1`, `l2`, `lr_power` are inconsistent with `var`
  and the precision is greater than `var`.
 
  Supported Platforms:
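A minimal ApplyFtrl sketch; the scalar inputs mirror the defaults documented above:

import numpy as np
from mindspore import Tensor, Parameter, ops

apply_ftrl = ops.ApplyFtrl()
var = Parameter(Tensor(np.ones((2, 2), np.float32)), name="var")
accum = Parameter(Tensor(np.ones((2, 2), np.float32)), name="accum")
linear = Parameter(Tensor(np.zeros((2, 2), np.float32)), name="linear")
grad = Tensor(np.full((2, 2), 0.1, np.float32))
# Input order: var, accum, linear, grad, lr, l1, l2, lr_power.
out = apply_ftrl(var, accum, linear, grad, 0.001, 0.0, 0.0, -0.5)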
@@ -5544,10 +5508,10 @@ class SparseApplyFtrl(Primitive):
  use_locking (bool, optional): Use locks for updating operation if ``True`` . Default: ``False`` .
 
  Inputs:
- - **var** (Parameter) - The variable to be updated. The data type must be float16 or float32.
+ - **var** (Union[Parameter, Tensor]) - The variable to be updated. The data type must be float16 or float32.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- - **accum** (Parameter) - The accumulation to be updated, must be same shape as `var`.
- - **linear** (Parameter) - The linear coefficient to be updated, must be the same shape as `var`.
+ - **accum** (Union[Parameter, Tensor]) - The accumulation to be updated, must be same shape as `var`.
+ - **linear** (Union[Parameter, Tensor]) - The linear coefficient to be updated, must be the same shape as `var`.
  - **grad** (Tensor) - A tensor must meet with :math:`grad.shape[1:] = var.shape[1:]`
  if var.shape > 1.
  - **indices** (Tensor) - A tensor of indices in the first dimension of `var` and `accum`.
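For the sparse variant the hyper-parameters move into the constructor; a sketch with illustrative shapes:

import numpy as np
from mindspore import Tensor, Parameter, ops

sparse_ftrl = ops.SparseApplyFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)
var = Parameter(Tensor(np.ones((3, 2), np.float32)), name="var")
accum = Parameter(Tensor(np.ones((3, 2), np.float32)), name="accum")
linear = Parameter(Tensor(np.zeros((3, 2), np.float32)), name="linear")
grad = Tensor(np.full((2, 2), 0.1, np.float32))
indices = Tensor(np.array([0, 2], np.int32))
var_out, accum_out, linear_out = sparse_ftrl(var, accum, linear, grad, indices)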
@@ -6904,7 +6868,7 @@ class SparseApplyAdadelta(Primitive):
  to make the data types consistent. Besides, inputs of 'lr' and 'rho' also support implicit type conversion.
  If they have different data types, the lower priority data type will be converted to
  relatively highest priority data type.
- RuntimeError exception will be thrown when the data type conversion of Parameter is required.
+ RuntimeError exception will be thrown when the data type conversion of Parameter or Tensor is required.
 
  Note:
  If there are negative values or values greater than or equal to var.shape[0] in `indices`,
@@ -6916,11 +6880,11 @@ class SparseApplyAdadelta(Primitive):
  Default: ``False`` .
 
  Inputs:
- - **var** (Parameter) - Weights to be updated. With float32 or float16 data type.
- - **accum** (Parameter) - Accumulation to be updated. Mush have the same shape and dtype as `var`.
- With float32 or float16 data type.
- - **accum_update** (Parameter) - Accum_update to be updated. Must have the same shape and dtype as `var`.
- With float32 or float16 data type.
+ - **var** (Union[Parameter, Tensor]) - Weights to be updated. With float32 or float16 data type.
+ - **accum** (Union[Parameter, Tensor]) - Accumulation to be updated. Must have the same shape and dtype as
+ `var`. With float32 or float16 data type.
+ - **accum_update** (Union[Parameter, Tensor]) - Accum_update to be updated. Must have the same shape and dtype
+ as `var`. With float32 or float16 data type.
  - **lr** (Union[float, Tensor]) - Learning rate, must be a scalar. With float32 or float16 data type.
  - **rho** (Union[float, Tensor]) - Decay rate, must be a scalar. With float32 or float16 data type.
  - **grad** (Tensor) - A tensor for gradient. Must have the same shape and dtype as `var`.
@@ -6928,7 +6892,7 @@ class SparseApplyAdadelta(Primitive):
  Must be one of the following types: int32, int64 and indices.shape[0] = grad.shape[0].
 
  Outputs:
- Tuple of 3 Tensor, the updated parameters.
+ Tuple of 3 Tensors, the updated parameters or tensors.
 
  - **var** (Tensor) - The same shape and data type as `var`.
  - **accum** (Tensor) - The same shape and data type as `accum`.
@@ -7158,7 +7122,8 @@ class Conv3DTranspose(Primitive):
  \times (\text{kernel_size}[2] - 1) + \text{output_padding}[2] + 1
 
  Note:
- In Ascend, only support :math:`group=1`.
+ - In Ascend, only support :math:`group=1`.
+ - For Atlas A2 training series products, `output_padding` is currently not supported.
 
  Args:
  in_channel (int): The channel of the input x.
@@ -7204,12 +7169,15 @@ class Conv3DTranspose(Primitive):
  Inputs:
  - **dout** (Tensor) - The gradients with respect to the output of the convolution.
  The shape conforms to the default.
- data_format :math:`(N, C_{in}, D_{out}, H_{out}, W_{out})`. Currently dout data type only supports float16
- and float32.
+ data_format :math:`(N, C_{in}, D_{out}, H_{out}, W_{out})`.
+ Supported dtypes:
+
+ - Ascend: float16.
+ - GPU/CPU: float16, float32.
  - **weight** (Tensor) - Set size of kernel is :math:`(K_d, K_h, K_w)`, then the shape is
  :math:`(C_{in}, C_{out}//group, K_d, K_h, K_w)`. Where :math:`group` is the Args parameter,
  :math:`//` is the symbol for integer division.
- Currently weight data type only supports float16 and float32.
+ It has the same dtype as `dout`.
  - **bias** (Tensor) - Tensor of shape :math:`C_{out}`. Currently, only support none. Default: ``None`` .
 
  Outputs:
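A float16 call satisfies both per-platform dtype lists above; a sketch in the style of the library's own examples (shapes follow the output-size formula with the default stride of 1):

import numpy as np
import mindspore
from mindspore import Tensor, ops

dout = Tensor(np.ones([32, 16, 10, 32, 32]), mindspore.float16)
weight = Tensor(np.ones([16, 3, 4, 6, 2]), mindspore.float16)
conv3d_transpose = ops.Conv3DTranspose(in_channel=16, out_channel=3, kernel_size=(4, 6, 2))
output = conv3d_transpose(dout, weight)
print(output.shape)  # expected: (32, 3, 13, 37, 33)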
@@ -7495,12 +7463,12 @@ class ApplyAdagradDA(Primitive):
  Otherwise the behavior is undefined, but may exhibit less contention. Default: ``False`` .
 
  Inputs:
- - **var** (Parameter) - Variable to be updated. The data type must be float16 or float32.
+ - **var** (Union[Parameter, Tensor]) - Variable to be updated. The data type must be float16 or float32.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- - **gradient_accumulator** (Parameter) - The dict of mutable tensor :math:`grad\_accum`. Must have the same
- shape as `var`.
- - **gradient_squared_accumulator** (Parameter) - The dict of mutable tensor :math:`grad\_squared\_accum`.
+ - **gradient_accumulator** (Union[Parameter, Tensor]) - The dict of mutable tensor :math:`grad\_accum`.
  Must have the same shape as `var`.
+ - **gradient_squared_accumulator** (Union[Parameter, Tensor]) - The dict of mutable tensor
+ :math:`grad\_squared\_accum`. Must have the same shape as `var`.
  - **grad** (Tensor) - A tensor for gradient. Must have the same shape as `var`.
  - **lr** ([Number, Tensor]) - Scaling factor. Must be a scalar. With float32 or float16 data type.
  - **l1** ([Number, Tensor]) - L1 regularization. Must be a scalar. With float32 or float16 data type.
@@ -7508,12 +7476,12 @@ class ApplyAdagradDA(Primitive):
  - **global_step** ([Number, Tensor]) - Training step number. Must be a scalar. With int32 or int64 data type.
 
  Outputs:
- Tuple of 1 Tensors, the updated parameters.
+ Tuple of 1 Tensor, the updated parameters or tensors.
 
  - **var** (Tensor) - The same shape and data type as `var`.
 
  Raises:
- TypeError: If `var`, `gradient_accumulator` or `gradient_squared_accumulator` is not a Parameter.
+ TypeError: If `var`, `gradient_accumulator` or `gradient_squared_accumulator` is neither a Parameter nor a Tensor.
  TypeError: If `grad` is not a Tensor.
  TypeError: If `lr`, `l1`, `l2` or `global_step` is neither a Number nor a Tensor.
  TypeError: If use_locking is not a bool.
@@ -7607,10 +7575,12 @@ class SparseApplyRMSProp(Primitive):
  otherwise the behavior is undefined, but may exhibit less contention. Default: ``False`` .
 
  Inputs:
- - **var** (Parameter) - Variable to be updated. The data type must be float16 or float32.
+ - **var** (Union[Parameter, Tensor]) - Variable to be updated. The data type must be float16 or float32.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- - **ms** (Parameter) - The dict of mutable tensor ms. Must have the same shape and dtype as `var`.
- - **mom** (Parameter) - The dict of mutable tensor mom. Must have the same shape and dtype as `var`.
+ - **ms** (Union[Parameter, Tensor]) - The dict of mutable tensor ms. Must have the same shape and dtype as
+ `var`.
+ - **mom** (Union[Parameter, Tensor]) - The dict of mutable tensor mom. Must have the same shape and dtype as
+ `var`.
  - **lr** ([Number, Tensor]) - Learning rate. Must be a scalar. With float16 or float32 data type.
  - **grad** (Tensor) - A tensor for gradient. Must have the same shape and dtype as `var`.
  - **indices** (Tensor) - A tensor of indices in the first dimension of `var`, `ms` and `mom`.
@@ -7618,7 +7588,7 @@ class SparseApplyRMSProp(Primitive):
  following types: int32, int64 and indices.shape[0] = var.shape[0].
 
  Outputs:
- Tuple of 3 Tensors, the updated parameters.
+ Tuple of 3 Tensors, the updated parameters or tensors.
 
  - **var** (Tensor) - The same shape and data type as `var`.
  - **ms** (Tensor) - The same shape and data type as `ms`.
@@ -7724,12 +7694,12 @@ class SparseApplyCenteredRMSProp(Primitive):
  Default: ``False`` .
 
  Inputs:
- - **var** (Parameter) - Variable tensor to be updated. The data type must be int8, int16, int32, int64,
- uint8, uint16, uint32, uint64, float16, float32 or float64.
+ - **var** (Union[Parameter, Tensor]) - Variable tensor to be updated. The data type must be int8, int16, int32,
+ int64, uint8, uint16, uint32, uint64, float16, float32 or float64.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- - **mg** (Parameter) - Mean gradients. Must have the same shape and dtype as `var`.
- - **ms** (Parameter) - Mean square gradients. Must have the same shape and dtype as `var`.
- - **mom** (Parameter) - Delta of `var`. Must have the same shape and dtype as `var`.
+ - **mg** (Union[Parameter, Tensor]) - Mean gradients. Must have the same shape and dtype as `var`.
+ - **ms** (Union[Parameter, Tensor]) - Mean square gradients. Must have the same shape and dtype as `var`.
+ - **mom** (Union[Parameter, Tensor]) - Delta of `var`. Must have the same shape and dtype as `var`.
  - **lr** (Union[Number, Tensor]) - Learning rate. Must be a float number or a scalar tensor.
  Must have the same type as `var`.
  - **rho** (Union[Number, Tensor]) - Decay rate. Must be a float number or a scalar tensor.
@@ -7832,8 +7802,9 @@ class ApplyKerasMomentum(Primitive):
  so in the end, the var you get is actually var + momentum * accum. Default: ``False`` .
 
  Inputs:
- - **var** (Parameter) - Variable to be updated. With float16 or float32 data type.
- - **accum** (Parameter) - Must have the same shape and type as `var`. With float16 or float32 data type.
+ - **var** (Union[Parameter, Tensor]) - Variable to be updated. With float16 or float32 data type.
+ - **accum** (Union[Parameter, Tensor]) - Must have the same shape and type as `var`. With float16 or float32
+ data type.
  - **lr** (Union[Number, Tensor]) - Scaling factor. Must be a scalar. With float16 or float32 data type.
  - **grad** (Tensor) - The gradient. Must have the same shape and type as `var`.
  With float16 or float32 data type.
@@ -7984,12 +7955,12 @@ class ApplyAdamWithAmsgrad(Primitive):
  Default: ``False`` .
 
  Inputs:
- - **var** (Parameter) - Variable to be updated. The data type can be float16 or float32.
- - **m** (Parameter) - The 1st moment vector in the updating formula,
+ - **var** (Union[Parameter, Tensor]) - Variable to be updated. The data type can be float16 or float32.
+ - **m** (Union[Parameter, Tensor]) - The 1st moment vector in the updating formula,
  the shape and data type value should be the same as `var`.
- - **v** (Parameter) - the 2nd moment vector in the updating formula,
+ - **v** (Union[Parameter, Tensor]) - the 2nd moment vector in the updating formula,
  the shape and data type value should be the same as `var`.
- - **vhat** (Parameter) - :math:`\hat v_t` in the updating formula,
+ - **vhat** (Union[Parameter, Tensor]) - :math:`\hat v_t` in the updating formula,
  the shape and data type value should be the same as `var`.
  - **beta1_power** (Union[float, Tensor]) - :math:`beta_1^t(\beta_1^{t})` in the updating formula,
  a scalar tensor with float16 or float32 data type.
@@ -7999,7 +7970,7 @@ class ApplyAdamWithAmsgrad(Primitive):
  - **grad** (Tensor) - The gradient, has the same shape and data type as `var`.
 
  Outputs:
- Tuple of 4 Tensors, the updated parameters.
+ Tuple of 4 Tensors, the updated parameters or tensors.
 
  - **var** (Tensor) - The same shape and data type as `var`.
  - **m** (Tensor) - The same shape and data type as `m`.
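A minimal ApplyAdamWithAmsgrad sketch (the hyper-parameters are illustrative):

import numpy as np
from mindspore import Tensor, Parameter, ops

amsgrad = ops.ApplyAdamWithAmsgrad(beta1=0.9, beta2=0.999, epsilon=1e-8)
shape = (2, 2)
var = Parameter(Tensor(np.ones(shape, np.float32)), name="var")
m = Parameter(Tensor(np.zeros(shape, np.float32)), name="m")
v = Parameter(Tensor(np.zeros(shape, np.float32)), name="v")
vhat = Parameter(Tensor(np.zeros(shape, np.float32)), name="vhat")
grad = Tensor(np.full(shape, 0.1, np.float32))
# Input order: var, m, v, vhat, beta1_power, beta2_power, lr, grad.
var_out, m_out, v_out, vhat_out = amsgrad(var, m, v, vhat, 0.9, 0.999, 0.001, grad)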
@@ -8007,7 +7978,7 @@ class ApplyAdamWithAmsgrad(Primitive):
  - **vhat** (Tensor) - The same shape and data type as `vhat`.
 
  Raises:
- TypeError: If `var`, `m`, `v`, `vhat` is not a Parameter.
+ TypeError: If `var`, `m`, `v`, `vhat` is neither a Parameter nor a Tensor.
  TypeError: If `beta1_power`, `beta2_power`, `lr` is neither a Number nor a Tensor.
  TypeError: If `grad` is not a Tensor.
  TypeError: If dtype of `var`, `m`, `v`, `vhat`, `beta1_power`, `beta2_power`,
@@ -8091,12 +8062,12 @@ class ApplyAdamWithAmsgradV2(Primitive):
  Default: ``False`` .
 
  Inputs:
- - **var** (Parameter) - Variable to be updated. The data type can be float16, float32 or float64.
- - **m** (Parameter) - The 1st moment vector in the updating formula,
+ - **var** (Union[Parameter, Tensor]) - Variable to be updated. The data type can be float16, float32 or float64.
+ - **m** (Union[Parameter, Tensor]) - The 1st moment vector in the updating formula,
  the shape should be the same as `var`.
- - **v** (Parameter) - The 2nd moment vector in the updating formula,
+ - **v** (Union[Parameter, Tensor]) - The 2nd moment vector in the updating formula,
  the shape should be the same as `var`.
- - **vhat** (Parameter) - :math:`\hat v_t` in the updating formula,
+ - **vhat** (Union[Parameter, Tensor]) - :math:`\hat v_t` in the updating formula,
  the shape and data type value should be the same as `var`.
  - **beta1_power** (Union[float, Tensor]) - :math:`beta_1^t(\beta_1^{t})` in the updating formula,
  with float16, float32 or float64 data type.
@@ -8112,7 +8083,7 @@ class ApplyAdamWithAmsgradV2(Primitive):
  - **grad** (Tensor) - The gradient, has the same shape as `var`.
 
  Outputs:
- Tuple of 4 Tensors, the updated parameters.
+ Tuple of 4 Tensors, the updated parameters or tensors.
 
  - **var** (Tensor) - The same shape and data type as `var`.
  - **m** (Tensor) - The same shape and data type as `m`.
@@ -8120,7 +8091,7 @@ class ApplyAdamWithAmsgradV2(Primitive):
  - **vhat** (Tensor) - The same shape and data type as `vhat`.
 
  Raises:
- TypeError: If `var`, `m`, `v`, `vhat` is not a Parameter.
+ TypeError: If `var`, `m`, `v`, `vhat` is neither a Parameter nor a Tensor.
  TypeError: If dtype of `var`, `m`, `v`, `vhat`, `beta1_power`, `beta2_power`,
  `lr`, `beta1` , `beta2` , `epsilon` or `grad` is not float64, float32 or float16.
  RuntimeError: If the data type of `var`, `m`, `v` , `vhat` and `grad` conversion of Parameter is not supported.
@@ -8800,11 +8771,11 @@ class SparseApplyAdagradDA(Primitive):
  Otherwise the behavior is undefined, but may exhibit less contention. Default: ``False`` .
 
  Inputs:
- - **var** (Parameter) - Variable to be updated.
+ - **var** (Union[Parameter, Tensor]) - Variable to be updated.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- - **grad_accum** (Parameter) - The dict of mutable tensor grad_accum. Must have the same
+ - **grad_accum** (Union[Parameter, Tensor]) - The dict of mutable tensor grad_accum. Must have the same
  shape and dtype as `var`.
- - **grad_square_accum** (Parameter) - The dict of mutable tensor grad_square_accum.
+ - **grad_square_accum** (Union[Parameter, Tensor]) - The dict of mutable tensor grad_square_accum.
  Must have the same shape and dtype as `var`.
  - **grad** (Tensor) - A tensor of the same type as `var` and grad.shape[1:] = var.shape[1:] if rank(var) > 1.
  - **indices** (Tensor) - A tensor of indices in the first dimension of `var` and `accum`.
@@ -8982,8 +8953,8 @@ class SparseApplyProximalGradientDescent(Primitive):
  Default: ``False`` .
 
  Inputs:
- - **var** (Parameter) - Variable tensor to be updated. The data type must be int8, int16, int32, int64,
- uint8, uint16, uint32, uint64, float16, float32 or float64.
+ - **var** (Union[Parameter, Tensor]) - Variable tensor to be updated. The data type must be int8, int16, int32,
+ int64, uint8, uint16, uint32, uint64, float16, float32 or float64.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
  - **alpha** (Union[Number, Tensor]) - Scaling factor. Must be a scalar with same type as `var`.
  - **l1** (Union[Number, Tensor]) - L1 regularization. Must be a scalar with same type as `var`.
@@ -8998,7 +8969,7 @@ class SparseApplyProximalGradientDescent(Primitive):
  - **var** (Tensor) - Tensor, has the same shape and type as 'var'.
 
  Raises:
- TypeError: If `var`, `grad` or `indices` is not a Parameter..
+ TypeError: If `var` is neither a Parameter nor a Tensor.
  TypeError: If `alpha`, `l1`, `l2` is neither a Number nor a Tensor.
  TypeError: If `use_locking` is not a bool.
  TypeError: If dtype of `var`, `alpha`, `l1`, `l2` or `grad` is not one of int8, int16,
@@ -9134,51 +9105,6 @@ class NuclearNorm(Primitive):
  validator.check_value_type("keepdim", keepdim, [bool], self.name)
 
 
- class GLU(Primitive):
- r"""
- Computes GLU (Gated Linear Unit activation function) of input tensors.
-
- .. warning::
- This is an experimental API that is subject to change or deletion.
-
- Refer to :func:`mindspore.ops.glu` for more details.
-
- Args:
- axis (int, optional): Axis on which to split the input.
- The value of `axis` must be an int within range [-rank(`x`), rank(`x`)).
- Default: ``-1`` , specifying the last dimension.
-
- Inputs:
- - **x** (Tensor) - Input tensor. `x.shape[axis]` must be even.
-
- Outputs:
- Tensor, has the same data type with `x`.
-
- Supported Platforms:
- ``Ascend`` ``CPU``
-
- Examples:
- >>> from mindspore import ops, Tensor
- >>> from mindspore import dtype as mstype
- >>> import numpy as np
- >>> axis = 0
- >>> x = Tensor(np.array([0.3220, 0.9545, 0.7879, 0.0975, 0.3698,
- ... 0.5135, 0.5740, 0.3435, 0.1895, 0.8764,
- ... 0.4980, 0.9673, 0.9879, 0.6988, 0.9022,
- ... 0.9304, 0.1558, 0.0153, 0.1559, 0.9852]).reshape([2, 2, 5]), mstype.float32)
- >>> glu = ops.GLU(axis=axis)
- >>> y = glu(x)
- >>> print(y)
- [[[0.20028052 0.6916126 0.57412136 0.06512236 0.26307625]
- [0.3682598 0.3093122 0.17306386 0.10212085 0.63814086]]]
- """
-
- @prim_attr_register
- def __init__(self, axis=-1):
- """Initialize GLU"""
- validator.check_value_type("axis", axis, [int], self.name)
-
-
  class FractionalMaxPoolWithFixedKsize(Primitive):
  r"""
  Applies a 2D fractional max pooling to an input signal composed of multiple input planes.
@@ -9262,7 +9188,8 @@ class FractionalMaxPoolWithFixedKsize(Primitive):
  class ChannelShuffle(Primitive):
  r"""
  Divide the channels in a tensor of shape :math:`(*, C, H, W)` into :math:`g` group and
- rearrange them as :math:`(*, \frac C g, g, H*W)`, while keeping the original tensor shapes.
+ rearrange them as :math:`(*, \frac{C}{g}, g, H*W)`, while retaining the original tensor
+ shape in the final output.
 
  .. warning::
  This is an experimental API that is subject to change or deletion.
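The shape-preserving behavior called out in the reworded docstring is easy to check:

import numpy as np
import mindspore
from mindspore import Tensor, ops

channel_shuffle = ops.ChannelShuffle(group=2)
x = Tensor(np.arange(16).reshape(1, 4, 2, 2), mindspore.float32)
y = channel_shuffle(x)
print(y.shape)  # (1, 4, 2, 2): channels reordered, shape unchanged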
@@ -9470,93 +9397,6 @@ class WKV(Primitive):
  outputs=["output", "out_sp", "out_sq", "out_sm"])
 
 
- class PromptFlashAttention(Primitive):
- r"""
- The interface for fully inference.
- B -- Batch size
- S -- Sequence length
- H -- Hidden size
-
- Note:
- experiment ops
-
- .. warning::
- This is an experimental API that is subject to change or deletion.
-
- Args:
- num_heads (int): The number of heads.
- scale_value (float): The scale value indicating the scale coefficient, which is used as the scalar of
- Muls in the calculation. Default: 1.0.
- pre_tokens (int): Previous tokens. Default: 2147483547.
- next_tokens (int): next tokens. Default: 0.
- indicate the upper triangle, Indicate the number of data blocks involved in the calculation. The value 0
- indicates that the data blocks in the upper triangle are not involved in the calculation
- input_layout (str): the data layout of the input qkv, support `(BSH)` and `(BNSD)`, Default `BSH`.
- num_key_value_heads (int): head numbers of key/value which are used in GQA algorithm.
- The value o indicates if the key and value have the same head nums, use numHeads. Default: 0.
- sparse_mode (int): Default: 0
- inner_precise (int): 0, float16 high precision. 1, high performance. default 1
-
- Inputs:
- - **query** (Tensor) - The query tensor with data type of float16 or float32.
- Input tensor of shape :math:`(B, S, H)` / `(B, N, S, D)`.
- - **key** (Tensor) - The key tensor with data type of float16 or float32.
- Input tensor of shape :math:`(B, S, H)` / `(B, N, S, D)`.
- - **value** (Tensor) - The value tensor with data type of float16 or float32.
- Input tensor of shape :math:`(B, S, H)` / `(B, N, S, D)`.
- - **attn_mask** (Tensor) - The attention mask tensor with data type of float16 or float32.
- For each element, 0 indicates retention and 1 indicates discard. Input tensor of shape :math:`(B, 1, S, S)`.
- - **actual_seq_lengths** (Tensor): Describe actual sequence length of each input with data type of int64.
- - **actual_seq_lengths_kv** (Tensor): Describe actual sequence length of each input with data type of int64.
- - **pse_shift** (Tensor) - The position encoding tensor with data type of float16 or float32.
- - **dep_scale1** (Tensor)
- - **quant_scale1** (Tensor)
- - **deq_scale2** (Tensor)
- - **quant_scale2** (Tensor)
- - **quant_offset2** (Tensor)
-
- Outputs:
- - **attention_out** (Tensor) - Input tensor of shape :math:`(B, S, H)` / `(B, N, S, D)`.
-
- Supported Platforms:
- ``Ascend``
-
- Examples:
- >>> import mindspore.ops.operations.nn_ops as P
- >>> from mindspore import Tensor
- >>> import numpy as np
- >>> B = 1
- >>> N = 16
- >>> S = 256
- >>> D = 16
- >>> query = Tensor(np.ones((B, N, S, D), dtype=np.float16))
- >>> key = Tensor(np.ones((B, N, S, D), dtype=np.float16))
- >>> value = Tensor(np.ones((B, N, S, D), dtype=np.float16))
- >>> attn_mask = Tensor(np.ones((B, 1, S, S), dtype=np.float16))
- >>> pfa = P.PromptFlashAttention(N, input_layout='BNSD')
- >>> out = pfa(query, key, value, attn_mask, None, None, None, None, None, None, None, None)
- >>> print(out.shape)
- (1, 16, 256, 16)
- """
-
- @prim_attr_register
- def __init__(self, num_heads, scale_value=1.0, pre_tokens=214748647, next_tokens=0, input_layout='BSH',
- num_key_value_heads=0, sparse_mode=0, inner_precise=1):
- """Initialize PromptFlashAttention."""
- validator.check_value_type('num_heads', num_heads, [int], self.name)
- validator.check_value_type('scale_value', scale_value, [float], self.name)
- validator.check_value_type('pre_tokens', pre_tokens, [int], self.name)
- validator.check_value_type('next_tokens', next_tokens, [int], self.name)
- validator.check_value_type('input_layout', input_layout, [str], self.name)
- validator.check_value_type('num_key_value_heads', num_key_value_heads, [int], self.name)
- validator.check_value_type('sparse_mode', sparse_mode, [int], self.name)
- validator.check_value_type('inner_precise', inner_precise, [int], self.name)
- self.init_prim_io_names(inputs=["query", "key", "value", "attn_mask", "actual_seq_lengths",
- "actual_seq_lengths_kv", "pse_shift", "deq_scale1", "quant_scale1",
- "deq_scale2", "quant_scale2", "quant_offset2"],
- outputs=["attention_out"])
-
-
  class AllFinite(Primitive):
  r"""
  Check whether all gradients are finite.
@@ -9573,3 +9413,6 @@ class AllFinite(Primitive):
  raise RuntimeError(
  "The version of Ascend AI software package installed "
  "in the current environment does not support AllFinite.")
+
+ def __call__(self, *args):
+ return _convert_stub(pyboost_all_finite(self, args))
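A hypothetical usage sketch of the new direct call path (Ascend only; on other software stacks the guard above raises RuntimeError, and the exact return format is not spelled out in this diff):

import numpy as np
from mindspore import Tensor
from mindspore.ops.operations.nn_ops import AllFinite

grads = (Tensor(np.ones((2, 2), np.float32)),
         Tensor(np.ones((3,), np.float32)))
all_finite = AllFinite()
flag = all_finite(grads)  # assumed: a Tensor flagging whether every gradient is finite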