mindspore 2.4.10-cp311-none-any.whl → 2.5.0-cp311-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (690)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Third_Party_Open_Source_Software_Notice +39 -0
  3. mindspore/__init__.py +8 -3
  4. mindspore/_akg/akg/composite/build_module.py +6 -2
  5. mindspore/_akg/akg/utils/kernel_exec.py +2 -2
  6. mindspore/_c_dataengine.cpython-311-aarch64-linux-gnu.so +0 -0
  7. mindspore/_c_expression.cpython-311-aarch64-linux-gnu.so +0 -0
  8. mindspore/_c_mindrecord.cpython-311-aarch64-linux-gnu.so +0 -0
  9. mindspore/_checkparam.py +0 -5
  10. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  11. mindspore/_extends/parse/compile_config.py +64 -0
  12. mindspore/_extends/parse/deprecated/__init__.py +0 -0
  13. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +375 -0
  14. mindspore/_extends/parse/parser.py +23 -5
  15. mindspore/_extends/parse/standard_method.py +123 -27
  16. mindspore/_extends/pijit/pijit_func_white_list.py +1 -1
  17. mindspore/amp.py +7 -1
  18. mindspore/boost/boost_cell_wrapper.py +136 -41
  19. mindspore/common/__init__.py +3 -1
  20. mindspore/common/_register_for_tensor.py +0 -1
  21. mindspore/common/_stub_tensor.py +25 -4
  22. mindspore/common/_tensor_cpp_method.py +17 -0
  23. mindspore/common/_tensor_docs.py +6132 -0
  24. mindspore/common/api.py +98 -21
  25. mindspore/common/dtype.py +34 -34
  26. mindspore/common/dump.py +2 -1
  27. mindspore/common/file_system.py +8 -3
  28. mindspore/common/generator.py +2 -0
  29. mindspore/common/hook_handle.py +3 -1
  30. mindspore/common/initializer.py +3 -4
  31. mindspore/common/lazy_inline.py +8 -2
  32. mindspore/common/mindir_util.py +10 -2
  33. mindspore/common/parameter.py +31 -15
  34. mindspore/common/tensor.py +713 -1337
  35. mindspore/communication/__init__.py +1 -1
  36. mindspore/communication/_comm_helper.py +5 -0
  37. mindspore/communication/comm_func.py +215 -173
  38. mindspore/communication/management.py +23 -20
  39. mindspore/context.py +285 -191
  40. mindspore/dataset/__init__.py +23 -19
  41. mindspore/dataset/callback/ds_callback.py +2 -1
  42. mindspore/dataset/core/config.py +84 -3
  43. mindspore/dataset/engine/cache_admin.py +3 -3
  44. mindspore/dataset/engine/cache_client.py +5 -4
  45. mindspore/dataset/engine/datasets.py +192 -149
  46. mindspore/dataset/engine/datasets_audio.py +14 -0
  47. mindspore/dataset/engine/datasets_standard_format.py +11 -11
  48. mindspore/dataset/engine/datasets_text.py +38 -1
  49. mindspore/dataset/engine/datasets_user_defined.py +100 -66
  50. mindspore/dataset/engine/datasets_vision.py +81 -8
  51. mindspore/dataset/engine/iterators.py +281 -63
  52. mindspore/dataset/engine/obs/util.py +8 -0
  53. mindspore/dataset/engine/queue.py +40 -0
  54. mindspore/dataset/engine/samplers.py +26 -2
  55. mindspore/dataset/engine/serializer_deserializer.py +1 -1
  56. mindspore/dataset/engine/validators.py +43 -11
  57. mindspore/dataset/transforms/py_transforms_util.py +17 -0
  58. mindspore/dataset/transforms/transforms.py +29 -12
  59. mindspore/dataset/vision/validators.py +1 -2
  60. mindspore/device_context/__init__.py +21 -0
  61. mindspore/device_context/ascend/__init__.py +25 -0
  62. mindspore/device_context/ascend/device.py +72 -0
  63. mindspore/device_context/ascend/op_debug.py +94 -0
  64. mindspore/device_context/ascend/op_precision.py +193 -0
  65. mindspore/device_context/ascend/op_tuning.py +127 -0
  66. mindspore/device_context/cpu/__init__.py +25 -0
  67. mindspore/device_context/cpu/device.py +62 -0
  68. mindspore/device_context/cpu/op_tuning.py +43 -0
  69. mindspore/device_context/gpu/__init__.py +21 -0
  70. mindspore/device_context/gpu/device.py +70 -0
  71. mindspore/device_context/gpu/op_precision.py +67 -0
  72. mindspore/device_context/gpu/op_tuning.py +175 -0
  73. mindspore/device_manager.py +134 -0
  74. mindspore/experimental/llm_boost/__init__.py +1 -0
  75. mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
  76. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
  77. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
  78. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  79. mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
  80. mindspore/experimental/llm_boost/register.py +1 -0
  81. mindspore/experimental/optim/adadelta.py +26 -22
  82. mindspore/experimental/optim/adam.py +3 -0
  83. mindspore/experimental/optim/lr_scheduler.py +33 -24
  84. mindspore/experimental/optim/radam.py +33 -30
  85. mindspore/hal/device.py +28 -0
  86. mindspore/hal/event.py +17 -0
  87. mindspore/hal/memory.py +94 -3
  88. mindspore/hal/stream.py +91 -6
  89. mindspore/include/api/context.h +0 -1
  90. mindspore/lib/libavcodec.so.59 +0 -0
  91. mindspore/lib/libavdevice.so.59 +0 -0
  92. mindspore/lib/libavfilter.so.8 +0 -0
  93. mindspore/lib/libavformat.so.59 +0 -0
  94. mindspore/lib/libavutil.so.57 +0 -0
  95. mindspore/lib/libdnnl.so.2 +0 -0
  96. mindspore/lib/libmindspore_backend.so +0 -0
  97. mindspore/lib/libmindspore_common.so +0 -0
  98. mindspore/lib/libmindspore_core.so +0 -0
  99. mindspore/lib/libmindspore_glog.so.0 +0 -0
  100. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  101. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  102. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  103. mindspore/lib/libmindspore_ops.so +0 -0
  104. mindspore/lib/libmpi_adapter.so +0 -0
  105. mindspore/lib/libmpi_collective.so +0 -0
  106. mindspore/lib/libnnacl.so +0 -0
  107. mindspore/lib/libopencv_core.so.4.5 +0 -0
  108. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  109. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  110. mindspore/lib/libps_cache.so +0 -0
  111. mindspore/lib/libswresample.so.4 +0 -0
  112. mindspore/lib/libswscale.so.6 +0 -0
  113. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910_93/aic-ascend910_93-ops-info.json +2048 -0
  114. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  115. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  116. mindspore/lib/plugin/ascend/custom_ascendc_910/op_api/lib/libcust_opapi.so +0 -0
  117. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl/dynamic/decoder_kv_cache.py +1 -1
  118. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl/dynamic/prompt_kv_cache.py +1 -1
  119. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
  120. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  121. mindspore/lib/plugin/ascend/custom_ascendc_910/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
  122. mindspore/lib/plugin/ascend/custom_ascendc_910/version.info +1 -1
  123. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_api/lib/libcust_opapi.so +0 -0
  124. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/config/ascend910_93/aic-ascend910_93-ops-info.json +224 -0
  125. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/all_finite.py +1 -1
  126. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/decoder_kv_cache.py +1 -1
  127. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/prompt_kv_cache.py +1 -1
  128. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.json +78 -0
  129. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.o +0 -0
  130. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.json +78 -0
  131. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.o +0 -0
  132. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.json +78 -0
  133. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.o +0 -0
  134. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.json +156 -0
  135. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.o +0 -0
  136. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.json +156 -0
  137. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.o +0 -0
  138. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.json +156 -0
  139. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.o +0 -0
  140. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.json +156 -0
  141. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.o +0 -0
  142. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.json +156 -0
  143. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.o +0 -0
  144. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.json +156 -0
  145. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.o +0 -0
  146. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.json +156 -0
  147. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.o +0 -0
  148. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.json +156 -0
  149. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.o +0 -0
  150. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.json +165 -0
  151. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.o +0 -0
  152. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.json +165 -0
  153. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.o +0 -0
  154. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.json +165 -0
  155. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.o +0 -0
  156. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.json +165 -0
  157. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.o +0 -0
  158. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.json +165 -0
  159. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.o +0 -0
  160. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.json +165 -0
  161. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.o +0 -0
  162. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.json +165 -0
  163. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.o +0 -0
  164. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.json +165 -0
  165. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.o +0 -0
  166. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/all_finite.json +139 -0
  167. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/binary_info_config.json +361 -0
  168. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/decoder_kv_cache.json +892 -0
  169. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/prompt_kv_cache.json +892 -0
  170. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
  171. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  172. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
  173. mindspore/lib/plugin/ascend/custom_ascendc_910b/version.info +1 -1
  174. mindspore/lib/plugin/ascend/custom_compiler/setup.py +1 -1
  175. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  176. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  177. mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
  178. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  179. mindspore/lib/plugin/ascend/libmindspore_internal_kernels.so +0 -0
  180. mindspore/lib/plugin/ascend/libms_ascend_native_boost.so +0 -0
  181. mindspore/lib/plugin/ascend/libms_atb_boost.so +0 -0
  182. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +957 -955
  183. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
  184. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/liblcal_static.a +0 -0
  185. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{acme/include/base_type.h → base_type.h} +25 -20
  186. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{cast/cast_tiling.h → internal.h} +6 -4
  187. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_op.h +114 -0
  188. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/boost_kernel.h +70 -0
  189. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/llama_impl.h +85 -0
  190. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/model_interface.h +52 -0
  191. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/tensor.h +81 -0
  192. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_creator.h +123 -0
  193. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +155 -110
  194. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{acme/include/tiling_info.h → tiling_info.h} +12 -9
  195. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tiling_utils.h +178 -0
  196. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layer_norm_op.so +0 -0
  197. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_op.so +0 -0
  198. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_quant_op.so +0 -0
  199. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_310p_op.so +0 -0
  200. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_op.so +0 -0
  201. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_op.so +0 -0
  202. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcompare_op.so +0 -0
  203. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_op.so +0 -0
  204. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libllama_op.so +0 -0
  205. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_op.so +0 -0
  206. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
  207. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_optiling.so +0 -0
  208. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmulti_weight_matmul_kernel_op.so +0 -0
  209. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_op.so +0 -0
  210. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_op.so +0 -0
  211. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_op.so +0 -0
  212. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_f16_nz/internal_pp_matmul_f16_nz.o +0 -0
  213. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_f16_nz/internal_pp_matmul_f16_nz_0.o +0 -0
  214. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_i8_nz_compress/internal_pp_matmul_i8_nz_compress.o +0 -0
  215. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_i8_nz_compress/internal_pp_matmul_i8_nz_compress_0.o +0 -0
  216. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_int8_nz/internal_pp_matmul_int8_nz.o +0 -0
  217. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_int8_nz/internal_pp_matmul_int8_nz_0.o +0 -0
  218. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libadd_rms_norm_quant_ascend310p.so +0 -0
  219. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libapply_rotary_pos_emb_310p_impl.so → op_kernels/ascend310p/so_kernels/libapply_rotary_pos_emb_310p_ascend310p.so} +0 -0
  220. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libcast_ascend310p.so +0 -0
  221. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libcompare_ascend310p.so +0 -0
  222. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libgelu_ascend310p.so +0 -0
  223. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libmatmul_ascend310p.so +0 -0
  224. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libreshape_and_cache_nz_ascend310p.so +0 -0
  225. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_4b60f88cdc28b25a36bad2d8b0a88092.json +163 -0
  226. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_4b60f88cdc28b25a36bad2d8b0a88092.o +0 -0
  227. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_cde61da2bd6fededcb1ba310a6ad16ee.json +163 -0
  228. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_cde61da2bd6fededcb1ba310a6ad16ee.o +0 -0
  229. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
  230. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
  231. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bsh_full_mix.o +0 -0
  232. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
  233. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
  234. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
  235. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bsh_full_mix.o +0 -0
  236. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
  237. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix.o +0 -0
  238. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix_mix_aic_0.o +0 -0
  239. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  240. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix.o +0 -0
  241. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix_mix_aic_0.o +0 -0
  242. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  243. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_bf16_bf16.o +0 -0
  244. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_bf16_fp16.o +0 -0
  245. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_bf16_fp32.o +0 -0
  246. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_fp16_bf16.o +0 -0
  247. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_fp16_fp16.o +0 -0
  248. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_fp16_fp32.o +0 -0
  249. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2.o +0 -0
  250. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2_mix_aic_0.o +0 -0
  251. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2_mix_aiv_0.o +0 -0
  252. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libadd_layer_norm_impl.so → op_kernels/ascend910b/so_kernels/libadd_layer_norm_ascend910b.so} +0 -0
  253. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libadd_rms_norm_impl.so → op_kernels/ascend910b/so_kernels/libadd_rms_norm_ascend910b.so} +0 -0
  254. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/so_kernels/libadd_rms_norm_quant_ascend910b.so +0 -0
  255. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libapply_rotary_pos_emb_impl.so → op_kernels/ascend910b/so_kernels/libapply_rotary_pos_emb_ascend910b.so} +0 -0
  256. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libcast_impl.so → op_kernels/ascend910b/so_kernels/libcast_ascend910b.so} +0 -0
  257. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libnot_equal_impl.so → op_kernels/ascend910b/so_kernels/libcompare_ascend910b.so} +0 -0
  258. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libgelu_impl.so → op_kernels/ascend910b/so_kernels/libgelu_ascend910b.so} +0 -0
  259. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/so_kernels/libllama_ascend910b.so +0 -0
  260. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libmatmul_impl.so → op_kernels/ascend910b/so_kernels/libmatmul_ascend910b.so} +0 -0
  261. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libmulti_weight_matmul_kernel_impl.so → op_kernels/ascend910b/so_kernels/libmulti_weight_matmul_kernel_ascend910b.so} +0 -0
  262. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libreshape_and_cache_impl.so → op_kernels/ascend910b/so_kernels/libreshape_and_cache_ascend910b.so} +0 -0
  263. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/librms_norm_impl.so → op_kernels/ascend910b/so_kernels/librms_norm_ascend910b.so} +0 -0
  264. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
  265. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  266. mindspore/log.py +12 -0
  267. mindspore/mindrecord/__init__.py +1 -1
  268. mindspore/mindrecord/config.py +17 -316
  269. mindspore/mindrecord/filereader.py +1 -9
  270. mindspore/mindrecord/filewriter.py +5 -15
  271. mindspore/mindrecord/mindpage.py +1 -9
  272. mindspore/mint/__init__.py +824 -218
  273. mindspore/mint/distributed/__init__.py +66 -4
  274. mindspore/mint/distributed/distributed.py +2594 -44
  275. mindspore/mint/linalg/__init__.py +6 -0
  276. mindspore/mint/nn/__init__.py +473 -14
  277. mindspore/mint/nn/functional.py +486 -11
  278. mindspore/mint/nn/layer/__init__.py +17 -4
  279. mindspore/mint/nn/layer/_functions.py +330 -0
  280. mindspore/mint/nn/layer/activation.py +169 -1
  281. mindspore/mint/nn/layer/basic.py +123 -0
  282. mindspore/mint/nn/layer/conv.py +727 -0
  283. mindspore/mint/nn/layer/normalization.py +215 -19
  284. mindspore/mint/nn/layer/padding.py +797 -0
  285. mindspore/mint/nn/layer/pooling.py +170 -0
  286. mindspore/mint/optim/__init__.py +2 -1
  287. mindspore/mint/optim/adam.py +223 -0
  288. mindspore/mint/optim/adamw.py +26 -19
  289. mindspore/mint/special/__init__.py +2 -1
  290. mindspore/multiprocessing/__init__.py +5 -0
  291. mindspore/nn/cell.py +126 -19
  292. mindspore/nn/dynamic_lr.py +2 -1
  293. mindspore/nn/layer/activation.py +6 -6
  294. mindspore/nn/layer/basic.py +35 -25
  295. mindspore/nn/layer/channel_shuffle.py +3 -3
  296. mindspore/nn/layer/embedding.py +3 -3
  297. mindspore/nn/layer/normalization.py +8 -7
  298. mindspore/nn/layer/padding.py +4 -3
  299. mindspore/nn/layer/pooling.py +47 -13
  300. mindspore/nn/layer/rnn_cells.py +1 -1
  301. mindspore/nn/layer/rnns.py +2 -1
  302. mindspore/nn/layer/timedistributed.py +5 -5
  303. mindspore/nn/layer/transformer.py +48 -26
  304. mindspore/nn/learning_rate_schedule.py +5 -3
  305. mindspore/nn/loss/loss.py +31 -36
  306. mindspore/nn/optim/ada_grad.py +1 -0
  307. mindspore/nn/optim/adadelta.py +2 -2
  308. mindspore/nn/optim/adam.py +1 -1
  309. mindspore/nn/optim/lars.py +1 -4
  310. mindspore/nn/optim/optimizer.py +1 -1
  311. mindspore/nn/optim/rprop.py +2 -2
  312. mindspore/nn/optim/thor.py +2 -1
  313. mindspore/nn/utils/init.py +13 -11
  314. mindspore/nn/wrap/cell_wrapper.py +4 -6
  315. mindspore/nn/wrap/loss_scale.py +3 -4
  316. mindspore/numpy/array_creations.py +60 -62
  317. mindspore/numpy/array_ops.py +148 -143
  318. mindspore/numpy/logic_ops.py +41 -42
  319. mindspore/numpy/math_ops.py +361 -359
  320. mindspore/numpy/utils.py +16 -16
  321. mindspore/numpy/utils_const.py +4 -4
  322. mindspore/ops/__init__.py +2 -1
  323. mindspore/ops/_grad_experimental/grad_comm_ops.py +94 -13
  324. mindspore/ops/_grad_experimental/grad_debug_ops.py +6 -1
  325. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  326. mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
  327. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  328. mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
  329. mindspore/ops/_vmap/vmap_array_ops.py +20 -19
  330. mindspore/ops/_vmap/vmap_base.py +0 -2
  331. mindspore/ops/_vmap/vmap_grad_nn_ops.py +19 -13
  332. mindspore/ops/_vmap/vmap_math_ops.py +11 -9
  333. mindspore/ops/_vmap/vmap_nn_ops.py +20 -34
  334. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +149 -12
  335. mindspore/ops/auto_generate/gen_arg_handler.py +0 -61
  336. mindspore/ops/auto_generate/gen_extend_func.py +554 -60
  337. mindspore/ops/auto_generate/gen_ops_def.py +1621 -115
  338. mindspore/ops/auto_generate/gen_ops_prim.py +8024 -3409
  339. mindspore/ops/auto_generate/pyboost_inner_prim.py +183 -79
  340. mindspore/ops/composite/base.py +1 -1
  341. mindspore/ops/composite/multitype_ops/_compile_utils.py +229 -30
  342. mindspore/ops/composite/multitype_ops/pow_impl.py +0 -29
  343. mindspore/ops/function/__init__.py +12 -0
  344. mindspore/ops/function/array_func.py +561 -159
  345. mindspore/ops/function/clip_func.py +64 -0
  346. mindspore/ops/function/debug_func.py +28 -20
  347. mindspore/ops/function/image_func.py +1 -1
  348. mindspore/ops/function/linalg_func.py +5 -4
  349. mindspore/ops/function/math_func.py +1659 -290
  350. mindspore/ops/function/nn_func.py +988 -317
  351. mindspore/ops/function/parameter_func.py +3 -56
  352. mindspore/ops/function/random_func.py +243 -33
  353. mindspore/ops/function/sparse_unary_func.py +1 -1
  354. mindspore/ops/functional.py +18 -5
  355. mindspore/ops/functional_overload.py +897 -0
  356. mindspore/ops/operations/__init__.py +3 -2
  357. mindspore/ops/operations/_embedding_cache_ops.py +4 -4
  358. mindspore/ops/operations/_grad_ops.py +2 -34
  359. mindspore/ops/operations/_infer_ops.py +2 -1
  360. mindspore/ops/operations/_inner_ops.py +38 -8
  361. mindspore/ops/operations/array_ops.py +45 -303
  362. mindspore/ops/operations/comm_ops.py +19 -16
  363. mindspore/ops/operations/custom_ops.py +11 -55
  364. mindspore/ops/operations/debug_ops.py +42 -47
  365. mindspore/ops/operations/inner_ops.py +6 -4
  366. mindspore/ops/operations/linalg_ops.py +3 -2
  367. mindspore/ops/operations/manually_defined/ops_def.py +185 -104
  368. mindspore/ops/operations/math_ops.py +11 -216
  369. mindspore/ops/operations/nn_ops.py +146 -308
  370. mindspore/ops/primitive.py +23 -21
  371. mindspore/ops/tensor_method.py +1669 -0
  372. mindspore/ops_generate/aclnn_kernel_register_auto_cc_generator.py +110 -0
  373. mindspore/ops_generate/add_tensor_docs_generator.py +54 -0
  374. mindspore/ops_generate/arg_handler.py +0 -61
  375. mindspore/ops_generate/auto_grad_impl_cc_generator.py +135 -0
  376. mindspore/ops_generate/auto_grad_reg_cc_generator.py +93 -0
  377. mindspore/ops_generate/base_generator.py +11 -0
  378. mindspore/ops_generate/cpp_create_prim_instance_helper_generator.py +108 -0
  379. mindspore/ops_generate/functional_map_cpp_generator.py +491 -0
  380. mindspore/ops_generate/functional_overload_py_generator.py +110 -0
  381. mindspore/ops_generate/functions_cc_generator.py +233 -0
  382. mindspore/ops_generate/gen_aclnn_implement.py +110 -114
  383. mindspore/ops_generate/gen_constants.py +157 -3
  384. mindspore/ops_generate/gen_ops.py +245 -990
  385. mindspore/ops_generate/gen_pyboost_func.py +97 -998
  386. mindspore/ops_generate/gen_utils.py +119 -33
  387. mindspore/ops_generate/lite_ops_cpp_generator.py +155 -0
  388. mindspore/ops_generate/op_api_proto.py +206 -0
  389. mindspore/ops_generate/op_def_py_generator.py +131 -0
  390. mindspore/ops_generate/op_prim_py_generator.py +480 -0
  391. mindspore/ops_generate/op_proto.py +373 -108
  392. mindspore/ops_generate/op_template_parser.py +436 -0
  393. mindspore/ops_generate/ops_def_cc_generator.py +288 -0
  394. mindspore/ops_generate/ops_def_h_generator.py +74 -0
  395. mindspore/ops_generate/ops_name_h_generator.py +68 -0
  396. mindspore/ops_generate/ops_primitive_h_generator.py +81 -0
  397. mindspore/ops_generate/pyboost_functions_cpp_generator.py +370 -0
  398. mindspore/ops_generate/pyboost_functions_h_generator.py +68 -0
  399. mindspore/ops_generate/pyboost_functions_py_generator.py +148 -0
  400. mindspore/ops_generate/pyboost_grad_function_cpp_generator.py +154 -0
  401. mindspore/ops_generate/pyboost_inner_prim_generator.py +131 -0
  402. mindspore/ops_generate/pyboost_native_grad_functions_generator.py +268 -0
  403. mindspore/ops_generate/pyboost_op_cpp_code_generator.py +851 -0
  404. mindspore/ops_generate/pyboost_overload_functions_cpp_generator.py +344 -0
  405. mindspore/ops_generate/pyboost_utils.py +92 -33
  406. mindspore/ops_generate/template.py +294 -44
  407. mindspore/ops_generate/tensor_func_reg_cpp_generator.py +422 -0
  408. mindspore/parallel/__init__.py +3 -3
  409. mindspore/parallel/_auto_parallel_context.py +24 -33
  410. mindspore/parallel/_parallel_serialization.py +13 -2
  411. mindspore/parallel/_utils.py +4 -1
  412. mindspore/parallel/algo_parameter_config.py +1 -1
  413. mindspore/parallel/checkpoint_transform.py +44 -0
  414. mindspore/parallel/cluster/process_entity/_api.py +131 -37
  415. mindspore/parallel/cluster/process_entity/_utils.py +41 -6
  416. mindspore/parallel/cluster/run.py +20 -3
  417. mindspore/parallel/parameter_broadcast.py +1 -1
  418. mindspore/parallel/shard.py +3 -0
  419. mindspore/parallel/transform_safetensors.py +119 -253
  420. mindspore/profiler/__init__.py +17 -4
  421. mindspore/profiler/analysis/__init__.py +0 -0
  422. mindspore/profiler/analysis/parser/__init__.py +0 -0
  423. mindspore/profiler/analysis/parser/ascend_cann_parser.py +166 -0
  424. mindspore/profiler/analysis/parser/base_parser.py +158 -0
  425. mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
  426. mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
  427. mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
  428. mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
  429. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +261 -0
  430. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
  431. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +84 -0
  432. mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
  433. mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
  434. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
  435. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
  436. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
  437. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
  438. mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
  439. mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
  440. mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
  441. mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
  442. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +260 -0
  443. mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
  444. mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
  445. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
  446. mindspore/profiler/analysis/task_manager.py +131 -0
  447. mindspore/profiler/analysis/time_converter.py +84 -0
  448. mindspore/profiler/analysis/viewer/__init__.py +0 -0
  449. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +333 -0
  450. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
  451. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +252 -0
  452. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +313 -0
  453. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +322 -0
  454. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +265 -0
  455. mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
  456. mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
  457. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +97 -0
  458. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
  459. mindspore/profiler/analysis/work_flow.py +73 -0
  460. mindspore/profiler/common/ascend_msprof_exporter.py +138 -0
  461. mindspore/profiler/common/command_executor.py +90 -0
  462. mindspore/profiler/common/constant.py +174 -3
  463. mindspore/profiler/common/file_manager.py +208 -0
  464. mindspore/profiler/common/log.py +130 -0
  465. mindspore/profiler/common/msprof_cmd_tool.py +202 -0
  466. mindspore/profiler/common/path_manager.py +371 -0
  467. mindspore/profiler/common/process_bar.py +168 -0
  468. mindspore/profiler/common/process_pool.py +9 -3
  469. mindspore/profiler/common/profiler_context.py +476 -0
  470. mindspore/profiler/common/profiler_info.py +304 -0
  471. mindspore/profiler/common/profiler_output_path.py +284 -0
  472. mindspore/profiler/common/profiler_parameters.py +210 -0
  473. mindspore/profiler/common/profiler_path_manager.py +120 -0
  474. mindspore/profiler/common/record_function.py +76 -0
  475. mindspore/profiler/common/tlv_decoder.py +76 -0
  476. mindspore/profiler/common/util.py +75 -2
  477. mindspore/profiler/dynamic_profiler.py +270 -37
  478. mindspore/profiler/envprofiler.py +138 -0
  479. mindspore/profiler/mstx.py +199 -0
  480. mindspore/profiler/platform/__init__.py +21 -0
  481. mindspore/profiler/platform/base_profiler.py +40 -0
  482. mindspore/profiler/platform/cpu_profiler.py +124 -0
  483. mindspore/profiler/platform/gpu_profiler.py +74 -0
  484. mindspore/profiler/platform/npu_profiler.py +309 -0
  485. mindspore/profiler/profiler.py +580 -93
  486. mindspore/profiler/profiler_action_controller.py +187 -0
  487. mindspore/profiler/profiler_interface.py +114 -0
  488. mindspore/profiler/schedule.py +208 -0
  489. mindspore/rewrite/api/symbol_tree.py +1 -2
  490. mindspore/run_check/_check_version.py +2 -6
  491. mindspore/runtime/__init__.py +37 -0
  492. mindspore/runtime/device.py +27 -0
  493. mindspore/runtime/event.py +209 -0
  494. mindspore/runtime/executor.py +148 -0
  495. mindspore/runtime/memory.py +392 -0
  496. mindspore/runtime/stream.py +460 -0
  497. mindspore/runtime/thread_bind_core.py +401 -0
  498. mindspore/train/__init__.py +2 -2
  499. mindspore/train/_utils.py +53 -18
  500. mindspore/train/amp.py +8 -4
  501. mindspore/train/callback/_checkpoint.py +32 -18
  502. mindspore/train/callback/_early_stop.py +1 -1
  503. mindspore/train/callback/_flops_collector.py +105 -69
  504. mindspore/train/callback/_history.py +1 -1
  505. mindspore/train/callback/_summary_collector.py +44 -6
  506. mindspore/train/callback/_tft_register.py +31 -10
  507. mindspore/train/dataset_helper.py +11 -11
  508. mindspore/train/metrics/precision.py +4 -5
  509. mindspore/train/mind_ir_pb2.py +167 -46
  510. mindspore/train/model.py +13 -15
  511. mindspore/train/serialization.py +462 -76
  512. mindspore/train/summary/summary_record.py +1 -2
  513. mindspore/train/train_thor/model_thor.py +1 -1
  514. mindspore/utils/__init__.py +4 -2
  515. mindspore/utils/bin/dataset-cache +0 -0
  516. mindspore/utils/bin/dataset-cache-server +0 -0
  517. mindspore/utils/dryrun.py +138 -0
  518. mindspore/utils/runtime_execution_order_check.py +550 -0
  519. mindspore/version.py +1 -1
  520. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/METADATA +2 -3
  521. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/RECORD +524 -458
  522. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/entry_points.txt +1 -1
  523. mindspore/_data_dump.cpython-311-aarch64-linux-gnu.so +0 -0
  524. mindspore/bin/cache_admin +0 -0
  525. mindspore/bin/cache_server +0 -0
  526. mindspore/common/_tensor_overload.py +0 -139
  527. mindspore/lib/libmindspore_np_dtype.so +0 -0
  528. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
  529. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -82
  530. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -113
  531. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -193
  532. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/dtype_registry.h +0 -90
  533. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -46
  534. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
  535. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
  536. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_layer_norm_op.h +0 -60
  537. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_rms_norm_op.h +0 -50
  538. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_rms_norm_quant_op.h +0 -50
  539. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/apply_rotary_pos_emb_nz_op.h +0 -42
  540. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/apply_rotary_pos_emb_op.h +0 -55
  541. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -34
  542. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_only_ops.h +0 -94
  543. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_op_base.h +0 -97
  544. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
  545. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/flash_attention_score_op.h +0 -97
  546. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/gelu_op.h +0 -44
  547. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_add_rmsnorm_op.h +0 -73
  548. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -108
  549. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/multi_impls_op.h +0 -64
  550. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/multi_weight_matmul_op.h +0 -91
  551. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/paged_attention_op.h +0 -99
  552. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/reshape_and_cache_nz_op.h +0 -44
  553. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/reshape_and_cache_op.h +0 -44
  554. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/rms_norm_op.h +0 -64
  555. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -179
  556. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -69
  557. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/profiling_util.h +0 -366
  558. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -56
  559. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/kernel/add.h +0 -21
  560. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/tiling/add_tiling.h +0 -43
  561. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -46
  562. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb.h +0 -23
  563. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_base.h +0 -456
  564. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_bf16.h +0 -217
  565. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp.h +0 -391
  566. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp16.h +0 -126
  567. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -230
  568. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_tiling.h +0 -43
  569. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_value.h +0 -27
  570. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/apply_rotary_pos_emb_nz_impl.h +0 -34
  571. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz.h +0 -23
  572. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_base.h +0 -460
  573. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_fp16.h +0 -116
  574. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_fp32.h +0 -230
  575. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_tiling.h +0 -43
  576. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_value.h +0 -27
  577. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -74
  578. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -74
  579. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_impl.h +0 -48
  580. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/kernel/cast_kernel.h +0 -21
  581. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -55
  582. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_tiling.h +0 -27
  583. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/kernel/compare_kernel.h +0 -23
  584. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
  585. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
  586. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
  587. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
  588. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
  589. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
  590. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
  591. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
  592. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
  593. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
  594. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
  595. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
  596. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
  597. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
  598. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
  599. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
  600. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
  601. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
  602. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
  603. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
  604. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
  605. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
  606. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
  607. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
  608. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
  609. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
  610. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
  611. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
  612. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
  613. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
  614. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
  615. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
  616. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
  617. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
  618. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
  619. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
  620. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +0 -68
  621. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -99
  622. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +0 -21
  623. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/lccl/lccl_wrapper.h +0 -58
  624. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/ms_int_types.h +0 -91
  625. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/ms_int_utils.h +0 -108
  626. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +0 -64
  627. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/add_param.h +0 -68
  628. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +0 -40
  629. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/cast_param.h +0 -30
  630. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
  631. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
  632. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
  633. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -38
  634. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +0 -42
  635. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +0 -33
  636. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -377
  637. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/kernel/reshape_and_cache_nz.h +0 -24
  638. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/reshape_and_cache_nz_impl.h +0 -42
  639. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/reshape_and_cache_nz_tiling.h +0 -27
  640. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -46
  641. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/kernel/sub_kernel.h +0 -20
  642. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -48
  643. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_tiling.h +0 -25
  644. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +0 -399
  645. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/utils.h +0 -41
  646. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +0 -45
  647. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_tiling.h +0 -29
  648. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +0 -30
  649. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -69
  650. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_core.h +0 -43
  651. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_entity.h +0 -38
  652. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_sink.h +0 -69
  653. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_stream.h +0 -41
  654. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -71
  655. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -165
  656. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +0 -20
  657. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
  658. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -121
  659. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -106
  660. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
  661. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
  662. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_quant_acme_impl.so +0 -0
  663. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_310p_old_impl.so +0 -0
  664. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_old_impl.so +0 -0
  665. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_impl.so +0 -0
  666. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_old_impl.so +0 -0
  667. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix.json +0 -19
  668. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix.o +0 -0
  669. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix_mix_aic_0.o +0 -0
  670. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  671. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix.json +0 -19
  672. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix.o +0 -0
  673. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix_mix_aic_0.o +0 -0
  674. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  675. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
  676. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
  677. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bsh_full_mix.o +0 -0
  678. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
  679. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
  680. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
  681. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bsh_full_mix.o +0 -0
  682. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
  683. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_bf16_bnsd_mix.o +0 -0
  684. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_bf16_bsh_mix.o +0 -0
  685. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_fp16_bnsd_mix.o +0 -0
  686. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_fp16_bsh_mix.o +0 -0
  687. mindspore/profiler/envprofiling.py +0 -254
  688. mindspore/profiler/profiling.py +0 -1926
  689. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/WHEEL +0 -0
  690. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/top_level.txt +0 -0
@@ -34,28 +34,31 @@ from mindspore.ops.primitive import constexpr, _primexpr
  from mindspore.ops.operations._inner_ops import TileSize
  from mindspore.ops.auto_generate import Cummin, BatchMatMul, BernoulliExt, lin_space_ext_op, BitwiseAndScalar,\
  BitwiseAndTensor, BitwiseOrScalar, BitwiseOrTensor, BitwiseXorScalar, BitwiseXorTensor, RemainderTensorTensor,\
- RemainderTensorScalar, RemainderScalarTensor
+ RemainderTensorScalar, RemainderScalarTensor, std_mean_op, var_mean_op, InplaceErfinv
  from mindspore.ops import auto_generate
  from mindspore.ops.operations.math_ops import STFT
  from mindspore.ops.operations.math_ops import LuUnpack
  from mindspore.ops.auto_generate.pyboost_inner_prim import roll_impl, cross_impl
+ from mindspore.ops.auto_generate.pyboost_inner_prim import reduce_max_impl, reduce_min_impl
  from mindspore.ops.operations.math_ops import Ormqr
  from mindspore.ops.operations.math_ops import DivMod
  from mindspore.ops.operations.array_ops import MatrixSetDiagV3, Transpose
- from mindspore.ops.auto_generate import (minimum, maximum, mul, sin, sinc, sinh, cummax, real, conj, add, sub, cos,
- cosh, nan_to_num, norm_op, lp_norm_v2_op,
+ from mindspore.ops.auto_generate import (minimum, maximum, mul, muls, sin, sinc, sinh, cummax, real, conj, add, sub, cos,
+ cosh, nan_to_num, norm_op, lp_norm_v2_op, linalg_vector_norm_op, std_op,
  matrix_exp, sqrt, rsqrt, square, trace, nextafter, abs, acos, acosh, angle,
  asin, asinh, atan, atan2, atanh, ceil, equal, erf, erfc, erfinv, exp, expm1,
  floor, floor_divide, floor_mod, gcd, greater, greater_equal, less, less_equal,
- log, log1p, neg, not_equal, pow, round_op, isfinite, argmax_ext, mean_ext_op,
+ log, log1p, neg, not_equal, round_op, isfinite, argmax_ext, mean_ext_op,
  sum_ext_op, prod_ext_op, all, matrix_inverse_ext, atan2_ext, sign, acos_ext,
  acosh_ext, asin_ext, asinh_ext, atan_ext, tan, median_ext_op, median_dim_op,
- xlogy_op, xlogy_scalar_other_op, xlogy_scalar_self_op, trunc, histc_ext)
-
+ xlogy_op, xlogy_scalar_other_op, xlogy_scalar_self_op, trunc, histc_ext,
+ bincount_ext, rotated_iou_op, cat, narrow, var_op, pow, pow_scalar_tensor_op,
+ frac_ext, pow_tensor_scalar_op, not_equal_op, isinf, addmv_op, cdist,
+ addbmm_op, addmm_op, grouped_matmul_v2, transpose_ext, grouped_matmul_v4)


  from mindspore.ops.auto_generate.gen_ops_def import add_ext, sub_ext, bmm_ext
- from mindspore.ops.auto_generate import tanh
+ from mindspore.ops.auto_generate import tanh, tanh_
  from mindspore.nn import layer
  from mindspore._checkparam import check_is_number
  from mindspore import _checkparam as validator
@@ -143,7 +146,8 @@ transpose_ = P.Transpose()
  xdivy_ = P.Xdivy()
  tensor_div_ = P.Div()
  tensor_divmod_ = DivMod()
- generator_step_ = Tensor(10, mstype.int64)
+ generator_step_ = Tensor(12, mstype.int64)
+ tuple_to_tensor_ = TupleToTensor()

  #####################################
  # Private Operation Functions.
@@ -186,7 +190,8 @@ dtype_ = P.DType()
  eps_ = P.Eps()
  erf_ = P.Erf()
  erfc_ = P.Erfc()
- erfinv_ = P.Erfinv()
+ erfinv_ext_ = P.Erfinv()
+ inplace_erfinv_ = InplaceErfinv()
  exp2_ = P.Pow()
  expand_dims_ = P.ExpandDims()
  fill_v2_ = P.FillV2()
@@ -197,7 +202,6 @@ igammac_ = Igammac()
  imag_ = P.Imag()
  inv_ = P.math_ops.Inv()
  invert_ = P.Invert()
- isinf_ = P.IsInf()
  isnan_ = P.IsNan()
  lcm_ = Lcm()
  lerp_ = P.Lerp()
@@ -230,7 +234,6 @@ size_ = P.Size()
  scalar_to_tensor_ = P.ScalarToTensor()
  shape_ = P.Shape()
  sparse_segment_mean_ = SparseSegmentMean()
- tanh_ = P.Tanh()
  tensor_round_ = P.Round()
  tile_ = P.Tile()
  tile_size_ = TileSize()
@@ -446,10 +449,9 @@ def bincount(input, weights=None, minlength=0):
  if weights is not None:
  if input.shape != weights.shape:
  raise ValueError('for bincount `input` and `weights` must have the same length')
- idx_mapping *= weights
+ idx_mapping = weights * idx_mapping
  return reduce_sum_(idx_mapping.astype(mstype.float32), 1).ravel()

-
  def bucketize(input, boundaries, *, right=False):
  r"""
  Bucketizes `input` based on `boundaries`. If `right` is ``False``, the left boundary is closed. For each element x
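A quick illustration of the weighted-bincount pattern patched above: the kernel builds a one-hot index mapping and reduce-sums it, and writing `weights * idx_mapping` (weights on the left, no in-place update) lets broadcasting and type promotion determine the result dtype. A minimal NumPy sketch, with illustrative names only:

    import numpy as np

    def weighted_bincount(values, weights, minlength=0):
        length = max(minlength, int(values.max()) + 1)
        # one-hot mapping: idx_mapping[b, i] == 1 exactly where values[i] == b
        idx_mapping = (np.arange(length)[:, None] == values[None, :]).astype(np.float32)
        # weights on the left, as in the fixed line above
        idx_mapping = weights * idx_mapping
        return idx_mapping.sum(axis=1)

    print(weighted_bincount(np.array([0, 1, 1, 3]), np.array([1.0, 0.5, 0.5, 2.0])))
    # [1. 1. 0. 2.]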
@@ -824,7 +826,8 @@ def float_power(input, exponent):

  Raises:
  TypeError: If neither `input` nor `exponent` is a Tensor.
- TypeError: If the data type of `input` or `exponent` is not in Tensor and Number.
+ TypeError: If the data type of `input` is not Tensor or Number.
+ TypeError: If the data type of `exponent` is not Tensor or Number.

  Supported Platforms:
  ``GPU`` ``CPU``
@@ -857,6 +860,81 @@ def float_power(input, exponent):
  return pow(input, exponent)


+ def float_power_ext(input, exponent):
+ """
+ Computes `input` to the power of `exponent` element-wise in double precision, and always
+ returns a mindspore.float64 tensor.
+
+ .. math::
+
+ out_{i} = input_{i} ^ {exponent_{i}}
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Note:
+ Unlike `ops.pow`, this function always uses double precision for calculations, while
+ the precision of `ops.pow` depends on type promotion.
+ Currently, this function does not support complex number calculations.
+ Since float64 calculations are significantly slower on Ascend devices compared to other data
+ types, it is strongly recommended to use this function only in scenarios where double precision
+ is required and performance is not a priority. Otherwise, using `ops.pow` is a better choice.
+
+ Args:
+ input (Union[Tensor, Number]): The first input is a tensor or a number.
+ exponent (Union[Tensor, Number]): The second input, if the first input is Tensor,
+ the second input can be Number or Tensor. Otherwise, it must be a Tensor.
+
+ Returns:
+ Tensor, the shape is the same as the one after broadcasting, the return value type
+ is mindspore.float64.
+
+ Raises:
+ TypeError: If neither `input` nor `exponent` is a Tensor.
+ TypeError: If the data type of `input` or `exponent` is neither a tensor nor a number,
+ or it contains complex numbers.
+ ValueError: If `input` and `exponent` have different shapes and cannot be broadcasted
+ to each other.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor([1, 2, 3])
+ >>> ops.function.math_func.float_power_ext(input, 2)
+ Tensor(shape=[3], dtype=Float64, value= [ 1.00000000e+00, 4.00000000e+00, 9.00000000e+00])
+ >>>
+ >>> exp = Tensor([2, -3, -4])
+ >>> ops.function.math_func.float_power_ext(input, exp)
+ Tensor(shape=[3], dtype=Float64, value= [ 1.00000000e+00, 1.25000000e-01, 1.23456790e-02])
+ """
+ if not (isinstance(input, Tensor) or isinstance(exponent, Tensor)):
+ raise TypeError("At least one of the types of inputs must be tensor, " +
+ f"but the type of 'input' got is {type(input)}, " +
+ f"and the type of 'exponent' is {type(exponent)}.")
+ if (not isinstance(input, (Tensor, int, float, bool))) or \
+ (isinstance(input, Tensor) and is_complex(input)):
+ raise TypeError("The type of 'input' must be Tensor or Number (excluding complex), " +
+ f"but got {type(input)}.")
+ if (not isinstance(exponent, (Tensor, int, float, bool))) or \
+ (isinstance(exponent, Tensor) and is_complex(exponent)):
+ raise TypeError("The type of 'exponent' must be Tensor or Number (excluding complex), " +
+ f"but got {type(exponent)}.")
+
+ op = pow
+ if isinstance(input, Tensor) and isinstance(exponent, numbers.Number):
+ op = pow_tensor_scalar_op
+ if isinstance(input, numbers.Number) and isinstance(exponent, Tensor):
+ op = pow_scalar_tensor_op
+
+ if isinstance(input, Tensor) and input.dtype != mstype.float64:
+ input = cast_(input, mstype.float64)
+ if isinstance(exponent, Tensor) and exponent.dtype != mstype.float64:
+ exponent = cast_(exponent, mstype.float64)
+ return op(input, exponent)
+
+
  def floor_div(x, y):
  """
  Alias for :func:`mindspore.ops.floor_divide` .
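The double-precision contract of the new `float_power_ext` in the hunk above boils down to casting both operands to float64 before exponentiation. A standalone NumPy sketch of that behavior (not MindSpore code):

    import numpy as np

    def float_power_sketch(base, exponent):
        # always compute in double precision, mirroring the docstring above
        base = np.asarray(base, dtype=np.float64)
        exponent = np.asarray(exponent, dtype=np.float64)
        return np.power(base, exponent)

    print(float_power_sketch([1, 2, 3], 2))            # 1.0, 4.0, 9.0
    print(float_power_sketch([1, 2, 3], [2, -3, -4]))  # 1.0, 0.125, ~0.0123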
@@ -913,7 +991,7 @@ def logdet(input):
  Calculates log determinant of one or a batch of square matrices.

  Args:
- input (Tensor): Tensor of shape :math:`(*, n, n)` where :math:`*` means zero or more batch dimensions.
+ input (Tensor): Tensor of shape :math:`(*, N, N)` where :math:`*` means zero or more batch dimensions.

  Returns:
  Tensor, the log determinant of `input`. If the matrix determinant is smaller than 0, nan will be returned. If
@@ -939,7 +1017,8 @@ def logdet(input):

  def i0(input):
  r"""
- Alias for :func:`mindspore.ops.bessel_i0` .
+ For details, please refer to :func:`mindspore.ops.bessel_i0`.
+ The parameter `input` of the current interface is the same as the parameter `x` of the reference interface.

  Supported Platforms:
  ``GPU`` ``CPU``
@@ -1044,8 +1123,8 @@ def inplace_index_add(var, indices, updates, axis): # pylint: disable=redefined
  Adds Tensor `updates` to specified axis and indices of Tensor `var` element-wise.

  Args:
- var (Parameter): The input Parameter to add to, with data type uint8, int8, int16, int32,
- float16, float32, float64.
+ var (Union[Parameter, Tensor]): The input Parameter or Tensor to add to, with data type uint8, int8, int16,
+ int32, float16, float32, float64.
  indices (Tensor): The indices along `axis` to perform the addition. A 1D Tensor
  of shape :math:`(updates.shape[axis],)`, every value of it
  should be in range :math:`[0, var.shape[axis])` with data type int32.
@@ -1057,7 +1136,6 @@ def inplace_index_add(var, indices, updates, axis): # pylint: disable=redefined
  Tensor, updated result, has the same shape and dtype as `var`.

  Raises:
- TypeError: If `var` is not a Parameter.
  TypeError: If neither `indices` nor `updates` is a Tensor.
  ValueError: If `axis` is out of valid range.
  ValueError: If `var` rank is not the same as `updates` rank.
@@ -1163,7 +1241,7 @@ def logical_not(input):


  def logical_or(input, other):
- """
+ r"""
  Computes the "logical OR" of two tensors element-wise.

  Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
@@ -1175,7 +1253,7 @@ def logical_or(input, other):

  .. math::

- out_{i} = input_{i} \\vee other_{i}
+ out_{i} = input_{i} \vee other_{i}

  Note:
  logical_or supports broadcasting.
@@ -1639,7 +1717,7 @@ def xlogy_ext(input, other):

  .. math::

- out_i = input_{i}\log{other_{i}}
+ out_i = input_{i} * \log({other_{i}})

  Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
  The inputs must be two tensors or one tensor and one scalar.
@@ -1813,40 +1891,92 @@ def polar(abs, angle): # pylint: disable=redefined-outer-name
  y_{i} = abs_{i} * \cos(angle_{i}) + abs_{i} * \sin(angle_{i}) * j

  Args:
- abs (Tensor): Radial distance. The shape of tensor is
- :math:`(N,*)` where :math:`N` means the batchsize of the input tensor,
- :math:`*` means, any number of additional dimensions.
- Must be one of the following types: float32, float64.
- angle (Tensor): Polar angle. It has the same shape and dtype as `abs`.
+ abs (Tensor, float): Radial distance. Tensor of any dimension,
+ with dtype required to be float32.
+
+ angle (Tensor, float): Polar angle. It has the same shape and dtype as `abs`.

  Returns:
- Tensor, has the same shape as `abs`.
+ Tensor, with the same shape as `abs` and the dtype is complex64.

- - If the inputs are float32, data type must be complex64.
- - If the inputs are float64, data type must be complex128.

  Raises:
  TypeError: If neither `abs` nor `angle` is a Tensor.
- TypeError: If the dtype of input is not one of: float32, float64.
+ TypeError: If the dtype of input is not one of: float32.
  TypeError: If the dtypes of `abs` and `angle` are not the same.
  ValueError: If `abs`'s shape is not the same as `angle`.

  Supported Platforms:
- ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> import mindspore
  >>> import numpy as np
  >>> from mindspore import Tensor, ops
- >>> abs = Tensor(np.array([1, 2]), mindspore.float64)
- >>> angle = Tensor(np.array([np.pi / 2, 5 * np.pi / 4]), mindspore.float64)
+ >>> abs = Tensor(np.array([1, 2]), mindspore.float32)
+ >>> angle = Tensor(np.array([np.pi / 2, 5 * np.pi / 4]), mindspore.float32)
  >>> output = ops.polar(abs, angle)
  >>> print(output)
- [ 6.12323400e-17+1.j -1.41421356e+00-1.41421356j]
+ [ -4.3711388e-08+1.j -1.4142137e+00-1.4142134j]
  """
  return polar_(abs, angle)


+ def pow_ext(input, exponent):
+ """
+ Calculates the `exponent` power of each element in `input`.
+
+ When `exponent` is a Tensor, the shapes of `input` and `exponent` must be broadcastable.
+
+ .. math::
+
+ out_{i} = input_{i} ^{ exponent_{i}}
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Union[Tensor, Number]): The first input is a Number or a tensor whose data type is
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+ exponent (Union[Tensor, Number]): The second input is a Number or a tensor whose data type is
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+
+ Returns:
+ Tensor, the shape is the same as the one after broadcasting,
+ and the data type is the one with higher precision or higher digits among the two inputs.
+
+ Raises:
+ TypeError: If types of `input` and `exponent` are bool.
+ TypeError: The `input` is tensor and of type int or bool, while the `exponent` is negative int.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
+ >>> exponent = 3.0
+ >>> output = ops.pow(input, exponent)
+ >>> print(output)
+ [ 1. 8. 64.]
+ >>>
+ >>> input = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
+ >>> exponent = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
+ >>> output = ops.pow(input, exponent)
+ >>> print(output)
+ [ 1. 16. 64.]
+ """
+ if isinstance(input, Tensor) and isinstance(exponent, numbers.Number):
+ return pow_tensor_scalar_op(input, exponent)
+ if isinstance(input, numbers.Number) and isinstance(exponent, Tensor):
+ return pow_scalar_tensor_op(input, exponent)
+ return pow(input, exponent)
+
+
  def arccos(input):
  """
  Alias for :func:`mindspore.ops.acos` .
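The polar() contract above (float32 in, complex64 out) is the usual polar-to-rectangular conversion. A NumPy-only illustration of the same construction:

    import numpy as np

    absolute = np.array([1.0, 2.0], dtype=np.float32)
    angle = np.array([np.pi / 2, 5 * np.pi / 4], dtype=np.float32)
    # abs*cos(angle) + j*abs*sin(angle), promoted to complex64
    out = absolute * np.cos(angle) + 1j * (absolute * np.sin(angle))
    print(out.dtype)  # complex64
    print(out)        # roughly [0+1j, -1.414-1.414j], up to float32 rounding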
@@ -2070,6 +2200,9 @@ def bitwise_xor(input, other):
  If they have different data types, the lower priority data type will be converted to
  the relatively highest priority data type.

+ .. warning::
+ This API has poor performance on CPU and it is recommended to run it on the Ascend/GPU.
+
  Args:
  input (Tensor): The first input tensor with shape :math:`(N, *)` where :math:`*` means
  any number of additional dimensions.
@@ -2278,6 +2411,9 @@ def inverse(input):
  """
  Compute the inverse of the input matrix.

+ Note:
+ The `input` dtype of complex numbers is not supported.
+
  Args:
  input (Tensor): A matrix to be calculated. Input `input` must be at least two dimensions, and the size of
  the last two dimensions must be the same size. And the matrix must be invertible.
@@ -2701,6 +2837,45 @@ def eps(x):
  return eps_(x)


+ def erfinv_(input):
+ r"""
+ Update the `input` tensor in-place by computing the inverse error function with `input`, which is defined in the
+ range `(-1, 1)` as:
+
+ .. math::
+
+ erfinv(erf(input)) = input
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): The input tensor to compute with.
+
+ Returns:
+ Tensor.
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+ TypeError: If `input.dtype` is not one of: bool, int8, int16, int32, int64, uint8, float16, float32, bfloat16.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor(np.array([0, 0.5, -0.9]), mindspore.float32)
+ >>> output = ops.erfinv_(input)
+ >>> print(output)
+ [ 0. 0.47693613 -1.1630869 ]
+ """
+ return inplace_erfinv_(input)
+
+
  def linspace(start, end, steps):
  r"""
  Returns a Tensor whose value is `steps` evenly spaced in the interval `start` and `end` (including `start` and
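The identity quoted in the new in-place erfinv_ docstring can be checked against a reference implementation; a sketch using SciPy's erf/erfinv (an assumption: SciPy is available, it is not a MindSpore dependency here):

    import numpy as np
    from scipy.special import erf, erfinv

    x = np.array([0.0, 0.5, -0.9])
    print(erfinv(x))       # ~[0. 0.4769363 -1.1630871], matching the example output
    print(erf(erfinv(x)))  # recovers x: erfinv inverts erf on (-1, 1)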
@@ -2721,7 +2896,7 @@ def linspace(start, end, steps):
  Must be positive int number or 0D int32/int64 Tensor.

  Returns:
- Tensor, has the same dtype as `start`, and the shape of :math:`(steps)`.
+ Tensor, has the same dtype as `start`, and the shape of :math:`(steps,)`.

  Raises:
  TypeError: If `start` or `end` is not a Tensor.
@@ -2780,7 +2955,7 @@ def linspace_ext(start, end, steps, *, dtype=None):
  Tensor, has the shape of :math:`(steps,)`, with dtype specified by `dtype`.

  Raises:
- TypeError: If type of `start` or dtype of `end` is not supported.
+ TypeError: If dtype of `start` or dtype of `end` is not supported.
  ValueError: If `steps` is not positive integer.

  Supported Platforms:
@@ -3398,13 +3573,6 @@ def ne(input, other):
  r"""
  Computes the non-equivalence of two tensors element-wise.

- Note:
- - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
- consistent.
- - When the inputs are two tensors, the shapes of them could be broadcast.
- - When the inputs are one tensor and one scalar, the scalar could only be a constant.
- - Broadcasting is supported.
-
  .. math::

  out_{i} =\begin{cases}
@@ -3412,6 +3580,13 @@ def ne(input, other):
  & \text{False, if } input_{i} = other_{i}
  \end{cases}

+ Note:
+ - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
+ consistent.
+ - When the inputs are two tensors, the shapes of them could be broadcast.
+ - When the inputs are one tensor and one scalar, the scalar could only be a constant.
+ - Broadcasting is supported.
+
  Args:
  input (Union[Tensor, Number, bool]): The first input is a number or
  a bool or a tensor whose data type is number or bool.
@@ -3753,7 +3928,7 @@ def median_ext(input, dim=None, keepdim=False):
  Raises:
  TypeError: If dtype of ``input`` is not one of the following: uint8, int16, int32, int64, float16 or float32.
  TypeError: If input ``input`` is not a Tensor.
- TypeError: If ``dim`` is not a int.
+ TypeError: If ``dim`` is not an int.
  TypeError: If ``keepdim`` is not a bool.
  ValueError: If ``dim`` is not in range of [-x.dim, x.dim-1].

@@ -3799,7 +3974,7 @@ def median(input, axis=-1, keepdims=False):
  Raises:
  TypeError: If dtype of `input` is not one of the following: int16, int32, int64, float32, float64.
  TypeError: If input `input` is not a Tensor.
- TypeError: If `axis` is not a int.
+ TypeError: If `axis` is not an int.
  TypeError: If `keepdims` is not a bool.
  ValueError: If `axis` is not in range of [-x.dim, x.dim-1].

@@ -3924,7 +4099,7 @@ def orgqr(input, input2):
  computes the first :math:`N` columns of a product of
  `Householder <https://en.wikipedia.org/wiki/Householder_transformation#Householder_matrix>`_
  matrices. Suppose input `input` is a matrix of size :math:`(M, N)` after householder transformation.
- When the diagonal of `input` is set to 1, every colunm of lower triangular in `input` is
+ When the diagonal of `input` is set to 1, every column of lower triangular in `input` is
  denoted as :math:`w_j` for :math:`j` for
  :math:`j=1, \ldots, M`, this function returns the first :math:`N` columns of the matrix

@@ -4172,6 +4347,8 @@ def logspace(start, end, steps, base=10, *, dtype=mstype.float32):
  end (Union[float, Tensor]): End value of interval.
  steps (int): The steps must be a non-negative integer.
  base (int, optional): The base must be a non-negative integer. Default: ``10`` .
+
+ Keyword Args:
  dtype (mindspore.dtype, optional): The dtype of output. Default: ``mstype.float32`` .

  Returns:
@@ -4375,6 +4552,126 @@ def vander(x, N=None):
  return F.tensor_pow(x, exponent)


+ def var_ext(input, dim=None, *, correction=1, keepdim=False):
+ r"""
+ Calculates the variance over the dimensions specified by `dim`. `dim` can be a single dimension, list of
+ dimensions, or None to reduce over all dimensions.
+
+ The variance (:math:`\delta ^2`) is calculated as:
+
+ .. math::
+ \delta ^2 = \frac{1}{\max(0, N - \delta N)}\sum^{N - 1}_{i = 0}(x_i - \bar{x})^2
+
+ where :math:`x` is the sample set of elements, :math:`\bar{x}` is the sample mean, :math:`N` is the number
+ of samples and :math:`\delta N` is the `correction`.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): The tensor used to calculate the variance.
+ dim (None, int, tuple(int), optional): The dimension or dimensions to reduce. Defaults to ``None``.
+ If ``None``, all dimensions are reduced.
+
+ Keyword Args:
+ correction (int, optional): The difference between the sample size and sample degrees of freedom. Defaults
+ to Bessel’s correction. Defaults to ``1``.
+ keepdim (bool, optional): Whether the output tensor has dim retained or not. If ``True`` , keep these
+ reduced dimensions and the length is 1. If ``False``, don't keep these dimensions. Defaults to ``False``.
+
+ Returns:
+ Tensor, the variance.
+ Suppose the shape of `input` is :math:`(x_0, x_1, ..., x_R)`:
+
+ - If `dim` is () and `keepdim` is set to ``False`` , returns a 0-D Tensor, indicating the variance of all
+ elements in `input`.
+ - If `dim` is int, e.g. ``1`` and `keepdim` is set to ``False`` , then the returned Tensor has shape
+ :math:`(x_0, x_2, ..., x_R)`.
+ - If `dim` is tuple(int) or list(int), e.g. ``(1, 2)`` and `keepdim` is set to ``False`` , then the returned
+ Tensor has shape :math:`(x_0, x_3, ..., x_R)`.
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+ TypeError: If `input` is not in bfloat16, float16, float32.
+ TypeError: If `dim` is not one of the following: None, int, list, tuple.
+ TypeError: If `correction` is not an int.
+ TypeError: If `keepdim` is not a bool.
+ ValueError: If `dim` is out of range :math:`[-input.ndim, input.ndim)`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor([[8, 2, 1], [5, 9, 3], [4, 6, 7]], mindspore.float32)
+ >>> output = ops.var_ext(input, dim=0, correction=1, keepdim=True)
+ >>> print(output)
+ [[ 4.333333, 12.333333, 9.333333]]
+ """
+ return var_op(input, dim, correction, keepdim)
+
+
+ def std_ext(input, dim=None, *, correction=1, keepdim=False):
+ r"""
+ Calculates the standard deviation over the dimensions specified by `dim`. `dim` can be a single dimension, list of
+ dimensions, or None to reduce over all dimensions.
+
+ The standard deviation (:math:`\sigma`) is calculated as:
+
+ .. math::
+ \sigma = \sqrt{\frac{1}{N - \delta N} \sum_{j=0}^{N-1} \left(self_{ij} - \overline{x_{i}}\right)^{2}}
+
+ where :math:`x` is the sample set of elements, :math:`\bar{x}` is the sample mean, :math:`N` is the number
+ of samples and :math:`\delta N` is the `correction`.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): The tensor used to calculate the standard deviation.
+ dim (None, int, tuple(int), optional): The dimension or dimensions to reduce. Defaults to ``None``.
+ If ``None``, all dimensions are reduced.
+
+ Keyword Args:
+ correction (int, optional): The difference between the sample size and sample degrees of freedom. Defaults
+ to Bessel's correction. Defaults to ``1``.
+ keepdim (bool, optional): Whether the output tensor has dim retained or not. If ``True`` , keep these
+ reduced dimensions and the length is 1. If ``False``, don't keep these dimensions. Defaults to ``False``.
+
+ Returns:
+ Tensor, the standard deviation.
+ Suppose the shape of `input` is :math:`(x_0, x_1, ..., x_R)`:
+
+ - If `dim` is () and `keepdim` is set to ``False`` , returns a 0-D Tensor, indicating the standard deviation of
+ all elements in `input`.
+ - If `dim` is int, e.g. ``1`` and `keepdim` is set to ``False`` , then the returned Tensor has shape
+ :math:`(x_0, x_2, ..., x_R)`.
+ - If `dim` is tuple(int) or list(int), e.g. ``(1, 2)`` and `keepdim` is set to ``False`` , then the returned
+ Tensor has shape :math:`(x_0, x_3, ..., x_R)`.
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+ TypeError: If `input` is not in bfloat16, float16, float32.
+ TypeError: If `dim` is not one of the following: None, int, tuple.
+ TypeError: If `correction` is not an int.
+ TypeError: If `keepdim` is not a bool.
+ ValueError: If `dim` is out of range :math:`[-input.ndim, input.ndim)`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import ops, Tensor
+ >>> input = Tensor(np.array([[1, 2, 3], [-1, 1, 4]]).astype(np.float32))
+ >>> output = ops.std_ext(input, dim=1, correction=1, keepdim=False)
+ >>> print(output)
+ [1. 2.5166113]
+ """
+ return std_op(input, dim, correction, keepdim)
+
+
  def var(input, axis=None, ddof=0, keepdims=False):
  r"""
  Returns the variance of each row of the input Tensor by default, or it can calculate them
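For readers mapping the new `correction` keyword onto familiar tools: it plays the role of NumPy's ddof (delta degrees of freedom), with correction=1 being Bessel's correction. An illustrative NumPy check that reproduces the var_ext docstring numbers:

    import numpy as np

    x = np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]], dtype=np.float32)
    print(np.var(x, axis=0, ddof=1, keepdims=True))
    # [[ 4.3333335 12.333333   9.333333 ]]
    print(np.std(x, axis=1, ddof=1))  # the matching sample standard deviation per row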
@@ -4627,73 +4924,183 @@ def std_mean(input, axis=None, ddof=0, keepdims=False):
  return tensor_pow(output[0], 0.5), output[1]


- def reciprocal(input):
+ def std_mean_ext(input, dim=None, *, correction=1, keepdim=False):
  r"""
- Returns reciprocal of a tensor element-wise.
+ By default, return the standard deviation and mean of each dimension in Tensor.
+ If dim is a dimension list, calculate the standard deviation and mean of the corresponding dimension.
+
+ The standard deviation (:math:`\sigma`) is calculated as:

  .. math::

- out_{i} = \frac{1}{x_{i}}
+ \sigma = \sqrt{\frac{1}{N - \delta N} \sum_{j=0}^{N-1} \left(self_{ij} - \overline{x_{i}}\right)^{2}}
+
+ where :math:`x` is the sample set of elements, :math:`\bar{x}` is the sample mean,
+ :math:`N` is the number of samples and :math:`\delta N` is the `correction` .
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.

  Args:
- input (Tensor): The input tensor.
+ input (Tensor): The input tensor. Supported dtypes: float16, float32.
+ dim (Union[int, tuple(int), list(int)], optional):
+ Specify the dimensions for calculating standard deviation and mean. Default value: ``None``.
+
+ Keyword Args:
+ correction (int, optional): Difference between the sample size and sample degrees of freedom.
+ Defaults to Bessel's correction. Default: ``1``.
+ keepdim (bool, optional): Whether to preserve the dimensions of the output Tensor.
+ If True, retain the reduced dimension with a size of 1. Otherwise, remove the dimensions.
+ Default value: ``False``.

  Returns:
- Tensor, has the same shape as the `input`.
+ A tuple of standard deviation and mean.

  Raises:
  TypeError: If `input` is not a Tensor.
+ TypeError: If `dim` is not one of the following data types: int, tuple, list, or Tensor.
+ TypeError: If `keepdim` is not a bool.
+ ValueError: If `dim` is out of range.

  Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ ``Ascend``

  Examples:
  >>> import mindspore as ms
- >>> from mindspore import ops
- >>> import numpy as np
- >>> input = ms.Tensor(np.array([1.0, 2.0, 4.0]), ms.float32)
- >>> output = ops.reciprocal(input)
- >>> print(output)
- [1. 0.5 0.25]
+ >>> input = ms.Tensor([[1, 2, 3, 4], [-1, 1, 4, -10]], ms.float32)
+ >>> output_std, output_mean = ms.mint.std_mean(input, 1, correction=2, keepdim=True)
+ >>> print(output_std)
+ [[1.5811388]
+ [7.3824115]]
+ >>> print(output_mean)
+ [[ 2.5]
+ [-1.5]]
  """
- return reciprocal_(input)
+ return std_mean_op(input, dim, correction, keepdim)


- def outer(input, vec2):
- """
- Return outer product of `input` and `vec2`. If `input` is a vector of size :math:`n`
- and `vec2` is a vector of size :math:`m` , then output must be a matrix of shape :math:`(n, m)` .
+ def var_mean_ext(input, dim=None, *, correction=1, keepdim=False):
+ r"""
+ By default, return the variance and mean of each dimension in Tensor.
+ If dim is a dimension list, calculate the variance and mean of the corresponding dimension.

- Note:
- This function does not broadcast.
+ The variance (:math:`\sigma ^2`) is calculated as:
+
+ .. math::
+
+ \sigma ^2 = \frac{1}{N - \delta N} \sum_{j=0}^{N-1} \left(self_{ij} - \overline{x_{i}}\right)^{2}
+
+ where :math:`x` is the sample set of elements, :math:`\bar{x}` is the sample mean,
+ :math:`N` is the number of samples and :math:`\delta N` is the `correction` .
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.

  Args:
- input (Tensor): 1-D input vector.
- vec2 (Tensor): 1-D input vector.
+ input (Tensor): The input tensor. Supported dtypes: float16, float32.
+ dim (Union[int, tuple(int), list(int)], optional):
+ Specify the dimensions for calculating variance and mean. Default value: ``None``.
+
+ Keyword Args:
+ correction (int, optional): Difference between the sample size and sample degrees of freedom.
+ Defaults to Bessel's correction. Default: ``1``.
+ keepdim (bool, optional): Whether to preserve the dimensions of the output Tensor.
+ If True, retain the reduced dimension with a size of 1. Otherwise, remove the dimensions.
+ Default value: ``False``.

  Returns:
- out (Tensor, optional), 2-D matrix, the outer product of two vectors.
+ A tuple of variance and mean.

  Raises:
- TypeError: If `input` or `vec2` is not a Tensor.
+ TypeError: If `input` is not a Tensor.
+ TypeError: If `dim` is not one of the following data types: int, tuple, list, or Tensor.
+ TypeError: If `keepdim` is not a bool.
+ ValueError: If `dim` is out of range.

  Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ ``Ascend``

  Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor
- >>> from mindspore import ops
- >>> input = Tensor(np.array([7, 8, 9]), mindspore.int32)
- >>> vec2 = Tensor(np.array([7, 10, 11]), mindspore.int32)
- >>> out = ops.outer(input, vec2)
- >>> print(out)
- [[49 70 77]
- [56 80 88]
- [63 90 99]]
- """
-
+ >>> import mindspore as ms
+ >>> input = ms.Tensor([[1, 2, 3, 4], [-1, 1, 4, -10]], ms.float32)
+ >>> output_var, output_mean = ms.mint.var_mean(input, 1, correction=2, keepdim=True)
+ >>> print(output_var)
+ [[ 2.5]
+ [54.5]]
+ >>> print(output_mean)
+ [[ 2.5]
+ [-1.5]]
+ """
+ return var_mean_op(input, dim, correction, keepdim)
+
+
+ def reciprocal(input):
+ r"""
+ Returns reciprocal of a tensor element-wise.
+
+ .. math::
+
+ out_{i} = \frac{1}{x_{i}}
+
+ Args:
+ input (Tensor): The input tensor.
+
+ Returns:
+ Tensor, has the same shape as the `input`.
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import ops
+ >>> import numpy as np
+ >>> input = ms.Tensor(np.array([1.0, 2.0, 4.0]), ms.float32)
+ >>> output = ops.reciprocal(input)
+ >>> print(output)
+ [1. 0.5 0.25]
+ """
+ return reciprocal_(input)
+
+
+ def outer(input, vec2):
+ """
+ Return outer product of `input` and `vec2`. If `input` is a vector of size :math:`n`
+ and `vec2` is a vector of size :math:`m` , then output must be a matrix of shape :math:`(n, m)` .
+
+ Note:
+ This function does not broadcast.
+
+ Args:
+ input (Tensor): 1-D input vector.
+ vec2 (Tensor): 1-D input vector.
+
+ Returns:
+ out (Tensor, optional), 2-D matrix, the outer product of two vectors.
+
+ Raises:
+ TypeError: If `input` or `vec2` is not a Tensor.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> from mindspore import ops
+ >>> input = Tensor(np.array([7, 8, 9]), mindspore.int32)
+ >>> vec2 = Tensor(np.array([7, 10, 11]), mindspore.int32)
+ >>> out = ops.outer(input, vec2)
+ >>> print(out)
+ [[49 70 77]
+ [56 80 88]
+ [63 90 99]]
+ """
+
  if not isinstance(input, (Tensor, Tensor_)):
  raise TypeError("the input input must be Tensor!")
  if not isinstance(vec2, (Tensor, Tensor_)):
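The new std_mean_ext/var_mean_ext pair differs from std/var only in returning the mean alongside the statistic. A hypothetical NumPy stand-in that reproduces the docstring example:

    import numpy as np

    def std_mean_sketch(x, dim, correction=1, keepdim=False):
        std = np.std(x, axis=dim, ddof=correction, keepdims=keepdim)
        mean = np.mean(x, axis=dim, keepdims=keepdim)
        return std, mean

    x = np.array([[1, 2, 3, 4], [-1, 1, 4, -10]], dtype=np.float32)
    std, mean = std_mean_sketch(x, dim=1, correction=2, keepdim=True)
    print(std)   # [[1.5811388] [7.3824115]]
    print(mean)  # [[ 2.5] [-1.5]]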
@@ -4799,6 +5206,103 @@ def addbmm(input, batch1, batch2, *, beta=1, alpha=1):
  return beta * input + alpha * (bmm_res.sum(axis=0))


+ def addbmm_ext(input, batch1, batch2, *, beta=1, alpha=1):
+ r"""
+ Applies batch matrix multiplication to `batch1` and `batch2`, with a reduced add step and add `input` to the result.
+
+ The optional values `alpha` and `beta` are the matrix-matrix product between `batch1` and `batch2` and the scale
+ factor for the added tensor `input` respectively. If `beta` is 0, then `input` will be ignored.
+
+ .. math::
+ output = \beta input + \alpha (\sum_{i=0}^{b-1} {batch1_i @ batch2_i})
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): Tensor to be added.
+ batch1 (Tensor): The first batch of tensor to be multiplied.
+ batch2 (Tensor): The second batch of tensor to be multiplied.
+
+ Keyword Args:
+ beta (Union[int, float], optional): Multiplier for `input`. Default: ``1`` .
+ alpha (Union[int, float], optional): Multiplier for `batch1` @ `batch2`. Default: ``1`` .
+
+ Returns:
+ Tensor, has the same dtype as `input`.
+
+ Raises:
+ TypeError: If `alpha` or `beta` is not an int or float.
+ ValueError: If `batch1`, `batch2` cannot apply batch matrix multiplication.
+ ValueError: If `batch1` and `batch2` are not 3-D tensors.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> m = np.ones((3, 3)).astype(np.float32)
+ >>> arr1 = np.arange(24).astype(np.float32).reshape((2, 3, 4))
+ >>> arr2 = np.arange(24).astype(np.float32).reshape((2, 4, 3))
+ >>> a = Tensor(arr1)
+ >>> b = Tensor(arr2)
+ >>> c = Tensor(m)
+ >>> output = ops.addbmm_ext(c, a, b)
+ >>> print(output)
+ [[ 949. 1009. 1069.]
+ [1285. 1377. 1469.]
+ [1621. 1745. 1869.]]
+ """
+ return addbmm_op(input, batch1, batch2, beta, alpha)
+
+
+ def addmm_ext(input, mat1, mat2, *, beta=1, alpha=1):
+ r"""
+ Performs a matrix multiplication of the 2-D matrices mat1 and mat2. The matrix input is added to the final result.
+ The formula is defined as follows:
+
+ .. math::
+ output = \beta input + \alpha (mat1 @ mat2)
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): matrix to be added, the shape must be broadcastable with mat1 @ mat2.
+ mat1 (Tensor): the first matrix to be matrix multiplied, must be 2-D Tensor, with the same shape of the input.
+ mat2 (Tensor): the second matrix to be matrix multiplied, must be 2-D Tensor, with the same shape of the input.
+
+ Keyword Args:
+ beta (Union[float, int], optional): multiplier for input. Default: ``1`` .
+ alpha (Union[float, int], optional): multiplier for :math:`mat1 @ mat2`. Default: ``1`` .
+
+ Returns:
+ Tensor, with the same dtype as `input` and the same shape as mat1 @ mat2.
+
+ Raises:
+ TypeError: If the type of `input`, `mat1` or `mat2` is not Tensor.
+ TypeError: If the types of `input`, `mat1`, `mat2` are different.
+ ValueError: If `mat1` and `mat2` are not 2-D tensors.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor(np.ones([3, 3]).astype(np.float32))
+ >>> mat1 = Tensor(np.ones([3, 4]).astype(np.float32))
+ >>> mat2 = Tensor(np.ones([4, 3]).astype(np.float32))
+ >>> output = ops.function.math_func.addmm_ext(input, mat1, mat2)
+ >>> print(output)
+ [[5. 5. 5.]
+ [5. 5. 5.]
+ [5. 5. 5.]]
+ """
+ return addmm_op(input, mat1, mat2, beta, alpha)
+
+
  def addmm(input, mat1, mat2, *, beta=1, alpha=1):
  r"""
  Multiplies matrix `mat1` and matrix `mat2`. The matrix `input` is added to the final result.
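Both new fused ops above reduce to beta*input + alpha*product; a compact NumPy sketch of the two formulas (illustrative helper names), reproducing the addbmm_ext docstring values:

    import numpy as np

    def addbmm_sketch(inp, batch1, batch2, beta=1, alpha=1):
        # batched matmul, summed over the batch axis, then the scaled add
        return beta * inp + alpha * np.matmul(batch1, batch2).sum(axis=0)

    def addmm_sketch(inp, mat1, mat2, beta=1, alpha=1):
        return beta * inp + alpha * (mat1 @ mat2)

    c = np.ones((3, 3), dtype=np.float32)
    a = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
    b = np.arange(24, dtype=np.float32).reshape(2, 4, 3)
    print(addbmm_sketch(c, a, b))  # [[ 949. 1009. 1069.] ...]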
@@ -4910,6 +5414,53 @@ def addmv(input, mat, vec, *, beta=1, alpha=1):
  return out


+ def addmv_ext(input, mat, vec, *, beta=1, alpha=1):
+ """
+ Performs a matrix-vector product of `mat` and `vec`, and add the input vector `input` to the final result.
+
+ If `mat` is a tensor of size :math:`(N, M)` , `vec` is a 1-D tensor of size :math:`M` , then `input` must be
+ broadcastable with a 1-D tensor of size :math:`N` . In this case, `output` is a 1-D Tensor of size :math:`N` .
+
+ .. math::
+ output = \beta input + \alpha (mat @ vec)
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): Vector to be added.
+ mat (Tensor): The first tensor needs to be multiplied.
+ vec (Tensor): The second tensor needs to be multiplied.
+
+ Keyword Args:
+ beta (Union[float, int], optional): Coefficient of `input`. Default: ``1``.
+ alpha (Union[float, int], optional): Coefficient of :math:`mat @ vec` . Default: ``1``.
+
+ Returns:
+ Tensor, with a shape of :math:`(N,)` , and its dtype is the same as `input`.
+
+ Raises:
+ TypeError: If dtype of `input`, `mat` or `vec` is not tensor.
+ TypeError: If dtypes of `mat` and `vec` are not the same.
+ ValueError: If `mat` is not a 2-D tensor.
+ ValueError: If `vec` is not a 1-D tensor.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, mint
+ >>> input = Tensor(np.array([2., 3.]).astype(np.float32))
+ >>> mat = Tensor(np.array([[2., 5., 3.], [4., 2., 2.]]).astype(np.float32))
+ >>> vec = Tensor(np.array([3., 2., 4.]).astype(np.float32))
+ >>> output = mint.addmv(input, mat, vec)
+ >>> print(output)
+ [30. 27.]
+ """
+ return addmv_op(input, mat, vec, beta, alpha)
+
+
  def adjoint(x):
  r"""
  Calculates the conjugation of Tensor element by element, and transposes the last two dimensions.
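Likewise, addmv_ext above is beta*input + alpha*(mat @ vec); the docstring numbers check out in plain NumPy:

    import numpy as np

    inp = np.array([2.0, 3.0], dtype=np.float32)
    mat = np.array([[2.0, 5.0, 3.0], [4.0, 2.0, 2.0]], dtype=np.float32)
    vec = np.array([3.0, 2.0, 4.0], dtype=np.float32)
    print(1 * inp + 1 * (mat @ vec))  # [30. 27.]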
@@ -5047,60 +5598,13 @@ def lcm(input, other):
  return lcm_(input, other)


- def cdist(x1, x2, p=2.0):
- """
- Computes p-norm distance between each pair of row vectors of two input Tensors.
-
- Note:
- On Ascend, the supported dtypes are float16 and float32.
- On CPU, the supported dtypes are float16 and float32.
- On GPU, the supported dtypes are float32 and float64.
-
- Args:
- x1 (Tensor): Input tensor of shape :math:`(B, P, M)`.
- Letter :math:`B` represents 0 or positive int number.
- When :math:`B` is equal to 0, it means this dimension can be ignored,
- i.e. shape of the tensor is :math:`(P, M)`.
- x2 (Tensor): Input tensor of shape :math:`(B, R, M)`, has the same dtype as `x1`.
- p (float, optional): P value for the p-norm distance to calculate between each
- vector pair, P ∈ [0,∞]. Default: ``2.0`` .
-
- Returns:
- Tensor, p-norm distance, has the same dtype as `x1`, its shape is :math:`(B, P, R)`.
-
- Raises:
- TypeError: If `x1` or `x2` is not Tensor.
- TypeError: If dtype of `x1` or `x2` is not listed in the "Note" above.
- TypeError: If `p` is not float32.
- ValueError: If `p` is negative.
- ValueError: If dimension of `x1` is not the same as `x2`.
- ValueError: If dimension of `x1` or `x2` is neither 2 nor 3.
- ValueError: If the batch shape of `x1` is not the same as the shape of `x2`.
- ValueError: If the number of columns of `x1` is not the same as that of `x2`.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
- >>> x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
- >>> y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
- >>> output = ops.cdist(x, y, 2.0)
- >>> print(output)
- [[[2.8284273 2.8284273]
- [1.4142137 1.4142137]]]
- """
- cdist_ = _get_cache_prim(P.Cdist)(p)
- return cdist_(x1, x2)
-
-
  def lerp(input, end, weight):
  """
  Does a linear interpolation of two tensors input and end based on a float or tensor weight.

  If `weight` is a tensor, the shapes of three inputs need to be broadcast;
  If `weight` is a float, the shapes of `input` and `end` need to be broadcast.
+ If `weight` is a float and platform is Ascend, the types of `input` and `end` need to be float32.

  .. math::
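The linear interpolation lerp computes is input + weight * (end - input); a minimal NumPy illustration of the float-weight case described above (illustrative values only):

    import numpy as np

    start = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    end = np.array([10.0, 10.0, 10.0], dtype=np.float32)
    weight = 0.5
    print(start + weight * (end - start))  # [5.5 6.  6.5]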
 
@@ -5217,7 +5721,7 @@ def bernoulli_ext(input, *, generator=None):
  Default: ``None``, uses the default pseudorandom number generator.

  Returns:
- output (Tensor): The output tensor, with the same shape and dtype as `input`.
+ output (Tensor), The output tensor, with the same shape and dtype as `input`.

  Raises:
  TypeError: If dtype of `input` is not one of: float16, float32, float64, bfloat16.
@@ -5810,7 +6314,7 @@ def atleast_1d(inputs):
  return tuple([_expand(arr, 1) for arr in inputs])


- def dstack(inputs):
+ def dstack(tensors):
  r"""
  Stacks tensors along the third axis.

@@ -5818,7 +6322,7 @@ def dstack(inputs):
  2-D tensors :math:`(M,N)` should be reshaped to :math:`(M,N,1)` before concatenation.

  Args:
- inputs (Union(List[Tensor], Tuple[Tensor])): A sequence of tensors.
+ tensors (Union(List[Tensor], Tuple[Tensor])): A sequence of tensors.
  The tensors must have the same shape along all but the third axis.
  1-D or 2-D tensors must have the same shape.

@@ -5827,8 +6331,8 @@ def dstack(inputs):
  The output shape is similar to the output of `numpy.dstack()` function.

  Raises:
- TypeError: If `inputs` is not tuple or list.
- ValueError: If `inputs` is empty.
+ TypeError: If `tensors` is not tuple or list.
+ ValueError: If `tensors` is empty.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -5847,24 +6351,24 @@ def dstack(inputs):
  [ 5. 11.]
  [ 6. 12.]]]
  """
- if not isinstance(inputs, (tuple, list)):
- raise TypeError(f"For 'dstack', 'inputs' must be list or tuple of tensors, but got {type(inputs)}")
- if not inputs:
- raise TypeError(f"For 'dstack', 'inputs' can not be empty.")
- trans_inputs = ()
- for tensor in inputs:
+ if not isinstance(tensors, (tuple, list)):
+ raise TypeError(f"For 'dstack', 'tensors' must be list or tuple of tensors, but got {type(tensors)}")
+ if not tensors:
+ raise TypeError(f"For 'dstack', 'tensors' can not be empty.")
+ trans_tensors = ()
+ for tensor in tensors:
  if not isinstance(tensor, Tensor):
- raise TypeError(f"For 'dstack', each elements of 'inputs' must be Tensor, but got {type(tensor)}")
- if tensor.size == 0:
- raise TypeError(f"For 'dstack', each elements of 'inputs' can not be empty.")
- if tensor.ndim <= 1:
- tensor = _expand(tensor, 2)
- if tensor.ndim == 2:
+ raise TypeError(f"For 'dstack', each elements of 'tensors' must be Tensor, but got {type(tensor)}")
+ if tensor.ndim == 0:
+ tensor = reshape_(tensor, (1, 1, 1))
+ elif tensor.ndim == 1:
+ tensor = expand_dims_(expand_dims_(tensor, 0), 2)
+ elif tensor.ndim == 2:
  tensor = expand_dims_(tensor, 2)
- trans_inputs += (tensor,)
- if not trans_inputs:
+ trans_tensors += (tensor,)
+ if not trans_tensors:
  raise ValueError("For 'dstack', at least one tensor is needed to concatenate.")
- return _get_cache_prim(P.Concat)(2)(trans_inputs)
+ return _get_cache_prim(P.Concat)(2)(trans_tensors)


  @_primexpr
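The reworked dstack body above normalizes 0-D/1-D/2-D inputs to 3-D before a single concatenation on axis 2. The same promotion rules, written out in NumPy for clarity:

    import numpy as np

    def promote_for_dstack(t):
        if t.ndim == 0:
            return t.reshape(1, 1, 1)            # scalar -> (1, 1, 1)
        if t.ndim == 1:
            return t[np.newaxis, :, np.newaxis]  # (N,) -> (1, N, 1)
        if t.ndim == 2:
            return t[:, :, np.newaxis]           # (M, N) -> (M, N, 1)
        return t

    a = np.array([1.0, 2.0, 3.0])
    b = np.array([4.0, 5.0, 6.0])
    out = np.concatenate([promote_for_dstack(a), promote_for_dstack(b)], axis=2)
    print(out.shape)  # (1, 3, 2)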
@@ -5950,6 +6454,113 @@ def diff(x, n=1, axis=-1, prepend=None, append=None):
5950
6454
  return a2 - a1
5951
6455
 
5952
6456
 
6457
+ def _diff_is_scalar_or_scalar_tensor(value):
6458
+ """judge the value"""
6459
+ if isinstance(value, int):
6460
+ return True
6461
+
6462
+ if isinstance(value, ms.Tensor) and value.shape == ():
6463
+ return True
6464
+
6465
+ return False
6466
+
6467
+
6468
+ def _diff_check(input, n, dim):
6469
+ """judge the input n and dim"""
6470
+ if not isinstance(input, Tensor):
6471
+ raise TypeError("For 'diff', 'input' must be a tensor")
6472
+
6473
+ if not _diff_is_scalar_or_scalar_tensor(n):
6474
+ raise TypeError("For 'diff', 'n' must be a int scalar or int scalar tensor")
6475
+
6476
+ if not _diff_is_scalar_or_scalar_tensor(dim):
6477
+ raise TypeError("For 'diff', 'dim' must be a scalar or scalar tensor")
6478
+
6479
+ if input.dtype in (mstype.complex64, mstype.complex128, mstype.float64, mstype.int16):
6480
+ raise TypeError("For 'diff', 'input' do not support complex64/complex128/float64/int16")
6481
+
6482
+
6483
+ def _diff_helper(input, n, dim):
6484
+ """calculate the forward difference"""
6485
+ out_len = input.shape[dim] - 1
6486
+ is_bool = (input.dtype == mstype.bool_)
6487
+ result = input
6488
+
6489
+ for i in range(n): # pylint: disable=unused-variable
6490
+ if is_bool:
6491
+ result = logical_xor(narrow(result, dim, 1, out_len), narrow(result, dim, 0, out_len))
6492
+ else:
6493
+ result = sub_ext(narrow(result, dim, 1, out_len), narrow(result, dim, 0, out_len))
6494
+
6495
+ if out_len == 0:
6496
+ break
6497
+ out_len -= 1
6498
+
6499
+ return result
6500
+
6501
+
6502
+ def _diff_prepend_append_on_dim(input, prepend, append, dim):
6503
+ """append tensor on dim"""
6504
+ if prepend is not None and append is None:
6505
+ return cat((prepend, input), dim)
6506
+
6507
+ if prepend is None and append is not None:
6508
+ return cat((input, append), dim)
6509
+
6510
+ return cat((prepend, input, append), dim)
6511
+
6512
+
6513
+ def diff_ext(input, n=1, dim=-1, prepend=None, append=None):
6514
+ r"""
6515
+ Computes the n-th forward difference along the given dimension.
6516
+
6517
+ The first-order differences are given by :math:`out[i] = input[i+1] - input[i]`. Higher-order differences are
6518
+ calculated by applying `diff_ext` recursively.
6519
+
6520
+ .. warning::
6521
+ This is an experimental API that is subject to change or deletion.
6522
+
6523
+ Args:
6524
+ input (Tensor): the tensor to compute the differences on.
6525
+ n (int, optional): the number of times to recursively compute the difference.
6526
+ Default: ``1`` .
6527
+ dim (int, optional): the dimension to compute the difference along.
6528
+ Default is the last dimension. Default: ``-1`` .
6529
+ prepend (Tensor, optional): values to prepend to `input` along `dim`
6530
+ before computing the difference. Their dimensions must be equivalent to that of input,
6531
+ and their shapes must match input's shape except on `dim`. Default: ``None`` .
6532
+ append (Tensor, optional): values to append to `input` along `dim`
6533
+ before computing the difference. Their dimensions must be equivalent to that of input,
6534
+ and their shapes must match input's shape except on `dim`. Default: ``None`` .
6535
+
6536
+ Returns:
6537
+ Tensor, the result of n-th forward difference computation.
6538
+
6539
+ Raises:
6540
+ TypeError: If `input` is not a tensor.
6541
+ TypeError: If `n` is not a scalar or scalar tensor.
6542
+ TypeError: If `dim` is not a scalar or scalar tensor.
6543
+ TypeError: If `input` type is complex64, complex128, float64, int16.
6544
+
6545
+ Supported Platforms:
6546
+ ``Ascend``
6547
+
6548
+ Examples:
6549
+ >>> from mindspore import Tensor, ops
6550
+ >>> x = Tensor([1, 3, -1, 0, 4])
6551
+ >>> out = ops.diff_ext(x)
6552
+ >>> print(out.asnumpy())
6553
+ [ 2 -4 1 4]
6554
+ """
6555
+ _diff_check(input, n, dim)
6556
+
6557
+ if (prepend is None and append is None) or n == 0:
6558
+ return _diff_helper(input, n, dim)
6559
+
6560
+ input = _diff_prepend_append_on_dim(input, prepend, append, dim)
6561
+ return _diff_helper(input, n, dim)
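For reference, `numpy.diff` implements the same n-th forward difference that `_diff_helper` computes with `narrow` plus `sub_ext`/`logical_xor`, including the prepend/append concatenation step; a quick NumPy cross-check of the docstring's example:

import numpy as np

x = np.array([1, 3, -1, 0, 4])
print(np.diff(x))             # [ 2 -4  1  4], first-order differences
print(np.diff(x, n=2))        # [-6  5  3], the recursion applied twice
print(np.diff(x, prepend=0))  # [ 1  2 -4  1  4], prepend joins before differencing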
6562
+
6563
+
5953
6564
  def tril_indices(row, col, offset=0, *, dtype=mstype.int64):
5954
6565
  r"""
5955
6566
  Calculates the indices of the lower triangular elements in a `row` * `col` matrix
@@ -6638,7 +7249,7 @@ def logsumexp(input, axis, keep_dims=False):
6638
7249
  Args:
6639
7250
  input (Tensor): The input tensor. With float16 or float32 data type.
6640
7251
  axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Only constant value is allowed.
6641
- keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
7252
+ keep_dims (bool, optional): If True, keep these reduced dimensions and the length is 1.
6642
7253
  If ``False`` , don't keep these dimensions.
6643
7254
  Default : ``False`` .
6644
7255
 
@@ -6686,10 +7297,11 @@ def amin(input, axis=None, keepdims=False, *, initial=None, where=None):
6686
7297
  Args:
6687
7298
  input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
6688
7299
  :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
6689
- axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` , reduce all
6690
- dimensions. Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
6691
- keepdims (bool): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` , don't keep
6692
- these dimensions. Default: ``False`` .
7300
+ axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce. Default: ``None`` ,
7301
+ reduce all dimensions. Only constant value is allowed. Assume the rank of `input` is r, and the value range is
7302
+ [-r,r).
7303
+ keepdims (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` ,
7304
+ don't keep these dimensions. Default: ``False`` .
6693
7305
 
6694
7306
  Keyword Args:
6695
7307
 initial (scalar, optional): The maximum value of an output element. Must be present to allow computation
@@ -6796,10 +7408,11 @@ def amax(input, axis=None, keepdims=False, *, initial=None, where=None):
6796
7408
  Args:
6797
7409
  input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
6798
7410
  :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
6799
- axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` , reduce all
6800
- dimensions. Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
6801
- keepdims (bool): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` , don't keep
6802
- these dimensions. Default: ``False`` .
7411
+ axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce. Default: ``None`` ,
7412
+ reduce all dimensions. Only constant value is allowed. Assume the rank of `input` is r, and the value range is
7413
+ [-r,r).
7414
+ keepdims (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` ,
7415
+ don't keep these dimensions. Default: ``False`` .
6803
7416
 
6804
7417
  Keyword Args:
6805
7418
  initial (scalar, optional): The minimum value of an output element. Must be present to allow computation
@@ -6878,6 +7491,174 @@ def amax(input, axis=None, keepdims=False, *, initial=None, where=None):
6878
7491
  return _get_cache_prim(P.ReduceMax)(keepdims)(input, axis)
6879
7492
 
6880
7493
 
7494
+ def amax_ext(input, dim=(), keepdim=False):
7495
+ r"""
7496
+ Computes the maximum value of all elements along the specified `dim` dimension of the `input`,
7497
+ and retains the dimension based on the `keepdim` parameter.
7498
+
7499
+ .. warning::
7500
+ This is an experimental API that is subject to change or deletion.
7501
+
7502
+ Args:
7503
+ input (Tensor): Input tensor.
7504
+ dim (Union[int, tuple(int), list(int)], optional): The dimension to be reduced,
7505
+ the value should be within `[-len(input.shape), len(input.shape) - 1]`,
7506
+ when `dim` is `()`, all dimensions are reduced. Default: ``()``.
7507
+ keepdim (bool, optional): Whether the output tensor retains the dimension `dim`, default: ``False``.
7508
+
7509
+ Returns:
7510
+ Tensor, has same type as `input`, and the shape changes according to the values of `dim` and `keepdim`.
7511
+
7512
+ - If `dim` is `()`, and `keepdim` is False, the output is a 0-D tensor representing the maximum value of
7513
+ all elements in the `input` tensor.
7514
+ - If `dim` is `1`, and `keepdim` is False, the shape of output is
7515
+ :math:`(input.shape[0], input.shape[2], ..., input.shape[n])`.
7516
+ - If `dim` is `(1, 2)`, and `keepdim` is False, the shape of output is
7517
+ :math:`(input.shape[0], input.shape[3], ..., input.shape[n])`.
7518
+
7519
+ Raises:
7520
+ TypeError: If `input` is not a Tensor.
7521
+ TypeError: If `dim` is not an int or tuple(int) or list(int).
7522
+ TypeError: If `keepdim` is not a bool.
7523
+ ValueError: If the value of any element of `dim` is not in the range
7524
+ `[-len(input.shape), len(input.shape) - 1]`.
7525
+ RuntimeError: If any element of `dim` is repeated.
7526
+
7527
+ Supported Platforms:
7528
+ ``Ascend`` ``CPU``
7529
+
7530
+ Examples:
7531
+ >>> import mindspore
7532
+ >>> import numpy as np
7533
+ >>> from mindspore import Tensor
7534
+ >>> from mindspore import ops
7535
+ >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
7536
+ >>> output = ops.function.math_func.amax_ext(x, 1, keepdim=True)
7537
+ >>> result = output.shape
7538
+ >>> print(result)
7539
+ (3, 1, 5, 6)
7540
+ >>> # case 1: Reduces a dimension by the maximum value of all elements in the dimension.
7541
+ >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
7542
+ ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
7543
+ ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]),
7544
+ ... mindspore.float32)
7545
+ >>> output = ops.function.math_func.amax_ext(x)
7546
+ >>> print(output)
7547
+ 9.0
7548
+ >>> print(output.shape)
7549
+ ()
7550
+ >>> # case 2: Reduces a dimension along axis 0.
7551
+ >>> output = ops.function.math_func.amax_ext(x, 0, True)
7552
+ >>> print(output)
7553
+ [[[7. 7. 7. 7. 7. 7.]
7554
+ [8. 8. 8. 8. 8. 8.]
7555
+ [9. 9. 9. 9. 9. 9.]]]
7556
+ >>> # case 3: Reduces a dimension along axis 1.
7557
+ >>> output = ops.function.math_func.amax_ext(x, 1, True)
7558
+ >>> print(output)
7559
+ [[[3. 3. 3. 3. 3. 3.]]
7560
+ [[6. 6. 6. 6. 6. 6.]]
7561
+ [[9. 9. 9. 9. 9. 9.]]]
7562
+ >>> # case 4: Reduces a dimension along axis 2.
7563
+ >>> output = ops.function.math_func.amax_ext(x, 2, True)
7564
+ >>> print(output)
7565
+ [[[1.]
7566
+ [2.]
7567
+ [3.]]
7568
+ [[4.]
7569
+ [5.]
7570
+ [6.]]
7571
+ [[7.]
7572
+ [8.]
7573
+ [9.]]]
7574
+ """
7575
+ return reduce_max_impl(input, dim, keepdim)
7576
+
7577
+
7578
+ def amin_ext(input, dim=(), keepdim=False):
7579
+ r"""
7580
+ Computes the minimum value of all elements along the specified `dim` dimension of the `input`,
7581
+ and retains the dimension based on the `keepdim` parameter.
7582
+
7583
+ .. warning::
7584
+ This is an experimental API that is subject to change or deletion.
7585
+
7586
+ Args:
7587
+ input (Tensor): Input tensor.
7588
+ dim (Union[int, tuple(int), list(int)], optional): The dimension to be reduced,
7589
+ the value should be within `[-len(input.shape), len(input.shape) - 1]`,
7590
+ when `dim` is `()`, all dimensions are reduced. Default: ``()``.
7591
+ keepdim (bool, optional): Whether the output tensor retains the dimension `dim`, default: ``False``.
7592
+
7593
+ Returns:
7594
+ Tensor, has same type as `input`, and the shape changes according to the values of `dim` and `keepdim`.
7595
+
7596
+ - If `dim` is `()`, and `keepdim` is False, the output is a 0-D tensor representing the minimum value of
7597
+ all elements in the `input` tensor.
7598
+ - If `dim` is `1`, and `keepdim` is False, the shape of output is
7599
+ :math:`(input.shape[0], input.shape[2], ..., input.shape[n])`.
7600
+ - If `dim` is `(1, 2)`, and `keepdim` is False, the shape of output is
7601
+ :math:`(input.shape[0], input.shape[3], ..., input.shape[n])`.
7602
+
7603
+ Raises:
7604
+ TypeError: If `input` is not a Tensor.
7605
+ TypeError: If `dim` is not an int or tuple(int) or list(int).
7606
+ TypeError: If `keepdim` is not a bool.
7607
+ ValueError: If the value of any element of `dim` is not in the range
7608
+ `[-len(input.shape), len(input.shape) - 1]`.
7609
+ RuntimeError: If any element of `dim` is repeated.
7610
+
7611
+ Supported Platforms:
7612
+ ``Ascend`` ``CPU``
7613
+
7614
+ Examples:
7615
+ >>> import mindspore
7616
+ >>> import numpy as np
7617
+ >>> from mindspore import Tensor
7618
+ >>> from mindspore import ops
7619
+ >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
7620
+ >>> output = ops.function.math_func.amin_ext(x, 1, keepdim=True)
7621
+ >>> result = output.shape
7622
+ >>> print(result)
7623
+ (3, 1, 5, 6)
7624
+ >>> # case 1: Reduces a dimension by the minimum value of all elements in the dimension.
7625
+ >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
7626
+ ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
7627
+ ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]),
7628
+ ... mindspore.float32)
7629
+ >>> output = ops.function.math_func.amin_ext(x)
7630
+ >>> print(output)
7631
+ 1.0
7632
+ >>> print(output.shape)
7633
+ ()
7634
+ >>> # case 2: Reduces a dimension along axis 0.
7635
+ >>> output = ops.function.math_func.amin_ext(x, 0, True)
7636
+ >>> print(output)
7637
+ [[[1. 1. 1. 1. 1. 1.]
7638
+ [2. 2. 2. 2. 2. 2.]
7639
+ [3. 3. 3. 3. 3. 3.]]]
7640
+ >>> # case 3: Reduces a dimension along axis 1.
7641
+ >>> output = ops.function.math_func.amin_ext(x, 1, True)
7642
+ >>> print(output)
7643
+ [[[1. 1. 1. 1. 1. 1.]]
7644
+ [[4. 4. 4. 4. 4. 4.]]
7645
+ [[7. 7. 7. 7. 7. 7.]]]
7646
+ >>> # case 4: Reduces a dimension along axis 2.
7647
+ >>> output = ops.function.math_func.amin_ext(x, 2, True)
7648
+ >>> print(output)
7649
+ [[[1.]
7650
+ [2.]
7651
+ [3.]]
7652
+ [[4.]
7653
+ [5.]
7654
+ [6.]]
7655
+ [[7.]
7656
+ [8.]
7657
+ [9.]]]
7658
+ """
7659
+ return reduce_min_impl(input, dim, keepdim)
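The shape rules in the two docstrings above behave like NumPy's amax/amin, with one deliberate difference: here `dim=()` reduces over all dimensions, whereas NumPy's `axis=()` reduces nothing, so the all-reduce case maps to `axis=None`. A NumPy reference sketch:

import numpy as np

x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
print(np.amax(x, axis=None))                         # 23.0, the dim=() case
print(np.amax(x, axis=1).shape)                      # (2, 4), dim=1 with keepdim=False
print(np.amin(x, axis=(1, 2), keepdims=True).shape)  # (2, 1, 1), dim=(1, 2) with keepdim=True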
7660
+
7661
+
6881
7662
  def mean(x, axis=None, keep_dims=False):
6882
7663
  r"""
6883
7664
  Reduces all dimension of a tensor by averaging all elements in the dimension, by default.
@@ -7153,6 +7934,16 @@ def _multi_svd_norm(x, row_axis, col_axis, op):
7153
7934
  raise ValueError(f"For svd_norm, the op input must be one of ['amax', 'amin', 'sum'], but got f{op}")
7154
7935
 
7155
7936
 
7937
+ def _reshape_matrix_norm(input, res, dim, keepdims):
7938
+ """reshape res of matrix_norm if keepdims is True."""
7939
+ if keepdims:
7940
+ res_shape = list(input.shape)
7941
+ res_shape[dim[0]] = 1
7942
+ res_shape[dim[1]] = 1
7943
+ res = res.reshape(res_shape)
7944
+ return res
7945
+
7946
+
7156
7947
  def _normalize_axis_index(axis, ndim):
7157
7948
  """normalize_axis_index for norm."""
7158
7949
  # pylint: disable=chained-comparison
@@ -7482,21 +8273,311 @@ def _check_vector_norm_ord(ord):
7482
8273
  f"or must be int or float, but got {ord}.")
7483
8274
 
7484
8275
 
7485
- def _compute_vector_norm_inf(x, dim, keepdims, norm_func):
7486
- """compute vector norm of `x` when ord is ``inf`` or ``-inf`` """
7487
- if len(dim) == 1:
7488
- ret_norm = norm_func(ops.abs(x), axis=dim[0], keepdims=keepdims)[0]
7489
- else:
7490
- start_dim = min(dim)
7491
- end_dim = max(dim)
7492
- flatten_x = ops.flatten(x, start_dim=start_dim, end_dim=end_dim)
7493
- ret_norm = norm_func(ops.abs(flatten_x), axis=start_dim, keepdims=False)[0]
7494
- if keepdims is True:
7495
- ret_shape = list(x.shape)
7496
- for i in dim:
7497
- ret_shape[i] = 1
7498
- ret_norm = ret_norm.reshape(ret_shape)
7499
- return ret_norm
8276
+ def _compute_vector_norm_inf(x, dim, keepdims, norm_func):
8277
+ """compute vector norm of `x` when ord is ``inf`` or ``-inf`` """
8278
+ if len(dim) == 1:
8279
+ ret_norm = norm_func(ops.abs(x), axis=dim[0], keepdims=keepdims)[0]
8280
+ else:
8281
+ start_dim = min(dim)
8282
+ end_dim = max(dim)
8283
+ flatten_x = ops.flatten(x, start_dim=start_dim, end_dim=end_dim)
8284
+ ret_norm = norm_func(ops.abs(flatten_x), axis=start_dim, keepdims=False)[0]
8285
+ if keepdims is True:
8286
+ ret_shape = list(x.shape)
8287
+ for i in dim:
8288
+ ret_shape[i] = 1
8289
+ ret_norm = ret_norm.reshape(ret_shape)
8290
+ return ret_norm
8291
+
8292
+
8293
+ @_primexpr
8294
+ def _check_vector_norm_inputs(x, ord):
8295
+ """vector_norm inputs check"""
8296
+ if not isinstance(x, (Tensor, Tensor_)):
8297
+ raise TypeError(f"For `vector_norm`, the `x` must be Tensor!, but get {type(x)}.")
8298
+
8299
+ if not isinstance(ord, (bool, int, float)):
8300
+ raise ValueError(f"For `vector_norm`, the ord mode must be one of [bool, int, float, inf, -inf], "
8301
+ f"but got {ord}.")
8302
+
8303
+
8304
+ def vector_norm_ext(x, ord=2, dim=None, keepdim=False, *, dtype=None):
8305
+ r"""
8306
+ Returns the vector norm of the given tensor on the specified dimensions.
8307
+
8308
+ `ord` is the calculation mode of norm. The following norm modes are supported.
8309
+
8310
+ ========================== ==========================================
8311
+ `ord` norm for vectors
8312
+ ========================== ==========================================
8313
+ ``2`` (Default) ``2``-norm (see below)
8314
+ ``inf`` :math:`max(abs(x))`
8315
+ ``-inf`` :math:`min(abs(x))`
8316
+ ``0`` :math:`sum(x!=0)`
8317
+ other ``int`` or ``float`` :math:`sum(abs(x)^{ord})^{(1 / ord)}`
8318
+ ========================== ==========================================
8319
+
8320
+ .. warning::
8321
+ This is an experimental API that is subject to change or deletion.
8322
+
8323
+ Args:
8324
+ x (Tensor): Tensor of shape :math:`(*)` where * is zero or more batch dimensions.
8325
+ ord (Union[bool, int, float, inf, -inf], optional): norm's mode. refer to the table above for
8326
+ behavior. Default: ``2`` .
8327
+ dim (Union[int, List(int), Tuple(int)], optional): The dimensions along which to perform the vector norm
8328
+ calculation. Default: ``None`` .
8329
+
8330
+ - When `dim` is an integer, a list or a tuple, the norm calculation will be performed across these specified
8331
+ dimensions, while the remaining dimensions will be considered as batch dimensions.
8332
+
8333
+ - When `dim` is None, the norm will be calculated after flattening the Tensor `x` .
8334
+
8335
+ keepdim (bool): whether the output Tensor retains the original dimension. Default: ``False`` .
8336
+
8337
+ Keyword Args:
8338
+ dtype (:class:`mindspore.dtype`, optional): When set, `x` will be converted to the specified type,
8339
+ `dtype` before execution, and dtype of returned Tensor will also be `dtype`.
8340
+ When `dtype` is ``None`` , the dtype of `x` is preserved. Default: ``None`` .
8341
+
8342
+ Returns:
8343
+ Tensor, the result of norm calculation on the specified dimension, `dim`.
8344
+
8345
+ Raises:
8346
+ TypeError: If `x` is not a Tensor.
8347
+ TypeError: If `dim` is neither an int nor a list or tuple.
8348
+ ValueError: If `ord` is not in [bool, int, float, inf, -inf].
8349
+ ValueError: If the elements of `dim` are duplicated.
8350
+ ValueError: If any element of `dim` is out of range.
8351
+
8352
+ Supported Platforms:
8353
+ ``Ascend``
8354
+
8355
+ Examples:
8356
+ >>> import mindspore as ms
8357
+ >>> x = ms.ops.arange(0, 12, dtype=ms.float32) - 6
8358
+ >>> print(ms.ops.vector_norm_ext(x, ord=2))
8359
+ 12.083046
8360
+ >>> print(ms.ops.vector_norm_ext(x, ord=float('inf')))
8361
+ 6.0
8362
+ >>> print(ms.ops.vector_norm_ext(x, ord=float('-inf')))
8363
+ 0.0
8364
+ >>> print(ms.ops.vector_norm_ext(x, ord=0))
8365
+ 11.0
8366
+ >>> print(ms.ops.vector_norm_ext(x, ord=4.5))
8367
+ 7.2243643
8368
+ """
8369
+ _check_vector_norm_inputs(x, ord)
8370
+ if float(ord) in [0.0, 1.0, 2.0, 3.0]:
8371
+ return linalg_vector_norm_op(x, float(ord), dim, keepdim, dtype)
8372
+
8373
+ if x.dtype in [mstype.bfloat16, mstype.float16, mstype.float32]:
8374
+ if dtype is None:
8375
+ return lp_norm_v2_op(x, ord, dim, keepdim, 0.0)
8376
+ return ops.cast(lp_norm_v2_op(x, ord, dim, keepdim, 0.0), dtype)
8377
+
8378
+ cast_dtype = x.dtype if dtype is None else dtype
8379
+ x = ops.cast(x, mstype.float32)
8380
+ return ops.cast(lp_norm_v2_op(x, ord, dim, keepdim, 0.0), cast_dtype)
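The `ord` table above can be checked against NumPy for the docstring's vector; this is a reference for the semantics only, not the Ascend kernel dispatch:

import numpy as np

x = np.arange(0, 12, dtype=np.float32) - 6
print(np.linalg.norm(x, 2))                   # 12.083046
print(np.linalg.norm(x, np.inf))              # 6.0, max(abs(x))
print(np.linalg.norm(x, -np.inf))             # 0.0, min(abs(x))
print(np.linalg.norm(x, 0))                   # 11.0, count of non-zero entries
print(np.sum(np.abs(x) ** 4.5) ** (1 / 4.5))  # 7.2243643, the general p-norm formula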
8381
+
8382
+
8383
+ def matrix_norm_ext(A, ord='fro', dim=(-2, -1), keepdim=False, *, dtype=None):
8384
+ r"""
8385
+ Returns the matrix norm of a given tensor on the specified dimensions.
8386
+
8387
+ `ord` is the calculation mode of norm. The following norm modes are supported.
8388
+
8389
+ ====================== ================================
8390
+ `ord` norm for matrix
8391
+ ====================== ================================
8392
+ ``'fro'`` (Default) Frobenius norm
8393
+ ``'nuc'`` nuclear norm
8394
+ ``inf`` :math:`max(sum(abs(x), dim=1))`
8395
+ ``-inf`` :math:`min(sum(abs(x), dim=1))`
8396
+ ``1`` :math:`max(sum(abs(x), dim=0))`
8397
+ ``-1`` :math:`min(sum(abs(x), dim=0))`
8398
+ ``2`` largest singular value
8399
+ ``-2`` smallest singular value
8400
+ ====================== ================================
8401
+
8402
+ .. warning::
8403
+ This is an experimental API that is subject to change or deletion.
8404
+
8405
+ Args:
8406
+ A (Tensor): Tensor of shape :math:`(*, m, n)` where * is zero or more batch dimensions.
8407
+ ord (Union[int, inf, -inf, 'fro', 'nuc'], optional): norm's mode. refer to the table above for
8408
+ behavior. Default: ``'fro'`` .
8409
+ dim (Tuple(int, int), optional): calculate the dimension of the matrix norm.
8410
+ Default: ``(-2, -1)`` .
8411
+ keepdim (bool): whether the output Tensor retains the original dimension. Default: ``False`` .
8412
+
8413
+ Keyword Args:
8414
+ dtype (:class:`mindspore.dtype`, optional): When set, `A` will be converted to the specified type,
8415
+ `dtype`, before execution, and dtype of returned Tensor will also be `dtype`.
8416
+ When `dtype` is ``None`` , the dtype of `A` is preserved. Default: ``None`` .
8417
+
8418
+ Returns:
8419
+ Tensor, the result of norm calculation on the specified dimension, `dim`.
8420
+
8421
+ Raises:
8422
+ TypeError: If `dim` is not a tuple of int.
8423
+ ValueError: If the length of `dim` is not equal to 2.
8424
+ ValueError: If `ord` is not in [2, -2, 1, -1, float('inf'), float('-inf'), 'fro', 'nuc'].
8425
+ ValueError: If two elements of `dim` are the same after normalization.
8426
+ ValueError: If any element of `dim` is out of range.
8427
+
8428
+ Note:
8429
+ Dynamic shape, dynamic rank and mutable input are not supported in `graph mode (mode=mindspore.GRAPH_MODE)
8430
+ <https://www.mindspore.cn/docs/en/master/model_train/program_form/static_graph.html>`_.
8431
+
8432
+ Supported Platforms:
8433
+ ``Ascend``
8434
+
8435
+ Examples:
8436
+ >>> import mindspore as ms
8437
+ >>> A = ms.ops.arange(0, 12, dtype=ms.float32).reshape(3, 4)
8438
+ >>> print(ms.ops.matrix_norm_ext(A, ord='fro'))
8439
+ 22.494444
8440
+ >>> print(ms.ops.matrix_norm_ext(A, ord='nuc'))
8441
+ 24.364643
8442
+ >>> print(ms.ops.matrix_norm_ext(A, ord=float('inf')))
8443
+ 38.0
8444
+ >>> print(ms.ops.matrix_norm_ext(A, ord=float('-inf')))
8445
+ 6.0
8446
+ """
8447
+ ndim = A.ndim
8448
+ row_axis, col_axis = _check_matrix_norm_axis(dim, ndim)
8449
+ _check_matrix_norm_ord(ord)
8450
+ if ord == 'fro':
8451
+ return vector_norm_ext(A, 2, dim, keepdim, dtype=dtype)
8452
+ if ord == 'nuc':
8453
+ res = _multi_svd_norm(A, row_axis, col_axis, 'sum')
8454
+ return _reshape_matrix_norm(A, res, dim, keepdim)
8455
+ if ord == 2:
8456
+ res = _multi_svd_norm(A, row_axis, col_axis, 'amax')
8457
+ return _reshape_matrix_norm(A, res, dim, keepdim)
8458
+ if ord == -2:
8459
+ res = _multi_svd_norm(A, row_axis, col_axis, 'amin')
8460
+ return _reshape_matrix_norm(A, res, dim, keepdim)
8461
+ if ord in [float('inf'), -float('inf')]:
8462
+ row_axis, col_axis = col_axis, row_axis
8463
+ if not keepdim and col_axis > row_axis:
8464
+ col_axis -= 1
8465
+ if ord < 0:
8466
+ return ops.amin(vector_norm_ext(A, 1, row_axis, keepdim, dtype=dtype), col_axis, keepdim)
8467
+ return ops.amax(vector_norm_ext(A, 1, row_axis, keepdim, dtype=dtype), col_axis, keepdim)
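The inf/-inf branch above composes two reductions: a 1-norm along one axis followed by an amax/amin over the remaining axis, with the axis swap turning column sums into row sums. NumPy check for the docstring's 3x4 matrix:

import numpy as np

A = np.arange(0, 12, dtype=np.float32).reshape(3, 4)
print(np.linalg.norm(A, np.inf))    # 38.0, the maximum absolute row sum
print(np.abs(A).sum(axis=1).max())  # 38.0, the same reduction spelled out
print(np.linalg.norm(A, -np.inf))   # 6.0, the minimum absolute row sum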
8468
+
8469
+
8470
+ @_primexpr
8471
+ def _check_linalg_norm_input(dim, ord, ndim):
8472
+ """dim check"""
8473
+ if dim is None:
8474
+ if ord is not None and ndim > 2:
8475
+ raise ValueError("For `linalg.norm`, the input must be 1D or 2D when `ord` is specified but `dim` is None.")
8476
+ dim = tuple(range(ndim))
8477
+ if (ord is None) or (ord == 'fro' and ndim == 2) or (ord == 2 and ndim == 1):
8478
+ return dim, True
8479
+ return dim, False
8480
+ if isinstance(dim, int):
8481
+ dim = (dim,)
8482
+ elif isinstance(dim, (list, tuple)):
8483
+ if len(dim) > 2:
8484
+ raise ValueError(f"For `linalg.norm`, the length of `dim` must be 1 or 2 when dim is not None",
8485
+ f"but got {len(dim)}.")
8486
+ else:
8487
+ raise TypeError(f'For `linalg.norm`, the dim should be int, list of int or tuple of int, but got {type(dim)}')
8488
+ return dim, False
8489
+
8490
+
8491
+ def linalg_norm(A, ord=None, dim=None, keepdim=False, *, dtype=None):
8492
+ r"""
8493
+ Returns the matrix norm or vector norm of a given tensor.
8494
+
8495
+ `ord` is the calculation mode of norm. The following norm modes are supported.
8496
+
8497
+ ====================== ================================ ==========================================
8498
+ `ord` norm for matrices norm for vectors
8499
+ ====================== ================================ ==========================================
8500
+ `None` (default) Frobenius norm `2`-norm (see below)
8501
+ `'fro'` Frobenius norm -- not supported --
8502
+ `'nuc'` nuclear norm -- not supported --
8503
+ `inf` :math:`max(sum(abs(x), dim=1))` :math:`max(abs(x))`
8504
+ `-inf` :math:`min(sum(abs(x), dim=1))` :math:`min(abs(x))`
8505
+ `0` -- not supported -- :math:`sum(x != 0)`
8506
+ `1` :math:`max(sum(abs(x), dim=0))` as below
8507
+ `-1` :math:`min(sum(abs(x), dim=0))` as below
8508
+ `2` largest singular value as below
8509
+ `-2` smallest singular value as below
8510
+ other `int` or `float` -- not supported -- :math:`sum(abs(x)^{ord})^{(1 / ord)}`
8511
+ ====================== ================================ ==========================================
8512
+
8513
+ .. warning::
8514
+ This is an experimental API that is subject to change or deletion.
8515
+
8516
+ Args:
8517
+ A (Tensor): Tensor of shape :math:`(*, n)` or :math:`(*, m, n)` where * is zero or more batch dimensions.
8518
+ ord (Union[int, float, inf, -inf, 'fro', 'nuc'], optional): norm's mode. refer to the table above for
8519
+ behavior. Default: ``None`` .
8520
+ dim (Union[int, Tuple(int)], optional): calculate the dimension of vector norm or matrix norm.
8521
+ Default: ``None`` .
8522
+
8523
+ - When `dim` is int, it will be calculated by vector norm.
8524
+
8525
+ - When `dim` is a 2-tuple, it will be calculated by matrix norm.
8526
+
8527
+ - If `dim` is None and `ord` is None, `A` will be flattened to 1D and the 2-norm
8528
+ of the vector will be calculated.
8529
+
8530
+ - If `dim` is None and `ord` is not None, `A` must be 1D or 2D.
8531
+
8532
+ keepdim (bool): whether the output Tensor retains the original dimension. Default: ``False`` .
8533
+
8534
+ Keyword Args:
8535
+ dtype (:class:`mindspore.dtype`, optional): When set, `A` will be converted to the specified type,
8536
+ `dtype`, before execution, and dtype of returned Tensor will also be `dtype`. Default: ``None`` .
8537
+
8538
+ Returns:
8539
+ Tensor, the result of norm calculation on the specified dimension, `dim`, has the same dtype as `A`.
8540
+
8541
+ Raises:
8542
+ ValueError: If `dim` is out of range.
8543
+ TypeError: If `dim` is neither an int nor a tuple of int.
8544
+ TypeError: If `A` is a vector and `ord` is a str.
8545
+ ValueError: If `A` is a matrix and `ord` is not a valid mode.
8546
+ ValueError: If two elements of `dim` are the same after normalization.
8547
+ ValueError: If any element of `dim` is out of range.
8548
+
8549
+ Note:
8550
+ Dynamic shape, dynamic rank and mutable input are not supported in `graph mode (mode=mindspore.GRAPH_MODE)
8551
+ <https://www.mindspore.cn/docs/en/master/model_train/program_form/static_graph.html>`_.
8552
+
8553
+ Supported Platforms:
8554
+ ``Ascend``
8555
+
8556
+ Examples:
8557
+ >>> import mindspore as ms
8558
+ >>> from mindspore import ops
8559
+ >>> data_range = ops.arange(-13, 13, dtype=ms.float32)
8560
+ >>> x = data_range[data_range != 0]
8561
+ >>> print(ops.function.math_func.linalg_norm(x))
8562
+ 38.327538
8563
+ >>> print(ops.function.math_func.linalg_norm(x, 1))
8564
+ 169.0
8565
+ >>> n = ops.arange(27, dtype=ms.float32).reshape(3, 3, 3)
8566
+ >>> print(ops.function.math_func.linalg_norm(n, dim=(1, 2)))
8567
+ [14.282857 39.76179 66.45299 ]
8568
+ >>> print(ops.function.math_func.linalg_norm(n[0, :, :]))
8569
+ 14.282857
8570
+ >>> print(ops.function.math_func.linalg_norm(n[1, :, :]))
8571
+ 39.76179
8572
+ """
8573
+ dim, immediate = _check_linalg_norm_input(dim, ord, A.ndim)
8574
+ if immediate:
8575
+ return vector_norm_ext(A, 2, dim, keepdim, dtype=dtype)
8576
+ if ord is not None:
8577
+ if ord in ['fro', 'nuc'] or (dim is not None and len(dim) == 2) or (dim is None and A.ndim == 2):
8578
+ return matrix_norm_ext(A, ord, dim, keepdim, dtype=dtype)
8579
+ return vector_norm_ext(A, ord, dim, keepdim, dtype=dtype)
8580
+ return vector_norm_ext(A, 2, dim, keepdim, dtype=dtype)
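Dispatch sanity check: with `ord=None` and `dim=None` the input is flattened and the vector 2-norm is taken, which is also `numpy.linalg.norm`'s default; a reference for the docstring's examples:

import numpy as np

n = np.arange(27, dtype=np.float32).reshape(3, 3, 3)
print(np.sqrt((n ** 2).sum()))  # flat 2-norm, the ord=None, dim=None case
print(np.linalg.norm(n[0]))     # 14.282857, Frobenius norm of one 3x3 slice
print(np.linalg.norm(n[1]))     # 39.76179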
7500
8581
 
7501
8582
 
7502
8583
  def norm_ext(input, p='fro', dim=None, keepdim=False, *, dtype=None):
@@ -7508,46 +8589,44 @@ def norm_ext(input, p='fro', dim=None, keepdim=False, *, dtype=None):
7508
8589
  ====================== ================================ ==========================================
7509
8590
  `p` norm for matrices norm for vectors
7510
8591
  ====================== ================================ ==========================================
7511
- `None` (default) Frobenius norm `2`-norm (see below)
7512
8592
  `'fro'` Frobenius norm -- not supported --
7513
8593
  `'nuc'` nuclear norm -- not supported --
7514
- `inf` :math:`max(sum(abs(x), dim=1))` :math:`max(abs(x))`
7515
- `-inf` :math:`min(sum(abs(x), dim=1))` :math:`min(abs(x))`
7516
- `0` -- not supported -- :math:`sum(x != 0)`
7517
- other `int` or `float` -- not supported -- :math:`sum(abs(x)^{ord})^{(1 / ord)}`
8594
+ other `int` or `float` -- not supported -- :math:`sum(abs(x)^{p})^{(1 / p)}`
7518
8595
  ====================== ================================ ==========================================
7519
8596
 
7520
8597
  .. warning::
7521
8598
  This is an experimental API that is subject to change or deletion.
7522
8599
 
7523
8600
  Args:
7524
- input (Tensor): The input of norm with data type of bfloat16, float16 or float32.
7525
- The shape is :math:`(*)` where :math:`*` means, any number of additional dimensions.
7526
- p (Union[int, float, inf, -inf, 'fro', 'nuc'], optional): norm's mode. refer to the table above for
8601
+ input (Tensor): The shape is :math:`(*)` or :math:`(*, m, n)`
8602
+ where :math:`*` means, any number of additional dimensions.
8603
+ p (Union[bool, int, float, inf, -inf, 'fro', 'nuc'], optional): norm's mode. refer to the table above for
7527
8604
  behavior. Default: ``fro`` .
7528
- dim (Union[int, Tuple(int)], optional): calculate the dimension of vector norm or matrix norm.
8605
+ dim (Union[int, List(int), Tuple(int)], optional): calculate the dimension of vector norm or matrix norm.
7529
8606
  Default: ``None`` .
7530
- keepdim (bool): whether the output Tensor retains the original dimension. Default: ``False`` .
8607
+ keepdim (bool, optional): whether the output Tensor retains the original dimension. Default: ``False`` .
7531
8608
 
7532
8609
  Keyword Args:
7533
8610
  dtype (:class:`mindspore.dtype`, optional): When set, `input` will be converted to the specified type,
7534
8611
  `dtype`, before execution, and dtype of returned Tensor will also be `dtype`. Default: ``None`` .
7535
8612
 
7536
8613
  Returns:
7537
- Tensor, the result of norm calculation on the specified dimension, `dim`, has the same dtype as `input`.
8614
+ Tensor, the result of norm calculation on the specified dimension, `dim`.
7538
8615
 
7539
8616
  Raises:
8617
+ TypeError: If `input` is not a Tensor.
7540
8618
  ValueError: If `dim` is out of range.
7541
8619
  TypeError: If `dim` is neither an int nor a tuple of int.
7542
- ValueError: If two elements of `dim` is same after normalize.
8620
+ ValueError: If two elements of `dim` are the same after normalization.
7543
8621
 ValueError: If any element of `dim` is out of range.
7544
8622
 
8623
+ Note:
8624
+ Dynamic shape, dynamic rank and mutable input are not supported in `graph mode (mode=mindspore.GRAPH_MODE)
8625
+ <https://www.mindspore.cn/docs/en/master/model_train/program_form/static_graph.html>`_.
8626
+
7545
8627
  Supported Platforms:
7546
8628
  ``Ascend``
7547
8629
 
7548
- Note:
7549
- Currently, it only support `ops.function.math_func.norm_ext(input, p=number)`.
7550
-
7551
8630
  Examples:
7552
8631
  >>> import mindspore as ms
7553
8632
  >>> from mindspore import ops
@@ -7559,25 +8638,19 @@ def norm_ext(input, p='fro', dim=None, keepdim=False, *, dtype=None):
7559
8638
  """
7560
8639
  if not isinstance(input, (Tensor, Tensor_)):
7561
8640
  raise TypeError(f"For `norm_ext`, the `input` must be Tensor!, but get {type(input)}.")
7562
-
7563
- if (dim is not None) or keepdim or (dtype is not None):
7564
- raise ValueError(f"For `norm_ext`, the value of `dim`, `keepdim` and `dtype` must be default value currently.")
7565
-
7566
- if isinstance(p, (int, float)):
7567
- if float(p) in [0.0, 1.0, 2.0, 3.0]:
7568
- return norm_op(input, p, dim, keepdim, dtype)
7569
- if input.dtype in [mstype.bfloat16, mstype.float16, mstype.float32]:
7570
- return lp_norm_v2_op(input, p, dim, keepdim, 0.0)
7571
- dtype = input.dtype
7572
- input = ops.cast(input, mstype.float32)
7573
- return ops.cast(lp_norm_v2_op(input, p, dim, keepdim, 0.0), dtype)
7574
-
8641
+ if isinstance(p, (bool, int, float)):
8642
+ return vector_norm_ext(input, p, dim, keepdim, dtype=dtype)
7575
8643
  if p == 'fro':
7576
8644
  if isinstance(dim, (list, tuple)) and len(dim) > 2:
7577
8645
  raise ValueError(f"For `norm_ext`, the size of `dim` cannot be greater than 2 "
7578
- f"when the mode of norm is `fro`.")
7579
- return norm_op(input, 2.0, dim, keepdim, dtype)
7580
- raise ValueError(f"For `norm_ext`, the value of `p` cannot be `{p}` currently.")
8646
+ f"when the norm mode is `fro`.")
8647
+ return linalg_vector_norm_op(input, 2.0, dim, keepdim, dtype)
8648
+ if p == 'nuc':
8649
+ dim = tuple(range(input.ndim)) if dim is None else dim
8650
+ return matrix_norm_ext(input, p, dim, keepdim, dtype=dtype)
8651
+ raise ValueError(f"For `norm_ext`, the value of `p` must be one of [int, float, inf, -inf, 'fro', 'nuc',] "
8652
+ f"but got `{p}`.")
8653
+
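The 'fro' and 'nuc' branches above reduce to a flat 2-norm and a sum of singular values respectively; a NumPy reference for the matrix used in the matrix_norm_ext docstring:

import numpy as np

A = np.arange(0, 12, dtype=np.float32).reshape(3, 4)
print(np.sqrt((A ** 2).sum()))                   # 22.494444, 'fro' as a flat 2-norm
print(np.linalg.svd(A, compute_uv=False).sum())  # 24.364643, 'nuc' as the singular-value sum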
7581
8654
 
7582
8655
  def vector_norm(x, ord=2, axis=None, keepdims=False, *, dtype=None):
7583
8656
  r"""
@@ -7671,7 +8744,7 @@ def vector_norm(x, ord=2, axis=None, keepdims=False, *, dtype=None):
7671
8744
  @_primexpr
7672
8745
  def _check_matrix_norm_axis(axis, ndim):
7673
8746
  """matrix_norm axis check"""
7674
- if not isinstance(axis, tuple):
8747
+ if not isinstance(axis, (list, tuple)):
7675
8748
 raise TypeError(f'For matrix_norm, the axis should be a list or tuple of int, but got {type(axis)}')
7676
8749
  if len(axis) != 2:
7677
8750
  raise ValueError(f'For matrix_norm, the length of axis should be 2, but got {len(axis)}.')
@@ -7741,21 +8814,21 @@ def matrix_norm(A, ord='fro', axis=(-2, -1), keepdims=False, *, dtype=None):
7741
8814
  Examples:
7742
8815
  >>> import mindspore as ms
7743
8816
  >>> A = ms.ops.arange(0, 12, dtype=ms.float32).reshape(3, 4)
7744
- >>> print(ms.ops.matrix_norm(x, ord='fro'))
8817
+ >>> print(ms.ops.matrix_norm(A, ord='fro'))
7745
8818
  22.494444
7746
- >>> print(ms.ops.matrix_norm(x, ord='nuc'))
8819
+ >>> print(ms.ops.matrix_norm(A, ord='nuc'))
7747
8820
  24.364643
7748
- >>> print(ms.ops.matrix_norm(x, ord=float('inf')))
8821
+ >>> print(ms.ops.matrix_norm(A, ord=float('inf')))
7749
8822
  38.0
7750
- >>> print(ms.ops.matrix_norm(x, ord=float('-inf')))
8823
+ >>> print(ms.ops.matrix_norm(A, ord=float('-inf')))
7751
8824
  6.0
7752
- >>> print(ms.ops.vector_norm(x, ord=1))
8825
+ >>> print(ms.ops.vector_norm(A, ord=1))
7753
8826
  21.0
7754
- >>> print(ms.ops.vector_norm(x, ord=-1))
8827
+ >>> print(ms.ops.vector_norm(A, ord=-1))
7755
8828
  12.0
7756
- >>> print(ms.ops.vector_norm(x, ord=2))
8829
+ >>> print(ms.ops.vector_norm(A, ord=2))
7757
8830
  22.409302
7758
- >>> print(ms.ops.vector_norm(x, ord=-2))
8831
+ >>> print(ms.ops.vector_norm(A, ord=-2))
7759
8832
  1.672928e-07
7760
8833
  """
7761
8834
  ndim = A.ndim
@@ -8707,6 +9780,8 @@ def log2(input):
8707
9780
  >>> print(output)
8708
9781
  [1. 2. 3.]
8709
9782
  """
9783
+ if input.dtype == mstype.bool_:
9784
+ input = input.astype(mstype.int64)
8710
9785
  x_dtype = dtype_(input)
8711
9786
  denominator = log_(_make_tensor(2, x_dtype))
8712
9787
  frac_log = log_(input)
@@ -9424,7 +10499,7 @@ def trapz(y, x=None, *, dx=1.0, dim=-1):
9424
10499
  TypeError: If `y` is not a Tensor.
9425
10500
  TypeError: If `x` is not None and is not a Tensor.
9426
10501
  TypeError: If `dx` is not a float number.
9427
- TypeError: If `dim` is not a Integer.
10502
+ TypeError: If `dim` is not an integer.
9428
10503
 
9429
10504
  Supported Platforms:
9430
10505
  ``Ascend`` ``GPU`` ``CPU``
@@ -9477,7 +10552,7 @@ def cholesky(input_x, upper=False):
9477
10552
  Args:
9478
10553
  input_x (Tensor): Tensor of shape :math:`(*, N, N)`, where :math:`*` is zero or more batch dimensions
9479
10554
  consisting of symmetric positive-definite matrices, with float32 or float64 data type.
9480
- upper (bool): If `upper` is `True`, returns an upper-triangular matrix. If `upper` is `False`, returns
10555
+ upper (bool, optional): If `upper` is `True`, returns an upper-triangular matrix. If `upper` is `False`, returns
9481
10556
  a lower-triangular matrix. Default: ``False`` .
9482
10557
 
9483
10558
  Returns:
@@ -9630,7 +10705,7 @@ def cross(input, other, dim=None):
9630
10705
  found with the size `3`. Default: ``None``.
9631
10706
 
9632
10707
  Returns:
9633
- Tensor, has the same shape and type as input `input`.
10708
+ Tensor, has the same shape and type as `input`.
9634
10709
 
9635
10710
  Raises:
9636
10711
  TypeError: If `input` is not a Tensor.
@@ -9794,7 +10869,7 @@ def cumprod(input, dim, dtype=None):
9794
10869
  y_i = x_1 * x_2 * x_3 * ... * x_i
9795
10870
 
9796
10871
  Args:
9797
- input (Tensor[Number]): The input tensor.
10872
+ input (Tensor): The input tensor.
9798
10873
  :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
9799
10874
  dim (int): The dimensions to compute the cumulative product. Only constant value is allowed.
9800
10875
  dtype (:class:`mindspore.dtype`, optional): The desired data type of output.
@@ -10034,53 +11109,12 @@ def polygamma(n, input):
10034
11109
  return poly_gamma_(n, input)
10035
11110
 
10036
11111
 
10037
- def isinf(input):
10038
- r"""
10039
- Determines which elements are inf or -inf for each position.
10040
-
10041
- .. math::
10042
-
10043
- out_i = \begin{cases}
10044
- & \ True,\ \text{ if } x_{i} = \text{Inf} \\
10045
- & \ False,\ \text{ if } x_{i} \ne \text{Inf}
10046
- \end{cases}
10047
-
10048
- where :math:`Inf` means not a number.
10049
-
10050
- Args:
10051
- input (Tensor): The input tensor.
10052
-
10053
- Returns:
10054
- Tensor, has the same shape of input, and the dtype is bool.
10055
-
10056
- Raises:
10057
- TypeError: If `input` is not a Tensor.
10058
-
10059
- Supported Platforms:
10060
- ``Ascend`` ``GPU`` ``CPU``
10061
-
10062
- Examples:
10063
- >>> import mindspore
10064
- >>> import numpy as np
10065
- >>> from mindspore import Tensor, ops
10066
- >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
10067
- >>> output = ops.isinf(x)
10068
- >>> print(output)
10069
- [False False True]
10070
- >>> x = Tensor(2.1, mindspore.float64)
10071
- >>> output = ops.isinf(x)
10072
- >>> print(output)
10073
- False
10074
- """
10075
- return isinf_(input)
10076
-
10077
-
10078
11112
  def _is_sign_inf(x, fn):
10079
11113
  """Tests element-wise for infinity with sign."""
10080
11114
  shape = x.shape
10081
11115
  zeros_tensor = zeros_(shape, mstype.float32)
10082
11116
  ones_tensor = ones_(shape, mstype.float32)
10083
- is_inf = isinf_(x)
11117
+ is_inf = isinf(x)
10084
11118
  is_sign = fn(x, zeros_tensor)
10085
11119
  res = ops.select(is_inf, ones_tensor, zeros_tensor)
10086
11120
  res = ops.select(is_sign, res, zeros_tensor)
@@ -10091,6 +11125,9 @@ def isposinf(input):
10091
11125
  """
10092
11126
  Tests element-wise for positive infinity.
10093
11127
 
11128
+ .. warning::
11129
+ On Ascend, this is only supported on Atlas A2 and later platforms.
11130
+
10094
11131
  Args:
10095
11132
  input (Tensor): Input values.
10096
11133
 
@@ -10119,6 +11156,9 @@ def isneginf(input):
10119
11156
  """
10120
11157
  Tests element-wise for negative infinity.
10121
11158
 
11159
+ .. warning::
11160
+ On Ascend, this is only supported on Atlas A2 and later platforms.
11161
+
10122
11162
  Args:
10123
11163
  input (Tensor): Input Tensor.
10124
11164
 
@@ -10161,7 +11201,7 @@ def logical_xor(input, other):
10161
11201
 
10162
11202
  Raises:
10163
11203
  TypeError: If the dtype of `input` or `other` is not bool or can not be implicitly converted to bool.
10164
- ValueError: If the shape of two inputs cannot be broadcast.
11204
+ ValueError: If the shapes of two inputs cannot be broadcast.
10165
11205
 
10166
11206
  Supported Platforms:
10167
11207
  ``Ascend`` ``CPU``
@@ -10482,7 +11522,7 @@ def tanhshrink(input):
10482
11522
 
10483
11523
  if input.dtype in mstype.int_type + mstype.uint_type:
10484
11524
  input = input.astype(mstype.float32)
10485
- return input - tanh_(input)
11525
+ return input - tanh(input)
10486
11526
 
10487
11527
 
10488
11528
  def zeta(input, other):
@@ -11412,34 +12452,18 @@ def _calc_new_shape(shape, axes, position=0):
11412
12452
 
11413
12453
  def tensor_dot(x1, x2, axes):
11414
12454
  """
11415
- Computation of Tensor contraction on arbitrary axes between tensors `a` and `b`.
11416
-
11417
- Contraction allows for the summation of products of elements of `a` and `b` on specified axes.
11418
- The same number of axes must be specified for both x1 and x2, and values must be within range
11419
- of number of dims of both `a` and `b`.
11420
-
11421
- Selected dims in both inputs must also match.
11422
-
11423
- axes = 0 leads to outer product.
11424
- axes = 1 leads to normal matrix multiplication when inputs both 2D.
11425
- axes = 1 is the same as axes = ((1,),(0,)) where both `a` and `b` are 2D.
11426
- axes = 2 is the same as axes = ((1,2),(0,1)) where both `a` and `b` are 3D.
12455
+ Compute the tensor dot product along the specified axes.
11427
12456
 
11428
12457
  Args:
11429
- x1 (Tensor): First tensor in tensor_dot with datatype float16 or float32
11430
- x2 (Tensor): Second tensor in tensor_dot with datatype float16 or float32
11431
- axes (Union[int, tuple(int), tuple(tuple(int)), list(list(int))]): Single value or
11432
- tuple/list of length 2 with dimensions specified for `a` and `b` each. If single value `N` passed,
11433
- automatically picks up last N dims from `a` input shape and first N dims from `b` input shape in order
11434
- as axes for each respectively.
12458
+ x1 (Tensor): Input tensor.
12459
+ x2 (Tensor): Input tensor.
12460
+ axes (Union[int, tuple(int), tuple(tuple(int)), list(list(int))]): The number of dimensions to sum over. If an
12461
+ integer `k` is provided, then sum over the last `k` axes of `x1` and the first `k` axes of `x2`, in order.
12462
+ If a tuple or list is provided, then `axes[0]` specifies the axes of `x1` and `axes[1]` specifies the axes
12463
+ of `x2`.
11435
12464
 
11436
12465
  Returns:
11437
- Tensor, the shape of the output tensor is :math:`(N + M)`, where :math:`N` and :math:`M` are the free axes not
11438
- contracted in both inputs.
11439
-
11440
- Raises:
11441
- TypeError: If `x1` or `x2` is not a Tensor.
11442
- TypeError: If `axes` is not one of the following: int, tuple, list.
12466
+ Tensor, the tensor dot product of `x1` and `x2` over the specified axes.
11443
12467
 
11444
12468
  Supported Platforms:
11445
12469
  ``Ascend`` ``GPU`` ``CPU``
@@ -11499,6 +12523,8 @@ def vecdot(x, y, *, axis=-1):
11499
12523
  y (Tensor): Second batch of vectors. The shape of Tensor is :math:`(*,N)`
11500
12524
  where :math:`*` means, any number of additional dimensions. Supporting broadcasting.
11501
12525
  The dtype of Tensor should be one of the following types: float, double, int, complex64 and complex128.
12526
+
12527
+ Keyword Args:
11502
12528
  axis (int): Dimension across which to calculate the dot product. Default: ``-1`` .
11503
12529
 
11504
12530
  Returns:
@@ -11926,15 +12952,16 @@ def round(input, *, decimals=0):
11926
12952
  input (Tensor): The input tensor.
11927
12953
 
11928
12954
  Keyword Args:
11929
- decimals (int, optional): Number of decimal places to round to (default: 0). If decimals is negative,
12955
+ decimals (int, optional): Number of decimal places to round to (default: ``0``). If decimals is negative,
11930
12956
  it specifies the number of positions to the left of the decimal point. It supports converting the
11931
- single-element tensor to an int.
12957
+ single-element tensor to an int. When `input` type is int32 or int64, the `decimals` should be 0.
11932
12958
 
11933
12959
  Returns:
11934
12960
  Tensor, has the same shape and type as the `input`.
11935
12961
 
11936
12962
  Raises:
11937
12963
  TypeError: If `input` is not a Tensor.
12964
+ RuntimeError: If `input` type is int32 or int64 and `decimals` is not 0.
11938
12965
 
11939
12966
  Supported Platforms:
11940
12967
  ``Ascend`` ``GPU`` ``CPU``
@@ -11955,6 +12982,344 @@ def round(input, *, decimals=0):
11955
12982
  return round_op(input, decimals)
11956
12983
 
11957
12984
 
12985
+ def isnan_ext(tensor):
12986
+ r"""
12987
+ Returns a new tensor with boolean elements representing whether each element of the input is :math:`NaN` or not.
12988
+ Complex values are considered NaN when their real or imaginary part is :math:`NaN`.
12989
+
12990
+ .. warning::
12991
+ This is an experimental API that is subject to change or deletion.
12992
+
12993
+ Args:
12994
+ tensor (Tensor): The input tensor.
12995
+
12996
+ Returns:
12997
+ Tensor, a boolean tensor. If an element of the input is :math:`NaN`, the value is ``True``.
12998
+ Otherwise, the value is ``False``.
12999
+
13000
+ Supported Platforms:
13001
+ ``Ascend``
13002
+
13003
+ Examples:
13004
+ >>> import mindspore as ms
13005
+ >>> import numpy as np
13006
+ >>> input1 = ms.Tensor([np.nan, 2, 3, 4])
13007
+ >>> output = ms.mint.isnan(input1)
13008
+ >>> print(output)
13009
+ [ True False False False]
13010
+ """
13011
+ return not_equal_op(tensor, tensor)
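The one-line body works because IEEE 754 defines NaN as unequal to every value, including itself, so `not_equal(x, x)` is exactly the NaN mask. NumPy equivalent of the trick:

import numpy as np

x = np.array([np.nan, 2.0, 3.0, 4.0])
print(x != x)       # [ True False False False], self-inequality marks NaN
print(np.isnan(x))  # the identical mask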
13012
+
13013
+
13014
+ def rotated_iou(boxes, query_boxes, trans=False, mode=0, is_cross=True, v_threshold=0.0, e_threshold=0.0):
13015
+ r"""
13016
+ Calculate the overlap area between rotated rectangles.
13017
+
13018
+ .. warning::
13019
+ This is an experimental API that is subject to change or deletion.
13020
+
13021
+ .. note::
13022
+ The input data types supported by the Ascend platform include
13023
+ bfloat16, float16, float32.
13024
+
13025
+ Args:
13026
+ boxes (Tensor): The first set of rectangles which has a
13027
+ shape of :math:`(B, N, 5)`.
13028
+ query_boxes (Tensor): The second set of rectangles which
13029
+ has a shape of :math:`(B, K, 5)`.
13030
+ trans (bool): Distinguish the rectangles representations
13031
+ of boxes and query_boxes. If ``True``, the format of boxes
13032
+ and query_boxes is ``'xyxyt'``, else the format is ``'xywht'``.
13033
+ The default value is ``False``.
13034
+ mode (int): Distinguish the calculation mode. If the value
13035
+ is ``1``, the calculation mode is ``'iof'``, else the
13036
+ calculation mode is ``'iou'``. The default value is ``0``.
13037
+ is_cross (bool): If ``True``, use cross-calculation, else use
13038
+ one-to-one calculation. The default value is ``True``.
13039
+ v_threshold (float): Provide condition relaxation for
13040
+ intersection calculation. The default value is ``0.0``.
13041
+ e_threshold (float): Provide condition relaxation for
13042
+ intersection calculation. The default value is ``0.0``.
13043
+
13044
+ Returns:
13045
+ Tensor, the shape is :math:`(B, N, K)`.
13046
+
13047
+ Raises:
13048
+ TypeError: If `boxes` is not a Tensor.
13049
+ TypeError: If `query_boxes` is not a Tensor.
13050
+ ValueError: If `boxes` and `query_boxes` do not have the same first dimension.
13051
+ ValueError: If the third dimension of `boxes` or `query_boxes` is not ``5``.
13052
+
13053
+ Supported Platforms:
13054
+ ``Ascend``
13055
+
13056
+ Examples:
13057
+ >>> import mindspore
13058
+ >>> import numpy as np
13059
+ >>> from mindspore import Tensor, ops
13060
+ >>> a = np.random.uniform(0,1,(2,2,5)).astype(np.float16)
13061
+ >>> b = np.random.uniform(0,1,(2,3,5)).astype(np.float16)
13062
+ >>> box1 = Tensor(a)
13063
+ >>> box2 = Tensor(b)
13064
+ >>> output = ops.rotated_iou(box1, box2, trans=False, mode=0, is_cross=True)
13065
+ """
13066
+ origin_dtype = boxes.dtype
13067
+ if (origin_dtype != mstype.float16 and origin_dtype != mstype.float32
13068
+ and origin_dtype != mstype.bfloat16):
13069
+ raise ValueError(f"input boxes type is illegal.")
13070
+
13071
+ if (query_boxes.dtype != mstype.float16 and query_boxes.dtype != mstype.float32
13072
+ and query_boxes.dtype != mstype.bfloat16):
13073
+ raise ValueError(f"input query_boxes type is illegal.")
13074
+
13075
+ boxes_perm = (0, 2, 1)
13076
+ boxes_cp = permute(boxes, boxes_perm)
13077
+ if boxes_cp.dtype == mstype.float16 or boxes_cp.dtype == mstype.bfloat16:
13078
+ boxes_cp = cast_(boxes_cp, mstype.float32)
13079
+
13080
+ query_boxes_perm = (0, 2, 1)
13081
+ query_boxes_cp = permute(query_boxes, query_boxes_perm)
13082
+ if query_boxes_cp.dtype == mstype.float16 or query_boxes_cp.dtype == mstype.bfloat16:
13083
+ query_boxes_cp = cast_(query_boxes_cp, mstype.float32)
13084
+
13085
+ iou = rotated_iou_op(boxes_cp, query_boxes_cp, trans, mode, is_cross, v_threshold, e_threshold)
13086
+ return cast_(iou, origin_dtype)
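Shape and dtype bookkeeping in the wrapper above: both box sets are permuted from (B, N, 5) and (B, K, 5) to a channel-first layout for the kernel, float16/bfloat16 inputs are upcast to float32, and the IoU matrix comes back as (B, N, K). A NumPy sketch of the layout step only:

import numpy as np

B, N = 2, 2
boxes = np.random.uniform(0, 1, (B, N, 5)).astype(np.float16)
permuted = boxes.transpose(0, 2, 1)  # (B, 5, N), the (0, 2, 1) permutation above
print(permuted.shape, permuted.astype(np.float32).dtype)  # (2, 5, 2) float32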
13087
+
13088
+
13089
+ def mul_ext(input, other):
13090
+ r"""
13091
+ Multiplies `input` by `other` element-wise.
13092
+
13093
+ .. math::
13094
+
13095
+ out_{i} = input_{i} \times other_{i}
13096
+
13097
+ Note:
13098
+ - When the two inputs have different shapes, they must be able to broadcast to a common shape.
13099
+ - The two inputs comply with the implicit type conversion rules to make the data types
13100
+ consistent.
13101
+
13102
+ Args:
13103
+ input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
13104
+ a bool or a tensor whose data type is
13105
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
13106
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
13107
+ other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
13108
+ a bool or a tensor whose data type is
13109
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
13110
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
13111
+
13112
+ Returns:
13113
+ Tensor with a shape that is the same as the broadcasted shape of the input `input` and `other`,
13114
+ and the data type is the one with higher precision or higher digits among the two inputs.
13115
+
13116
+ Raises:
13117
+ TypeError: If the type of `input`, `other` is not one of the following: Tensor, number.Number, bool.
13118
+
13119
+ Supported Platforms:
13120
+ ``Ascend``
13121
+
13122
+ Examples:
13123
+ >>> import numpy as np
13124
+ >>> import mindspore
13125
+ >>> from mindspore import Tensor
13126
+ >>> from mindspore import ops
13127
+ >>> x = Tensor(np.array([2, 6, 9]).astype(np.int32))
13128
+ >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
13129
+ >>> output = ops.mul_ext(x, y)
13130
+ >>> print(output)
13131
+ [8. 30. 54.]
13132
+ >>> # the data type of x is int32, the data type of y is float32,
13133
+ >>> # and the output is the data format of higher precision float32.
13134
+ >>> print(output.dtype)
13135
+ Float32
13136
+ """
13137
+ if isinstance(other, (float, int, bool)) and isinstance(input, Tensor):
13138
+ return muls(input, other)
13139
+ return mul(input, other)
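mul_ext's only logic is kernel selection: a Python scalar `other` takes the tensor-scalar `muls` kernel, everything else falls back to the broadcasting `mul`. A minimal sketch of the rule; the helper name is illustrative, not part of the API:

# Hypothetical stand-in for the dispatch in mul_ext above.
def select_mul_kernel(other):
    if isinstance(other, (float, int, bool)):
        return "muls"  # tensor-scalar kernel, no broadcasting needed
    return "mul"       # broadcasting elementwise kernel

print(select_mul_kernel(2.0))    # muls
print(select_mul_kernel([2.0]))  # mul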
13140
+
13141
+
13142
+ def _for_each_transpose(inputs):
13143
+ inputs_t = []
13144
+ for input_i in inputs:
13145
+ input_i_t = transpose_ext(input_i, -1, -2)
13146
+ inputs_t.append(input_i_t)
13147
+ return inputs_t
13148
+
13149
+
13150
+ def _is_transposed(input_tensor):
13151
+ dim = input_tensor.dim()
13152
+ if dim < 2 or dim > 3:
13153
+ raise ValueError("input tensor of _is_transposed should be either 2- or 3-dimensional.")
13154
+ input_shape = input_tensor.shape
13155
+ input_strides = input_tensor.stride()
13156
+ if input_strides[-2] == 1 and input_strides[-1] == input_shape[-2]:
13157
+ return True
13158
+ return False
13159
+
13160
+
13161
+ def gmm(x, weight, *, bias=None, group_list=None, group_type=0):
13162
+ r"""
13163
+ Grouping matrix multiplication.
13164
+
13165
+ .. warning::
13166
+ - This is an experimental API that is subject to change or deletion.
13167
+ - `group_type` must be constant.
13168
+
13169
+ .. note::
13170
+ - When `group_type` is 2, `weight` must be a non-contiguous tensor after transpose.
13171
+ - The reverse derivative is supported only when `group_type` is 0 and `bias` is None;
13172
+ it is implemented by the function gmm_backward.
13173
+
13174
+ Args:
13175
+ x (tuple[Tensor]): The first tensors to be multiplied.
13176
+ weight (tuple[Tensor]): The second tensors to be multiplied.
13177
+
13178
+ Keyword Args:
13179
+ bias (tuple[Tensor], optional): Biases added to outputs. In the training scenario,
13180
+ the bias only supports None. Default: ``None`` .
13181
+
13182
+ group_list (Union[list[int], tuple(int)], optional): Represents the index of
13183
+ the different groups on the grouping axis. It must be a non-negative ascending
13184
+ sequence. Default: ``None`` .
13185
+
13186
+ If `group_type` is 0, the last element in `group_list` should be equal to the
13187
+ first dimension of the tensor in `x` .
13188
+
13189
+ If `group_type` is 2, the last element in `group_list` should be equal to the
13190
+ second dimension of the tensor in `x` .
13191
+
13192
+ group_type (int, optional): Represents the dim that needs to be grouped. Default: ``0`` .
13193
+ For example, :math:`C[m,n] = A[m,k] \times B[k,n]`.
13194
+
13195
+ If `group_type` is 0, it means that the m-axis is grouped, where tensors in `x`
13196
+ should be 2-D, tensors in `weight` should be 3-D, and the tensors of result would
13197
+ be 2-D.
13198
+
13199
+ If `group_type` is 2, it means that the k-axis is grouped, where each tensor in `x`
13200
+ and `weight` should be 2-D, and the tensors of result would be 3-D.
13201
+
13202
+ Returns:
13203
+ tuple[Tensor], the results of grouping matrix multiplication.
13204
+
13205
+ Raises:
13206
+ TypeError: If `group_type` is not an int.
13207
+ ValueError: If `group_type` is invalid.
13208
+ ValueError: If the length of `x` or `weight` is not 1.
13209
+
13210
+ Supported Platforms:
13211
+ ``Ascend``
13212
+
13213
+ Examples:
13214
+ >>> import mindspore
13215
+ >>> import numpy as np
13216
+ >>> from mindspore import Tensor, ops
13217
+ >>> x = Tensor(np.random.uniform(0,1, (10, 20)).astype(np.float32))
13218
+ >>> weight = Tensor(np.random.uniform(0,1, (4, 20, 8)).astype(np.float32))
13219
+ >>> group_list = [2, 6, 8, 10]
13220
+ >>> y = ops.function.math_func.gmm([x,], [weight,], group_list=group_list)
13221
+ >>> print(y[0].shape)
13222
+ (10, 8)
13223
+ """
13224
+ return grouped_matmul_v2(x, weight, bias=bias, group_list=group_list,
13225
+ split_item=3, group_type=group_type)
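A NumPy reference for the `group_type=0` (m-axis grouped) case: rows of `x` are split at the cumulative indices in `group_list` and each slice is multiplied by its own weight matrix; the fused op produces the same result in a single kernel. A sketch under those assumptions:

import numpy as np

x = np.random.uniform(0, 1, (10, 20)).astype(np.float32)
w = np.random.uniform(0, 1, (4, 20, 8)).astype(np.float32)
group_list = [2, 6, 8, 10]  # cumulative row boundaries; the last equals x.shape[0]
start, chunks = 0, []
for g, end in enumerate(group_list):
    chunks.append(x[start:end] @ w[g])  # (end - start, 8) block for group g
    start = end
print(np.concatenate(chunks, axis=0).shape)  # (10, 8)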
13226
+
13227
+
13228
+ def gmm_backward(grad, x, weight, *, group_list=None):
13229
+ r"""
13230
+ The backward function of gmm; returns the gradients (dx, dw, db).
13231
+ """
13232
+ gradients = ops.auto_generate.gmm_backward(grad, x, weight, group_list)
13233
+ dx = gradients[:len(x)]
13234
+ dw = gradients[-len(weight):]
13235
+ db = []
13236
+ return dx, dw, db
13237
+
13238
+
13239
+ def gmm_v2(x, weight, *, bias=None, group_list=None, group_type=0, group_list_type=0):
13240
+ r"""
13241
+ Grouping matrix multiplication.
13242
+
13243
+ .. warning::
13244
+ - This is an experimental API that is subject to change or deletion.
13245
+ - `group_type` must be constant.
13246
+
13247
+ .. note::
13248
+ - When `group_type` is 2, the tensors in `weight` must be non-contiguous tensors after
13249
+ transpose.
13250
+ - The reverse derivative is supported only when `group_type` is 0 and `bias` is None;
13251
+ it is implemented by the function gmm_v2_backward.
13252
+
13253
+ Args:
13254
+ x (tuple[Tensor]): The first tensors to be multiplied.
13255
+ weight (tuple[Tensor]): The second tensors to be multiplied.
13256
+
13257
+ Keyword Args:
13258
+ bias (tuple[Tensor], optional): Biases added to outputs. In the training scenario,
13259
+ the bias only supoorts None. Default: ``None`` .
13260
+
13261
+ group_list (Tensor, optional): Represents the index of the different groups on
13262
+ the grouping axis. Supported dtypes: int64. Default: ``None`` .
13263
+
13264
+ If `group_list_type` is 0, it must be a non-negative ascending sequence.
13265
+ And when `group_type` is 0, the last element in `group_list` should be equal to
13266
+ the first dimension of the tensor in `x` . When `group_type` is 2, the last element
13267
+ in `group_list` should be equal to the second dimension of the tensor in `x` .
13268
+
13269
+ If `group_list_type` is 1, the value in `group_list` are the size of each group.
13270
+
13271
+ group_type (int, optional): Represents the axes that need to be grouped. For example,
13272
+ :math: `C[m,n] = A[m,k] \times B[k,n]`. Default: ``0`` .
13273
+
13274
+ If `group_type` is 0, it means that the m-axis is grouped, where tensors in `x`
13275
+ should be 2-D, tensors in `weight` should be 3-D, and the tensors of result would be
13276
+ 2-D.
13277
+
13278
+ If `group_type` is 2, it means that the k-axis is grouped, where each tensor in `x`
13279
+ and `weight` should be 2-D, and the tensors of result would be 3-D.
13280
+
13281
+ group_list_type (int, optional): If it's 0, the value in `group_list` are the cumsum
13282
+ result of the size of each group. If it's 1, the value in `group_list` are the size
13283
+ of each group.
13284
+
13285
+ Returns:
13286
+ tuple[Tensor], the results of grouping matrix multiplication.
13287
+
13288
+ Raises:
13289
+ TypeError: If `group_type` is not a int.
13290
+ ValueError: If `group_type` is invalid.
13291
+ ValueError: If the length of `x` or `weight` is not 1.
13292
+
13293
+ Supported Platforms:
13294
+ ``Ascend``
13295
+
13296
+ Examples:
13297
+ >>> import mindspore
13298
+ >>> import numpy as np
13299
+ >>> from mindspore import Tensor, ops
13300
+ >>> x = Tensor(np.random.uniform(0,1, (10, 20)).astype(np.float32))
13301
+ >>> weight = Tensor(np.random.uniform(0,1, (4, 20, 8)).astype(np.float32))
13302
+ >>> group_list = Tensor([2, 4, 2, 2])
13303
+ >>> y = ops.function.math_func.gmm_v2([x,], [weight,], group_list=group_list, group_list_type=1)
13304
+ >>> print(y[0].shape)
13305
+ >>> [10, 8]
13306
+ """
13307
+ return grouped_matmul_v4(x, weight, bias=bias, group_list=group_list, split_item=3,
13308
+ group_type=group_type, group_list_type=group_list_type, act_type=0)
13309
+
13310
+
13311
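The two `group_list` encodings describe the same partition: the sizes form
(`group_list_type` 1) is the sequence of successive differences of the cumulative
form (`group_list_type` 0). A quick NumPy illustration, using the values from the
two docstring examples above:

    import numpy as np

    cumulative = np.array([2, 6, 8, 10], dtype=np.int64)  # group_list_type=0 form
    sizes = np.diff(cumulative, prepend=0)                # group_list_type=1 form
    print(sizes)  # [2 4 2 2]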
+ def gmm_v2_backward(grad, x, weight, *, group_list=None, group_list_type=0):
+ r"""
+ The backward function of gmm_v2.
+ """
+ gradients = ops.auto_generate.gmm_v2_backward(grad, x, weight, group_list, group_list_type)
+ # The fused kernel returns the gradients of x followed by those of weight.
+ dx = gradients[:len(x)]
+ dw = gradients[-len(weight):]
+ # bias must be None in training, so it has no gradient.
+ db = []
+
+ return dx, dw, db
+
+
  __all__ = [
  'addn',
  'absolute',
@@ -12041,6 +13406,7 @@ __all__ = [
  'inplace_sub',
  'isfinite',
  'isnan',
+ 'isnan_ext',
  'isclose',
  'isreal',
  'isneginf',
@@ -12086,6 +13452,7 @@ __all__ = [
  'sinh',
  'cosh',
  'tanh',
+ 'tanh_',
  'tanhshrink',
  'asinh',
  'arcsinh',
@@ -12147,6 +13514,7 @@ __all__ = [
  'atleast_1d',
  'dstack',
  'diff',
+ 'diff_ext',
  'atleast_2d',
  'cartesian_prod',
  'atleast_3d',
@@ -12177,6 +13545,7 @@ __all__ = [
  'signbit',
  'accumulate_n',
  'iou',
+ 'rotated_iou',
  'baddbmm',
  'baddbmm_ext',
  'bmm',