mindspore 2.4.10__cp39-none-any.whl → 2.5.0__cp39-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic. Click here for more details.

Files changed (689) hide show
  1. mindspore/.commit_id +1 -1
  2. mindspore/Third_Party_Open_Source_Software_Notice +39 -0
  3. mindspore/__init__.py +8 -3
  4. mindspore/_akg/akg/composite/build_module.py +6 -2
  5. mindspore/_akg/akg/utils/kernel_exec.py +2 -2
  6. mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
  7. mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
  8. mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
  9. mindspore/_checkparam.py +0 -5
  10. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  11. mindspore/_extends/parse/compile_config.py +64 -0
  12. mindspore/_extends/parse/deprecated/__init__.py +0 -0
  13. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +375 -0
  14. mindspore/_extends/parse/parser.py +23 -5
  15. mindspore/_extends/parse/standard_method.py +123 -27
  16. mindspore/_extends/pijit/pijit_func_white_list.py +1 -1
  17. mindspore/amp.py +7 -1
  18. mindspore/boost/boost_cell_wrapper.py +136 -41
  19. mindspore/common/__init__.py +3 -1
  20. mindspore/common/_register_for_tensor.py +0 -1
  21. mindspore/common/_stub_tensor.py +25 -4
  22. mindspore/common/_tensor_cpp_method.py +17 -0
  23. mindspore/common/_tensor_docs.py +6132 -0
  24. mindspore/common/api.py +98 -21
  25. mindspore/common/dtype.py +34 -34
  26. mindspore/common/dump.py +2 -1
  27. mindspore/common/file_system.py +8 -3
  28. mindspore/common/generator.py +2 -0
  29. mindspore/common/hook_handle.py +3 -1
  30. mindspore/common/initializer.py +3 -4
  31. mindspore/common/lazy_inline.py +8 -2
  32. mindspore/common/mindir_util.py +10 -2
  33. mindspore/common/parameter.py +31 -15
  34. mindspore/common/tensor.py +713 -1337
  35. mindspore/communication/__init__.py +1 -1
  36. mindspore/communication/_comm_helper.py +5 -0
  37. mindspore/communication/comm_func.py +215 -173
  38. mindspore/communication/management.py +23 -20
  39. mindspore/context.py +285 -191
  40. mindspore/dataset/__init__.py +23 -19
  41. mindspore/dataset/callback/ds_callback.py +2 -1
  42. mindspore/dataset/core/config.py +84 -3
  43. mindspore/dataset/engine/cache_admin.py +3 -3
  44. mindspore/dataset/engine/cache_client.py +5 -4
  45. mindspore/dataset/engine/datasets.py +192 -149
  46. mindspore/dataset/engine/datasets_audio.py +14 -0
  47. mindspore/dataset/engine/datasets_standard_format.py +11 -11
  48. mindspore/dataset/engine/datasets_text.py +38 -1
  49. mindspore/dataset/engine/datasets_user_defined.py +100 -66
  50. mindspore/dataset/engine/datasets_vision.py +81 -8
  51. mindspore/dataset/engine/iterators.py +281 -63
  52. mindspore/dataset/engine/obs/util.py +8 -0
  53. mindspore/dataset/engine/queue.py +40 -0
  54. mindspore/dataset/engine/samplers.py +26 -2
  55. mindspore/dataset/engine/serializer_deserializer.py +1 -1
  56. mindspore/dataset/engine/validators.py +43 -11
  57. mindspore/dataset/transforms/py_transforms_util.py +17 -0
  58. mindspore/dataset/transforms/transforms.py +29 -12
  59. mindspore/dataset/vision/validators.py +1 -2
  60. mindspore/device_context/__init__.py +21 -0
  61. mindspore/device_context/ascend/__init__.py +25 -0
  62. mindspore/device_context/ascend/device.py +72 -0
  63. mindspore/device_context/ascend/op_debug.py +94 -0
  64. mindspore/device_context/ascend/op_precision.py +193 -0
  65. mindspore/device_context/ascend/op_tuning.py +127 -0
  66. mindspore/device_context/cpu/__init__.py +25 -0
  67. mindspore/device_context/cpu/device.py +62 -0
  68. mindspore/device_context/cpu/op_tuning.py +43 -0
  69. mindspore/device_context/gpu/__init__.py +21 -0
  70. mindspore/device_context/gpu/device.py +70 -0
  71. mindspore/device_context/gpu/op_precision.py +67 -0
  72. mindspore/device_context/gpu/op_tuning.py +175 -0
  73. mindspore/device_manager.py +134 -0
  74. mindspore/experimental/llm_boost/__init__.py +1 -0
  75. mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
  76. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
  77. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
  78. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  79. mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
  80. mindspore/experimental/llm_boost/register.py +1 -0
  81. mindspore/experimental/optim/adadelta.py +26 -22
  82. mindspore/experimental/optim/adam.py +3 -0
  83. mindspore/experimental/optim/lr_scheduler.py +33 -24
  84. mindspore/experimental/optim/radam.py +33 -30
  85. mindspore/hal/device.py +28 -0
  86. mindspore/hal/event.py +17 -0
  87. mindspore/hal/memory.py +94 -3
  88. mindspore/hal/stream.py +91 -6
  89. mindspore/include/api/context.h +0 -1
  90. mindspore/lib/libavcodec.so.59 +0 -0
  91. mindspore/lib/libavdevice.so.59 +0 -0
  92. mindspore/lib/libavfilter.so.8 +0 -0
  93. mindspore/lib/libavformat.so.59 +0 -0
  94. mindspore/lib/libavutil.so.57 +0 -0
  95. mindspore/lib/libdnnl.so.2 +0 -0
  96. mindspore/lib/libmindspore_backend.so +0 -0
  97. mindspore/lib/libmindspore_common.so +0 -0
  98. mindspore/lib/libmindspore_core.so +0 -0
  99. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  100. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  101. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  102. mindspore/lib/libmindspore_ops.so +0 -0
  103. mindspore/lib/libmpi_adapter.so +0 -0
  104. mindspore/lib/libmpi_collective.so +0 -0
  105. mindspore/lib/libnnacl.so +0 -0
  106. mindspore/lib/libopencv_core.so.4.5 +0 -0
  107. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  108. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  109. mindspore/lib/libps_cache.so +0 -0
  110. mindspore/lib/libswresample.so.4 +0 -0
  111. mindspore/lib/libswscale.so.6 +0 -0
  112. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910_93/aic-ascend910_93-ops-info.json +2048 -0
  113. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  114. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  115. mindspore/lib/plugin/ascend/custom_ascendc_910/op_api/lib/libcust_opapi.so +0 -0
  116. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl/dynamic/decoder_kv_cache.py +1 -1
  117. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl/dynamic/prompt_kv_cache.py +1 -1
  118. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
  119. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  120. mindspore/lib/plugin/ascend/custom_ascendc_910/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
  121. mindspore/lib/plugin/ascend/custom_ascendc_910/version.info +1 -1
  122. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_api/lib/libcust_opapi.so +0 -0
  123. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/config/ascend910_93/aic-ascend910_93-ops-info.json +224 -0
  124. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/all_finite.py +1 -1
  125. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/decoder_kv_cache.py +1 -1
  126. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/prompt_kv_cache.py +1 -1
  127. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.json +78 -0
  128. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.o +0 -0
  129. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.json +78 -0
  130. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.o +0 -0
  131. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.json +78 -0
  132. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.o +0 -0
  133. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.json +156 -0
  134. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.o +0 -0
  135. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.json +156 -0
  136. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.o +0 -0
  137. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.json +156 -0
  138. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.o +0 -0
  139. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.json +156 -0
  140. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.o +0 -0
  141. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.json +156 -0
  142. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.o +0 -0
  143. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.json +156 -0
  144. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.o +0 -0
  145. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.json +156 -0
  146. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.o +0 -0
  147. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.json +156 -0
  148. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.o +0 -0
  149. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.json +165 -0
  150. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.o +0 -0
  151. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.json +165 -0
  152. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.o +0 -0
  153. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.json +165 -0
  154. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.o +0 -0
  155. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.json +165 -0
  156. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.o +0 -0
  157. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.json +165 -0
  158. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.o +0 -0
  159. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.json +165 -0
  160. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.o +0 -0
  161. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.json +165 -0
  162. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.o +0 -0
  163. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.json +165 -0
  164. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.o +0 -0
  165. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/all_finite.json +139 -0
  166. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/binary_info_config.json +361 -0
  167. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/decoder_kv_cache.json +892 -0
  168. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/prompt_kv_cache.json +892 -0
  169. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
  170. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  171. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
  172. mindspore/lib/plugin/ascend/custom_ascendc_910b/version.info +1 -1
  173. mindspore/lib/plugin/ascend/custom_compiler/setup.py +1 -1
  174. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  175. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  176. mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
  177. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  178. mindspore/lib/plugin/ascend/libmindspore_internal_kernels.so +0 -0
  179. mindspore/lib/plugin/ascend/libms_ascend_native_boost.so +0 -0
  180. mindspore/lib/plugin/ascend/libms_atb_boost.so +0 -0
  181. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +957 -955
  182. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
  183. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/liblcal_static.a +0 -0
  184. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{acme/include/base_type.h → base_type.h} +25 -20
  185. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{cast/cast_tiling.h → internal.h} +6 -4
  186. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_op.h +114 -0
  187. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/boost_kernel.h +70 -0
  188. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/llama_impl.h +85 -0
  189. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/model_interface.h +52 -0
  190. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/tensor.h +81 -0
  191. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_creator.h +123 -0
  192. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +155 -110
  193. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{acme/include/tiling_info.h → tiling_info.h} +12 -9
  194. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tiling_utils.h +178 -0
  195. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layer_norm_op.so +0 -0
  196. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_op.so +0 -0
  197. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_quant_op.so +0 -0
  198. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_310p_op.so +0 -0
  199. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_op.so +0 -0
  200. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_op.so +0 -0
  201. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcompare_op.so +0 -0
  202. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_op.so +0 -0
  203. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libllama_op.so +0 -0
  204. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_op.so +0 -0
  205. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
  206. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_optiling.so +0 -0
  207. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmulti_weight_matmul_kernel_op.so +0 -0
  208. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_op.so +0 -0
  209. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_op.so +0 -0
  210. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_op.so +0 -0
  211. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_f16_nz/internal_pp_matmul_f16_nz.o +0 -0
  212. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_f16_nz/internal_pp_matmul_f16_nz_0.o +0 -0
  213. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_i8_nz_compress/internal_pp_matmul_i8_nz_compress.o +0 -0
  214. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_i8_nz_compress/internal_pp_matmul_i8_nz_compress_0.o +0 -0
  215. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_int8_nz/internal_pp_matmul_int8_nz.o +0 -0
  216. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_int8_nz/internal_pp_matmul_int8_nz_0.o +0 -0
  217. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libadd_rms_norm_quant_ascend310p.so +0 -0
  218. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libapply_rotary_pos_emb_310p_impl.so → op_kernels/ascend310p/so_kernels/libapply_rotary_pos_emb_310p_ascend310p.so} +0 -0
  219. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libcast_ascend310p.so +0 -0
  220. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libcompare_ascend310p.so +0 -0
  221. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libgelu_ascend310p.so +0 -0
  222. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libmatmul_ascend310p.so +0 -0
  223. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libreshape_and_cache_nz_ascend310p.so +0 -0
  224. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_4b60f88cdc28b25a36bad2d8b0a88092.json +163 -0
  225. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_4b60f88cdc28b25a36bad2d8b0a88092.o +0 -0
  226. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_cde61da2bd6fededcb1ba310a6ad16ee.json +163 -0
  227. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_cde61da2bd6fededcb1ba310a6ad16ee.o +0 -0
  228. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
  229. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
  230. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bsh_full_mix.o +0 -0
  231. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
  232. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
  233. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
  234. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bsh_full_mix.o +0 -0
  235. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
  236. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix.o +0 -0
  237. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix_mix_aic_0.o +0 -0
  238. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  239. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix.o +0 -0
  240. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix_mix_aic_0.o +0 -0
  241. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  242. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_bf16_bf16.o +0 -0
  243. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_bf16_fp16.o +0 -0
  244. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_bf16_fp32.o +0 -0
  245. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_fp16_bf16.o +0 -0
  246. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_fp16_fp16.o +0 -0
  247. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_fp16_fp32.o +0 -0
  248. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2.o +0 -0
  249. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2_mix_aic_0.o +0 -0
  250. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2_mix_aiv_0.o +0 -0
  251. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libadd_layer_norm_impl.so → op_kernels/ascend910b/so_kernels/libadd_layer_norm_ascend910b.so} +0 -0
  252. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libadd_rms_norm_impl.so → op_kernels/ascend910b/so_kernels/libadd_rms_norm_ascend910b.so} +0 -0
  253. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/so_kernels/libadd_rms_norm_quant_ascend910b.so +0 -0
  254. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libapply_rotary_pos_emb_impl.so → op_kernels/ascend910b/so_kernels/libapply_rotary_pos_emb_ascend910b.so} +0 -0
  255. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libcast_impl.so → op_kernels/ascend910b/so_kernels/libcast_ascend910b.so} +0 -0
  256. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libnot_equal_impl.so → op_kernels/ascend910b/so_kernels/libcompare_ascend910b.so} +0 -0
  257. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libgelu_impl.so → op_kernels/ascend910b/so_kernels/libgelu_ascend910b.so} +0 -0
  258. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/so_kernels/libllama_ascend910b.so +0 -0
  259. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libmatmul_impl.so → op_kernels/ascend910b/so_kernels/libmatmul_ascend910b.so} +0 -0
  260. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libmulti_weight_matmul_kernel_impl.so → op_kernels/ascend910b/so_kernels/libmulti_weight_matmul_kernel_ascend910b.so} +0 -0
  261. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libreshape_and_cache_impl.so → op_kernels/ascend910b/so_kernels/libreshape_and_cache_ascend910b.so} +0 -0
  262. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/librms_norm_impl.so → op_kernels/ascend910b/so_kernels/librms_norm_ascend910b.so} +0 -0
  263. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
  264. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  265. mindspore/log.py +12 -0
  266. mindspore/mindrecord/__init__.py +1 -1
  267. mindspore/mindrecord/config.py +17 -316
  268. mindspore/mindrecord/filereader.py +1 -9
  269. mindspore/mindrecord/filewriter.py +5 -15
  270. mindspore/mindrecord/mindpage.py +1 -9
  271. mindspore/mint/__init__.py +824 -218
  272. mindspore/mint/distributed/__init__.py +66 -4
  273. mindspore/mint/distributed/distributed.py +2594 -44
  274. mindspore/mint/linalg/__init__.py +6 -0
  275. mindspore/mint/nn/__init__.py +473 -14
  276. mindspore/mint/nn/functional.py +486 -11
  277. mindspore/mint/nn/layer/__init__.py +17 -4
  278. mindspore/mint/nn/layer/_functions.py +330 -0
  279. mindspore/mint/nn/layer/activation.py +169 -1
  280. mindspore/mint/nn/layer/basic.py +123 -0
  281. mindspore/mint/nn/layer/conv.py +727 -0
  282. mindspore/mint/nn/layer/normalization.py +215 -19
  283. mindspore/mint/nn/layer/padding.py +797 -0
  284. mindspore/mint/nn/layer/pooling.py +170 -0
  285. mindspore/mint/optim/__init__.py +2 -1
  286. mindspore/mint/optim/adam.py +223 -0
  287. mindspore/mint/optim/adamw.py +26 -19
  288. mindspore/mint/special/__init__.py +2 -1
  289. mindspore/multiprocessing/__init__.py +5 -0
  290. mindspore/nn/cell.py +126 -19
  291. mindspore/nn/dynamic_lr.py +2 -1
  292. mindspore/nn/layer/activation.py +6 -6
  293. mindspore/nn/layer/basic.py +35 -25
  294. mindspore/nn/layer/channel_shuffle.py +3 -3
  295. mindspore/nn/layer/embedding.py +3 -3
  296. mindspore/nn/layer/normalization.py +8 -7
  297. mindspore/nn/layer/padding.py +4 -3
  298. mindspore/nn/layer/pooling.py +47 -13
  299. mindspore/nn/layer/rnn_cells.py +1 -1
  300. mindspore/nn/layer/rnns.py +2 -1
  301. mindspore/nn/layer/timedistributed.py +5 -5
  302. mindspore/nn/layer/transformer.py +48 -26
  303. mindspore/nn/learning_rate_schedule.py +5 -3
  304. mindspore/nn/loss/loss.py +31 -36
  305. mindspore/nn/optim/ada_grad.py +1 -0
  306. mindspore/nn/optim/adadelta.py +2 -2
  307. mindspore/nn/optim/adam.py +1 -1
  308. mindspore/nn/optim/lars.py +1 -4
  309. mindspore/nn/optim/optimizer.py +1 -1
  310. mindspore/nn/optim/rprop.py +2 -2
  311. mindspore/nn/optim/thor.py +2 -1
  312. mindspore/nn/utils/init.py +13 -11
  313. mindspore/nn/wrap/cell_wrapper.py +4 -6
  314. mindspore/nn/wrap/loss_scale.py +3 -4
  315. mindspore/numpy/array_creations.py +60 -62
  316. mindspore/numpy/array_ops.py +148 -143
  317. mindspore/numpy/logic_ops.py +41 -42
  318. mindspore/numpy/math_ops.py +361 -359
  319. mindspore/numpy/utils.py +16 -16
  320. mindspore/numpy/utils_const.py +4 -4
  321. mindspore/ops/__init__.py +2 -1
  322. mindspore/ops/_grad_experimental/grad_comm_ops.py +94 -13
  323. mindspore/ops/_grad_experimental/grad_debug_ops.py +6 -1
  324. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  325. mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
  326. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  327. mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
  328. mindspore/ops/_vmap/vmap_array_ops.py +20 -19
  329. mindspore/ops/_vmap/vmap_base.py +0 -2
  330. mindspore/ops/_vmap/vmap_grad_nn_ops.py +19 -13
  331. mindspore/ops/_vmap/vmap_math_ops.py +11 -9
  332. mindspore/ops/_vmap/vmap_nn_ops.py +20 -34
  333. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +149 -12
  334. mindspore/ops/auto_generate/gen_arg_handler.py +0 -61
  335. mindspore/ops/auto_generate/gen_extend_func.py +554 -60
  336. mindspore/ops/auto_generate/gen_ops_def.py +1621 -115
  337. mindspore/ops/auto_generate/gen_ops_prim.py +8024 -3409
  338. mindspore/ops/auto_generate/pyboost_inner_prim.py +183 -79
  339. mindspore/ops/composite/base.py +1 -1
  340. mindspore/ops/composite/multitype_ops/_compile_utils.py +229 -30
  341. mindspore/ops/composite/multitype_ops/pow_impl.py +0 -29
  342. mindspore/ops/function/__init__.py +12 -0
  343. mindspore/ops/function/array_func.py +561 -159
  344. mindspore/ops/function/clip_func.py +64 -0
  345. mindspore/ops/function/debug_func.py +28 -20
  346. mindspore/ops/function/image_func.py +1 -1
  347. mindspore/ops/function/linalg_func.py +5 -4
  348. mindspore/ops/function/math_func.py +1659 -290
  349. mindspore/ops/function/nn_func.py +988 -317
  350. mindspore/ops/function/parameter_func.py +3 -56
  351. mindspore/ops/function/random_func.py +243 -33
  352. mindspore/ops/function/sparse_unary_func.py +1 -1
  353. mindspore/ops/functional.py +18 -5
  354. mindspore/ops/functional_overload.py +897 -0
  355. mindspore/ops/operations/__init__.py +3 -2
  356. mindspore/ops/operations/_embedding_cache_ops.py +4 -4
  357. mindspore/ops/operations/_grad_ops.py +2 -34
  358. mindspore/ops/operations/_infer_ops.py +2 -1
  359. mindspore/ops/operations/_inner_ops.py +38 -8
  360. mindspore/ops/operations/array_ops.py +45 -303
  361. mindspore/ops/operations/comm_ops.py +19 -16
  362. mindspore/ops/operations/custom_ops.py +11 -55
  363. mindspore/ops/operations/debug_ops.py +42 -47
  364. mindspore/ops/operations/inner_ops.py +6 -4
  365. mindspore/ops/operations/linalg_ops.py +3 -2
  366. mindspore/ops/operations/manually_defined/ops_def.py +185 -104
  367. mindspore/ops/operations/math_ops.py +11 -216
  368. mindspore/ops/operations/nn_ops.py +146 -308
  369. mindspore/ops/primitive.py +23 -21
  370. mindspore/ops/tensor_method.py +1669 -0
  371. mindspore/ops_generate/aclnn_kernel_register_auto_cc_generator.py +110 -0
  372. mindspore/ops_generate/add_tensor_docs_generator.py +54 -0
  373. mindspore/ops_generate/arg_handler.py +0 -61
  374. mindspore/ops_generate/auto_grad_impl_cc_generator.py +135 -0
  375. mindspore/ops_generate/auto_grad_reg_cc_generator.py +93 -0
  376. mindspore/ops_generate/base_generator.py +11 -0
  377. mindspore/ops_generate/cpp_create_prim_instance_helper_generator.py +108 -0
  378. mindspore/ops_generate/functional_map_cpp_generator.py +491 -0
  379. mindspore/ops_generate/functional_overload_py_generator.py +110 -0
  380. mindspore/ops_generate/functions_cc_generator.py +233 -0
  381. mindspore/ops_generate/gen_aclnn_implement.py +110 -114
  382. mindspore/ops_generate/gen_constants.py +157 -3
  383. mindspore/ops_generate/gen_ops.py +245 -990
  384. mindspore/ops_generate/gen_pyboost_func.py +97 -998
  385. mindspore/ops_generate/gen_utils.py +119 -33
  386. mindspore/ops_generate/lite_ops_cpp_generator.py +155 -0
  387. mindspore/ops_generate/op_api_proto.py +206 -0
  388. mindspore/ops_generate/op_def_py_generator.py +131 -0
  389. mindspore/ops_generate/op_prim_py_generator.py +480 -0
  390. mindspore/ops_generate/op_proto.py +373 -108
  391. mindspore/ops_generate/op_template_parser.py +436 -0
  392. mindspore/ops_generate/ops_def_cc_generator.py +288 -0
  393. mindspore/ops_generate/ops_def_h_generator.py +74 -0
  394. mindspore/ops_generate/ops_name_h_generator.py +68 -0
  395. mindspore/ops_generate/ops_primitive_h_generator.py +81 -0
  396. mindspore/ops_generate/pyboost_functions_cpp_generator.py +370 -0
  397. mindspore/ops_generate/pyboost_functions_h_generator.py +68 -0
  398. mindspore/ops_generate/pyboost_functions_py_generator.py +148 -0
  399. mindspore/ops_generate/pyboost_grad_function_cpp_generator.py +154 -0
  400. mindspore/ops_generate/pyboost_inner_prim_generator.py +131 -0
  401. mindspore/ops_generate/pyboost_native_grad_functions_generator.py +268 -0
  402. mindspore/ops_generate/pyboost_op_cpp_code_generator.py +851 -0
  403. mindspore/ops_generate/pyboost_overload_functions_cpp_generator.py +344 -0
  404. mindspore/ops_generate/pyboost_utils.py +92 -33
  405. mindspore/ops_generate/template.py +294 -44
  406. mindspore/ops_generate/tensor_func_reg_cpp_generator.py +422 -0
  407. mindspore/parallel/__init__.py +3 -3
  408. mindspore/parallel/_auto_parallel_context.py +24 -33
  409. mindspore/parallel/_parallel_serialization.py +13 -2
  410. mindspore/parallel/_utils.py +4 -1
  411. mindspore/parallel/algo_parameter_config.py +1 -1
  412. mindspore/parallel/checkpoint_transform.py +44 -0
  413. mindspore/parallel/cluster/process_entity/_api.py +131 -37
  414. mindspore/parallel/cluster/process_entity/_utils.py +41 -6
  415. mindspore/parallel/cluster/run.py +20 -3
  416. mindspore/parallel/parameter_broadcast.py +1 -1
  417. mindspore/parallel/shard.py +3 -0
  418. mindspore/parallel/transform_safetensors.py +119 -253
  419. mindspore/profiler/__init__.py +17 -4
  420. mindspore/profiler/analysis/__init__.py +0 -0
  421. mindspore/profiler/analysis/parser/__init__.py +0 -0
  422. mindspore/profiler/analysis/parser/ascend_cann_parser.py +166 -0
  423. mindspore/profiler/analysis/parser/base_parser.py +158 -0
  424. mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
  425. mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
  426. mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
  427. mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
  428. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +261 -0
  429. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
  430. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +84 -0
  431. mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
  432. mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
  433. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
  434. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
  435. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
  436. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
  437. mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
  438. mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
  439. mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
  440. mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
  441. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +260 -0
  442. mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
  443. mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
  444. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
  445. mindspore/profiler/analysis/task_manager.py +131 -0
  446. mindspore/profiler/analysis/time_converter.py +84 -0
  447. mindspore/profiler/analysis/viewer/__init__.py +0 -0
  448. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +333 -0
  449. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
  450. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +252 -0
  451. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +313 -0
  452. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +322 -0
  453. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +265 -0
  454. mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
  455. mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
  456. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +97 -0
  457. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
  458. mindspore/profiler/analysis/work_flow.py +73 -0
  459. mindspore/profiler/common/ascend_msprof_exporter.py +138 -0
  460. mindspore/profiler/common/command_executor.py +90 -0
  461. mindspore/profiler/common/constant.py +174 -3
  462. mindspore/profiler/common/file_manager.py +208 -0
  463. mindspore/profiler/common/log.py +130 -0
  464. mindspore/profiler/common/msprof_cmd_tool.py +202 -0
  465. mindspore/profiler/common/path_manager.py +371 -0
  466. mindspore/profiler/common/process_bar.py +168 -0
  467. mindspore/profiler/common/process_pool.py +9 -3
  468. mindspore/profiler/common/profiler_context.py +476 -0
  469. mindspore/profiler/common/profiler_info.py +304 -0
  470. mindspore/profiler/common/profiler_output_path.py +284 -0
  471. mindspore/profiler/common/profiler_parameters.py +210 -0
  472. mindspore/profiler/common/profiler_path_manager.py +120 -0
  473. mindspore/profiler/common/record_function.py +76 -0
  474. mindspore/profiler/common/tlv_decoder.py +76 -0
  475. mindspore/profiler/common/util.py +75 -2
  476. mindspore/profiler/dynamic_profiler.py +270 -37
  477. mindspore/profiler/envprofiler.py +138 -0
  478. mindspore/profiler/mstx.py +199 -0
  479. mindspore/profiler/platform/__init__.py +21 -0
  480. mindspore/profiler/platform/base_profiler.py +40 -0
  481. mindspore/profiler/platform/cpu_profiler.py +124 -0
  482. mindspore/profiler/platform/gpu_profiler.py +74 -0
  483. mindspore/profiler/platform/npu_profiler.py +309 -0
  484. mindspore/profiler/profiler.py +580 -93
  485. mindspore/profiler/profiler_action_controller.py +187 -0
  486. mindspore/profiler/profiler_interface.py +114 -0
  487. mindspore/profiler/schedule.py +208 -0
  488. mindspore/rewrite/api/symbol_tree.py +1 -2
  489. mindspore/run_check/_check_version.py +2 -6
  490. mindspore/runtime/__init__.py +37 -0
  491. mindspore/runtime/device.py +27 -0
  492. mindspore/runtime/event.py +209 -0
  493. mindspore/runtime/executor.py +148 -0
  494. mindspore/runtime/memory.py +392 -0
  495. mindspore/runtime/stream.py +460 -0
  496. mindspore/runtime/thread_bind_core.py +401 -0
  497. mindspore/train/__init__.py +2 -2
  498. mindspore/train/_utils.py +53 -18
  499. mindspore/train/amp.py +8 -4
  500. mindspore/train/callback/_checkpoint.py +32 -18
  501. mindspore/train/callback/_early_stop.py +1 -1
  502. mindspore/train/callback/_flops_collector.py +105 -69
  503. mindspore/train/callback/_history.py +1 -1
  504. mindspore/train/callback/_summary_collector.py +44 -6
  505. mindspore/train/callback/_tft_register.py +31 -10
  506. mindspore/train/dataset_helper.py +11 -11
  507. mindspore/train/metrics/precision.py +4 -5
  508. mindspore/train/mind_ir_pb2.py +167 -46
  509. mindspore/train/model.py +13 -15
  510. mindspore/train/serialization.py +462 -76
  511. mindspore/train/summary/summary_record.py +1 -2
  512. mindspore/train/train_thor/model_thor.py +1 -1
  513. mindspore/utils/__init__.py +4 -2
  514. mindspore/utils/bin/dataset-cache +0 -0
  515. mindspore/utils/bin/dataset-cache-server +0 -0
  516. mindspore/utils/dryrun.py +138 -0
  517. mindspore/utils/runtime_execution_order_check.py +550 -0
  518. mindspore/version.py +1 -1
  519. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/METADATA +2 -3
  520. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/RECORD +523 -457
  521. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/entry_points.txt +1 -1
  522. mindspore/_data_dump.cpython-39-aarch64-linux-gnu.so +0 -0
  523. mindspore/bin/cache_admin +0 -0
  524. mindspore/bin/cache_server +0 -0
  525. mindspore/common/_tensor_overload.py +0 -139
  526. mindspore/lib/libmindspore_np_dtype.so +0 -0
  527. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
  528. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -82
  529. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -113
  530. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -193
  531. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/dtype_registry.h +0 -90
  532. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -46
  533. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
  534. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
  535. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_layer_norm_op.h +0 -60
  536. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_rms_norm_op.h +0 -50
  537. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_rms_norm_quant_op.h +0 -50
  538. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/apply_rotary_pos_emb_nz_op.h +0 -42
  539. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/apply_rotary_pos_emb_op.h +0 -55
  540. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -34
  541. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_only_ops.h +0 -94
  542. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_op_base.h +0 -97
  543. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
  544. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/flash_attention_score_op.h +0 -97
  545. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/gelu_op.h +0 -44
  546. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_add_rmsnorm_op.h +0 -73
  547. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -108
  548. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/multi_impls_op.h +0 -64
  549. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/multi_weight_matmul_op.h +0 -91
  550. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/paged_attention_op.h +0 -99
  551. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/reshape_and_cache_nz_op.h +0 -44
  552. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/reshape_and_cache_op.h +0 -44
  553. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/rms_norm_op.h +0 -64
  554. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -179
  555. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -69
  556. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/profiling_util.h +0 -366
  557. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -56
  558. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/kernel/add.h +0 -21
  559. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/tiling/add_tiling.h +0 -43
  560. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -46
  561. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb.h +0 -23
  562. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_base.h +0 -456
  563. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_bf16.h +0 -217
  564. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp.h +0 -391
  565. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp16.h +0 -126
  566. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -230
  567. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_tiling.h +0 -43
  568. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_value.h +0 -27
  569. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/apply_rotary_pos_emb_nz_impl.h +0 -34
  570. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz.h +0 -23
  571. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_base.h +0 -460
  572. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_fp16.h +0 -116
  573. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_fp32.h +0 -230
  574. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_tiling.h +0 -43
  575. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_value.h +0 -27
  576. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -74
  577. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -74
  578. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_impl.h +0 -48
  579. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/kernel/cast_kernel.h +0 -21
  580. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -55
  581. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_tiling.h +0 -27
  582. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/kernel/compare_kernel.h +0 -23
  583. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
  584. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
  585. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
  586. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
  587. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
  588. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
  589. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
  590. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
  591. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
  592. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
  593. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
  594. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
  595. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
  596. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
  597. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
  598. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
  599. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
  600. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
  601. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
  602. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
  603. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
  604. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
  605. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
  606. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
  607. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
  608. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
  609. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
  610. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
  611. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
  612. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
  613. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
  614. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
  615. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
  616. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
  617. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
  618. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
  619. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +0 -68
  620. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -99
  621. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +0 -21
  622. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/lccl/lccl_wrapper.h +0 -58
  623. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/ms_int_types.h +0 -91
  624. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/ms_int_utils.h +0 -108
  625. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +0 -64
  626. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/add_param.h +0 -68
  627. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +0 -40
  628. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/cast_param.h +0 -30
  629. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
  630. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
  631. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
  632. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -38
  633. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +0 -42
  634. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +0 -33
  635. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -377
  636. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/kernel/reshape_and_cache_nz.h +0 -24
  637. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/reshape_and_cache_nz_impl.h +0 -42
  638. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/reshape_and_cache_nz_tiling.h +0 -27
  639. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -46
  640. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/kernel/sub_kernel.h +0 -20
  641. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -48
  642. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_tiling.h +0 -25
  643. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +0 -399
  644. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/utils.h +0 -41
  645. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +0 -45
  646. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_tiling.h +0 -29
  647. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +0 -30
  648. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -69
  649. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_core.h +0 -43
  650. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_entity.h +0 -38
  651. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_sink.h +0 -69
  652. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_stream.h +0 -41
  653. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -71
  654. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -165
  655. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +0 -20
  656. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
  657. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -121
  658. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -106
  659. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
  660. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
  661. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_quant_acme_impl.so +0 -0
  662. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_310p_old_impl.so +0 -0
  663. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_old_impl.so +0 -0
  664. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_impl.so +0 -0
  665. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_old_impl.so +0 -0
  666. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix.json +0 -19
  667. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix.o +0 -0
  668. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix_mix_aic_0.o +0 -0
  669. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  670. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix.json +0 -19
  671. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix.o +0 -0
  672. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix_mix_aic_0.o +0 -0
  673. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  674. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
  675. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
  676. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bsh_full_mix.o +0 -0
  677. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
  678. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
  679. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
  680. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bsh_full_mix.o +0 -0
  681. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
  682. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_bf16_bnsd_mix.o +0 -0
  683. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_bf16_bsh_mix.o +0 -0
  684. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_fp16_bnsd_mix.o +0 -0
  685. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_fp16_bsh_mix.o +0 -0
  686. mindspore/profiler/envprofiling.py +0 -254
  687. mindspore/profiler/profiling.py +0 -1926
  688. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/WHEEL +0 -0
  689. {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/top_level.txt +0 -0
@@ -31,14 +31,13 @@ from mindspore.common.hook_handle import _TensorHookHandle
  
  from mindspore.common._utils import get_slice_num
  from mindspore.common._register_for_tensor import tensor_operator_registry
- from mindspore.common._tensor_overload import (repeat_interleave_mint, add_mint, item_mint, isnan_mint, flatten_mint,
-                                                max_mint, mean_mint, min_mint, split_mint, sub_mint)
  from mindspore._c_expression import Tensor as Tensor_
  from mindspore import _checkparam as validator
  from mindspore._checkparam import check_is_number, is_stub_tensor, check_hook_fn
  from mindspore._check_jit_forbidden_api import jit_forbidden_register
  from mindspore.common.symbol import Symbol
  
+ 
  np_types = (np.int8, np.int16, np.int32, np.int64,
              np.uint8, np.uint16, np.uint32, np.uint64, np.float16,
              np.float32, np.float64, np.bool_, np.complex64, np.complex128)
@@ -47,7 +46,7 @@ np_types = (np.int8, np.int16, np.int32, np.int64,
  def _check_input_data_type(input_data):
      """Check the type of input_data for Tensor"""
      validator.check_value_type('input_data', input_data,
-                                (Tensor_, Tensor, np.ndarray, np.str_, list, tuple, float, int, bool, complex),
+                                (Tensor_, Tensor, np.ndarray, np.str_, list, tuple, float, int, bool, complex, bytes),
                                 'Tensor')
      valid_dtypes = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64,
                      np.float16, np.float32, np.float64, np.bool_, np.str_, np.complex64, np.complex128)
@@ -119,8 +118,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
      Tensor is a data structure that stores an n-dimensional array.
  
      Note:
-         If `init` interface is used to initialize `Tensor`, the `Tensor.init_data` API needs to be called to load the
-         actual data to `Tensor`.
+         - If `init` interface is used to initialize `Tensor`, the `Tensor.init_data` API needs to be called to load
+           the actual data to `Tensor`.
+         - All modes of CPU and GPU, and the Atlas training series with `graph mode (mode=mindspore.GRAPH_MODE)
+           <https://www.mindspore.cn/docs/en/master/model_train/program_form/static_graph.html>`_ do not support
+           in-place operations yet.
  
      Warning:
          To convert dtype of a `Tensor`, it is recommended to use `Tensor.astype()` rather than
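To make the new in-place restriction in that note concrete, here is a minimal sketch. It assumes an Ascend backend, PyNative mode, and the in-place `Tensor.add_` API that this diff moves out of `tensor.py` (none of these names are introduced by the hunk itself):

```python
# Hypothetical illustration of the in-place constraint described above.
# Assumes MindSpore 2.5 on Ascend; in-place ops are a PyNative-mode feature here.
import mindspore as ms
import numpy as np
from mindspore import Tensor

ms.set_context(mode=ms.PYNATIVE_MODE)

x = Tensor(np.ones((2, 3), np.float32))
y = Tensor(np.ones((2, 3), np.float32))
x.add_(y)   # mutates x in place; per the note, not supported on CPU/GPU or Atlas graph mode
print(x)    # [[2. 2. 2.] [2. 2. 2.]]
```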
@@ -352,12 +354,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          return out
  
      def __bool__(self):
-         data = self.asnumpy()
-         if data.shape == ():
-             return bool(data)
-         if data.shape == (1,):
-             return bool(data[0])
-         raise ValueError("The truth value of an array with more than one element is ambiguous.")
+         return bool(self._item())
  
      @staticmethod
      def _convert_scalar_(data, func, message):
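The rewritten `__bool__` delegates to the internal `_item()`, which preserves the old contract: truthiness is only defined for single-element tensors. A small sketch of the expected semantics (error message may differ from the removed NumPy-based path):

```python
# Sketch of Tensor.__bool__ semantics after this change.
from mindspore import Tensor

print(bool(Tensor(0.0)))   # False: scalar tensor converts directly
print(bool(Tensor([7])))   # True: one-element tensor is unwrapped first
try:
    bool(Tensor([1, 2]))   # more than one element: truth value stays ambiguous
except ValueError as err:
    print(err)
```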
@@ -385,13 +382,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
      def __pos__(self):
          return self
  
-     def __abs__(self):
-         return tensor_operator_registry.get('abs')(self)
- 
-     @add_mint
-     def __add__(self, other):
-         return tensor_operator_registry.get('__add__')(self, other)
- 
      def __and__(self, other):
          if isinstance(other, (int, bool, float, Tensor)):
              return tensor_operator_registry.get('bitwise_and')(self, other)
@@ -410,28 +400,15 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
      def __radd__(self, other):
          return self.__add__(other)
  
-     def __iadd__(self, other):
-         return self.__add__(other)
- 
-     @sub_mint
-     def __sub__(self, other):
-         return tensor_operator_registry.get('__sub__')(self, other)
- 
      def __rsub__(self, other):
          return tensor_operator_registry.get('__sub__')(other, self)
  
-     def __isub__(self, other):
-         return self.__sub__(other)
- 
      def __mul__(self, other):
          return tensor_operator_registry.get('__mul__')(self, other)
  
      def __rmul__(self, other):
          return self.__mul__(other)
  
-     def __imul__(self, other):
-         return self.__mul__(other)
- 
      def __matmul__(self, other):
          return tensor_operator_registry.get('__matmul__')(self, other)
  
@@ -456,9 +433,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
      def __imod__(self, other):
          return self.__mod__(other)
  
-     def __pow__(self, other):
-         return tensor_operator_registry.get('__pow__')(self, other)
- 
      def __rpow__(self, other):
          return tensor_operator_registry.get('__rpow__')(self, other)
  
@@ -479,25 +453,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          out = tensor_operator_registry.get('__le__')(self, other)
          return out
  
-     def __getitem__(self, index):
-         out = tensor_operator_registry.get('__getitem__')(self, index)
-         if out is not self:
-             out.parent_tensor_ = self
-             out.index_of_parent_ = index
-         return out
- 
-     def __setitem__(self, index, value):
-         out = tensor_operator_registry.get('__setitem__')(self, index, value)
-         if isinstance(out, tuple):
-             if self.parent_tensor_ is not None and self.index_of_parent_ is not None:
-                 self.parent_tensor_.__setitem__(self.index_of_parent_, out[0])
-                 return self
-             return self
-         self.assign_value(out)
-         if self.parent_tensor_ is not None and self.index_of_parent_ is not None:
-             self.parent_tensor_.__setitem__(self.index_of_parent_, self)
-         return self
- 
      def __gt__(self, other):
          out = tensor_operator_registry.get('__gt__')(self, other)
          return out
@@ -530,10 +485,58 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          self.__dict__.update(state)
          Tensor_.__setstate__(self, value)
  
+     def __array__(self, dtype=None):
+         """support create numpy array from tensor."""
+         if dtype is None:
+             return self.asnumpy()
+         return self.asnumpy().astype(dtype, copy=False)
+ 
+     def __contains__(self, element):
+         """support 'in' operator."""
+         if isinstance(element, (Tensor, numbers.Number)):
+             return (element == self).any().item()
+         return False
+ 
+     def _getitem_origin(self, index):
+         """__getitem__ origin process, called by TensorPy::TensorGetItem"""
+         out = tensor_operator_registry.get('_tensor_getitem_origin')(self, index)
+         if out is not self:
+             out.parent_tensor_ = self
+             out.index_of_parent_ = index
+         return out
+ 
+     def _setitem_origin(self, index, value):
+         """__setitem__ origin process, called by TensorPy::TensorSetItem"""
+         out = tensor_operator_registry.get('_tensor_setitem_origin')(self, index, value)
+         if isinstance(out, tuple):
+             if self.parent_tensor_ is not None and self.index_of_parent_ is not None:
+                 self.parent_tensor_.__setitem__(self.index_of_parent_, out[0])
+                 return self
+             return self
+         self.assign_value(out)
+         if self.parent_tensor_ is not None and self.index_of_parent_ is not None:
+             self.parent_tensor_.__setitem__(self.index_of_parent_, self)
+         return self
+ 
+     def _getitem(self, index):
+         """__getitem__ process, called by TensorPy::TensorGetItem"""
+         return tensor_operator_registry.get('_tensor_getitem')(self, index)
+ 
+     def _setitem(self, index, value):
+         """__setitem__ process, called by TensorPy::TensorSetItem"""
+         return tensor_operator_registry.get('_tensor_setitem')(self, index, value)
+ 
      @property
      def shape(self):
          """
          For details, please refer to :func:`mindspore.ops.shape`.
+ 
+         Examples:
+             >>> from mindspore import Tensor
+             >>> import numpy as np
+             >>> x = Tensor(np.array([[1, 2], [3, 4]]))
+             >>> print(x.shape)
+             (2, 2)
          """
          return self._shape
  
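The new `__array__` and `__contains__` protocol methods make NumPy conversion and the `in` operator behave as sketched below. This is an illustration following the hunk's logic, not code from the package; the exact `np.asarray` dtype path depends on the NumPy version:

```python
# Sketch of the protocol methods added in the hunk above.
import numpy as np
from mindspore import Tensor

t = Tensor(np.array([[1, 2], [3, 4]], np.int32))
a = np.asarray(t)                     # routed through Tensor.__array__
b = np.asarray(t, dtype=np.float64)   # dtype triggers astype(..., copy=False)
print(a.dtype, b.dtype)               # int32 float64
print(2 in t, 9 in t)                 # True False, via (element == self).any().item()
print("x" in t)                       # False: non-numeric elements short-circuit
```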
@@ -546,7 +549,16 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  
      @property
      def dtype(self):
-         """Return the dtype of the tensor (:class:`mindspore.dtype`)."""
+         """
+         Return the dtype of the tensor (:class:`mindspore.dtype`).
+ 
+         Examples:
+             >>> from mindspore import Tensor
+             >>> import numpy as np
+             >>> x = Tensor(np.array([1, 2], dtype=np.float32))
+             >>> print(x.dtype)
+             Float32
+         """
          return self._dtype
  
      @property
@@ -789,7 +801,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  
      def ndimension(self):
          r"""
-         Alias for :func:`mindspore.Tensor.ndim`.
+         Alias for :attr:`mindspore.Tensor.ndim`.
          """
          return len(self._shape)
  
@@ -821,30 +833,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          self.const_arg = const_arg
          return self
  
-     def arccosh(self):
-         r"""
-         For details, please refer to :func:`mindspore.ops.arccosh`.
-         """
-         return tensor_operator_registry.get('acosh')(self)
- 
-     def arcsin(self):
-         r"""
-         For details, please refer to :func:`mindspore.ops.arcsin`.
-         """
-         return tensor_operator_registry.get('asin')(self)
- 
-     def arctan(self):
-         r"""
-         For details, please refer to :func:`mindspore.ops.arctan`.
-         """
-         return tensor_operator_registry.get('atan')(self)
- 
-     def arctan2(self, other):
-         r"""
-         For details, please refer to :func:`mindspore.ops.arctan2`.
-         """
-         return tensor_operator_registry.get('atan2')(self, other)
- 
      def cauchy(self, median=0.0, sigma=1.0):
          r"""
          Fills the tensor with numbers drawn from the Cauchy distribution. It is
@@ -942,31 +930,17 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          self.assign_value_cpp(value)
          return self
  
-     def bincount(self, weights=None, minlength=0):
-         r"""
-         For details, please refer to :func:`mindspore.ops.bincount`.
-         """
-         return tensor_operator_registry.get('bincount')(self, weights, minlength)
- 
-     def chunk(self, chunks, axis=0):
-         r"""
-         For details, please refer to :func:`mindspore.ops.chunk`.
+     def item(self):
          """
-         return tensor_operator_registry.get('chunk')(self, chunks, axis)
- 
-     @item_mint
-     def item(self, index=None):
-         """
-         Get the item at the specified index of the tensor.
- 
-         Args:
-             index (Union[None, int, tuple(int)]): The index in Tensor. Default: ``None``.
+         Return the value of this tensor as a standard Python number.
+         This only works for tensors with one element.
  
          Returns:
              A scalar, type is defined by the dtype of the Tensor.
  
          Raises:
-             ValueError: If the length of the `index` is not equal to self.ndim.
+             ValueError: If the tensor has more than one element.
+             TypeError: If the element type of the tensor is not supported.
  
          Supported Platforms:
              ``Ascend`` ``GPU`` ``CPU``
@@ -974,19 +948,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          Examples:
              >>> import mindspore as ms
              >>> from mindspore import Tensor
-             >>> x = Tensor([[1, 2, 3], [4, 5, 6]], ms.float32)
-             >>> print(x.item((0, 1)))
-             2.0
              >>> x = Tensor(1.2, ms.float32)
              >>> print(x.item())
              1.2
          """
- 
-         if index is not None:
-             output = self.asnumpy().item(index)
-         else:
-             output = self.asnumpy().item()
-         return output
+         return self._item()
  
      def itemset(self, *args):
          r"""
@@ -998,7 +964,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          Args:
              args (Union[(numbers.Number), (int/tuple(int), numbers.Number)]): The arguments that
                  specify the index and value. If `args` contain one argument (a scalar),
-                 it is only used in case tensor is of size 1. If `args` contain two
+                 it is only used in case tensor is of size 1. If `args` contains two
                  arguments, the last argument is the value to be set and must be a
                  scalar, the first argument specifies a single tensor element location.
                  It is either an int or a tuple.
@@ -1069,7 +1035,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
              self.init_data()
          return Tensor_.asnumpy(self)
  
-     def numpy(self, *, force=False):
+     def numpy(self):
          """
          Alias for :func:`mindspore.Tensor.asnumpy`.
          """
@@ -1118,14 +1084,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          """
          return tensor_operator_registry.get('select_scatter')(self, src, axis, index)
  
-     def histc(self, bins=100, min=0., max=0.):
-         """
-         For details, please refer to :func:`mindspore.ops.histc`.
-         """
-         validator.check_value_type('min', min, (int, float,), 'Tensor.histc')
-         validator.check_value_type('max', max, (int, float,), 'Tensor.histc')
-         return tensor_operator_registry.get('histc')(self, bins, float(min), float(max))
- 
      def geqrf(self):
          """
          For details, please refer to :func:`mindspore.ops.geqrf`.
@@ -1175,6 +1133,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          >>> print(z.is_contiguous())
          True
          """
+         if not self._need_contiguous():
+             return self
          return tensor_operator_registry.get('contiguous')(self)
  
      def is_contiguous(self):
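The `_need_contiguous()` guard added here is internal, so the following is only an inferred sketch of the observable behavior: an already-contiguous tensor should now come back unchanged instead of going through the copy kernel.

```python
# Inferred behavior sketch for the contiguous() early return above.
import mindspore as ms
from mindspore import Tensor

x = Tensor([[1, 2], [3, 4]], ms.float32)
y = x.contiguous()          # already contiguous: returned without a copy
print(y.is_contiguous())    # True
```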
@@ -1236,26 +1196,33 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          """
          return Tensor_.storage_offset(self)
  
-     def register_hook(self, hook_fn):
+     def register_hook(self, hook):
          """
          Registers a backward hook for tensor.
  
          Note:
-             - The `register_backward_hook(hook_fn)` does not work in graph mode or functions decorated with 'jit'.
-             - The 'hook_fn' must be defined as the following code. `grad` is the gradient passed to the tensor,
+             - The `hook` must be defined as the following code. `grad` is the gradient passed to the tensor,
                which may be modified by returning a new output gradient.
-             - The 'hook_fn' should have the following signature:
-               hook_fn(grad) -> New output gradient, but can not return None or not set return value.
+             - The `hook` should have the following signature:
+               hook(grad) -> new output gradient; the hook must return a value, and that value can not be None.
+             - The following constraints must be met under graph mode:
+ 
+               - The `hook` must satisfy the syntax constraints of graph mode.
+               - Registering a `hook` for a `Parameter` is not supported in the graph (i.e., in the function
+                 `Cell.construct` or in a function decorated by `@jit`).
+               - Deleting a `hook` inside the graph is not supported.
+ 
+               - Registering a `hook` inside the graph will return the `Tensor` itself.
  
          Args:
-             hook_fn (function): Python function. Tensor backward hook function.
+             hook (function): Python function. Tensor backward hook function.
  
          Returns:
-             A handle corresponding to the `hook_fn` . The handle can be used to remove the added `hook_fn` by calling
+             A handle corresponding to the `hook` . The handle can be used to remove the added `hook` by calling
              `handle.remove()` .
  
          Raises:
-             TypeError: If the `hook_fn` is not a function of python.
+             TypeError: If the `hook` is not a Python function.
  
          Supported Platforms:
              ``Ascend`` ``GPU`` ``CPU``
@@ -1278,12 +1245,15 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          >>> print(output)
          (Tensor(shape=[], dtype=Float32, value=8), Tensor(shape=[], dtype=Float32, value=6))
          """
-         if not check_hook_fn("register_hook", hook_fn):
-             return _TensorHookHandle()
-         handle = _TensorHookHandle()
-         handle.id = Tensor_.register_hook(self, hook_fn)
+         if not check_hook_fn("register_hook", hook):
+             return _TensorHookHandle(self)
+         handle = _TensorHookHandle(self)
+         handle.id = Tensor_.register_hook(self, hook)
          return handle
  
+     def _remove_hook(self):
+         pass
+ 
      def flush_from_cache(self):
          """
          Flush cache data to host if tensor is cache enable.
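A usage sketch of the renamed `hook` parameter and the returned handle, in PyNative mode. The network shape mirrors the docstring example visible above, so the gradients come out as (8, 6):

```python
# Sketch of Tensor.register_hook usage after the rename.
import mindspore as ms
from mindspore import Tensor

ms.set_context(mode=ms.PYNATIVE_MODE)

def hook(grad):
    return grad * 2            # must return a non-None replacement gradient

def net(x, y):
    z = x * y
    z.register_hook(hook)      # doubles the gradient flowing back through z
    return z * y

grad_fn = ms.grad(net, grad_position=(0, 1))
output = grad_fn(Tensor(1.0, ms.float32), Tensor(2.0, ms.float32))
print(output)                  # (8, 6), matching the docstring example above
```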
@@ -1310,94 +1280,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          """
          return tensor_operator_registry.get('addcmul')(self, tensor1, tensor2, value)
  
-     @add_mint
-     def add(self, other):
-         r"""
-         For details, please refer to :func:`mindspore.ops.add`.
-         """
-         return tensor_operator_registry.get('add')(self, other)
- 
-     def add_(self, other, *, alpha=1):
-         """
-         inplace update self by following compute:
-         self = self + other * alpha.
- 
-         .. warning::
-             This is an experimental API that is subject to change or deletion.
-             The `other` tensor must be broadcastable with the `self` tensor. It may be of a different data type.
- 
-         Args:
-             other (Tensor): the source tensor Add to self Tensor.
-             alpha (Number): no effect currently.
- 
-         Returns:
-             Return self Tensor.
- 
-         Supported Platforms:
-             ``Ascend``
- 
-         Examples:
-             >>> import numpy as np
-             >>> from mindspore import Tensor
-             >>> a = Tensor(np.ones((2,3)).astype("float32"))
-             >>> b = Tensor(np.ones((2,3)).astype("float32"))
-             >>> a.add_(b)
-             >>> print(a)
-             [[2. 2. 2.]
-              [2. 2. 2.]]
-         """
-         if isinstance(other, (int, float)):
-             ret = tensor_operator_registry.get("adds_")(self, other, alpha)
-         else:
-             ret = tensor_operator_registry.get("add_")(self, other, alpha)
-         return ret
- 
-     def subtract(self, other, *, alpha=1):
-         r"""
-         For details, please refer to :func:`mindspore.ops.subtract`.
-         """
-         return tensor_operator_registry.get('sub')(self, alpha * other)
- 
-     def true_divide(self, value):
-         r"""
-         Alias for Tensor.div() with :math:`rounding\_mode=None`.
-         For details, please refer to :func:`mindspore.ops.div`.
-         """
-         return tensor_operator_registry.get('div')(self, value, rounding_mode=None)
- 
-     def triu(self, diagonal=0):
-         r"""
-         For details, please refer to :func:`mindspore.ops.triu`.
- 
-         .. warning::
-             This is an experimental API that is subject to change or deletion.
- 
-         """
-         validator.check_value_type('diagonal', diagonal, [int], 'triu')
-         return tensor_operator_registry.get('triu')(self, diagonal)
- 
-     def addbmm(self, batch1, batch2, *, beta=1, alpha=1):
-         r"""
-         For details, please refer to :func:`mindspore.ops.addbmm`.
-         """
-         return tensor_operator_registry.get('addbmm')(self, batch1, batch2, beta=beta, alpha=alpha)
- 
-     def addmm(self, mat1, mat2, *, beta=1, alpha=1):
-         r"""
-         For details, please refer to :func:`mindspore.ops.addmm`.
-         """
-         return tensor_operator_registry.get('addmm')(self, mat1, mat2, beta=beta, alpha=alpha)
- 
      def addmm_(self, mat1, mat2, *, beta=1, alpha=1):
          r"""
-         For details, please refer to :func:`mindspore.ops.addmm`.
- 
-         .. note::
-             The output results are directly updated in the Tensor.
+         In-place version of :func:`mindspore.Tensor.addmm`.
  
          .. warning::
              This is an experimental API that is subject to change or deletion.
- 
          """
          return tensor_operator_registry.get('addmm_')(self, mat1, mat2, beta=beta, alpha=alpha)
  
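A brief sketch of the in-place semantics referenced by the new docstring, using the usual `out = beta * self + alpha * (mat1 @ mat2)` definition of `Tensor.addmm` (the concrete values are illustrative only):

```python
# Sketch of addmm_ as the in-place counterpart of Tensor.addmm.
import mindspore as ms
import numpy as np
from mindspore import Tensor

m = Tensor(np.ones((2, 2), np.float32))
a = Tensor(np.full((2, 3), 2, np.float32))
b = Tensor(np.full((3, 2), 3, np.float32))
m.addmm_(a, b, beta=1, alpha=1)   # m becomes 1*m + 1*(a @ b), updated in place
print(m)                          # [[19. 19.] [19. 19.]]
```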
@@ -1413,32 +1301,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          """
          return tensor_operator_registry.get('adjoint')(self)
  
-     def all(self, axis=None, keep_dims=False):
-         r"""
-         For details, please refer to :func:`mindspore.ops.all`.
-         """
-         return tensor_operator_registry.get('all')(self, axis, keep_dims)
- 
      def angle(self):
          r"""
          For details, please refer to :func:`mindspore.ops.angle`.
          """
          return tensor_operator_registry.get('angle')(self)
  
-     def any(self, axis=None, keep_dims=False):
-         r"""
-         For details, please refer to :func:`mindspore.ops.any`.
-         """
-         if axis is None:
-             axis = ()
-         return tensor_operator_registry.get('any')(self, axis, keep_dims)
- 
-     def atan2(self, other):
-         r"""
-         For details, please refer to :func:`mindspore.ops.atan2`.
-         """
-         return tensor_operator_registry.get('atan2')(self, other)
- 
      def baddbmm(self, batch1, batch2, beta=1, alpha=1):
          r"""
          For details, please refer to :func:`mindspore.ops.baddbmm`.
@@ -1474,41 +1342,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
              shape = shape[0]
          return tensor_operator_registry.get('reshape')(self, shape)
  
-     def view_as(self, other):
-         r"""
-         View self Tensor as the same shape as `other` .
- 
-         Args:
-             other(Tensor): The returned Tensor has the same shape as `other`.
- 
-         Returns:
-             Tensor, has the same shape as `other`.
- 
-         Raises:
-             TypeError: If `other` is not a Tensor.
- 
-         Supported Platforms:
-             ``Ascend`` ``GPU`` ``CPU``
- 
-         Examples:
-             >>> from mindspore import Tensor
-             >>> from mindspore import dtype as mstype
-             >>> a = Tensor([[1, 2, 3], [2, 3, 4]], mstype.float32)
-             >>> b = Tensor([1, 1, 1, 1, 1, 1], mstype.float32)
-             >>> output = a.view_as(b)
-             >>> print(output)
-             [1. 2. 3. 2. 3. 4.]
-         """
-         if not isinstance(other, (Tensor, Tensor_)):
-             raise TypeError(f"For view_as, the input other must be a Tensor, but got {type(other)}")
-         return self.view(other.shape)
- 
-     def t(self):
-         r"""
-         For details, please refer to :func:`mindspore.ops.t`.
-         """
-         return tensor_operator_registry.get("t")(self)
- 
      def bitwise_and(self, other):
          """
          For details, please refer to :func:`mindspore.ops.bitwise_and`.
@@ -1541,12 +1374,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
              other = _cast(other, self.dtype)
          return tensor_operator_registry.get('bitwise_right_shift')(self, other)
  
-     def scatter(self, axis, index, src):
-         """
-         For details, please refer to :func:`mindspore.ops.scatter`.
-         """
-         return tensor_operator_registry.get('scatter')(self, axis, index, src)
- 
      def scatter_mul(self, indices, updates):
          """
          For details, please refer to :func:`mindspore.ops.scatter_mul`.
@@ -1565,12 +1392,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          """
          return tensor_operator_registry.get('ger')(self, vec2)
  
-     def gt(self, x):
-         """
-         For details, please refer to :func:`mindspore.ops.gt`.
-         """
-         return tensor_operator_registry.get('gt')(self, x)
- 
      def ge(self, x):
          """
          For details, please refer to :func:`mindspore.ops.ge`.
@@ -1583,108 +1404,49 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          """
          return tensor_operator_registry.get('broadcast_to')(self, shape)
  
-     def expand_as(self, x):
-         """
-         Expand the dimension of target tensor to the dimension of input tensor.
- 
-         Args:
-             x (Tensor): The input tensor. The shape of the input tensor must obey
-                 the broadcasting rule.
- 
-         Returns:
-             Tensor, has the same dimension as input tensor.
- 
-         Examples:
-             >>> import numpy as np
-             >>> from mindspore import Tensor
-             >>> from mindspore import dtype as mstype
-             >>> x = Tensor([1, 2, 3], dtype=mstype.float32)
-             >>> y = Tensor(np.ones((2, 3)), dtype=mstype.float32)
-             >>> output = x.expand_as(y)
-             >>> print(output)
-             [[1. 2. 3.]
-              [1. 2. 3.]]
-         """
-         return tensor_operator_registry.get('broadcast_to')(self, x.shape)
- 
-     def exp(self):
-         """
-         For details, please refer to :func:`mindspore.ops.exp`.
-         """
-         return tensor_operator_registry.get('exp')(self)
- 
      def real(self):
          r"""
          For details, please refer to :func:`mindspore.ops.real`.
          """
          return tensor_operator_registry.get('real')(self)
  
-     def rsqrt(self):
+     def tanh_(self):
          r"""
-         For details, please refer to :func:`mindspore.ops.rsqrt`.
-         """
-         return tensor_operator_registry.get('rsqrt')(self)
+         Computes the hyperbolic tangent of self in-place, element-wise. The Tanh function is defined as:
  
-     def reciprocal(self):
-         r"""
-         For details, please refer to :func:`mindspore.ops.reciprocal`.
-         """
-         return tensor_operator_registry.get('reciprocal')(self)
+         .. math::
  
-     def sqrt(self):
-         """
-         For details, please refer to :func:`mindspore.ops.sqrt`.
-         """
-         return tensor_operator_registry.get('sqrt')(self)
+             tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
  
-     def square(self):
-         """
-         For details, please refer to :func:`mindspore.ops.square`.
-         """
-         return tensor_operator_registry.get('square')(self)
+         where :math:`x_i` is an element of the input Tensor.
  
-     @sub_mint
-     def sub(self, y):
-         r"""
-         For details, please refer to :func:`mindspore.ops.sub`.
-         """
-         return tensor_operator_registry.get('sub')(self, y)
+         Tanh Activation Function Graph:
  
-     def tan(self):
-         """
-         For details, please refer to :func:`mindspore.ops.tan`.
-         """
-         return tensor_operator_registry.get('tan')(self)
+         .. image:: ../../images/Tanh.png
+             :align: center
  
-     def tanh(self):
-         r"""
-         For details, please refer to :func:`mindspore.ops.tanh`.
-         """
-         return tensor_operator_registry.get('tanh')(self)
+         .. warning::
+             - This is an experimental API that is subject to change or deletion.
  
-     def cosh(self):
-         r"""
-         For details, please refer to :func:`mindspore.ops.cosh`.
-         """
-         return tensor_operator_registry.get('cosh')(self)
+         Returns:
+             Tensor, with the same type and shape as the `self`.
  
-     def acos(self):
-         r"""
-         For details, please refer to :func:`mindspore.ops.acos`.
-         """
-         return tensor_operator_registry.get('acos')(self)
+         Raises:
+             TypeError: If `self` is not a Tensor.
  
-     def arccos(self):
-         r"""
-         Alias for :func:`mindspore.Tensor.acos`.
-         """
-         return self.acos()
+         Supported Platforms:
+             ``Ascend``
  
-     def cos(self):
-         r"""
-         For details, please refer to :func:`mindspore.ops.cos`.
+         Examples:
+             >>> import mindspore
+             >>> import numpy as np
+             >>> from mindspore import Tensor
+             >>> x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
+             >>> output = x.tanh_()
+             >>> print(output)
+             [0.7615941 0.9640276 0.9950547 0.9993293 0.9999092]
          """
-         return tensor_operator_registry.get('cos')(self)
+         return tensor_operator_registry.get('tanh_')(self)
  
      def cov(self, *, correction=1, fweights=None, aweights=None):
          r"""
@@ -1692,41 +1454,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          """
          return tensor_operator_registry.get('cov')(self, correction=correction, fweights=fweights, aweights=aweights)
  
-     def acosh(self):
-         """
-         For details, please refer to :func:`mindspore.ops.acosh`.
-         """
-         return tensor_operator_registry.get('acosh')(self)
- 
-     def asin(self):
+     def floor_(self):
          r"""
-         For details, please refer to :func:`mindspore.ops.asin`.
-         """
-         return tensor_operator_registry.get('asin')(self)
- 
-     def abs(self):
-         """
-         For details, please refer to :func:`mindspore.ops.abs`.
-         """
-         return tensor_operator_registry.get('abs')(self)
- 
-     def absolute(self):
-         """
-         Alias for :func:`mindspore.Tensor.abs`.
-         """
-         return self.abs()
- 
-     def ceil(self):
-         """
-         For details, please refer to :func:`mindspore.ops.ceil`.
-         """
-         return tensor_operator_registry.get('ceil')(self)
+         In-place version of :func:`mindspore.Tensor.floor`.
  
-     def floor(self):
-         """
-         For details, please refer to :func:`mindspore.ops.floor`.
+         .. warning::
+             This is an experimental API that is subject to change or deletion.
          """
-         return tensor_operator_registry.get('floor')(self)
+         return tensor_operator_registry.get('floor_')(self)
  
      def floor_divide(self, other):
          """
@@ -1737,18 +1472,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          """
          return tensor_operator_registry.get('floor_divide')(self, other)
  
-     def lerp(self, end, weight):
-         """
-         For details, please refer to :func:`mindspore.ops.lerp`.
-         """
-         return tensor_operator_registry.get('lerp')(self, end, weight)
- 
-     def negative(self):
-         r"""
-         For details, please refer to :func:`mindspore.ops.negative`.
-         """
-         return tensor_operator_registry.get("negative")(self)
- 
      # pylint: disable=redefined-builtin
      def norm(self, ord=None, dim=None, keepdim=False, *, dtype=None):
          """
@@ -1764,7 +1487,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  
      def approximate_equal(self, other, tolerance=1e-5):
          r"""
-         For details, please refer to :func:`mindspore.ops.approximate_equal`.
+         For details, please refer to :func:`mindspore.ops.approximate_equal`.
+         The parameter `other` of the current interface corresponds to the parameter `y` of the reference interface.
          """
          validator.check_isinstance("x", self, Tensor)
          validator.check_isinstance("y", other, Tensor)
@@ -1775,12 +1499,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
              tensor_operator_registry.get('__sub__')(input_x, input_y)
          ), tolerance)
  
-     def log1p(self):
-         r"""
-         For details, please refer to :func:`mindspore.ops.log1p`.
-         """
-         return tensor_operator_registry.get('log1p')(self)
- 
      def logit(self, eps=None):
          r"""
          For details, please refer to :func:`mindspore.ops.logit`.
@@ -1814,6 +1532,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
      def logsumexp(self, axis, keepdims=False):
          r"""
          For details, please refer to :func:`mindspore.ops.logsumexp`.
+ 
+         Note:
+             The parameter `keepdims` here has the same meaning as the parameter `keep_dims` in
+             :func:`mindspore.ops.logsumexp`.
          """
          return tensor_operator_registry.get('logsumexp')(self, axis, keepdims)
  
@@ -1825,22 +1547,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
      def i0(self):
          r"""
-         For details, please refer to :func:`mindspore.ops.i0`.
+         For details, please refer to :func:`mindspore.ops.bessel_i0`.
          """
          return tensor_operator_registry.get('i0')(self)
  
-     def isclose(self, x2, rtol=1e-05, atol=1e-08, equal_nan=False):
-         """
-         For details, please refer to :func:`mindspore.ops.isclose`.
-         """
-         return tensor_operator_registry.get('isclose')(self, x2, rtol, atol, equal_nan)
- 
-     def isneginf(self):
-         r"""
-         For details, please refer to :func:`mindspore.ops.isneginf`.
-         """
-         return tensor_operator_registry.get('isneginf')(self)
- 
      def isposinf(self):
          r"""
          For details, please refer to :func:`mindspore.ops.isposinf`.
@@ -1853,15 +1563,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          """
          return tensor_operator_registry.get('isreal')(self)
  
-     def isfinite(self):
+     def is_complex(self):
          r"""
-         For details, please refer to :func:`mindspore.ops.isfinite`.
-         """
-         return tensor_operator_registry.get('isfinite')(self)
- 
-     def is_complex(self):
-         r"""
-         For details, please refer to :func:`mindspore.ops.is_complex`.
+         For details, please refer to :func:`mindspore.ops.is_complex`.
          """
          return tensor_operator_registry.get('is_complex')(self)
  
@@ -1871,49 +1575,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          """
          return tensor_operator_registry.get('inv')(self)
  
-     def inverse(self):
-         r"""
-         For details, please refer to :func:`mindspore.ops.inverse`.
-         """
-         return tensor_operator_registry.get('inverse')(self)
- 
      def invert(self):
          r"""
          For details, please refer to :func:`mindspore.ops.invert`.
          """
          return tensor_operator_registry.get('invert')(self)
  
-     def pow(self, exponent):
-         r"""
-         For details, please refer to :func:`mindspore.ops.pow`.
-         """
-         return tensor_operator_registry.get('pow')(self, exponent)
- 
-     def log(self):
-         """
-         For details, please refer to :func:`mindspore.ops.log`.
-         """
-         return tensor_operator_registry.get('log')(self)
- 
-     def log10(self):
-         r"""
-         For details, please refer to :func:`mindspore.ops.log10`.
-         """
-         return tensor_operator_registry.get('log10')(self)
- 
-     def log2(self):
-         r"""
-         For details, please refer to :func:`mindspore.ops.log2`.
-         """
-         return tensor_operator_registry.get('log2')(self)
- 
-     @mean_mint
-     def mean(self, axis=None, keep_dims=False):
-         """
-         For details, please refer to :func:`mindspore.ops.mean`.
-         """
-         return tensor_operator_registry.get('mean')(self, axis, keep_dims)
- 
      def amin(self, axis=None, keepdims=False, *, initial=None, where=None):
          """
          For details, please refer to :func:`mindspore.ops.amin`.
@@ -1925,6 +1592,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
      def reverse(self, axis):
          """
          For details, please refer to :func:`mindspore.ops.flip`.
+         The `axis` parameter in `Tensor.reverse` is equivalent to the `dims` parameter in :func:`mindspore.ops.flip`.
          """
          return tensor_operator_registry.get('flip')(self, axis)
  
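A sketch of the equivalence the new docstring line states: `Tensor.reverse` forwards to `mindspore.ops.flip`, with `axis` playing the role of `dims`:

```python
# Sketch: Tensor.reverse(axis=...) vs ops.flip(x, dims=...).
import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.array([[1, 2], [3, 4]], np.float32))
print(x.reverse(axis=[0]))    # rows flipped: [[3. 4.] [1. 2.]]
print(ops.flip(x, dims=[0]))  # identical result
```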
@@ -1948,84 +1616,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          """
          return tensor_operator_registry.get("reverse_sequence")(self, seq_lengths, seq_dim, batch_dim)
  
-     def prod(self, axis=None, keep_dims=False, dtype=None):
-         """
-         For details, please refer to :func:`mindspore.ops.prod`.
-         """
-         return tensor_operator_registry.get('prod')(self, axis, keep_dims, dtype)
- 
-     def select(self, condition, y):
-         r"""
-         For details, please refer to :func:`mindspore.ops.select`.
-         """
-         if not isinstance(condition, Tensor):
-             raise TypeError(f"For 'Tensor.select', the argument 'condition' should be Tensor,"
-                             f" but got {type(condition)}.")
-         if not isinstance(y, (Tensor, int, float)):
-             raise TypeError(f"For 'Tensor.select', the argument 'y' should be Tensor, int or float,"
-                             f" but got {type(y)}.")
-         if isinstance(y, int) and self.dtype != mstype.int32:
-             raise TypeError(f"For 'Tensor.select', if the argument 'y' is int,"
-                             f" then the tensor type should be int32 but got {self.dtype}")
-         if isinstance(y, float) and self.dtype != mstype.float32:
-             raise TypeError(f"For 'Tensor.select', if the argument 'y' is float,"
-                             f" then the tensor type should be float32 but got {self.dtype}")
-         input_y = y
-         if isinstance(y, (int, float)):
-             input_y = tensor_operator_registry.get('zeros_like')(self) + y
-             if isinstance(y, int):
-                 input_y = tensor_operator_registry.get('cast')(input_y, mstype.int32)
-             else:
-                 input_y = tensor_operator_registry.get('cast')(input_y, mstype.float32)
-         return tensor_operator_registry.get('select')(condition, self, input_y)
- 
-     def transpose(self, *axes):
-         r"""
-         For details, please refer to :func:`mindspore.ops.transpose`.
-         """
-         perm = validator.check_transpose_axis(axes, self.ndim)
-         return tensor_operator_registry.get('transpose')(self, perm)
- 
      def col2im(self, output_size, kernel_size, dilation, padding_value, stride):
          """
          For details, please refer to :func:`mindspore.ops.col2im`.
          """
          return tensor_operator_registry.get('col2im')(self, output_size, kernel_size, dilation, padding_value, stride)
  
-     def reshape(self, *shape):
-         r"""
-         Rearranges the input Tensor based on the given `shape` .
- 
-         The `shape` can only have one -1 at most, in which case it's inferred from the remaining dimensions and
-         the number of elements in the input.
- 
-         Args:
-             shape (Union[int, tuple[int], list[int]]): If `shape` is a tuple or list, its elements should be
-                 integers, and only constant value is allowed. i.e., :math:`(y_1, y_2, ..., y_S)`.
- 
-         Returns:
-             Tensor, If the given `shape` does not contain -1, the `shape` of tensor is :math:`(y_1, y_2, ..., y_S)`.
-             If the k-th position in the given `shape` is -1, the `shape` of tensor is :math:`(y_1, ..., y_{k-1},
-             \frac{\prod_{i=1}^{R}x_{i}}{y_1\times ...\times y_{k-1}\times y_{k+1}\times...\times y_S} , y_{k+1},
-             ..., y_S)`, in where the shape of input tensor is :math:`(x_1, x_2, ..., x_R)`.
- 
-         Supported Platforms:
-             ``Ascend`` ``GPU`` ``CPU``
- 
-         Examples:
-             >>> import mindspore
-             >>> import numpy as np
-             >>> from mindspore import Tensor, ops
-             >>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
-             >>> output = input.reshape(3, 2)
-             >>> print(output)
-             [[-0.1  0.3]
-              [ 3.6  0.4]
-              [ 0.5 -3.2]]
-         """
-         new_shape = validator.check_reshape_shp(shape)
-         return tensor_operator_registry.get('reshape')(self, new_shape)
- 
      def reshape_as(self, other):
          """
          Change the shape of the Tensor to the shape of `other` without changing the data.
@@ -2078,12 +1674,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          reshape_op = tensor_operator_registry.get('reshape')
          return reshape_op(self, (-1,))
  
-     def round(self, decimals=0):
-         """
-         For details, please refer to :func:`mindspore.ops.round`.
-         """
-         return tensor_operator_registry.get('round')(self, decimals=decimals)
- 
      def roll(self, shifts, dims):
          """
          For details, please refer to :func:`mindspore.ops.roll`.
@@ -2102,18 +1692,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          """
          return tensor_operator_registry.get('deg2rad')(self)
  
-     def dot(self, other):
-         r"""
-         For details, please refer to :func:`mindspore.ops.dot`.
-         """
-         return tensor_operator_registry.get('dot')(self, other)
- 
-     def outer(self, vec2):
-         r"""
-         For details, please refer to :func:`mindspore.ops.outer`.
-         """
-         return tensor_operator_registry.get('outer')(self, vec2)
- 
      def rad2deg(self):
          r"""
          For details, please refer to :func:`mindspore.ops.rad2deg`.
@@ -2130,16 +1708,32 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          r"""
          Alias for :func:`mindspore.Tensor.numel`.
          """
-         return tensor_operator_registry.get('nelement')(self)
+         return self.size
  
      def numel(self):
          r"""
-         For details, please refer to :func:`mindspore.ops.numel`.
+         Returns a scalar of type int that represents the total number of elements in the Tensor.
+ 
+         Returns:
+             int. A scalar representing the total number of elements in the Tensor.
+ 
+         Supported Platforms:
+             ``Ascend`` ``GPU`` ``CPU``
+ 
+         Examples:
+             >>> import mindspore
+             >>> import numpy as np
+             >>> from mindspore import Tensor
+             >>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
+             >>> print(input_x.numel())
+             4
          """
-         return tensor_operator_registry.get('numel')(self)
+         return self.size
  
      def permute(self, *axis):
          """
+         Tensor.permute supports automatic unpacking of the `axis` argument when it is passed as a variable number
+         of positional arguments, which differs slightly from the input parameter of :func:`mindspore.ops.permute`.
          For details, please refer to :func:`mindspore.ops.permute`.
          """
          perm = validator.check_transpose_axis(axis, self.ndim)
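A sketch of the axis-unpacking note added above, alongside the simplified `numel`/`nelement` (both now just return `self.size`):

```python
# Sketch: both permute call styles are accepted; numel/nelement delegate to size.
import numpy as np
from mindspore import Tensor

x = Tensor(np.zeros((2, 3, 4), np.float32))
print(x.permute(2, 0, 1).shape)     # (4, 2, 3): axes as separate positional ints
print(x.permute((2, 0, 1)).shape)   # (4, 2, 3): axes as a single tuple
print(x.numel(), x.nelement())      # 24 24
```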
@@ -2151,19 +1745,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          """
          return tensor_operator_registry.get("positive")(self)
  
-     def remainder(self, divisor):
-         r"""
-         For details, please refer to :func:`mindspore.ops.remainder`.
-         """
-         return tensor_operator_registry.get('remainder')(self, divisor)
- 
-     @flatten_mint
-     def flatten(self, order='C', *, start_dim=0, end_dim=-1):
-         r"""
-         For details, please refer to :func:`mindspore.ops.flatten`.
-         """
-         return tensor_operator_registry.get('flatten')(self, order, start_dim=start_dim, end_dim=end_dim)
- 
      def float_power(self, other):
          r"""
          For details, please refer to :func:`mindspore.ops.float_power`.
@@ -2178,22 +1759,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  
      def fmin(self, other):
          r"""
-         For details, please refer to :func:`mindspore.ops.fmin`.
+         This interface has been deprecated since version 2.4 and will be removed in a future version.
          """
          return tensor_operator_registry.get('fmin')(self, other)
  
-     def fmod(self, other):
-         r"""
-         For details, please refer to :func:`mindspore.ops.fmod`.
-         """
-         return tensor_operator_registry.get('fmod')(self, other)
- 
-     def narrow(self, axis, start, length):
-         """
-         For details, please refer to :func:`mindspore.ops.narrow`.
-         """
-         return tensor_operator_registry.get('narrow')(self, axis, start, length)
- 
      def swapaxes(self, axis0, axis1):
          """
          For details, please refer to :func:`mindspore.ops.swapaxes`.
@@ -2218,20 +1787,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          """
          return tensor_operator_registry.get('slogdet')(self)
  
-     def tril(self, diagonal=0):
-         """
-         For details, please refer to :func:`mindspore.ops.tril`.
-         """
-         return tensor_operator_registry.get('tril')(self, diagonal)
- 
-     def unsqueeze(self, dim):
-         """
-         For details, please refer to :func:`mindspore.ops.unsqueeze`.
-         """
-         validator.check_is_int(dim, 'dim')
-         validator.check_int_range(dim, -self.ndim - 1, self.ndim + 1, validator.INC_LEFT, 'dim')
-         return tensor_operator_registry.get('unsqueeze')(self, dim)
- 
      def expand_dims(self, axis):
          """
          For details, please refer to :func:`mindspore.ops.expand_dims`.
@@ -2271,26 +1826,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          dtype = _check_astype_and_convert(dtype)
          if not copy and dtype == self.dtype:
              return self
-         return tensor_operator_registry.get('cast')(self, dtype)
- 
-     def argmax(self, axis=None, keepdims=False):
-         """
-         For details, please refer to :func:`mindspore.ops.argmax`.
-         """
-         out = tensor_operator_registry.get('argmax')(self, axis, keepdims)
-         return out
- 
-     def argmin(self, axis=None, keepdims=False):
-         """
-         For details, please refer to :func:`mindspore.ops.argmin`.
-         """
-         out = tensor_operator_registry.get('argmin')(self, axis, keepdims)
-         return out
+         return self.to(dtype)
  
      def argmax_with_value(self, axis=0, keep_dims=False):
          """
-         Returns the maximum value with corresponding index.
- 
          Compute the max value of input Tensor on the specified axis, and return the max value and index.
  
          Note:
@@ -2299,9 +1838,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          - The value range of `axis` is [-dims, dims - 1]. `dims` is the dimension length of this tensor.
  
          Args:
-             axis (int): The dimension to reduce. Default: ``0`` .
-             keep_dims (bool): Whether to reduce dimension, if ``true`` the output will keep the same dimension as the
-                 input, the output will reduce dimension if ``false`` . Default: ``False`` .
+             axis (int, optional): The dimension to reduce. Default: ``0`` .
+             keep_dims (bool, optional): Whether to reduce dimension, if ``true`` the output will keep the same
+                 dimension as the input, the output will reduce dimension if ``false`` . Default: ``False`` .
  
          Returns:
              tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the maximum value of the input
@@ -2338,7 +1877,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  
      def argmin_with_value(self, axis=0, keep_dims=False):
          """
-         Returns the minimum value with corresponding index.
+         Compute the minimum value of the input Tensor on the specified axis, and return the minimum value and index.
  
          Note:
              - In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
@@ -2346,9 +1885,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          - The value range of `axis` is [-dims, dims - 1]. `dims` is the dimension length of this tensor.
  
          Args:
-             axis (int): The dimension to reduce. Default: 0.
-             keep_dims (bool): Whether to reduce dimension, if true the output will keep the same dimension as the input,
-                 the output will reduce dimension if false. Default: ``False``.
+             axis (int, optional): The dimension to reduce. Default: ``0``.
+             keep_dims (bool, optional): Whether to reduce dimension, if true the output will keep the same dimension
+                 as the input, the output will reduce dimension if false. Default: ``False``.
  
          Returns:
              tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the minimum value of the input
@@ -2383,23 +1922,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
              return (self, Tensor(0))
          return tensor_operator_registry.get('argmin_with_value')(self, axis, keep_dims)
  
-     def cumsum(self, axis=None, dtype=None):
-         """
-         For details, please refer to :func:`mindspore.ops.cumsum`.
-         """
-         x = self
-         original_dtype = x.dtype
-         # If original tensor is int, and has precision less then int32, convert to int32
-         if x.dtype in (mstype.bool_, mstype.int8, mstype.int16, mstype.uint8, mstype.int16):
-             x = x.astype(mstype.int32)
-         if axis is None:
-             x = x.ravel()
-             axis = 0
-         validator.check_axis_in_range(axis, x.ndim)
-         if dtype is not None and original_dtype != dtype:
-             return tensor_operator_registry.get('cumsum')()(x, axis).astype(dtype, copy=False)
-         return tensor_operator_registry.get('cumsum')()(x, axis)
- 
      def cummin(self, axis):
          r"""
          For details, please refer to :func:`mindspore.ops.cummin`.
@@ -2418,12 +1940,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          """
          return tensor_operator_registry.get('index_fill')(self, axis, index, value)
  
-     def index_select(self, axis, index):
-         """
-         For details, please refer to :func:`mindspore.ops.index_select`.
-         """
-         return tensor_operator_registry.get('index_select')(self, axis, index)
- 
      def inplace_update(self, v, indices):
          """
          For details, please refer to :func:`mindspore.ops.inplace_update`.
@@ -2497,158 +2013,77 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
2497
2013
  """
2498
2014
  return tensor_operator_registry.get("copy_")(self, src)
2499
2015
 
2500
- @max_mint
- def max(self, axis=None, keepdims=False, *, initial=None, where=True, return_indices=False):
+ def scatter_add_(self, dim, index, src):
  """
- Return the maximum of a tensor or maximum along an axis.
-
- Note:
- When `axis` is ``None``, `keepdims` and subsequent parameters
- have no effect. At the same time, the index is fixed to return 0.
-
- Args:
- axis (Union[None, int, list, tuple of ints], optional): Axis or
- axes along which to operate. By default, flattened input is used. If
- this is a tuple of ints, the maximum is selected over multiple axes,
- instead of a single axis or all the axes as before. Default: ``None`` .
- keepdims (bool, optional):
- If this is set to ``True`` , the axes which are reduced are left in the
- result as dimensions with size one. With this option, the result will
- broadcast correctly against the input array. Default: ``False`` .
-
- Keyword Args:
- initial (scalar, optional):
- The minimum value of an output element. Must be present to allow
- computation on empty slice. Default: ``None`` .
- where (bool Tensor, optional):
- A boolean tensor which is broadcasted to match the dimensions of array,
- and selects elements to include in the reduction. If non-default value
- is passed, initial must also be provided. Default: ``True`` .
- return_indices (bool, optional): Whether to return the index of the maximum value.
- Default: ``False`` . If `axis` is a list or tuple of ints, it must be ``False`` .
-
- Returns:
- Tensor or scalar, maximum of input tensor. If `axis` is ``None`` , the result is a scalar
- value. If `axis` is given, the result is a tensor of dimension ``self.ndim - 1``.
+ Add all elements in `src` to the index specified by `index` to `self` along dimension specified by `dim`,
+ `scatter_add` is an in-place operation.
+ The ranks of `self`, `index` and `src` must be greater or equal to 1.

- Raises:
- TypeError: If arguments have types not specified above.
+ For a 3-D tensor, the operation updates `self` as follows:

- See also:
- - :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
- - :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
- - :func:`mindspore.Tensor.min`: Return the minimum of a tensor or minimum along an axis.
+ .. code-block::

- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0

- Examples:
- >>> import numpy as np
- >>> from mindspore import Tensor
- >>> a = Tensor(np.arange(4).reshape((2, 2)).astype('float32'))
- >>> output = a.max()
- >>> print(output)
- 3.0
- >>> value, indices = a.max(axis=0, return_indices=True)
- >>> print(value)
- [2. 3.]
- >>> print(indices)
- [1 1]
- """
- if isinstance(axis, (list, tuple)):
- reduce_ = tensor_operator_registry.get("reduce")
- reduce_max = tensor_operator_registry.get("reduce_max")
- maximum = tensor_operator_registry.get("maximum")
- return reduce_(self, reduce_max(keepdims), cmp_fn=maximum, axis=axis, keepdims=keepdims,
- initial=initial, where=where)
- values, indices = tensor_operator_registry.get("max")(self, axis, keepdims, initial=initial, where=where)
- if not return_indices:
- return values
- return values, indices
-
- @min_mint
- def min(self, axis=None, keepdims=False, *, initial=None, where=True, return_indices=False):
- """
- Return the minimum of a tensor or minimum along an axis.
+ self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1

- Note:
- When `axis` is ``None``, `keepdims` and subsequent parameters
- have no effect. At the same time, the index is fixed to return 0.
+ self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2

  Args:
- axis (Union[None, int, list, tuple of ints], optional): An axis or
- axes along which to operate. By default, flattened input is used. If
- `axis` is a tuple of ints, the minimum is selected over multiple axes,
- instead of a single axis or all the axes as before. Default: ``None`` .
- keepdims (bool, optional):
- If ``True`` , the axes which are reduced are left in the
- result as dimensions with size one. With this option, the result will
- broadcast correctly against the input array. Default: ``False`` .
-
- Keyword Args:
- initial (scalar, optional):
- The minimum value of an output element. Must be present to allow
- computation on empty slice. Default: ``None`` .
- where (Tensor[bool], optional):
- A boolean tensor which is broadcasted to match the dimensions of array,
- and selects elements to include in the reduction. If non-default value
- is passed, initial must also be provided. Default: ``True`` .
- return_indices (bool, optional): Whether to return the index of the minimum value. Default: ``False`` .
- If `axis` is a list or tuple of ints, it must be ``False`` .
+ dim (int): Which dim to scatter. Accepted range is [-r, r) where r = rank(`self`).
+ index (Tensor): The index of `self` to do scatter operation whose data type must
+ be int32 or int64. Same rank as `self`. Except for the dimension
+ specified by `dim`, size of each dimension of `index` must be less than or equal to the size of
+ the corresponding dimension of `self`.
+ src (Tensor): The tensor doing the scatter operation with `self`, has the same type as `self` and
+ the size of each dimension must be greater than or equal to that of `index`.

  Returns:
- Tensor or scalar, minimum of input tensor. If `axis` is ``None`` , the result is a scalar
- value. If `axis` is given, the result is a tensor of dimension ``self.ndim - 1``.
+ Tensor, has the same shape and type as `self`.

  Raises:
- TypeError: If arguments have types not specified above.
-
- See also:
- - :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
- - :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
- - :func:`mindspore.Tensor.max`: Return the minimum of a tensor or minimum along an axis.
+ TypeError: If `index` is neither int32 nor int64.
+ ValueError: If anyone of the rank among `self`, `index` and `src` is less than 1.
+ ValueError: If the ranks of `self`, `index` and `src` are not the same.
+ ValueError: The size of any dimension of `index` except the dimension specified by `dim` is
+ greater than the size of the corresponding dimension of `self`.
+ ValueError: If the size of any dimension of `src` is less than that of `index`.

  Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ ``Ascend``

  Examples:
  >>> import numpy as np
+ >>> import mindspore as ms
  >>> from mindspore import Tensor
- >>> a = Tensor(np.arange(4).reshape((2, 2)).astype('float32'))
- >>> output = a.min()
- >>> print(output)
- 0.0
- >>> output = a.min(axis=0)
- >>> print(output)
- [0. 1.]
- >>> output = a.min(axis=0, initial=9, where=Tensor([False]))
- >>> print(output)
- [9. 9.]
- >>> output = a.min(axis=0, initial=9, where=Tensor([False, True]))
- >>> print(output)
- [9. 1.]
- >>> value, indices = a.min(axis=0, return_indices=True)
- >>> print(value)
- [0. 1.]
- >>> print(indices)
- [0 0]
- """
- if isinstance(axis, (list, tuple)):
- reduce_ = tensor_operator_registry.get("reduce")
- reduce_min = tensor_operator_registry.get("reduce_min")
- minimum = tensor_operator_registry.get("minimum")
- return reduce_(self, reduce_min(keepdims), cmp_fn=minimum, axis=axis, keepdims=keepdims,
- initial=initial, where=where)
- values, indices = tensor_operator_registry.get("min")(self, axis, keepdims, initial=initial, where=where)
- if not return_indices:
- return values
- return values, indices
-
- def scatter_add(self, indices, updates):
- """
- For details, please refer to :func:`mindspore.ops.scatter_add`.
+ >>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
+ >>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
+ >>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
+ >>> out = input.scatter_add_(1, index, src)
+ >>> print(out)
+ [[1. 2. 11. 4. 13.]]
+ >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
+ >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
+ >>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64)
+ >>> out = input.scatter_add_(0, index, src)
+ >>> print(out)
+ [[1. 2. 3. 0. 0.]
+ [0. 0. 0. 0. 0.]
+ [4. 5. 6. 0. 0.]
+ [0. 0. 0. 0. 0.]
+ [7. 8. 9. 0. 0.]]
+ >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
+ >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
+ >>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64)
+ >>> out = input.scatter_add_(1, index, src)
+ >>> print(out)
+ [[1. 0. 2. 0. 3.]
+ [4. 0. 5. 0. 6.]
+ [7. 0. 8. 0. 9.]
+ [0. 0. 0. 0. 0.]
+ [0. 0. 0. 0. 0.]]
  """
- return tensor_operator_registry.get("tensor_scatter_add")(self, indices, updates)
+ return tensor_operator_registry.get("inplace_scatter_add")(self, dim, index, src)

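For readers comparing the doctests above with the indexing rule, the following standalone NumPy sketch reproduces the `dim == 1` update for 2-D inputs (`self[i][index[i][j]] += src[i][j]`). It is an illustrative reference only, not the MindSpore kernel:

    # Reference sketch of the scatter_add_ rule for 2-D inputs with dim == 1.
    import numpy as np

    def scatter_add_ref(dest, index, src):
        out = dest.copy()
        for i in range(index.shape[0]):
            for j in range(index.shape[1]):
                out[i, index[i, j]] += src[i, j]  # self[i][index[i][j]] += src[i][j]
        return out

    x = np.array([[1., 2., 3., 4., 5.]])
    print(scatter_add_ref(x, np.array([[2, 4]]), np.array([[8., 8.]])))
    # [[ 1.  2. 11.  4. 13.]]  -- matches the first doctest above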
  def scatter_sub(self, indices, updates):
  """
@@ -2774,18 +2209,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  """
  return tensor_operator_registry.get('fill_diagonal')(fill_value, wrap)(self)

- def masked_fill(self, mask, value):
- """
- For details, please refer to :func:`mindspore.ops.masked_fill`.
- """
- if isinstance(value, (float, int)):
- value = tensor_operator_registry.get("scalar_to_tensor")(value, self.dtype)
- if not isinstance(mask, Tensor):
- raise TypeError("For 'Tensor.masked_fill', the type of the argument 'mask' must be Tensor, but "
- "got {}.".format(type(mask)))
- validator.check_type_name('mask', mask.dtype, [mstype.bool_], "Tensor")
- return tensor_operator_registry.get("masked_fill")(self, mask, value)
-
  def ptp(self, axis=None, keepdims=False):
  """
  The name of the function comes from the acronym for "peak to peak". Calculate the difference between the
@@ -2829,23 +2252,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

  return self.max(axis, keepdims) - self.min(axis, keepdims)

- def minimum(self, other):
- r"""
- For details, please refer to :func:`mindspore.ops.minimum`.
- """
- return tensor_operator_registry.get('minimum')(self, other)
-
- def clamp(self, min=None, max=None):
+ def clamp_(self, min=None, max=None):
  r"""
- For details, please refer to :func:`mindspore.ops.clamp`.
- """
- return tensor_operator_registry.get('clamp')(self, min, max)
+ In-place version of :func:`mindspore.Tensor.clamp`.

- def clip(self, min=None, max=None):
- r"""
- Alias for :func:`mindspore.Tensor.clamp`.
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
  """
- return self.clamp(min, max)
+ return tensor_operator_registry.get('clamp_')(self, min, max)

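A minimal usage sketch of the new in-place `clamp_` (assuming a backend that supports the in-place op; the out-of-place `clamp` remains available through `mindspore.ops.clamp`):

    import mindspore as ms
    from mindspore import Tensor

    x = Tensor([-1.0, 0.5, 2.0], ms.float32)
    x.clamp_(min=0.0, max=1.0)  # clamps x in place instead of returning a new tensor
    print(x)                    # [0.  0.5 1. ]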
  def init_data(self, slice_index=None, shape=None, opt_shard_group=None):
  """
@@ -3007,7 +2421,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

  def det(self):
  r"""
- For details, please refer to :func:`mindspore.ops.det`.
+ This interface is deprecated from version 2.4 and will be removed in a future version.
  """
  return tensor_operator_registry.get('det')(self)

@@ -3017,12 +2431,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  """
  return tensor_operator_registry.get('diff')(self, n, axis, prepend, append)

- def frac(self):
- r"""
- For details, please refer to :func:`mindspore.ops.frac`.
- """
- return tensor_operator_registry.get('frac')(self)
-
  def argwhere(self):
  r"""
  For details, please refer to :func:`mindspore.ops.argwhere`.
@@ -3049,13 +2457,15 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

  def lgamma(self):
  r"""
- For details, please refer to :func:`mindspore.ops.lgamma`.
+ This interface is deprecated from version 2.4 and will be removed in a future version.
  """
  return tensor_operator_registry.get('lgamma')(self)

  def diagonal(self, offset=0, axis1=0, axis2=1):
  """
  For details, please refer to :func:`mindspore.ops.diagonal`.
+ The parameter `axis1` of the current interface is the same as the parameter `dim1` of the reference interface,
+ the parameter `axis2` of the current interface is the same as the parameter `dim2` of the reference interface.
  """
  return tensor_operator_registry.get('diagonal')(self, offset, axis1, axis2)

@@ -3304,17 +2714,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  validator.check_value_type('indices', indices, (Tensor, Tensor_,), 'Tensor.gather_nd')
  return tensor_operator_registry.get('gather_nd')(self, indices)

- def gather(self, input_indices, axis, batch_dims=0):
- r"""
- For details, please refer to :func:`mindspore.ops.gather`.
- """
- validator.check_is_int(axis, 'axis')
- validator.check_is_int(batch_dims, "batch_dims")
- return tensor_operator_registry.get('gather')(self, input_indices, axis, batch_dims)
-
  def uniform(self, from_=0., to=1., generator=None):
  r"""
- Generates random numbers in the half-open interval [from\_, to).
+ Generates random numbers that follow a uniform distribution within the half-open interval :math:`[from\_, to)`.
+
+ .. math::
+ P(x)= \frac{1}{to - from\_}

  Args:
  from\_ (number): The lower bound of the interval.
@@ -3341,132 +2746,50 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  """
  return tensor_operator_registry.get('uniform')(self, from_, to, generator)

- def var(self, axis=None, ddof=0, keepdims=False):
- """
- Compute the variance along the specified axis.
-
- The variance is the average of the squared deviations from the mean, i.e.,
- :math:`var = mean(abs(x - x.mean())**2)`.
+ def uniform_(self, from_=0, to=1, *, generator=None):
+ r"""
+ Update the `self` tensor in place by generating random numbers sampled from uniform distribution in the
+ half-open interval :math:`[from\_, to)`.

- Return the variance, which is computed for the flattened array by default,
- otherwise over the specified axis.
+ .. math::
+ P(x)= \frac{1}{to - from\_}

- Note:
- Numpy arguments `dtype`, `out` and `where` are not supported.
+ .. warning::
+ This is an experimental API that is subject to change or deletion.

  Args:
- axis (Union[None, int, tuple(int)]): Axis or axes along which the variance is computed.
- The default is to compute the variance of the flattened array. Default: ``None`` .
- ddof (int): Means Delta Degrees of Freedom. Default: ``0`` .
- The divisor used in calculations is :math:`N - ddof`, where :math:`N` represents the number of elements.
- keepdims (bool): Default: ``False`` .
-
- Returns:
- Variance tensor.
-
- See also:
- - :func:`mindspore.Tensor.mean`: Reduce a dimension of a tensor by averaging all elements in the dimension.
- - :func:`mindspore.Tensor.std`: Compute the standard deviation along the specified axis.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import numpy as np
- >>> from mindspore import Tensor
- >>> input_x = Tensor(np.array([1., 2., 3., 4.], np.float32))
- >>> output = input_x.var()
- >>> print(output)
- 1.25
- """
- if 0 in self.shape:
- return Tensor(float('nan'), self.dtype)
- if not isinstance(ddof, int):
- raise TypeError("For 'Tensor.var', the type of the argument 'ddof' must be int, but got "
- "{}.".format(type(ddof)))
- if not isinstance(keepdims, bool):
- raise TypeError("For 'Tensor.var', the type of the argument 'keepdims' must be bool, but "
- "got {}.".format(type(keepdims)))
-
- if axis is None:
- axis = ()
- else:
- axis = validator.check_and_canonicalize_axes(axis, self.ndim)
- x_mean = tensor_operator_registry.get('mean')(self, axis, True)
- x_sub = tensor_operator_registry.get('__sub__')(self, x_mean)
- x_pow = tensor_operator_registry.get('__pow__')(x_sub, 2)
- x_sum = tensor_operator_registry.get('reducesum')(bool(keepdims))(x_pow, axis)
- nums = 1
- if axis == ():
- nums = self.size
- else:
- for ax in axis:
- nums *= self.shape[ax]
- return tensor_operator_registry.get('__truediv__')(x_sum, nums - ddof)
-
- def std(self, axis=None, ddof=0, keepdims=False):
- """
- For details, please refer to :func:`mindspore.ops.std`.
- """
- x_var = self.var(axis, ddof, keepdims)
- return tensor_operator_registry.get('__pow__')(x_var, 0.5)
-
- def sum(self, axis=None, dtype=None, keepdims=False, initial=None):
- """
- Return sum of tensor elements over a given axis.
+ from\_ (Union[number.Number, Tensor], optional): The lower bound of the uniform distribution, it can be a
+ scalar value or a tensor of any dimension with a single element. Default: ``0``.
+ to (Union[number.Number, Tensor], optional): The upper bound of the uniform distribution, it can be a
+ scalar value or a tensor of any dimension with a single element. Default: ``1``.

- Note:
- Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are not supported.
- The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
-
- Args:
- axis (Union[None, int, tuple(int), list(int), Tensor]): Axis or axes along which a sum is performed.
- Default: ``None`` .
- If ``None`` , sum all the elements of the input tensor.
- If the `axis` is negative, it counts from the last to the first `axis`.
- If the `axis` is a tuple or list of ints, a sum is performed on all the axes specified in the tuple
- or list instead of a single `axis` or all the axes as before.
- dtype (:class:`mindspore.dtype`, optional): defaults to ``None`` . Overrides the dtype of the
- output Tensor.
- keepdims (bool): If this is set to ``True`` , the axes which are reduced are left in the result as
- dimensions with size one. With this option, the result will broadcast correctly against the input
- array. If the default value is passed, then `keepdims` will not be passed through to the sum method
- of sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
- implement `keepdims` any exceptions will be raised. Default: ``False`` .
- initial (scalar): Starting value for the sum. Default: ``None`` .
+ Keyword Args:
+ generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
+ Default: ``None``, uses the default pseudorandom number generator.

  Returns:
- Tensor. A tensor with the same shape as input, with the specified `axis` removed.
- If the input tensor is a 0-d array, or if the `axis` is ``None`` , a scalar is returned.
+ Return `self` Tensor.

  Raises:
- TypeError: If input is not array_like, or `axis` is not int, tuple of ints, list of ints or Tensor,
- or `keepdims` is not integer, or `initial` is not scalar.
- ValueError: If any `axis` is out of range or duplicate axes exist.
-
- See also:
- - :func:`mindspore.Tensor.cumsum`: Return the cumulative sum of the elements along a given `axis`.
+ TypeError: If `from_` or `to` is neither a number nor a Tensor.
+ TypeError: If dtype of `from` or `to` is not one of: bool, int8, int16, int32, int64, uint8, float32,
+ float64.
+ ValueError: If `from_` or `to` is Tensor but contains multiple elements.
+ RuntimeError: If `from_` is larger than `to`.

  Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ ``Ascend``

  Examples:
- >>> import numpy as np
- >>> from mindspore import Tensor
- >>> input_x = Tensor(np.array([-1, 0, 1]).astype(np.float32))
- >>> print(input_x.sum())
- 0.0
- >>> input_x = Tensor(np.arange(10).reshape(2, 5).astype(np.float32))
- >>> print(input_x.sum(axis=1))
- [10. 35.]
- """
- if initial is None:
- res = tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype)
- else:
- res = tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype) + initial
- if dtype is not None and (dtype == mstype.bool_):
- res = res.astype(mstype.bool_)
- return res
+ >>> import mindspore
+ >>> x = mindspore.ops.ones((4, 2))
+ >>> generator = mindspore.Generator()
+ >>> generator.manual_seed(100)
+ >>> output = x.uniform_(1., 2., generator=generator)
+ >>> print(output.shape)
+ (4, 2)
+ """
+ return tensor_operator_registry.get('uniform_')(self, from_=from_, to=to, generator=generator)

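Beyond the doctest above, a quick sanity check that the in-place sampling respects the half-open interval (a sketch assuming an Ascend device, per the Supported Platforms note; the drawn values themselves are random):

    import mindspore as ms

    x = ms.ops.zeros((1000,), ms.float32)
    x.uniform_(2.0, 5.0)          # fills x in place
    print(float(x.min()) >= 2.0)  # True: the lower bound is inclusive
    print(float(x.max()) < 5.0)   # True: the upper bound is exclusive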
  def sum_to_size(self, *size):
  r"""
@@ -3512,12 +2835,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  return x.sum(tuple(axes), keepdims=True)
  return x

- def nansum(self, axis=None, keepdims=False, dtype=None):
- """
- For details, please refer to :func:`mindspore.ops.nansum`.
- """
- return tensor_operator_registry.get('nansum')(self, axis=axis, keepdims=keepdims, dtype=dtype)
-
  def nanmean(self, axis=None, keepdims=False, *, dtype=None):
  r"""
  For details, please refer to :func:`mindspore.ops.nanmean`.
@@ -3603,19 +2920,52 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  repeated_subs.append(tensor_operator_registry.get('repeat_elements')(sub, rep, axis))
  return tensor_operator_registry.get('concatenate')(repeated_subs, axis)

- @repeat_interleave_mint
- def repeat_interleave(self, repeats, dim=None):
- """
- For details, please refer to :func:`mindspore.ops.repeat_interleave`.
- """
- return tensor_operator_registry.get('repeat_interleave')(self, repeats, dim)
-
  def bernoulli(self, p=0.5, seed=None):
  r"""
  For details, please refer to :func:`mindspore.ops.bernoulli`.
  """
  return tensor_operator_registry.get('bernoulli')(self, p, seed)

+ def random_(self, from_=0, to=None, *, generator=None):
+ r"""
+ Fill the tensor with numbers sampled from a discrete uniform distribution over an
+ interval :math:`[from\_, to-1]`.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ from\_ (Union[number.Number, Tensor], optional): the lower bound of the generated random number.
+ It can be a scalar value or a tensor of any dimension with only a single element. Default: 0.
+ to (Union[number.Number, Tensor], optional): the upper bound of the generated random number.
+ By default it's the upper limit of the input data type.
+ It can be a scalar value or a tensor of any dimension with only a single element.
+ Default: ``None``.
+
+ Keyword Args:
+ generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
+ Default: ``None``, uses the default pseudorandom number generator.
+
+ Returns:
+ The input tensor.
+
+ Raises:
+ TypeError: If `from_` or `to` is not integer.
+ RuntimeError: If `from_` >= `to`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> from mindspore import Tensor
+ >>> a = Tensor([[2, 3, 4], [1, 2, 3]])
+ >>> from_ = 0
+ >>> to = 5
+ >>> print(a.random_(from_, to).shape)
+ (2, 3)
+ """
+ return tensor_operator_registry.get('random_')(self, from_=from_, to=to, generator=generator)
+
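A usage note for `random_`: when `to` is omitted, the upper bound falls back to the maximum of the tensor's dtype, so passing an explicit range is usually clearer. A short sketch (assuming an Ascend device, per the Supported Platforms note):

    from mindspore import Tensor

    a = Tensor([[0, 0, 0], [0, 0, 0]])
    a.random_(0, 10)  # each entry drawn uniformly from {0, ..., 9}, in place
    print(a.shape)    # (2, 3); the dtype is unchanged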
  def random_categorical(self, num_sample, seed=0, dtype=mstype.int64):
  r"""
  For details, please refer to :func:`mindspore.ops.random_categorical`.
@@ -3624,12 +2974,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  validator.check_is_int(seed, 'seed')
  return tensor_operator_registry.get('random_categorical')(self, num_sample, seed, dtype)

- def masked_select(self, mask):
- """
- For details, please refer to :func:`mindspore.ops.masked_select`.
- """
- return tensor_operator_registry.get('masked_select')(self, mask)
-
  def gather_elements(self, dim, index):
  """
  For details, please refer to :func:`mindspore.ops.gather_elements`.
@@ -3637,28 +2981,75 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  validator.check_value_type('index', index, (Tensor, Tensor_,), 'Tensor.gather_elements')
  return tensor_operator_registry.get('gather_elements')(self, dim, index)

- def nonzero(self, as_tuple=False):
- """
- For details, please refer to :func:`mindspore.ops.nonzero`.
- """
- return tensor_operator_registry.get('nonzero')(self, as_tuple)
+ def nonzero(self, *, as_tuple=False):
+ r"""
+ Return the positions of all non-zero values.

- def svd(self, full_matrices=False, compute_uv=True):
- """
- For details, please refer to :func:`mindspore.ops.svd`.
- """
- svd_op = tensor_operator_registry.get("svd")
- if compute_uv:
- return svd_op(full_matrices, compute_uv)(self)
+ Note:
+ The rank of `self`:

- s, _, _ = svd_op(full_matrices, compute_uv)(self)
- return s
+ - Ascend: its rank can be equal to 0 except O2 mode.
+ - CPU/GPU: its rank should be greater than or equal to 1.

- def hardshrink(self, lambd=0.5):
- r"""
- For details, please refer to :func:`mindspore.ops.hardshrink`.
+ Keyword Args:
+ as_tuple (bool, optional): Whether the output is tuple.
+ If ``False`` , return Tensor. Default: ``False`` .
+ If ``True`` , return Tuple of Tensor, only support ``Ascend`` .
+
+ Returns:
+ - If `as_tuple` is ``False``, return the Tensor, a 2-D Tensor whose data type is int64,
+ containing the positions of all non-zero values of the `self` .
+ - If `as_tuple` is ``True``, return the Tuple of Tensor and data type is int64.
+ The Tuple length is the dimension of the `self` tensor,
+ and each element is the 1D tensor of the subscript of all non-zero elements of
+ the `self` tensor in that dimension.
+
+ Raises:
+ TypeError: If `self` is not Tensor.
+ TypeError: If `as_tuple` is not bool.
+ RuntimeError: On GPU or CPU or Ascend O2 mode, if dim of `input` equals to 0.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
+ >>> output = x.nonzero()
+ >>> print(output)
+ [[0 0 0]
+ [0 1 0]]
+ >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
+ >>> output = x.nonzero(as_tuple=False)
+ >>> print(output)
+ [[0]
+ [2]
+ [4]]
+ >>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
+ >>> output = x.nonzero(as_tuple=True)
+ >>> print(output)
+ (Tensor(shape=[2], dtype=Int64, value=[0, 0]),
+ Tensor(shape=[2], dtype=Int64, value=[0, 1]),
+ Tensor(shape=[2], dtype=Int64, value=[0, 0]))
+ >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
+ >>> output = x.nonzero(as_tuple=True)
+ >>> print(output)
+ (Tensor(shape=[3], dtype=Int64, value=[0, 2, 4]), )
+ """
+ return tensor_operator_registry.get('nonzero')(self, as_tuple=as_tuple)
+
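The two output layouts documented above mirror the NumPy pair `argwhere`/`nonzero`: the tuple form is effectively the transpose of the 2-D positions form. A NumPy analogy, illustrative only:

    import numpy as np

    x = np.array([1, 0, 2, 0, 3])
    print(np.argwhere(x))  # [[0] [2] [4]]        -- like as_tuple=False
    print(np.nonzero(x))   # (array([0, 2, 4]),)  -- like as_tuple=True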
+ def svd(self, full_matrices=False, compute_uv=True):
+ """
+ For details, please refer to :func:`mindspore.ops.svd`.
  """
- return tensor_operator_registry.get('hardshrink')(self, lambd)
+ svd_op = tensor_operator_registry.get("svd")
+ if compute_uv:
+ return svd_op(full_matrices, compute_uv)(self)
+
+ s, _, _ = svd_op(full_matrices, compute_uv)(self)
+ return s

  def heaviside(self, values):
  r"""
@@ -3777,12 +3168,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  """
  return self.asnumpy().tolist()

- def unbind(self, dim=0):
- r"""
- For details, please refer to :func:`mindspore.ops.unbind`.
- """
- return tensor_operator_registry.get('unbind')(self, dim)
-
  def unsorted_segment_min(self, segment_ids, num_segments):
  r"""
  For details, please refer to :func:`mindspore.ops.unsorted_segment_min`.
@@ -3838,13 +3223,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  """
  return tensor_operator_registry.get("xdivy")(self, y)

- @split_mint
- def split(self, split_size_or_sections, axis=0):
- """
- For details, please refer to :func:`mindspore.ops.split`.
- """
- return tensor_operator_registry.get('split')(self, split_size_or_sections, axis)
-
  def tensor_split(self, indices_or_sections, axis=0):
  """
  For details, please refer to :func:`mindspore.ops.tensor_split`.
@@ -3873,6 +3251,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  def xlogy(self, y):
  r"""
  For details, please refer to :func:`mindspore.ops.xlogy`.
+ The parameter `y` of the current interface is the same as the parameter `other` of the reference interface.
  """
  return tensor_operator_registry.get("xlogy")(self, y)

@@ -3885,30 +3264,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  """
  return tensor_operator_registry.get("eigvals")()(self)

- def erf(self):
- r"""
- For details, please refer to :func:`mindspore.ops.erf`.
- """
- return tensor_operator_registry.get("erf")(self)
-
- def erfc(self):
- r"""
- For details, please refer to :func:`mindspore.ops.erfc`.
- """
- return tensor_operator_registry.get("erfc")(self)
-
- def tile(self, reps):
- r"""
- For details, please refer to :func:`mindspore.ops.tile`.
- """
- return tensor_operator_registry.get('tile')(self, reps)
-
- def topk(self, k, dim=None, largest=True, sorted=True):
- r"""
- For details, please refer to :func:`mindspore.ops.topk`.
- """
- return tensor_operator_registry.get("topk")(self, k, dim, largest, sorted)
-
  def top_k(self, k, sorted=True):
  r"""
  `Tensor.top_k` is deprecated, please use `Tensor.topk` instead.
@@ -3917,55 +3272,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  validator.check_bool(sorted, 'sorted')
  return tensor_operator_registry.get("top_k")(self, k, sorted)

- def sigmoid(self):
- r"""
- For details, please refer to :func:`mindspore.ops.sigmoid`.
- """
- return tensor_operator_registry.get("sigmoid")(self)
-
- def median(self, axis=-1, keepdims=False):
- r"""
- For details, please refer to :func:`mindspore.ops.median`.
- """
- validator.check_axis_in_range(axis, self.ndim)
- return tensor_operator_registry.get('median')(False, axis, keepdims)(self)
-
- def addmv(self, mat, vec, beta=1, alpha=1):
- r"""
- For details, please refer to :func:`mindspore.ops.addmv`.
- """
- return tensor_operator_registry.get('addmv')(self, mat, vec, beta=beta, alpha=alpha)
-
- def asinh(self):
- r"""
- For details, please refer to :func:`mindspore.ops.asinh`.
- """
- return tensor_operator_registry.get('asinh')(self)
-
- def arcsinh(self):
- r"""
- Alias for :func:`mindspore.Tensor.asinh`.
- """
- return tensor_operator_registry.get('arcsinh')(self)
-
- def atan(self):
- r"""
- For details, please refer to :func:`mindspore.ops.atan`.
- """
- return tensor_operator_registry.get('atan')(self)
-
- def atanh(self):
- r"""
- For details, please refer to :func:`mindspore.ops.atanh`.
- """
- return tensor_operator_registry.get('atanh')(self)
-
- def arctanh(self):
- r"""
- Alias for :func:`mindspore.Tensor.atanh`.
- """
- return tensor_operator_registry.get('arctanh')(self)
-
  def bmm(self, mat2):
  r"""
  For details, please refer to :func:`mindspore.ops.bmm`.
@@ -3976,8 +3282,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  r"""
  Performs tensor dtype conversion.

+ Note:
+ - If the `self` Tensor already has the correct `mindspore.dtype`, then self is returned.
+ Otherwise, the returned tensor is a copy of `self` with the desired mindspore.dtype.
+ - When converting complex numbers to boolean type, the imaginary part of the complex number is not
+ taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.
+
  Args:
- dtype (Number): The valid data type of the output tensor. Only constant value is allowed.
+ dtype (dtype.Number): The valid data type of the output tensor. Only constant value is allowed.

  Returns:
  Tensor, converted to the specified `dtype`.
@@ -3999,7 +3311,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  >>> print(output.dtype)
  Int32
  """
- return tensor_operator_registry.get('to')(self, dtype)
+ return self if self.dtype == dtype else self._to(dtype)

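The rewritten `to` short-circuits when the target dtype already matches, returning `self` with no copy; only a genuine conversion allocates a new tensor. A small sketch of the observable behavior:

    import mindspore as ms
    from mindspore import Tensor

    x = Tensor([1.0, 2.0], ms.float32)
    print(x.to(ms.float32) is x)  # True: same object, no copy is made
    print(x.to(ms.int32).dtype)   # Int32: a converted copy otherwise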
  def type(self, dtype=None):
  r"""
@@ -4029,29 +3341,49 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  return str(self.dtype)
  return self.astype(dtype)

+
  def type_as(self, other):
  r"""
- Change the dtype of the Tensor to the dtype of `other`.
+ Returns self tensor cast to the type of the given tensor `other`.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Note:
+ When converting complex numbers to boolean type, the imaginary part of the complex number is not
+ taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.

  Args:
- other (Tensor): The return tensor has the same dtype as `other`.
+ other (Tensor): The tensor whose data type is specified.
+ The shape of tensor is :math:`(x_0, x_1, ..., x_R)`.

  Returns:
- Tensor, has the same dtype as `other`.
+ Tensor, the shape of tensor is the same as `self`, :math:`(x_0, x_1, ..., x_R)`.
+
+ Raises:
+ TypeError: If `other` is not a Tensor.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> import mindspore
+ >>> import numpy as np
  >>> from mindspore import Tensor
- >>> x = Tensor([[1, 2], [3, 4]], dtype=mindspore.float32)
- >>> y = Tensor([[1, 2], [3, 4]], dtype=mindspore.int32)
- >>> x = x.type_as(y)
- >>> print(x.dtype)
+ >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
+ >>> self = Tensor(input_np)
+ >>> other_np = np.random.randn(2, 3, 4).astype(np.int32)
+ >>> other = Tensor(other_np)
+ >>> output = self.type_as(other)
+ >>> print(output.dtype)
  Int32
+ >>> print(output.shape)
+ (2, 3, 4, 5)
  """
- return self.astype(other.dtype)
+ if self.dtype == other.dtype:
+ return self
+ return Tensor_.type_as(self, other)
+

  def bool(self):
  r"""
@@ -4073,7 +3405,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  >>> print(output.dtype)
  Bool
  """
- return tensor_operator_registry.get('bool')(self, mstype.bool_)
+ return self.to(mstype.bool_)

  def float(self):
  r"""
@@ -4094,7 +3426,49 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  >>> print(output.dtype)
  Float32
  """
- return tensor_operator_registry.get('float')(self, mstype.float32)
+ return self.to(mstype.float32)
+
+ def bfloat16(self):
+ r"""
+ Converts input tensor dtype to `bfloat16`.
+
+ Returns:
+ Tensor, converted to the `bfloat16` dtype.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import numpy as np
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> input_x = Tensor(np.ones([2,2]), mindspore.int32)
+ >>> output = input_x.bfloat16()
+ >>> print(output.dtype)
+ BFloat16
+ """
+ return self.to(mstype.bfloat16)
+
+ def double(self):
+ r"""
+ Converts input tensor dtype to `float64`.
+
+ Returns:
+ Tensor, converted to the `float64` dtype.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import numpy as np
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> input_x = Tensor(np.ones([2,2]), mindspore.int32)
+ >>> output = input_x.double()
+ >>> print(output.dtype)
+ Float64
+ """
+ return self.to(mstype.float64)

  def half(self):
  r"""
@@ -4115,7 +3489,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  >>> print(output.dtype)
  Float16
  """
- return tensor_operator_registry.get('half')(self, mstype.float16)
+ return self.to(mstype.float16)

  def int(self):
  r"""
@@ -4136,7 +3510,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  >>> print(output.dtype)
  Int32
  """
- return tensor_operator_registry.get('int')(self, mstype.int32)
+ return self.to(mstype.int32)

  def byte(self):
  r"""
@@ -4155,9 +3529,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  >>> input_x = Tensor(np.ones([2,2]), mindspore.float32)
  >>> output = input_x.byte()
  >>> print(output.dtype)
- uint8
+ UInt8
  """
- return tensor_operator_registry.get('byte')(self, mstype.uint8)
+ return self.to(mstype.uint8)

  def long(self):
  r"""
@@ -4178,7 +3552,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  >>> print(output.dtype)
  Int64
  """
- return tensor_operator_registry.get('long')(self, mstype.int64)
+ return self.to(mstype.int64)

  def short(self):
  r"""
@@ -4200,7 +3574,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  >>> output
  Tensor(shape=[5], dtype=Int16, value= [1, 2, 3, 4, 5])
  """
- return tensor_operator_registry.get('cast')(self, mstype.int16)
+ return self.to(mstype.int16)

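All of the conversion helpers in this hunk (`bool`, `float`, `bfloat16`, `double`, `half`, `int`, `byte`, `long`, `short`) are now thin wrappers over `Tensor.to`, so they inherit its no-copy fast path. A short sketch:

    import mindspore as ms
    from mindspore import Tensor

    x = Tensor([1, 2, 3], ms.float32)
    print(x.half().dtype)  # Float16, same as x.to(ms.float16).dtype
    print(x.long().dtype)  # Int64
    print(x.byte().dtype)  # UInt8
    print(x.float() is x)  # True: already float32, so no conversion happens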
  def cholesky(self, upper=False):
  r"""
@@ -4210,7 +3584,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

  def cholesky_inverse(self, upper=False):
  r"""
- For details, please refer to :func:`mindspore.ops.cholesky_inverse`.
+ This interface is deprecated from version 2.4 and will be removed in a future version.
  """
  return tensor_operator_registry.get('cholesky_inverse')(self, upper=upper)

@@ -4247,11 +3621,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  """
  return tensor_operator_registry.get('erfinv')(self)

- def less_equal(self, other):
+ def erfinv_(self):
  r"""
- For details, please refer to :func:`mindspore.ops.less_equal`.
+ In-place version of erfinv(), for details, please refer to :func:`mindspore.ops.erfinv`.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
  """
- return tensor_operator_registry.get('less_equal')(self, other)
+ return tensor_operator_registry.get('erfinv_')(self)

  def lcm(self, other):
  r"""
@@ -4284,6 +3661,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  def expand(self, size):
  r"""
  For details, please refer to :func:`mindspore.ops.broadcast_to`.
+ The parameter `size` of the current interface is the same as the parameter `shape` of the reference interface.
  """
  if isinstance(size, Tensor):
  size = tensor_operator_registry.get('tensortotuple')()(size)
@@ -4297,59 +3675,90 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

  def multiply(self, value):
  r"""
- For details, please refer to :func:`mindspore.ops.multiply`.
+ For details, please refer to :func:`mindspore.ops.mul`.
+ The parameter `value` of the current interface is the same as the parameter `other` of the reference interface.
  """
  return tensor_operator_registry.get('multiply')(self, value)

- def div(self, value, *, rounding_mode=None):
- r"""
- For details, please refer to :func:`mindspore.ops.div`.
- """
- return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)
-
- def divide(self, value, *, rounding_mode=None):
- r"""
- Alias for :func:`mindspore.Tensor.div`.
- """
- return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)
-
- def eq(self, other):
- r"""
- For details, please refer to :func:`mindspore.ops.eq`.
- """
- return tensor_operator_registry.get('equal')(self, other)
-
  def equal(self, other):
  r"""
  For details, please refer to :func:`mindspore.ops.equal`.
  """
  return tensor_operator_registry.get('equal')(self, other)

- def expm1(self):
- r"""
- For details, please refer to :func:`mindspore.ops.expm1`.
- """
- return tensor_operator_registry.get('expm1')(self)
-
  def index_add(self, dim, index, source, *, alpha=1):
  r"""
  For details, please refer to :func:`mindspore.ops.index_add`.
+ The corresponding relationships between the parameters of `Tensor.index_add` and :func:`mindspore.ops.index_add`
+ are as follows: `dim` -> `axis`, `index` -> `indices`, `source * alpha` -> `y`.
  """
  check_is_number(alpha, (int, float))
  source = tensor_operator_registry.get('__mul__')(source, alpha)
  return tensor_operator_registry.get('index_add')(self, indices=index, y=source, axis=dim)

- def greater(self, other):
+ def index_add_(self, dim, index, source, *, alpha=1):
  r"""
- For details, please refer to :func:`mindspore.ops.greater`.
- """
- return tensor_operator_registry.get('greater')(self, other)
+ Accumulate the elements of `alpha` times `source` into the `self` by adding to the index
+ in the order given in `index`. For example, if `dim == 0`, `index[i] == j`, and `alpha = -1`,
+ then the `i` th row of `source` is subtracted from the `j` th row of `self` .
+ The `dim` th dimension of `source` must have the same size as the length of `index` ,
+ and all other dimensions must match `self`, or an error will be raised.
+ For a 3-D tensor the output is defined as follows:

- def greater_equal(self, other):
- r"""
- For details, please refer to :func:`mindspore.ops.greater_equal`.
+ .. math::
+ \begin{array}{ll}
+ self[index[i],\ :,\ :]\ +=\ alpha * src[i,\ :,\ :] \qquad \#if\ dim == 0 \\
+ self[:,\ \ index[i],\ :]\ +=\ alpha * src[:,\ \ i,\ :] \qquad \#if\ dim == 1 \\
+ self[:,\ :,\ \ index[i]]\ +=\ alpha * src[:,\ :,\ \ i] \qquad\#if\ dim == 2 \\
+ \end{array}
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ dim (int): The dimension along which to index.
+ index (Tensor): Add the value of `self` and `source` along the dimension of the `dim` according to
+ the specified index value, with data type int32. The `index` must be 1D with the same size as
+ the size of `source` in the `dim` dimension. The values of `index` should be in [0, b),
+ where the b is the size of `self` in the `dim` dimension.
+ source (Tensor): The input tensor with the value to add. Must have same data type as `self`.
+ The shape must be the same as `self` except the `dim` th dimension.
+
+ Keyword Args:
+ alpha (number, optional): The scalar multiplier for source. Default: ``1``.
+
+ Returns:
+ Tensor, has the same shape and dtype as `self`.
+
+ Raises:
+ TypeError: If neither `index` nor `source` is a Tensor.
+ ValueError: If dim is out of `self` rank's range.
+ ValueError: If `self` rank is not the same as `source` rank.
+ ValueError: If shape of `index` is not 1D or size of `index` is not equal to dimension
+ of `source[dim]`.
+ ValueError: If `source`'s shape is not the same as `self` except the `dim` th dimension.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
+ >>> index = Tensor(np.array([0, 2]), mindspore.int32)
+ >>> y = Tensor(np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]]), mindspore.float32)
+ >>> output = x.index_add_(1, index, y, alpha=1)
+ >>> print(output)
+ [[ 1.5 2. 4. ]
+ [ 5. 5. 7.5]
+ [ 9. 8. 11.5]]
+ >>> print(x)
+ [[ 1.5 2. 4. ]
+ [ 5. 5. 7.5]
+ [ 9. 8. 11.5]]
  """
- return tensor_operator_registry.get('greater_equal')(self, other)
+ return tensor_operator_registry.get('index_add_')(self, dim, index, source, alpha)

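A standalone NumPy sketch of the `dim == 1` rule from the formula above (`self[:, index[i]] += alpha * source[:, i]`), reproducing the doctest's result; illustrative only, not the MindSpore kernel:

    import numpy as np

    x = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
    index = np.array([0, 2])
    y = np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]])
    for i, j in enumerate(index):
        x[:, j] += 1 * y[:, i]  # alpha = 1
    print(x)
    # [[ 1.5  2.   4. ]
    #  [ 5.   5.   7.5]
    #  [ 9.   8.  11.5]]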
  def igamma(self, other):
  r"""
@@ -4363,18 +3772,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  """
  return tensor_operator_registry.get('igammac')(self, other)

- def isinf(self):
- r"""
- For details, please refer to :func:`mindspore.ops.isinf`.
- """
- return tensor_operator_registry.get('isinf')(self)
-
- @isnan_mint
  def isnan(self):
  r"""
- For details, please refer to :func:`mindspore.ops.isnan`.
+ For details, please refer to :func:`mindspore.ops.ne`.
  """
- return tensor_operator_registry.get('isnan')(self)
+ return self.ne(self)

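The rewritten `isnan` leans on an IEEE-754 identity: NaN is the only value that compares unequal to itself, so `self.ne(self)` is true exactly at the NaN positions. In plain Python:

    import math

    print(math.nan != math.nan)  # True: NaN never equals itself
    print(1.0 != 1.0)            # False: every other value equals itself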
  def flip(self, dims):
  """
@@ -4423,42 +3825,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  """
  return self.dtype in mstype.signed_type

- def le(self, other):
- r"""
- For details, please refer to :func:`mindspore.ops.le`.
- """
- return tensor_operator_registry.get('le')(self, other)
-
- def less(self, other):
- r"""
- For details, please refer to :func:`mindspore.ops.less`.
- """
- return tensor_operator_registry.get('less')(self, other)
-
- def lt(self, other):
- """
- Alias for :func:`mindspore.Tensor.less`.
- """
- return self.less(other)
-
- def logical_and(self, other):
- r"""
- For details, please refer to :func:`mindspore.ops.logical_and`.
- """
- return tensor_operator_registry.get('logical_and')(self, other)
-
- def logical_not(self):
- r"""
- For details, please refer to :func:`mindspore.ops.logical_not`.
- """
- return tensor_operator_registry.get('logical_not')(self)
-
- def logical_or(self, other):
- r"""
- For details, please refer to :func:`mindspore.ops.logical_or`.
- """
- return tensor_operator_registry.get('logical_or')(self, other)
-
  def logical_xor(self, other):
  r"""
  For details, please refer to :func:`mindspore.ops.logical_xor`.
@@ -4467,7 +3833,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

  def lstsq(self, A):
  r"""
- For details, please refer to :func:`mindspore.ops.lstsq`.
+ This interface is deprecated from version 2.4 and will be removed in a future version.
  """
  return tensor_operator_registry.get('lstsq')(self, A)

@@ -4476,6 +3842,15 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  r"""
  Accessing this property is equivalent to calling self.adjoint().
  For details, please refer to :func:`mindspore.ops.adjoint`.
+
+ Examples:
+ >>> from mindspore import Tensor
+ >>> import numpy as np
+ >>> x = Tensor(np.array([[0. + 0.j, 1. + 1.j], [2. + 2.j, 3. + 3.j]]))
+ >>> output = x.mH
+ >>> print(output)
+ [[0.-0.j 2.-2.j]
+ [1.-1.j 3.-3.j]]
  """
  return self.adjoint()

@@ -4485,6 +3860,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  Returns the Tensor that exchanges the last two dimensions.
  Accessing the attribute, x.mT, is equal to calling the method, x.swapaxes(-2, -1).
  For details, please refer to :func:`mindspore.Tensor.swapaxes`.
+
+ Examples:
+ >>> from mindspore import Tensor
+ >>> import numpy as np
+ >>> x = Tensor(np.ones((2, 3, 4)))
+ >>> output = x.mT
+ >>> print(output.shape)
+ (2, 4, 3)
  """
  return self.swapaxes(-2, -1)

@@ -4494,12 +3877,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
4494
3877
  """
4495
3878
  return tensor_operator_registry.get('mvlgamma')(self, p)
4496
3879
 
4497
- def matmul(self, tensor2):
4498
- r"""
4499
- For details, please refer to :func:`mindspore.ops.matmul`.
4500
- """
4501
- return tensor_operator_registry.get('matmul')(self, tensor2)
4502
-
4503
3880
  def inner(self, other):
4504
3881
  r"""
4505
3882
  For details, please refer to :func:`mindspore.ops.inner`.
@@ -4514,95 +3891,16 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
4514
3891
 
4515
3892
  def matrix_power(self, n):
4516
3893
  r"""
4517
- For details, please refer to :func:`mindspore.ops.matrix_power`.
4518
-
4519
- .. warning::
4520
- This is an experimental API that is subject to change or deletion.
4521
-
3894
+ This interface is deprecated from version 2.4 and will be removed in a future version.
4522
3895
  """
4523
3896
  return tensor_operator_registry.get('matrix_power')(self, n)
4524
3897
 
4525
- def maximum(self, other):
4526
- r"""
4527
- For details, please refer to :func:`mindspore.ops.maximum`.
4528
- """
4529
- return tensor_operator_registry.get('maximum')(self, other)
4530
-
4531
- def mm(self, mat2):
4532
- r"""
4533
- For details, please refer to :func:`mindspore.ops.mm`.
4534
- """
4535
- return tensor_operator_registry.get('mm')(self, mat2)
4536
-
4537
3898
  def msort(self):
4538
3899
  r"""
4539
3900
  For details, please refer to :func:`mindspore.ops.msort`.
4540
3901
  """
4541
3902
  return tensor_operator_registry.get('msort')(self)
4542
3903
 
4543
- def mul(self, value):
4544
- r"""
4545
- For details, please refer to :func:`mindspore.ops.mul`.
4546
- """
4547
- return tensor_operator_registry.get('mul')(self, value)
4548
-
4549
- def nan_to_num(self, nan=None, posinf=None, neginf=None):
4550
- """
4551
- For details, please refer to :func:`mindspore.ops.nan_to_num`.
4552
- """
4553
- return tensor_operator_registry.get('nan_to_num')(self, nan, posinf, neginf)
4554
-
4555
- def neg(self):
4556
- r"""
4557
- For details, please refer to :func:`mindspore.ops.neg`.
4558
- """
4559
- return tensor_operator_registry.get('neg')(self)
4560
-
4561
- def ne(self, other):
4562
- r"""
4563
- For details, please refer to :func:`mindspore.ops.ne`.
4564
- """
4565
- return tensor_operator_registry.get('ne')(self, other)
4566
-
4567
- def not_equal(self, other):
4568
- r"""
4569
- For details, please refer to :func:`mindspore.ops.not_equal`.
4570
- """
4571
- return tensor_operator_registry.get('not_equal')(self, other)
4572
-
4573
- def new_zeros(self, size, dtype=None):
4574
- r"""
4575
- Return a tensor of `size` filled with zeros.
4576
-
4577
- .. warning::
4578
- For argument `size`, Tensor type input will be deprecated in the future version.
4579
-
4580
- Args:
4581
- size (Union[int, tuple, list, Tensor]): An int, list or tuple of integers defining the output shape.
4582
- dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned tensor has
4583
- thesame dtype as `self`. Default: ``None``.
4584
-
4585
- Returns:
4586
- Tensor, the shape and dtype is defined above and filled with zeros.
4587
-
4588
- Raises:
4589
- TypeError: If `size` is neither an int nor an tuple/list/Tensor of int.
4590
-
4591
- Supported Platforms:
4592
- ``Ascend`` ``GPU`` ``CPU``
4593
-
4594
- Examples:
4595
- >>> import numpy as np
4596
- >>> import mindspore
4597
- >>> from mindspore import Tensor
4598
- >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
4599
- >>> output = x.new_zeros((2, 2))
4600
- >>> print(output)
4601
- [[0. 0.]
4602
- [0. 0.]]
4603
- """
4604
- return tensor_operator_registry.get('zeros')(size, dtype)
4605
-
4606
3904
  def zero_(self):
4607
3905
  r"""
4608
3906
  Return a tensor filled with zeros.
@@ -4623,43 +3921,52 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
4623
3921
  >>> x = Tensor(np.array([2, 2]))
4624
3922
  >>> output = x.zero_()
4625
3923
  >>> print(output)
4626
- [[0. 0.]
4627
- [0. 0.]]
3924
+ [0 0]
4628
3925
  """
4629
3926
  return tensor_operator_registry.get('zero_')(self)
4630
3927
 
 
-    def new_ones(self, size, dtype=None):
+    def new_empty(self, size, *, dtype=None, device=None):
         r"""
-        Return a tensor of `size` filled with ones.
+        Returns an uninitialized Tensor of `size`. Its dtype is specified by `dtype` and its
+        device is specified by `device`.
 
         .. warning::
-            For argument `size`, Tensor type input will be deprecated in the future version.
+            This is an experimental API that is subject to change or deletion.
 
         Args:
-            size (Union[int, tuple, list, Tensor]): An int, list or tuple of integers defining the output shape.
-            dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned
-                tensor has the same dtype as `self`. Default: ``None``.
+            size (Union[tuple[int], list[int], int]): The specified shape of the output tensor. Only a positive
+                integer, or a tuple or list of positive integers, is allowed.
+
+        Keyword Args:
+            dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype = None`,
+                the tensor will have the same dtype as `self`. Default ``None``.
+            device (string, optional): The specified device of the output tensor. Supports ``CPU`` and ``Ascend``. If
+                `device = None`, the tensor will have the same device as `self`; if the device of `self` is not
+                defined, the value set by :func:`mindspore.set_device` will be used. Default ``None``.
 
         Returns:
-            Tensor, the shape and dtype is defined above and filled with ones.
+            Tensor, whose shape, dtype and device are defined above but whose data is uninitialized (it may hold
+            arbitrary values).
 
         Raises:
-            TypeError: If `size` is neither an int nor a tuple/list/Tensor of int.
+            TypeError: If `size` is neither an int nor a tuple or list of int.
 
         Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
+            ``Ascend``
 
         Examples:
-            >>> import numpy as np
             >>> import mindspore
             >>> from mindspore import Tensor
-            >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
-            >>> output = x.new_ones((2, 2))
-            >>> print(output)
-            [[1. 1.]
-             [1. 1.]]
+            >>> x = Tensor([[1, 2, 3], [4, 5, 6]])
+            >>> output1 = x.new_empty((2, 3))
+            >>> print(output1)
+            [[0 0 0]
+             [0 0 0]]
+            >>> output2 = x.new_empty((2, 3), dtype=mindspore.float64)
+            >>> print(output2)
+            [[0. 0. 0.]
+             [0. 0. 0.]]
         """
-        return tensor_operator_registry.get('ones')(size, dtype)
+        return tensor_operator_registry.get('new_empty')(self, size, dtype, device)
 
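Since `new_empty` returns uninitialized memory, the doctest values above are illustrative only and real output is arbitrary. A hedged usage sketch that initializes the buffer before reading it:

>>> import mindspore
>>> from mindspore import Tensor
>>> x = Tensor([[1, 2, 3], [4, 5, 6]])
>>> y = x.new_empty((2, 3), dtype=mindspore.float32)   # contents are arbitrary here
>>> y = y.zero_()                                      # write before reading
>>> print(y)
[[0. 0. 0.]
 [0. 0. 0.]]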
     def sign(self):
         r"""
@@ -4679,48 +3986,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('sgn')(self)
 
-    def sin(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.sin`.
-        """
-        return tensor_operator_registry.get('sin')(self)
-
-    def sinc(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.sinc`.
-        """
-        return tensor_operator_registry.get('sinc')(self)
-
-    def sinh(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.sinh`.
-        """
-        return tensor_operator_registry.get('sinh')(self)
-
-    def sort(self, axis=-1, descending=False):
-        r"""
-        For details, please refer to :func:`mindspore.ops.sort`.
-        """
-        return tensor_operator_registry.get('sort')(self, axis=axis, descending=descending)
-
-    def argsort(self, axis=-1, descending=False):
-        """
-        For details, please refer to :func:`mindspore.ops.argsort`.
-        """
-        return tensor_operator_registry.get('argsort')(self, axis, descending)
-
-    def trunc(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.trunc`.
-        """
-        return tensor_operator_registry.get('trunc')(self)
-
-    def where(self, condition, y):
-        r"""
-        For details, please refer to :func:`mindspore.ops.where`.
-        """
-        return tensor_operator_registry.get('where')(condition, self, y)
-
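Each wrapper deleted above forwarded to the functional op named in its body, so migration is mechanical. A sketch, assuming the `mindspore.ops` functions keep the signatures those bodies reference:

>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor([3.0, 1.0, 2.0], mindspore.float32)
>>> s = ops.sin(x)                    # was x.sin()
>>> values, indices = ops.sort(x)     # was x.sort(axis=-1, descending=False)
>>> t = ops.trunc(x)                  # was x.trunc()
>>> cond = Tensor([True, False, True])
>>> w = ops.where(cond, x, ops.zeros_like(x))   # was x.where(cond, y)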
     def imag(self):
         r"""
         For details, please refer to :func:`mindspore.ops.imag`.
@@ -4729,13 +3994,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def quantile(self, q, axis=None, keepdims=False):
         r"""
-        For details, please refer to :func:`mindspore.ops.quantile`.
+        This interface is deprecated from version 2.4 and will be removed in a future version.
         """
         return tensor_operator_registry.get('quantile')(self, q, axis, keepdims)
 
     def nanquantile(self, q, axis=None, keepdims=False):
         """
-        For details, please refer to :func:`mindspore.ops.nanquantile`.
+        This interface is deprecated from version 2.4 and will be removed in a future version.
         """
         return tensor_operator_registry.get('nanquantile')(self, q, axis, keepdims)
 
@@ -4762,7 +4027,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def qr(self, some=True):
         r"""
-        For details, please refer to :func:`mindspore.ops.qr`.
+        This interface is deprecated from version 2.4 and will be removed in a future version.
         """
         validator.check_value_type('some', some, bool, 'Tensor.qr')
         return tensor_operator_registry.get('qr')(self, 'reduced' if some else 'complete')
@@ -4776,7 +4041,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def masked_scatter(self, mask, x):
         r"""
-        Returns a Tensor. Updates the value in the "self Tensor" with the `tensor` value according to the mask.
+        Updates the values in the "self Tensor" with the `tensor` values according to the mask, and returns a Tensor.
         The shape of `mask` and the "self Tensor" must be the same, or `mask` must be broadcastable.
 
         .. warning::
@@ -4816,35 +4081,39 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def index_put(self, indices, values, accumulate=False):
         r"""
-        Returns a Tensor. According to the index number of `indices`,
-        replace the value corresponding to the "self Tensor" with the value in `values`.
+        Based on the indices in `indices`, replace the corresponding elements in Tensor `self`
+        with the values in `values`. Out-of-place version of :func:`mindspore.Tensor.index_put_`.
+
+        .. warning::
+            The behavior is unpredictable in the following scenario:
+
+            - If `accumulate` is `False` and `indices` contains duplicate elements.
 
         Args:
-            indices (tuple[Tensor], list[Tensor]): the indices of type int32 or int64, used to index into the "self
-                Tensor". The rank of tensors in indices should be 1-D, size of indices should <= "self Tensor".rank
+            indices (tuple[Tensor], list[Tensor]): 1-D tensors of type int32 or int64, used to index into `self`.
+                The number of tensors in `indices` should be <= the rank of `self`,
                 and the tensors in indices should be broadcastable.
-            values (Tensor): 1-D Tensor of the same type as "self Tensor". if size == 1 will be broadcast
-            accumulate (bool): If `accumulate` is True, the elements in values are added to "self Tensor",
-                else the elements in `values` replace the corresponding element in the "self Tensor".
+            values (Tensor): 1-D Tensor with the same type as `self`. If its size is 1, it will be broadcast.
+            accumulate (bool, optional): If `accumulate` is `True`, the elements in `values` will be added to `self`;
+                otherwise the elements in `values` will replace the corresponding elements in `self`.
                 Default: ``False``.
 
         Returns:
             Tensor, with the same type and shape as the "self Tensor".
 
         Raises:
-            TypeError: If the dtype of the "self Tensor" is not equal to the dtype of `values`.
+            TypeError: If the dtype of `self` is not equal to the dtype of `values`.
             TypeError: If the dtype of `indices` is not tuple[Tensor], list[Tensor].
             TypeError: If the dtype of tensors in `indices` are not int32 or int64.
             TypeError: If the dtype of tensors in `indices` are inconsistent.
             TypeError: If the dtype of `accumulate` is not bool.
             ValueError: If rank(`values`) is not 1-D.
             ValueError: If size(`values`) is not 1 or max size of the tensors in `indices` when
-                rank("self Tensor") == size(`indices`).
-            ValueError: If size(`values`) is not 1 or "self Tensor".shape[-1] when
-                rank("self Tensor") > size(`indices`).
+                rank(`self`) == size(`indices`).
+            ValueError: If size(`values`) is not 1 or `self`.shape[-1] when rank(`self`) > size(`indices`).
             ValueError: If the rank of tensors in `indices` is not 1-D.
             ValueError: If the tensors in `indices` are not broadcastable.
-            ValueError: If size(`indices`) > rank("self Tensor").
+            ValueError: If size(`indices`) > rank(`self`).
 
         Supported Platforms:
             ``Ascend`` ``CPU``
@@ -4866,6 +4135,60 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         _index_put = tensor_operator_registry.get('index_put')(0 if accumulate is False else 1)
         return _index_put(self, values, indices)
 
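The Examples section of `index_put` is elided by the hunk above. The following sketch mirrors the `index_put_` example added below, on the assumption that the out-of-place variant computes the same values while leaving `self` untouched:

>>> import numpy as np
>>> from mindspore import Tensor
>>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
>>> values = Tensor(np.array([3]).astype(np.int32))
>>> indices = [Tensor(np.array([0, 1, 1]).astype(np.int32)), Tensor(np.array([1, 2, 1]).astype(np.int32))]
>>> output = x.index_put(indices, values, accumulate=True)
>>> print(output)   # 3 added at positions (0, 1), (1, 2) and (1, 1)
[[1 5 3]
 [4 8 9]]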
+    def index_put_(self, indices, values, accumulate=False):
+        r"""
+        Based on the indices in `indices`, replace the corresponding elements in Tensor `self` with the values
+        in `values`. The expression `Tensor.index_put_(indices, values)` is equivalent to `tensor[indices] = values`.
+        Update and return `self`.
+
+        .. warning::
+            The behavior is unpredictable in the following scenario:
+
+            - If `accumulate` is `False` and `indices` contains duplicate elements.
+
+        Args:
+            indices (tuple[Tensor], list[Tensor]): tensors of type bool, uint8, int32 or int64,
+                used to index into `self`. The number of tensors in `indices` should be <= the rank of `self`,
+                and the tensors in `indices` should be broadcastable.
+            values (Tensor): Tensor with the same type as `self`. If its size is 1, it will be broadcast.
+            accumulate (bool, optional): If `accumulate` is `True`, the elements in `values` will be added to `self`;
+                otherwise the elements in `values` will replace the corresponding elements in `self`.
+                Default: ``False``.
+
+        Returns:
+            Tensor `self`.
+
+        Raises:
+            TypeError: If the dtype of `self` is not equal to the dtype of `values`.
+            TypeError: If the dtype of `indices` is not tuple[Tensor], list[Tensor].
+            TypeError: If the dtype of the tensors in `indices` is not bool, uint8, int32 or int64.
+            TypeError: If the dtypes of the tensors in `indices` are inconsistent.
+            TypeError: If the dtype of `accumulate` is not bool.
+            ValueError: If size(`values`) is not 1 or the max size of the tensors in `indices` when
+                rank(`self`) == size(`indices`).
+            ValueError: If size(`values`) is not 1 or `self`.shape[-1] when rank(`self`) > size(`indices`).
+            ValueError: If the tensors in `indices` are not broadcastable.
+            ValueError: If size(`indices`) > rank(`self`).
+
+        Supported Platforms:
+            ``Ascend``
+
+        Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
+            >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
+            >>> values = Tensor(np.array([3]).astype(np.int32))
+            >>> indices = [Tensor(np.array([0, 1, 1]).astype(np.int32)), Tensor(np.array([1, 2, 1]).astype(np.int32))]
+            >>> accumulate = True
+            >>> x.index_put_(indices, values, accumulate)
+            >>> print(x)
+            [[1 5 3]
+             [4 8 9]]
+        """
+        index_put_ = tensor_operator_registry.get('index_put_')
+        return index_put_(self, indices, values, accumulate)
+
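The two variants differ only in where the result lands. A self-contained contrast, with a size-1 `values` broadcast to a single indexed position:

>>> import numpy as np
>>> from mindspore import Tensor
>>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
>>> values = Tensor(np.array([9]).astype(np.int32))
>>> indices = [Tensor(np.array([0]).astype(np.int32)), Tensor(np.array([2]).astype(np.int32))]
>>> y = x.index_put(indices, values)    # out-of-place: y[0][2] == 9, x unchanged
>>> z = x.index_put_(indices, values)   # in-place: x[0][2] == 9 and z is x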
     def move_to(self, to, blocking=True):
         r"""
         Copy Tensor to the target device, synchronously or asynchronously (synchronously by default). Only supported in PyNative mode.
@@ -4916,6 +4239,62 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return Tensor_._offload(self)
 
+    def _data_ptr(self):
+        r"""
+        Get the data pointer address of the tensor: for CPU it is a host address, for GPU/NPU it is a device address.
+        Users should know how to use the data pointer address.
+        Note: this is an experimental API; users need to understand it before use.
+
+        Supported Platforms:
+            ``CPU`` ``GPU`` ``Ascend``
+
+        Examples:
+            >>> import mindspore as ms
+            >>> from mindspore import Tensor
+            >>> x = ms.Tensor([1, 2, 3], ms.int64)
+            >>> data_ptr = x._data_ptr()
+        """
+        return Tensor_._data_ptr(self)
+
+    def normal_(self, mean=0, std=1, *, generator=None):
+        r"""
+        Update the `self` tensor in place by generating random numbers sampled from the normal
+        distribution constructed from the parameters `mean` and `std`.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+
+        Args:
+            mean (number, optional): the mean of the normal distribution, with float data type.
+                Default: ``0``.
+            std (number, optional): the standard deviation of the normal distribution, with float data type.
+                Default: ``1``.
+
+        Keyword Args:
+            generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
+                Default: ``None``, uses the default pseudorandom number generator.
+
+        Returns:
+            A tensor filled with random numbers drawn from the normal distribution, with the same
+            type and shape as the `self` tensor.
+
+        Raises:
+            TypeError: If the dtype of `mean` or `std` is not one of: bool, int, float, complex.
+
+        Supported Platforms:
+            ``Ascend``
+
+        Examples:
+            >>> import mindspore
+            >>> import numpy as np
+            >>> x = mindspore.Tensor(np.array([[1, 2], [3, 4]]), dtype=mindspore.float32)
+            >>> output = x.normal_()
+            >>> print(output)
+            [[0.2788825 1.3305743]
+             [1.244194 1.16303174]]
+        """
+        return tensor_operator_registry.get('normal_')(self, mean=mean, std=std, generator=generator)
+
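For reproducible draws, `normal_` accepts the keyword-only `generator`. A sketch assuming `mindspore.Generator` provides `manual_seed` as in recent releases:

>>> import mindspore
>>> import numpy as np
>>> gen = mindspore.Generator()   # assumed API
>>> _ = gen.manual_seed(42)       # fixed seed for reproducibility
>>> x = mindspore.Tensor(np.zeros((2, 2)), dtype=mindspore.float32)
>>> y = x.normal_(mean=0.0, std=1.0, generator=gen)   # in-place draw from N(0, 1)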
 
 def _vm_compare(*args):
     """Implement `vm_compare` for tensor."""
@@ -4926,12 +4305,16 @@ def _vm_compare(*args):
     if obj_str == "shape":
         fn = getattr(args[0].asnumpy(), obj_str)
         return fn
-    if obj_str == "__setitem__":
-        fn = getattr(args[0].asnumpy(), obj_str)
+    if obj_str == "_tensor_setitem" or obj_str == "_tensor_setitem_origin":
+        fn = getattr(args[0].asnumpy(), "__setitem__")
         index = args[1].asnumpy() if isinstance(args[1], Tensor) else args[1]
         value = args[2].asnumpy() if isinstance(args[2], Tensor) else args[2]
         fn(index, value)
         return args[0]
+    if obj_str == "_tensor_getitem" or obj_str == "_tensor_getitem_origin":
+        fn = getattr(args[0].asnumpy(), "__getitem__")
+        index = args[1].asnumpy() if isinstance(args[1], Tensor) else args[1]
+        return Tensor(np.array(fn(index)))
     if len(args) == 2:
         fn = getattr(args[0].asnumpy(), obj_str)
         return Tensor(fn())
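The new branches route the renamed item-access entry points onto numpy fallbacks. A conceptual equivalent of the `_tensor_getitem` path, with `vm_getitem_fallback` a hypothetical name used only for illustration:

>>> import numpy as np
>>> from mindspore import Tensor
>>> def vm_getitem_fallback(t, index):
...     # mirrors the new "_tensor_getitem" branch: index via numpy, rewrap as Tensor
...     np_index = index.asnumpy() if isinstance(index, Tensor) else index
...     return Tensor(np.array(t.asnumpy().__getitem__(np_index)))
...
>>> print(vm_getitem_fallback(Tensor([10, 20, 30]), 1))
20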
@@ -4955,20 +4338,13 @@ def _check_tensor_input(input_data=None, dtype=None, shape=None, init=None):
         raise ValueError("init, dtype and shape must have values at the same time.")
 
     if input_data is not None:
-        if isinstance(input_data, np.ndarray) and input_data.ndim >= 1 and input_data.size == 0:
-            raise ValueError("input_data can not contain zero dimension.")
         if isinstance(input_data, (tuple, list)):
             try:
-                np_data = np.array(input_data)
+                _ = np.array(input_data)
             except ValueError as e:
                 if "The requested array has an inhomogeneous shape" in str(e):
                     raise TypeError(f"For Tensor, the input_data is {input_data} that contains unsupported elements.")
                 raise
-        if np_data.ndim >= 1 and np_data.size == 0:
-            raise ValueError("input_data can not contain zero dimension.")
-
-    if shape is not None and not (hasattr(init, "__enable_zero_dim__") and init.__enable_zero_dim__) and 0 in shape:
-        raise ValueError("Shape can not contain zero value.")
 
 
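With the zero-dimension checks deleted, zero-size construction appears to no longer raise here. A minimal sketch of the behavior this change suggests, assuming no other layer re-validates the shape:

>>> import numpy as np
>>> from mindspore import Tensor
>>> t = Tensor(np.zeros((0, 3), dtype=np.float32))   # was rejected by the removed check
>>> print(t.shape)
(0, 3)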
  def _check_tensor_dynamic_shape(dtype=None, shape=None, init=None):