mindspore 2.4.10__cp310-none-any.whl → 2.5.0__cp310-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore has been flagged as potentially problematic; see the advisory details for this release for more information.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +39 -0
- mindspore/__init__.py +8 -3
- mindspore/_akg/akg/composite/build_module.py +6 -2
- mindspore/_akg/akg/utils/kernel_exec.py +2 -2
- mindspore/_c_dataengine.cpython-310-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-310-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-310-aarch64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +0 -5
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/compile_config.py +64 -0
- mindspore/_extends/parse/deprecated/__init__.py +0 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +375 -0
- mindspore/_extends/parse/parser.py +23 -5
- mindspore/_extends/parse/standard_method.py +123 -27
- mindspore/_extends/pijit/pijit_func_white_list.py +1 -1
- mindspore/amp.py +7 -1
- mindspore/boost/boost_cell_wrapper.py +136 -41
- mindspore/common/__init__.py +3 -1
- mindspore/common/_register_for_tensor.py +0 -1
- mindspore/common/_stub_tensor.py +25 -4
- mindspore/common/_tensor_cpp_method.py +17 -0
- mindspore/common/_tensor_docs.py +6132 -0
- mindspore/common/api.py +98 -21
- mindspore/common/dtype.py +34 -34
- mindspore/common/dump.py +2 -1
- mindspore/common/file_system.py +8 -3
- mindspore/common/generator.py +2 -0
- mindspore/common/hook_handle.py +3 -1
- mindspore/common/initializer.py +3 -4
- mindspore/common/lazy_inline.py +8 -2
- mindspore/common/mindir_util.py +10 -2
- mindspore/common/parameter.py +31 -15
- mindspore/common/tensor.py +713 -1337
- mindspore/communication/__init__.py +1 -1
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/comm_func.py +215 -173
- mindspore/communication/management.py +23 -20
- mindspore/context.py +285 -191
- mindspore/dataset/__init__.py +23 -19
- mindspore/dataset/callback/ds_callback.py +2 -1
- mindspore/dataset/core/config.py +84 -3
- mindspore/dataset/engine/cache_admin.py +3 -3
- mindspore/dataset/engine/cache_client.py +5 -4
- mindspore/dataset/engine/datasets.py +192 -149
- mindspore/dataset/engine/datasets_audio.py +14 -0
- mindspore/dataset/engine/datasets_standard_format.py +11 -11
- mindspore/dataset/engine/datasets_text.py +38 -1
- mindspore/dataset/engine/datasets_user_defined.py +100 -66
- mindspore/dataset/engine/datasets_vision.py +81 -8
- mindspore/dataset/engine/iterators.py +281 -63
- mindspore/dataset/engine/obs/util.py +8 -0
- mindspore/dataset/engine/queue.py +40 -0
- mindspore/dataset/engine/samplers.py +26 -2
- mindspore/dataset/engine/serializer_deserializer.py +1 -1
- mindspore/dataset/engine/validators.py +43 -11
- mindspore/dataset/transforms/py_transforms_util.py +17 -0
- mindspore/dataset/transforms/transforms.py +29 -12
- mindspore/dataset/vision/validators.py +1 -2
- mindspore/device_context/__init__.py +21 -0
- mindspore/device_context/ascend/__init__.py +25 -0
- mindspore/device_context/ascend/device.py +72 -0
- mindspore/device_context/ascend/op_debug.py +94 -0
- mindspore/device_context/ascend/op_precision.py +193 -0
- mindspore/device_context/ascend/op_tuning.py +127 -0
- mindspore/device_context/cpu/__init__.py +25 -0
- mindspore/device_context/cpu/device.py +62 -0
- mindspore/device_context/cpu/op_tuning.py +43 -0
- mindspore/device_context/gpu/__init__.py +21 -0
- mindspore/device_context/gpu/device.py +70 -0
- mindspore/device_context/gpu/op_precision.py +67 -0
- mindspore/device_context/gpu/op_tuning.py +175 -0
- mindspore/device_manager.py +134 -0
- mindspore/experimental/llm_boost/__init__.py +1 -0
- mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
- mindspore/experimental/llm_boost/register.py +1 -0
- mindspore/experimental/optim/adadelta.py +26 -22
- mindspore/experimental/optim/adam.py +3 -0
- mindspore/experimental/optim/lr_scheduler.py +33 -24
- mindspore/experimental/optim/radam.py +33 -30
- mindspore/hal/device.py +28 -0
- mindspore/hal/event.py +17 -0
- mindspore/hal/memory.py +94 -3
- mindspore/hal/stream.py +91 -6
- mindspore/include/api/context.h +0 -1
- mindspore/lib/libavcodec.so.59 +0 -0
- mindspore/lib/libavdevice.so.59 +0 -0
- mindspore/lib/libavfilter.so.8 +0 -0
- mindspore/lib/libavformat.so.59 +0 -0
- mindspore/lib/libavutil.so.57 +0 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_ops.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/libswresample.so.4 +0 -0
- mindspore/lib/libswscale.so.6 +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910_93/aic-ascend910_93-ops-info.json +2048 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl/dynamic/decoder_kv_cache.py +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl/dynamic/prompt_kv_cache.py +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/version.info +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/config/ascend910_93/aic-ascend910_93-ops-info.json +224 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/all_finite.py +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/decoder_kv_cache.py +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/prompt_kv_cache.py +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.json +78 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.json +78 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.json +78 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/all_finite.json +139 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/binary_info_config.json +361 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/decoder_kv_cache.json +892 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/prompt_kv_cache.json +892 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/version.info +1 -1
- mindspore/lib/plugin/ascend/custom_compiler/setup.py +1 -1
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_internal_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libms_ascend_native_boost.so +0 -0
- mindspore/lib/plugin/ascend/libms_atb_boost.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +957 -955
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/liblcal_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{acme/include/base_type.h → base_type.h} +25 -20
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{cast/cast_tiling.h → internal.h} +6 -4
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_op.h +114 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/boost_kernel.h +70 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/llama_impl.h +85 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/model_interface.h +52 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/tensor.h +81 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_creator.h +123 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +155 -110
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{acme/include/tiling_info.h → tiling_info.h} +12 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tiling_utils.h +178 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layer_norm_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_quant_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_310p_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcompare_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libllama_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_optiling.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmulti_weight_matmul_kernel_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_f16_nz/internal_pp_matmul_f16_nz.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_f16_nz/internal_pp_matmul_f16_nz_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_i8_nz_compress/internal_pp_matmul_i8_nz_compress.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_i8_nz_compress/internal_pp_matmul_i8_nz_compress_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_int8_nz/internal_pp_matmul_int8_nz.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_int8_nz/internal_pp_matmul_int8_nz_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libadd_rms_norm_quant_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libapply_rotary_pos_emb_310p_impl.so → op_kernels/ascend310p/so_kernels/libapply_rotary_pos_emb_310p_ascend310p.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libcast_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libcompare_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libgelu_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libmatmul_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libreshape_and_cache_nz_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_4b60f88cdc28b25a36bad2d8b0a88092.json +163 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_4b60f88cdc28b25a36bad2d8b0a88092.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_cde61da2bd6fededcb1ba310a6ad16ee.json +163 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_cde61da2bd6fededcb1ba310a6ad16ee.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_bf16_bf16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_bf16_fp16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_bf16_fp32.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_fp16_bf16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_fp16_fp16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_fp16_fp32.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libadd_layer_norm_impl.so → op_kernels/ascend910b/so_kernels/libadd_layer_norm_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libadd_rms_norm_impl.so → op_kernels/ascend910b/so_kernels/libadd_rms_norm_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/so_kernels/libadd_rms_norm_quant_ascend910b.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libapply_rotary_pos_emb_impl.so → op_kernels/ascend910b/so_kernels/libapply_rotary_pos_emb_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libcast_impl.so → op_kernels/ascend910b/so_kernels/libcast_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libnot_equal_impl.so → op_kernels/ascend910b/so_kernels/libcompare_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libgelu_impl.so → op_kernels/ascend910b/so_kernels/libgelu_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/so_kernels/libllama_ascend910b.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libmatmul_impl.so → op_kernels/ascend910b/so_kernels/libmatmul_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libmulti_weight_matmul_kernel_impl.so → op_kernels/ascend910b/so_kernels/libmulti_weight_matmul_kernel_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libreshape_and_cache_impl.so → op_kernels/ascend910b/so_kernels/libreshape_and_cache_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/librms_norm_impl.so → op_kernels/ascend910b/so_kernels/librms_norm_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/log.py +12 -0
- mindspore/mindrecord/__init__.py +1 -1
- mindspore/mindrecord/config.py +17 -316
- mindspore/mindrecord/filereader.py +1 -9
- mindspore/mindrecord/filewriter.py +5 -15
- mindspore/mindrecord/mindpage.py +1 -9
- mindspore/mint/__init__.py +824 -218
- mindspore/mint/distributed/__init__.py +66 -4
- mindspore/mint/distributed/distributed.py +2594 -44
- mindspore/mint/linalg/__init__.py +6 -0
- mindspore/mint/nn/__init__.py +473 -14
- mindspore/mint/nn/functional.py +486 -11
- mindspore/mint/nn/layer/__init__.py +17 -4
- mindspore/mint/nn/layer/_functions.py +330 -0
- mindspore/mint/nn/layer/activation.py +169 -1
- mindspore/mint/nn/layer/basic.py +123 -0
- mindspore/mint/nn/layer/conv.py +727 -0
- mindspore/mint/nn/layer/normalization.py +215 -19
- mindspore/mint/nn/layer/padding.py +797 -0
- mindspore/mint/nn/layer/pooling.py +170 -0
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/adam.py +223 -0
- mindspore/mint/optim/adamw.py +26 -19
- mindspore/mint/special/__init__.py +2 -1
- mindspore/multiprocessing/__init__.py +5 -0
- mindspore/nn/cell.py +126 -19
- mindspore/nn/dynamic_lr.py +2 -1
- mindspore/nn/layer/activation.py +6 -6
- mindspore/nn/layer/basic.py +35 -25
- mindspore/nn/layer/channel_shuffle.py +3 -3
- mindspore/nn/layer/embedding.py +3 -3
- mindspore/nn/layer/normalization.py +8 -7
- mindspore/nn/layer/padding.py +4 -3
- mindspore/nn/layer/pooling.py +47 -13
- mindspore/nn/layer/rnn_cells.py +1 -1
- mindspore/nn/layer/rnns.py +2 -1
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +48 -26
- mindspore/nn/learning_rate_schedule.py +5 -3
- mindspore/nn/loss/loss.py +31 -36
- mindspore/nn/optim/ada_grad.py +1 -0
- mindspore/nn/optim/adadelta.py +2 -2
- mindspore/nn/optim/adam.py +1 -1
- mindspore/nn/optim/lars.py +1 -4
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/rprop.py +2 -2
- mindspore/nn/optim/thor.py +2 -1
- mindspore/nn/utils/init.py +13 -11
- mindspore/nn/wrap/cell_wrapper.py +4 -6
- mindspore/nn/wrap/loss_scale.py +3 -4
- mindspore/numpy/array_creations.py +60 -62
- mindspore/numpy/array_ops.py +148 -143
- mindspore/numpy/logic_ops.py +41 -42
- mindspore/numpy/math_ops.py +361 -359
- mindspore/numpy/utils.py +16 -16
- mindspore/numpy/utils_const.py +4 -4
- mindspore/ops/__init__.py +2 -1
- mindspore/ops/_grad_experimental/grad_comm_ops.py +94 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +6 -1
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
- mindspore/ops/_vmap/vmap_array_ops.py +20 -19
- mindspore/ops/_vmap/vmap_base.py +0 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +19 -13
- mindspore/ops/_vmap/vmap_math_ops.py +11 -9
- mindspore/ops/_vmap/vmap_nn_ops.py +20 -34
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +149 -12
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -61
- mindspore/ops/auto_generate/gen_extend_func.py +554 -60
- mindspore/ops/auto_generate/gen_ops_def.py +1621 -115
- mindspore/ops/auto_generate/gen_ops_prim.py +8024 -3409
- mindspore/ops/auto_generate/pyboost_inner_prim.py +183 -79
- mindspore/ops/composite/base.py +1 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +229 -30
- mindspore/ops/composite/multitype_ops/pow_impl.py +0 -29
- mindspore/ops/function/__init__.py +12 -0
- mindspore/ops/function/array_func.py +561 -159
- mindspore/ops/function/clip_func.py +64 -0
- mindspore/ops/function/debug_func.py +28 -20
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +5 -4
- mindspore/ops/function/math_func.py +1659 -290
- mindspore/ops/function/nn_func.py +988 -317
- mindspore/ops/function/parameter_func.py +3 -56
- mindspore/ops/function/random_func.py +243 -33
- mindspore/ops/function/sparse_unary_func.py +1 -1
- mindspore/ops/functional.py +18 -5
- mindspore/ops/functional_overload.py +897 -0
- mindspore/ops/operations/__init__.py +3 -2
- mindspore/ops/operations/_embedding_cache_ops.py +4 -4
- mindspore/ops/operations/_grad_ops.py +2 -34
- mindspore/ops/operations/_infer_ops.py +2 -1
- mindspore/ops/operations/_inner_ops.py +38 -8
- mindspore/ops/operations/array_ops.py +45 -303
- mindspore/ops/operations/comm_ops.py +19 -16
- mindspore/ops/operations/custom_ops.py +11 -55
- mindspore/ops/operations/debug_ops.py +42 -47
- mindspore/ops/operations/inner_ops.py +6 -4
- mindspore/ops/operations/linalg_ops.py +3 -2
- mindspore/ops/operations/manually_defined/ops_def.py +185 -104
- mindspore/ops/operations/math_ops.py +11 -216
- mindspore/ops/operations/nn_ops.py +146 -308
- mindspore/ops/primitive.py +23 -21
- mindspore/ops/tensor_method.py +1669 -0
- mindspore/ops_generate/aclnn_kernel_register_auto_cc_generator.py +110 -0
- mindspore/ops_generate/add_tensor_docs_generator.py +54 -0
- mindspore/ops_generate/arg_handler.py +0 -61
- mindspore/ops_generate/auto_grad_impl_cc_generator.py +135 -0
- mindspore/ops_generate/auto_grad_reg_cc_generator.py +93 -0
- mindspore/ops_generate/base_generator.py +11 -0
- mindspore/ops_generate/cpp_create_prim_instance_helper_generator.py +108 -0
- mindspore/ops_generate/functional_map_cpp_generator.py +491 -0
- mindspore/ops_generate/functional_overload_py_generator.py +110 -0
- mindspore/ops_generate/functions_cc_generator.py +233 -0
- mindspore/ops_generate/gen_aclnn_implement.py +110 -114
- mindspore/ops_generate/gen_constants.py +157 -3
- mindspore/ops_generate/gen_ops.py +245 -990
- mindspore/ops_generate/gen_pyboost_func.py +97 -998
- mindspore/ops_generate/gen_utils.py +119 -33
- mindspore/ops_generate/lite_ops_cpp_generator.py +155 -0
- mindspore/ops_generate/op_api_proto.py +206 -0
- mindspore/ops_generate/op_def_py_generator.py +131 -0
- mindspore/ops_generate/op_prim_py_generator.py +480 -0
- mindspore/ops_generate/op_proto.py +373 -108
- mindspore/ops_generate/op_template_parser.py +436 -0
- mindspore/ops_generate/ops_def_cc_generator.py +288 -0
- mindspore/ops_generate/ops_def_h_generator.py +74 -0
- mindspore/ops_generate/ops_name_h_generator.py +68 -0
- mindspore/ops_generate/ops_primitive_h_generator.py +81 -0
- mindspore/ops_generate/pyboost_functions_cpp_generator.py +370 -0
- mindspore/ops_generate/pyboost_functions_h_generator.py +68 -0
- mindspore/ops_generate/pyboost_functions_py_generator.py +148 -0
- mindspore/ops_generate/pyboost_grad_function_cpp_generator.py +154 -0
- mindspore/ops_generate/pyboost_inner_prim_generator.py +131 -0
- mindspore/ops_generate/pyboost_native_grad_functions_generator.py +268 -0
- mindspore/ops_generate/pyboost_op_cpp_code_generator.py +851 -0
- mindspore/ops_generate/pyboost_overload_functions_cpp_generator.py +344 -0
- mindspore/ops_generate/pyboost_utils.py +92 -33
- mindspore/ops_generate/template.py +294 -44
- mindspore/ops_generate/tensor_func_reg_cpp_generator.py +422 -0
- mindspore/parallel/__init__.py +3 -3
- mindspore/parallel/_auto_parallel_context.py +24 -33
- mindspore/parallel/_parallel_serialization.py +13 -2
- mindspore/parallel/_utils.py +4 -1
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +44 -0
- mindspore/parallel/cluster/process_entity/_api.py +131 -37
- mindspore/parallel/cluster/process_entity/_utils.py +41 -6
- mindspore/parallel/cluster/run.py +20 -3
- mindspore/parallel/parameter_broadcast.py +1 -1
- mindspore/parallel/shard.py +3 -0
- mindspore/parallel/transform_safetensors.py +119 -253
- mindspore/profiler/__init__.py +17 -4
- mindspore/profiler/analysis/__init__.py +0 -0
- mindspore/profiler/analysis/parser/__init__.py +0 -0
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +166 -0
- mindspore/profiler/analysis/parser/base_parser.py +158 -0
- mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
- mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
- mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +261 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +84 -0
- mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
- mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
- mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
- mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +260 -0
- mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
- mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
- mindspore/profiler/analysis/task_manager.py +131 -0
- mindspore/profiler/analysis/time_converter.py +84 -0
- mindspore/profiler/analysis/viewer/__init__.py +0 -0
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +333 -0
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +252 -0
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +313 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +322 -0
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +265 -0
- mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
- mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +97 -0
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
- mindspore/profiler/analysis/work_flow.py +73 -0
- mindspore/profiler/common/ascend_msprof_exporter.py +138 -0
- mindspore/profiler/common/command_executor.py +90 -0
- mindspore/profiler/common/constant.py +174 -3
- mindspore/profiler/common/file_manager.py +208 -0
- mindspore/profiler/common/log.py +130 -0
- mindspore/profiler/common/msprof_cmd_tool.py +202 -0
- mindspore/profiler/common/path_manager.py +371 -0
- mindspore/profiler/common/process_bar.py +168 -0
- mindspore/profiler/common/process_pool.py +9 -3
- mindspore/profiler/common/profiler_context.py +476 -0
- mindspore/profiler/common/profiler_info.py +304 -0
- mindspore/profiler/common/profiler_output_path.py +284 -0
- mindspore/profiler/common/profiler_parameters.py +210 -0
- mindspore/profiler/common/profiler_path_manager.py +120 -0
- mindspore/profiler/common/record_function.py +76 -0
- mindspore/profiler/common/tlv_decoder.py +76 -0
- mindspore/profiler/common/util.py +75 -2
- mindspore/profiler/dynamic_profiler.py +270 -37
- mindspore/profiler/envprofiler.py +138 -0
- mindspore/profiler/mstx.py +199 -0
- mindspore/profiler/platform/__init__.py +21 -0
- mindspore/profiler/platform/base_profiler.py +40 -0
- mindspore/profiler/platform/cpu_profiler.py +124 -0
- mindspore/profiler/platform/gpu_profiler.py +74 -0
- mindspore/profiler/platform/npu_profiler.py +309 -0
- mindspore/profiler/profiler.py +580 -93
- mindspore/profiler/profiler_action_controller.py +187 -0
- mindspore/profiler/profiler_interface.py +114 -0
- mindspore/profiler/schedule.py +208 -0
- mindspore/rewrite/api/symbol_tree.py +1 -2
- mindspore/run_check/_check_version.py +2 -6
- mindspore/runtime/__init__.py +37 -0
- mindspore/runtime/device.py +27 -0
- mindspore/runtime/event.py +209 -0
- mindspore/runtime/executor.py +148 -0
- mindspore/runtime/memory.py +392 -0
- mindspore/runtime/stream.py +460 -0
- mindspore/runtime/thread_bind_core.py +401 -0
- mindspore/train/__init__.py +2 -2
- mindspore/train/_utils.py +53 -18
- mindspore/train/amp.py +8 -4
- mindspore/train/callback/_checkpoint.py +32 -18
- mindspore/train/callback/_early_stop.py +1 -1
- mindspore/train/callback/_flops_collector.py +105 -69
- mindspore/train/callback/_history.py +1 -1
- mindspore/train/callback/_summary_collector.py +44 -6
- mindspore/train/callback/_tft_register.py +31 -10
- mindspore/train/dataset_helper.py +11 -11
- mindspore/train/metrics/precision.py +4 -5
- mindspore/train/mind_ir_pb2.py +167 -46
- mindspore/train/model.py +13 -15
- mindspore/train/serialization.py +462 -76
- mindspore/train/summary/summary_record.py +1 -2
- mindspore/train/train_thor/model_thor.py +1 -1
- mindspore/utils/__init__.py +4 -2
- mindspore/utils/bin/dataset-cache +0 -0
- mindspore/utils/bin/dataset-cache-server +0 -0
- mindspore/utils/dryrun.py +138 -0
- mindspore/utils/runtime_execution_order_check.py +550 -0
- mindspore/version.py +1 -1
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/METADATA +2 -3
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/RECORD +522 -456
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/entry_points.txt +1 -1
- mindspore/_data_dump.cpython-310-aarch64-linux-gnu.so +0 -0
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/common/_tensor_overload.py +0 -139
- mindspore/lib/libmindspore_np_dtype.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -82
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -113
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -193
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/dtype_registry.h +0 -90
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_layer_norm_op.h +0 -60
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_rms_norm_op.h +0 -50
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_rms_norm_quant_op.h +0 -50
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/apply_rotary_pos_emb_nz_op.h +0 -42
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/apply_rotary_pos_emb_op.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_only_ops.h +0 -94
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_op_base.h +0 -97
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/flash_attention_score_op.h +0 -97
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/gelu_op.h +0 -44
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_add_rmsnorm_op.h +0 -73
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -108
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/multi_impls_op.h +0 -64
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/multi_weight_matmul_op.h +0 -91
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/paged_attention_op.h +0 -99
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/reshape_and_cache_nz_op.h +0 -44
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/reshape_and_cache_op.h +0 -44
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/rms_norm_op.h +0 -64
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -179
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/profiling_util.h +0 -366
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -56
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/kernel/add.h +0 -21
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/tiling/add_tiling.h +0 -43
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb.h +0 -23
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_base.h +0 -456
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_bf16.h +0 -217
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp.h +0 -391
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp16.h +0 -126
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -230
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_tiling.h +0 -43
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_value.h +0 -27
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/apply_rotary_pos_emb_nz_impl.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz.h +0 -23
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_base.h +0 -460
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_fp16.h +0 -116
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_fp32.h +0 -230
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_tiling.h +0 -43
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_value.h +0 -27
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -74
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -74
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/kernel/cast_kernel.h +0 -21
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_tiling.h +0 -27
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/kernel/compare_kernel.h +0 -23
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +0 -68
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -99
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +0 -21
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/lccl/lccl_wrapper.h +0 -58
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/ms_int_types.h +0 -91
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/ms_int_utils.h +0 -108
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +0 -64
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/add_param.h +0 -68
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/cast_param.h +0 -30
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -38
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +0 -42
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +0 -33
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -377
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/kernel/reshape_and_cache_nz.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/reshape_and_cache_nz_impl.h +0 -42
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/reshape_and_cache_nz_tiling.h +0 -27
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/kernel/sub_kernel.h +0 -20
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +0 -399
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/utils.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_tiling.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +0 -30
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_core.h +0 -43
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_entity.h +0 -38
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_sink.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_stream.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -71
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -165
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +0 -20
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -121
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -106
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_quant_acme_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_310p_old_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_old_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_old_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_bf16_bnsd_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_bf16_bsh_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_fp16_bnsd_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_fp16_bsh_mix.o +0 -0
- mindspore/profiler/envprofiling.py +0 -254
- mindspore/profiler/profiling.py +0 -1926
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/WHEEL +0 -0
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/top_level.txt +0 -0
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
# Copyright
|
|
1
|
+
# Copyright 2024 Huawei Technologies Co., Ltd
|
|
2
2
|
#
|
|
3
3
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
4
|
# you may not use this file except in compliance with the License.
|
|
@@ -93,6 +93,13 @@ def adaptive_avg_pool2d_grad(grad_output, x):
|
|
|
93
93
|
return adaptive_avg_pool2d_grad_impl(grad_output, x)
|
|
94
94
|
|
|
95
95
|
|
|
96
|
+
def adaptive_avg_pool3d(input, output_size):
|
|
97
|
+
r"""
|
|
98
|
+
None
|
|
99
|
+
"""
|
|
100
|
+
return adaptive_avg_pool3d_impl(input, output_size)
|
|
101
|
+
|
|
102
|
+
|
|
96
103
|
def add(input, other, alpha=1):
|
|
97
104
|
r"""
|
|
98
105
|
Adds scaled other value to input Tensor.
|
|
@@ -151,14 +158,38 @@ def add(input, other, alpha=1):
|
|
|
151
158
|
|
|
152
159
|
def argmax(input, dim=None, keepdim=False):
|
|
153
160
|
r"""
|
|
161
|
+
argmax(input) -> Tensor
|
|
162
|
+
|
|
163
|
+
Return the indices of the maximum values of a tensor.
|
|
164
|
+
|
|
165
|
+
Args:
|
|
166
|
+
input (Tensor): Input tensor.
|
|
167
|
+
|
|
168
|
+
Returns:
|
|
169
|
+
Tensor.
|
|
170
|
+
|
|
171
|
+
Supported Platforms:
|
|
172
|
+
``Ascend``
|
|
173
|
+
|
|
174
|
+
Examples:
|
|
175
|
+
>>> import numpy as np
|
|
176
|
+
>>> from mindspore import Tensor
|
|
177
|
+
>>> from mindspore import ops
|
|
178
|
+
>>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
|
|
179
|
+
>>> output = ops.auto_generate.argmax_ext(x)
|
|
180
|
+
>>> print(output)
|
|
181
|
+
6
|
|
182
|
+
|
|
183
|
+
.. function:: argmax(input, dim, keepdim=False) -> Tensor
|
|
184
|
+
:noindex:
|
|
185
|
+
|
|
154
186
|
Return the indices of the maximum values of a tensor across a dimension.
|
|
155
187
|
|
|
156
188
|
Args:
|
|
157
189
|
input (Tensor): Input tensor.
|
|
158
|
-
dim (
|
|
159
|
-
value within the flattened input will be returned. Default: ``None`` .
|
|
190
|
+
dim (int): The dimension to reduce.
|
|
160
191
|
keepdim (bool, optional): Whether the output tensor retains the specified
|
|
161
|
-
dimension.
|
|
192
|
+
dimension. Default: ``False`` .
|
|
162
193
|
|
|
163
194
|
Returns:
|
|
164
195
|
Tensor, indices of the maximum values across a dimension.
|
|
@@ -215,6 +246,41 @@ def argmin(input, dim=None, keepdim=False):
|
|
|
215
246
|
return argmin_impl(input, dim, keepdim)
|
|
216
247
|
|
|
217
248
|
|
|
249
|
+
def argsort(input, dim=-1, descending=False):
|
|
250
|
+
r"""
|
|
251
|
+
Sorts the input tensor along the given dimension in specified order and return the sorted indices.
|
|
252
|
+
|
|
253
|
+
.. warning::
|
|
254
|
+
This is an experimental optimizer API that is subject to change.
|
|
255
|
+
|
|
256
|
+
Args:
|
|
257
|
+
input(Tensor): The input tensor to sort.
|
|
258
|
+
dim (int, optional): The dim to sort along. Default: ``-1`` , means the last dimension.
|
|
259
|
+
The Ascend backend only supports sorting the last dimension.
|
|
260
|
+
descending (bool, optional): The sort order. If `descending` is ``True`` then the elements
|
|
261
|
+
are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
|
|
262
|
+
|
|
263
|
+
Returns:
|
|
264
|
+
Tensor, the indices of sorted input tensor. Data type is int64.
|
|
265
|
+
|
|
266
|
+
Supported Platforms:
|
|
267
|
+
``Ascend``
|
|
268
|
+
|
|
269
|
+
Examples:
|
|
270
|
+
>>> import mindspore
|
|
271
|
+
>>> import numpy as np
|
|
272
|
+
>>> from mindspore import Tensor
|
|
273
|
+
>>> import mindspore.mint as mint
|
|
274
|
+
>>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
|
|
275
|
+
>>> sort = mint.argsort(x)
|
|
276
|
+
>>> print(sort)
|
|
277
|
+
[[2 1 0]
|
|
278
|
+
[2 0 1]
|
|
279
|
+
[0 1 2]]
|
|
280
|
+
"""
|
|
281
|
+
return argsort_impl(input, dim, descending)
|
|
282
|
+
|
|
283
|
+
|
|
218
284
|
def asin(input):
|
|
219
285
|
r"""
|
|
220
286
|
Computes arcsine of input tensors element-wise.
|
|
@@ -356,6 +422,102 @@ def atan(input):
|
|
|
356
422
|
return atan_impl(input)
|
|
357
423
|
|
|
358
424
|
|
|
425
|
+
def avg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
|
|
426
|
+
r"""
|
|
427
|
+
Applies a 1D average pooling over an input Tensor which can be regarded as a composition of 1D input planes.
|
|
428
|
+
|
|
429
|
+
Typically the input is of shape :math:`(N_{in}, C_{in}, L_{in})`, avg_pool1d outputs regional average in the
|
|
430
|
+
:math:`(L_{in})`-dimension. Given kernel size as :math:`ks = l_{ker}` and `stride` as :math:`s = s_0`, the
|
|
431
|
+
operation is as follows.
|
|
432
|
+
|
|
433
|
+
.. math::
|
|
434
|
+
\text{output}(N_i, C_j, l) = \frac{1}{l_{ker}} \sum_{n=0}^{l_{ker}-1}
|
|
435
|
+
\text{input}(N_i, C_j, s_0 \times l + n)
|
|
436
|
+
|
|
437
|
+
.. warning::
|
|
438
|
+
This is an experimental API that is subject to change or deletion.
|
|
439
|
+
|
|
440
|
+
Args:
|
|
441
|
+
input (Tensor): Tensor of shape :math:`(N, C_{in}, L_{in})`.
|
|
442
|
+
kernel_size (Union(int, tuple[int])): The size of kernel window used to take the average value.
|
|
443
|
+
stride (Union(int, tuple[int]), optional): The distance of kernel moving. `stride` can either be an int
|
|
444
|
+
number or a tuple of one int number. Default: ``None``, the same value as `kernel_size`.
|
|
445
|
+
padding (Union(int, tuple[int]), optional): The pad length to be filled. `padding` can either be an integer
|
|
446
|
+
or a tuple of one integer. Default: ``0`` .
|
|
447
|
+
ceil_mode (bool, optional): If True, apply ceil instead of floor to compute the output shape. Default: ``False``.
|
|
448
|
+
count_include_pad (bool, optional): If True, include the zero-padding in the averaging calculation. Default: ``True`` .
|
|
449
|
+
|
|
450
|
+
Returns:
|
|
451
|
+
Tensor of shape :math:`(N, C_{in}, L_{out})`.
|
|
452
|
+
|
|
453
|
+
Raises:
|
|
454
|
+
TypeError: If `input` is not a Tensor.
|
|
455
|
+
TypeError: If `kernel_size` or `stride` is not an int.
|
|
456
|
+
TypeError: If `ceil_mode` or `count_include_pad` is not a bool.
|
|
457
|
+
ValueError: If `kernel_size` or `stride` is less than `1`.
|
|
458
|
+
ValueError: If `kernel_size` or `stride` or `padding` is not int nor a tuple whose length is greater than `1`.
|
|
459
|
+
|
|
460
|
+
Supported Platforms:
|
|
461
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
462
|
+
|
|
463
|
+
Examples:
|
|
464
|
+
>>> import mindspore
|
|
465
|
+
>>> import numpy as np
|
|
466
|
+
>>> from mindspore import Tensor, mint
|
|
467
|
+
>>> input_x = Tensor(np.random.randint(0, 10, [1, 3, 6]), mindspore.float32)
|
|
468
|
+
>>> output = mint.nn.functional.avg_pool1d(input_x, kernel_size=6, stride=1)
|
|
469
|
+
>>> print(output.shape)
|
|
470
|
+
(1, 3, 1)
|
|
471
|
+
"""
|
|
472
|
+
return avg_pool1d_impl(input, kernel_size, stride, padding, ceil_mode, count_include_pad)
|
|
473
|
+
|
|
474
|
+
|
|
475
|
+
def bincount(input, weights=None, minlength=0):
|
|
476
|
+
r"""
|
|
477
|
+
Count the occurrences of each value in the input.
|
|
478
|
+
|
|
479
|
+
If `minlength` is not specified, the length of the output Tensor is the maximum value in the input plus one.
|
|
480
|
+
If `minlength` is specified, the length of the output Tensor is the maximum value between `minlength` or
|
|
481
|
+
the maximum value in the input plus one.
|
|
482
|
+
|
|
483
|
+
Each value in the output Tensor represents the number of occurrences of that index value in the input.
|
|
484
|
+
If `weights` is specified, the output results are weighted,
|
|
485
|
+
i.e., :math:`out[n] += weight[i]` instead of :math:`out[n] += 1`.
|
|
486
|
+
|
|
487
|
+
.. warning::
|
|
488
|
+
This is an experimental API that is subject to change or deletion.
|
|
489
|
+
|
|
490
|
+
Args:
|
|
491
|
+
input (Tensor): A one-dimensional Tensor.
|
|
492
|
+
weights (Tensor, optional): Weights with the same shape as the input. Default: ``None``.
|
|
493
|
+
minlength (int, optional): The minimum length of output Tensor. Should be non-negative. Default: ``0``.
|
|
494
|
+
|
|
495
|
+
Returns:
|
|
496
|
+
Tensor, If input is non-empty, the output shape is :math:`(max(max(input)+1, minlength), )`,
|
|
497
|
+
otherwise the shape is :math:`(0, )`.
|
|
498
|
+
|
|
499
|
+
Raises:
|
|
500
|
+
TypeError: If `input` or `weights` is not a Tensor.
|
|
501
|
+
ValueError: If `input` contains negative values.
|
|
502
|
+
ValueError: If `input` is not one-dimensional or `input` and `weights` do not have the same shape.
|
|
503
|
+
|
|
504
|
+
Supported Platforms:
|
|
505
|
+
``Ascend``
|
|
506
|
+
|
|
507
|
+
Examples:
|
|
508
|
+
>>> from mindspore import mint
|
|
509
|
+
>>> print(mint.bincount(np.arange(5)))
|
|
510
|
+
[1. 1. 1. 1. 1.]
|
|
511
|
+
>>> print(mint.bincount(np.array([0, 1, 1, 3, 2, 1, 7])))
|
|
512
|
+
[1. 3. 1. 1. 0. 0. 0. 1.]
|
|
513
|
+
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
|
|
514
|
+
>>> x = np.array([0, 1, 1, 2, 2, 2])
|
|
515
|
+
>>> print(mint.bincount(x, weights=w, minlength=5))
|
|
516
|
+
[0.3 0.7 1.1 0.0 0.0]
|
|
517
|
+
"""
|
|
518
|
+
return bincount_impl(input, weights, minlength)
|
|
519
|
+
|
|
520
|
+
|
|
359
521
|
def bmm(input, mat2):
|
|
360
522
|
r"""
|
|
361
523
|
Performs batch matrix-matrix multiplication of two three-dimensional tensors.
|
|
@@ -463,13 +625,6 @@ def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
|
|
|
463
625
|
return fold_impl(input, converted_output_size, converted_kernel_size, converted_dilation, converted_padding, converted_stride)
|
|
464
626
|
|
|
465
627
|
|
|
466
|
-
def copy(variable, value):
|
|
467
|
-
r"""
|
|
468
|
-
None
|
|
469
|
-
"""
|
|
470
|
-
return copy_impl(variable, value)
|
|
471
|
-
|
|
472
|
-
|
|
473
628
|
def cummin(input, dim):
|
|
474
629
|
r"""
|
|
475
630
|
Returns a tuple (values, indices) where `values` is the cumulative minimum value of input Tensor `input`
|
|
@@ -480,6 +635,9 @@ def cummin(input, dim):
|
|
|
480
635
|
y_{i} = \min(x_{1}, x_{2}, ... , x_{i})
|
|
481
636
|
\end{array}
|
|
482
637
|
|
|
638
|
+
.. note::
|
|
639
|
+
O2 mode is not supported in Ascend.
|
|
640
|
+
|
|
483
641
|
Args:
|
|
484
642
|
input (Tensor): The input Tensor, The dimension must be greater than 0.
|
|
485
643
|
dim (int): Operation dimension. The value of `dim` must be in the range `[-input.ndim, input.ndim - 1]`.
|
|
@@ -494,9 +652,6 @@ def cummin(input, dim):
|
|
|
494
652
|
TypeError: If `dim` is not an int.
|
|
495
653
|
ValueError: If `dim` is out the range of `[-input.ndim, input.ndim - 1]`.
|
|
496
654
|
|
|
497
|
-
.. note::
|
|
498
|
-
O2 mode is not supported in Ascend.
|
|
499
|
-
|
|
500
655
|
Supported Platforms:
|
|
501
656
|
``Ascend``
|
|
502
657
|
|
|
@@ -652,6 +807,40 @@ def flatten(input, start_dim=0, end_dim=-1):
|
|
|
652
807
|
return flatten_impl(input, start_dim, end_dim)
|
|
653
808
|
|
|
654
809
|
|
|
810
|
+
def frac(input):
|
|
811
|
+
r"""
|
|
812
|
+
Calculates the fractional part of each element in the input.
|
|
813
|
+
|
|
814
|
+
.. math::
|
|
815
|
+
out_i = input_i - \lfloor |input_i| \rfloor * sgn(input_i)
|
|
816
|
+
|
|
817
|
+
.. warning::
|
|
818
|
+
This is an experimental API that is subject to change or deletion.
|
|
819
|
+
|
|
820
|
+
Args:
|
|
821
|
+
input (Tensor): The input Tensor.
|
|
822
|
+
|
|
823
|
+
Returns:
|
|
824
|
+
Tensor, has the same shape and type as input.
|
|
825
|
+
|
|
826
|
+
Raises:
|
|
827
|
+
TypeError: If `input` is not a Tensor.
|
|
828
|
+
|
|
829
|
+
Supported Platforms:
|
|
830
|
+
``Ascend``
|
|
831
|
+
|
|
832
|
+
Examples:
|
|
833
|
+
>>> import mindspore
|
|
834
|
+
>>> import numpy as np
|
|
835
|
+
>>> from mindspore import Tensor, ops
|
|
836
|
+
>>> x = Tensor([2, 4.2, -2.5], mindspore.float16)
|
|
837
|
+
>>> output = ops.frac_ext(x)
|
|
838
|
+
>>> print(output)
|
|
839
|
+
[ 0. 0.1992 -0.5 ]
|
|
840
|
+
"""
|
|
841
|
+
return frac_impl(input)
|
|
842
|
+
|
|
843
|
+
|
|
655
844
|
def histc(input, bins=100, min=0, max=0):
|
|
656
845
|
r"""
|
|
657
846
|
Computes the histogram of a tensor.
|
|
@@ -827,6 +1016,44 @@ def inplace_adds(input, other, alpha=1):
|
|
|
827
1016
|
return inplace_adds_impl(input, other, alpha)
|
|
828
1017
|
|
|
829
1018
|
|
|
1019
|
+
def sub_tensor_(input, other, alpha=1):
|
|
1020
|
+
r"""
|
|
1021
|
+
None
|
|
1022
|
+
"""
|
|
1023
|
+
return sub_tensor_impl(input, other, alpha)
|
|
1024
|
+
|
|
1025
|
+
|
|
1026
|
+
def isneginf(input):
|
|
1027
|
+
r"""
|
|
1028
|
+
Determines which elements are -inf for each position.
|
|
1029
|
+
|
|
1030
|
+
.. warning::
|
|
1031
|
+
- This is an experimental API that is subject to change.
|
|
1032
|
+
- This API can be used only on the Atlas A2 training series.
|
|
1033
|
+
|
|
1034
|
+
Args:
|
|
1035
|
+
input (Tensor): Input Tensor.
|
|
1036
|
+
|
|
1037
|
+
Returns:
|
|
1038
|
+
Tensor with the same shape as the input, where elements are `True` if the corresponding element in the `input` is negative infinity, and `False` otherwise.
|
|
1039
|
+
|
|
1040
|
+
Raises:
|
|
1041
|
+
TypeError: If the input is not a tensor.
|
|
1042
|
+
|
|
1043
|
+
Supported Platforms:
|
|
1044
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1045
|
+
|
|
1046
|
+
Examples:
|
|
1047
|
+
>>> from mindspore import ops, Tensor
|
|
1048
|
+
>>> from mindspore import dtype as mstype
|
|
1049
|
+
>>> output = ops.isneginf(Tensor([[-float("inf"), float("inf")], [1, -float("inf")]], mstype.float32))
|
|
1050
|
+
>>> print(output)
|
|
1051
|
+
[[ True False]
|
|
1052
|
+
[False True]]
|
|
1053
|
+
"""
|
|
1054
|
+
return isneginf_impl(input)
|
|
1055
|
+
|
|
1056
|
+
|
|
830
1057
|
def l1_loss(input, target, reduction='mean'):
|
|
831
1058
|
r"""
|
|
832
1059
|
Calculate the mean absolute error between the `input` value and the `target` value.
|
|
@@ -908,7 +1135,7 @@ def leaky_relu(input, negative_slope=0.01):
|
|
|
908
1135
|
|
|
909
1136
|
Args:
|
|
910
1137
|
input (Tensor): The input of leaky_relu is a Tensor of any dimension.
|
|
911
|
-
negative_slope (Union[int, float]): Slope of the activation function when the element of `input` is less than 0.
|
|
1138
|
+
negative_slope (Union[int, float], optional): Slope of the activation function when the element of `input` is less than 0.
|
|
912
1139
|
Default: ``0.01`` .
|
|
913
1140
|
|
|
914
1141
|
Returns:
|
|
@@ -933,6 +1160,84 @@ def leaky_relu(input, negative_slope=0.01):
|
|
|
933
1160
|
return leaky_relu_impl(input, negative_slope)
|
|
934
1161
|
|
|
935
1162
|
|
|
1163
|
+
def log10(input):
|
|
1164
|
+
r"""
|
|
1165
|
+
Returns the logarithm to the base 10 of a tensor element-wise.
|
|
1166
|
+
|
|
1167
|
+
.. math::
|
|
1168
|
+
y_i = \log_{10}(x_i)
|
|
1169
|
+
|
|
1170
|
+
.. warning::
|
|
1171
|
+
- This is an experimental API that is subject to change or deletion.
|
|
1172
|
+
- If the input value of operator Log10 is within the range (0, 0.01] or [0.95, 1.05], the output accuracy
|
|
1173
|
+
may be affacted.
|
|
1174
|
+
|
|
1175
|
+
Args:
|
|
1176
|
+
input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
|
|
1177
|
+
|
|
1178
|
+
Returns:
|
|
1179
|
+
Tensor, has the same shape as the `input`, and the dtype changes according to the `input.dtype`.
|
|
1180
|
+
|
|
1181
|
+
- if `input.dtype` is in [float16, float32, float64, bfloat16], the output dtype is the same as the `input.dtype`.
|
|
1182
|
+
- if `input.dtype` is integer or boolean type, the output dtype is float32.
|
|
1183
|
+
|
|
1184
|
+
Raises:
|
|
1185
|
+
TypeError: If `input` is not a Tensor.
|
|
1186
|
+
|
|
1187
|
+
Supported Platforms:
|
|
1188
|
+
``Ascend``
|
|
1189
|
+
|
|
1190
|
+
Examples:
|
|
1191
|
+
>>> import mindspore
|
|
1192
|
+
>>> import numpy as np
|
|
1193
|
+
>>> from mindspore import Tensor, mint
|
|
1194
|
+
>>> x = Tensor(np.array([3.0, 5.0, 7.0]), mindspore.float32)
|
|
1195
|
+
>>> output = mint.log10(x)
|
|
1196
|
+
>>> print(output)
|
|
1197
|
+
[0.47712136 0.69897 0.845098 ]
|
|
1198
|
+
"""
|
|
1199
|
+
return log10_impl(input)
|
|
1200
|
+
|
|
1201
|
+
|
|
1202
|
+
def log2(input):
|
|
1203
|
+
r"""
|
|
1204
|
+
Returns the logarithm to the base 2 of a tensor element-wise.
|
|
1205
|
+
|
|
1206
|
+
.. math::
|
|
1207
|
+
y_i = \log_2(x_i)
|
|
1208
|
+
|
|
1209
|
+
.. warning::
|
|
1210
|
+
- This is an experimental API that is subject to change or deletion.
|
|
1211
|
+
- If the input value of operator Log2 is within the range (0, 0.01] or [0.95, 1.05], the output accuracy
|
|
1212
|
+
may be affacted.
|
|
1213
|
+
|
|
1214
|
+
Args:
|
|
1215
|
+
input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
|
|
1216
|
+
|
|
1217
|
+
Returns:
|
|
1218
|
+
Tensor, has the same shape as the `input`, and the dtype changes according to the `input.dtype`.
|
|
1219
|
+
|
|
1220
|
+
- if `input.dtype` is in [float16, float32, float64, bfloat16], the output dtype is the same as the `input.dtype`.
|
|
1221
|
+
- if `input.dtype` is integer or boolean type, the output dtype is float32.
|
|
1222
|
+
|
|
1223
|
+
Raises:
|
|
1224
|
+
TypeError: If `input` is not a Tensor.
|
|
1225
|
+
|
|
1226
|
+
Supported Platforms:
|
|
1227
|
+
``Ascend``
|
|
1228
|
+
|
|
1229
|
+
Examples:
|
|
1230
|
+
>>> import mindspore
|
|
1231
|
+
>>> import numpy as np
|
|
1232
|
+
>>> from mindspore import Tensor, mint
|
|
1233
|
+
>>> x = Tensor(np.array([3.0, 5.0, 7.0]), mindspore.float32)
|
|
1234
|
+
>>> output = mint.log2(x)
|
|
1235
|
+
>>> print(output)
|
|
1236
|
+
[1.5849625 2.321928 2.807355 ]
|
|
1237
|
+
"""
|
|
1238
|
+
return log2_impl(input)
|
|
1239
|
+
|
|
1240
|
+
|
|
936
1241
|
def log_softmax(input, dim=None, dtype=None):
|
|
937
1242
|
r"""
|
|
938
1243
|
Applies the Log Softmax function to the input tensor on the specified axis.
|
|
@@ -1016,11 +1321,58 @@ def logaddexp(input, other):
|
|
|
1016
1321
|
return logaddexp_impl(input, other)
|
|
1017
1322
|
|
|
1018
1323
|
|
|
1019
|
-
def
|
|
1324
|
+
def logsumexp(input, dim, keepdim=False):
|
|
1325
|
+
r"""
|
|
1326
|
+
Computes the logarithm of the sum of exponentiations of all elements along the specified `dim` dimension of the `input` (with numerical stabilization), and retains the dimension based on the `keepdim` parameter.
|
|
1327
|
+
|
|
1328
|
+
.. math::
|
|
1329
|
+
|
|
1330
|
+
logsumexp(input) = \log(\sum(e^{input-input_{max}})) + input_{max}
|
|
1331
|
+
|
|
1332
|
+
.. warning::
|
|
1333
|
+
This is an experimental API that is subject to change or deletion.
|
|
1334
|
+
|
|
1335
|
+
Args:
|
|
1336
|
+
input (Tensor): Input Tensor.
|
|
1337
|
+
dim (Union[int, tuple(int), list(int)], optional): The dimension to be reduced (the value should be within `[0, len(input.shape) - 1]`), when the `dim` is `()`, all dimensions are reduced.
|
|
1338
|
+
keepdim (bool, optional): Whether the output tensor retains the dimension `dim`, default: `False`.
|
|
1339
|
+
|
|
1340
|
+
Returns:
|
|
1341
|
+
Tensor, the dtype changes according to the `input.dtype`, and the shape changes according to the values of `dim` and `keepdim`.
|
|
1342
|
+
|
|
1343
|
+
- If `input.dtype` is in [float16, float32, bfloat16], the output dtype is the same as the `input.dtype`.
|
|
1344
|
+
- If `input.dtype` is an integer or boolean type, the output dtype is float32.
|
|
1345
|
+
- If `dim` is (), and `keepdim` is False, the output is a 0-D tensor representing the logarithm of the sum of exponentiations of all elements in the `input` tensor.
|
|
1346
|
+
- If `dim` is `1`, and `keepdim` is False, the shape of output is :math:`(input.shape[0], input.shape[2], ..., input.shape[n])`.
|
|
1347
|
+
- If `dim` is `(1, 2)`, and `keepdim` is False, the shape of output is :math:`(input.shape[0], input.shape[3], ..., input.shape[n])`.
|
|
1348
|
+
|
|
1349
|
+
Raises:
|
|
1350
|
+
TypeError: If `input` is not a Tensor.
|
|
1351
|
+
TypeError: If dtype of `input` is not one of: bool, int8, int16, int32, int64, uint8, float16, float32, bfloat16.
|
|
1352
|
+
TypeError: If `dim` is not an int or tuple(int) or list(list).
|
|
1353
|
+
TypeError: If `keepdim` is not a bool.
|
|
1354
|
+
ValueError: If the value of any elements of `dim` is not in the range `[0, len(input.shape) - 1]`.
|
|
1355
|
+
RuntimeError: If any element of `dim` is repeated.
|
|
1356
|
+
|
|
1357
|
+
Supported Platforms:
|
|
1358
|
+
``Ascend``
|
|
1359
|
+
|
|
1360
|
+
Examples:
|
|
1361
|
+
>>> import numpy as np
|
|
1362
|
+
>>> from mindspore import Tensor, ops
|
|
1363
|
+
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
|
|
1364
|
+
>>> output = ops.auto_generate.logsumexp_ext(x, 1, keepdim=True)
|
|
1365
|
+
>>> print(output.shape)
|
|
1366
|
+
(3, 1, 5, 6)
|
|
1367
|
+
"""
|
|
1368
|
+
return logsumexp_impl(input, dim, keepdim)
|
|
1369
|
+
|
|
1370
|
+
|
|
1371
|
+
def matmul(input, other):
|
|
1020
1372
|
r"""
|
|
1021
1373
|
None
|
|
1022
1374
|
"""
|
|
1023
|
-
return matmul_impl(input,
|
|
1375
|
+
return matmul_impl(input, other)
|
|
1024
1376
|
|
|
1025
1377
|
|
|
1026
1378
|
def matrix_inverse(input):
|
|
@@ -1053,42 +1405,93 @@ def matrix_inverse(input):
|
|
|
1053
1405
|
return matrix_inverse_impl(input)
|
|
1054
1406
|
|
|
1055
1407
|
|
|
1056
|
-
def
|
|
1408
|
+
def max_unpool2d(input, indices, kernel_size, stride=None, padding=0, output_size=None):
|
|
1409
|
+
r"""
|
|
1410
|
+
Computes the inverse of `max_pool2d`.
|
|
1411
|
+
|
|
1412
|
+
`max_unpool2d` keeps the maximal value and set all position of non-maximal values to zero. Typically the input is of shape :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`, and the output is of shape :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`. The operation is as follows.
|
|
1413
|
+
|
|
1414
|
+
.. math::
|
|
1415
|
+
\begin{array}{ll} \\
|
|
1416
|
+
H_{out} = (H_{in} - 1) \times stride[0] - 2 \times padding[0] + kernel\_size[0] \\
|
|
1417
|
+
W_{out} = (W_{in} - 1) \times stride[1] - 2 \times padding[1] + kernel\_size[1] \\
|
|
1418
|
+
\end{array}
|
|
1419
|
+
|
|
1420
|
+
.. warning::
|
|
1421
|
+
This is an experimental API that is subject to change or deletion.
|
|
1422
|
+
|
|
1423
|
+
Args:
|
|
1424
|
+
input (Tensor): The input Tensor to invert. Tensor of shape :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
|
|
1425
|
+
indices (Tensor): Max values' index represented by the indices. Tensor of shape must be same with input 'input'. Values of indices must belong to :math:`[0, H_{in} \times W_{in} - 1]`. Data type must be in int32 or int64.
|
|
1426
|
+
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value, an int number that represents height and width of the kernel, or a tuple of two int numbers that represent height and width respectively.
|
|
1427
|
+
stride (Union[int, tuple[int]], optional): The distance of kernel moving, an int number that represents the height and width of movement are both stride, or a tuple of two int numbers that represent height and width of movement respectively. Default: ``None`` , which indicates the moving step is `kernel_size` .
|
|
1428
|
+
padding (Union[int, tuple[int]], optional): The pad value to be filled. Default: ``0`` . If `padding` is an integer, the paddings of height and width are the same, equal to padding. If `padding` is a tuple of two integers, the padding of height and width equal to padding[0] and padding[1] correspondingly.
|
|
1429
|
+
output_size (tuple[int], optional): The target output size. Default: ``None`` . If output_size == (), then the shape of output computed by `kernel_size`, `stride` and `padding`. If output_size != (), then output_size must be :math:`(N, C, H, W)` , :math:`(C, H, W)` or :math:`(H, W)` and output_size must belong to :math:`[(N, C, H_{out} - stride[0], W_{out} - stride[1]), (N, C, H_{out} + stride[0], W_{out} + stride[1])]`.
|
|
1430
|
+
|
|
1431
|
+
Returns:
|
|
1432
|
+
Tensor, with shape :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, with the same data type with `input`.
|
|
1433
|
+
|
|
1434
|
+
Raises:
|
|
1435
|
+
TypeError: If data type of `input` or `indices` is not supported.
|
|
1436
|
+
TypeError: If `kernel_size`, `stride` or `padding` is neither an int nor a tuple.
|
|
1437
|
+
ValueError: If numbers in `stride`, `padding` or `kernel_size` are not positive.
|
|
1438
|
+
ValueError: If the shapes of `input` and `indices` are different.
|
|
1439
|
+
ValueError: If the length of `input` is not 3 or 4.
|
|
1440
|
+
ValueError: If the type of `output_size` is not tuple.
|
|
1441
|
+
ValueError: If `output_size` is not close to output size computed by attr `kernel_size`, `stride`, `padding`.
|
|
1442
|
+
|
|
1443
|
+
Supported Platforms:
|
|
1444
|
+
``Ascend``
|
|
1445
|
+
|
|
1446
|
+
Examples:
|
|
1447
|
+
>>> import numpy as np
|
|
1448
|
+
>>> from mindspore import Tensor, ops
|
|
1449
|
+
>>> input = Tensor(np.array([[[[0, 1], [8, 9]]]]).astype(np.float32))
|
|
1450
|
+
>>> indices = Tensor(np.array([[[[0, 1], [2, 3]]]]).astype(np.int64))
|
|
1451
|
+
>>> output = ops.max_unpool2d_ext(input, indices, 1, stride=1, padding=0)
|
|
1452
|
+
>>> print(output.asnumpy())
|
|
1453
|
+
[[[[0. 1.]
|
|
1454
|
+
[8. 9.]]]]
|
|
1455
|
+
"""
|
|
1456
|
+
return max_unpool2d_impl(input, indices, kernel_size, stride, padding, output_size)
|
|
1457
|
+
|
|
1458
|
+
|
|
1459
|
+
def mean(input, dim=None, keepdim=False, dtype=None):
|
|
1057
1460
|
r"""
|
|
1058
1461
|
Reduces all dimension of a tensor by averaging all elements in the dimension, by default.
|
|
1059
|
-
And reduce a dimension of `input` along the specified `
|
|
1462
|
+
And reduce a dimension of `input` along the specified `dim`. `keepdim`
|
|
1060
1463
|
determines whether the dimensions of the output and input are the same.
|
|
1061
1464
|
|
|
1062
1465
|
Note:
|
|
1063
|
-
The `
|
|
1466
|
+
The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
|
|
1064
1467
|
|
|
1065
1468
|
Args:
|
|
1066
1469
|
input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
|
|
1067
1470
|
:math:`(N, *)` where :math:`*` means, any number of additional dimensions.
|
|
1068
|
-
|
|
1471
|
+
dim (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` ,
|
|
1069
1472
|
reduce all dimensions. Only constant value is allowed. Assume the rank of `input` is r,
|
|
1070
1473
|
and the value range is [-r,r).
|
|
1071
|
-
|
|
1474
|
+
keepdim (bool): If ``True`` , keep these reduced dimensions and the length is 1.
|
|
1072
1475
|
If ``False`` , don't keep these dimensions. Default: ``False`` .
|
|
1073
1476
|
dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
|
|
1074
1477
|
|
|
1075
1478
|
Returns:
|
|
1076
1479
|
Tensor, has the same data type as input tensor.
|
|
1077
1480
|
|
|
1078
|
-
- If `
|
|
1481
|
+
- If `dim` is ``None`` , and `keepdim` is ``False`` ,
|
|
1079
1482
|
the output is a 0-D tensor representing the product of all elements in the input tensor.
|
|
1080
|
-
- If `
|
|
1483
|
+
- If `dim` is int, set as 1, and `keepdim` is ``False`` ,
|
|
1081
1484
|
the shape of output is :math:`(x_0, x_2, ..., x_R)`.
|
|
1082
|
-
- If `
|
|
1485
|
+
- If `dim` is tuple(int), set as (1, 2), and `keepdim` is ``False`` ,
|
|
1083
1486
|
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
|
|
1084
|
-
- If `
|
|
1487
|
+
- If `dim` is 1-D Tensor, set as [1, 2], and `keepdim` is ``False`` ,
|
|
1085
1488
|
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
|
|
1086
1489
|
|
|
1087
1490
|
Raises:
|
|
1088
1491
|
TypeError: If `x` is not a Tensor.
|
|
1089
|
-
TypeError: If `
|
|
1090
|
-
TypeError: If `
|
|
1091
|
-
ValueError: If `
|
|
1492
|
+
TypeError: If `dim` is not one of the following: int, tuple, list or Tensor.
|
|
1493
|
+
TypeError: If `keepdim` is not a bool.
|
|
1494
|
+
ValueError: If `dim` is out of range.
|
|
1092
1495
|
|
|
1093
1496
|
Supported Platforms:
|
|
1094
1497
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -1098,7 +1501,7 @@ def mean(input, axis=None, keep_dims=False, dtype=None):
|
|
|
1098
1501
|
>>> import numpy as np
|
|
1099
1502
|
>>> from mindspore import Tensor, ops
|
|
1100
1503
|
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
|
|
1101
|
-
>>> output = ops.
|
|
1504
|
+
>>> output = ops.mean_ext(x, 1, keepdim=True)
|
|
1102
1505
|
>>> result = output.shape
|
|
1103
1506
|
>>> print(result)
|
|
1104
1507
|
(3, 1, 5, 6)
|
|
@@ -1107,25 +1510,25 @@ def mean(input, axis=None, keep_dims=False, dtype=None):
|
|
|
1107
1510
|
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
|
|
1108
1511
|
... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
|
|
1109
1512
|
... mindspore.float32)
|
|
1110
|
-
>>> output = ops.
|
|
1513
|
+
>>> output = ops.mean_ext(x)
|
|
1111
1514
|
>>> print(output)
|
|
1112
1515
|
5.0
|
|
1113
1516
|
>>> print(output.shape)
|
|
1114
1517
|
()
|
|
1115
|
-
>>> # case 2: Reduces a dimension along the
|
|
1116
|
-
>>> output = ops.
|
|
1518
|
+
>>> # case 2: Reduces a dimension along the dim 0
|
|
1519
|
+
>>> output = ops.mean_ext(x, 0, True)
|
|
1117
1520
|
>>> print(output)
|
|
1118
1521
|
[[[4. 4. 4. 4. 4. 4.]
|
|
1119
1522
|
[5. 5. 5. 5. 5. 5.]
|
|
1120
1523
|
[6. 6. 6. 6. 6. 6.]]]
|
|
1121
|
-
>>> # case 3: Reduces a dimension along the
|
|
1122
|
-
>>> output = ops.
|
|
1524
|
+
>>> # case 3: Reduces a dimension along the dim 1
|
|
1525
|
+
>>> output = ops.mean_ext(x, 1, True)
|
|
1123
1526
|
>>> print(output)
|
|
1124
1527
|
[[[2. 2. 2. 2. 2. 2.]]
|
|
1125
1528
|
[[5. 5. 5. 5. 5. 5.]]
|
|
1126
1529
|
[[8. 8. 8. 8. 8. 8.]]]
|
|
1127
|
-
>>> # case 4: Reduces a dimension along the
|
|
1128
|
-
>>> output = ops.
|
|
1530
|
+
>>> # case 4: Reduces a dimension along the dim 2
|
|
1531
|
+
>>> output = ops.mean_ext(x, 2, True)
|
|
1129
1532
|
>>> print(output)
|
|
1130
1533
|
[[[ 2.]
|
|
1131
1534
|
[ 2.]
|
|
@@ -1137,7 +1540,7 @@ def mean(input, axis=None, keep_dims=False, dtype=None):
|
|
|
1137
1540
|
[ 8.]
|
|
1138
1541
|
[10.]]]
|
|
1139
1542
|
"""
|
|
1140
|
-
return mean_impl(input,
|
|
1543
|
+
return mean_impl(input, dim, keepdim, dtype)
|
|
1141
1544
|
|
|
1142
1545
|
|
|
1143
1546
|
def mish(input):
|
|
@@ -1186,6 +1589,50 @@ def mish(input):
|
|
|
1186
1589
|
return mish_impl(input)
|
|
1187
1590
|
|
|
1188
1591
|
|
|
1592
|
+
def mm(input, mat2):
|
|
1593
|
+
r"""
|
|
1594
|
+
Returns the matrix product of two arrays.
|
|
1595
|
+
If `input` is a :math:`(n \times m)` Tensor, `mat2` is a
|
|
1596
|
+
:math:`(m \times p)` Tensor, `out` will be a :math:`(n \times p)` Tensor.
|
|
1597
|
+
|
|
1598
|
+
Note:
|
|
1599
|
+
This function cannot support broadcasting.
|
|
1600
|
+
Refer to :func:`mindspore.ops.matmul` instead if you need a broadcastable function.
|
|
1601
|
+
|
|
1602
|
+
.. warning::
|
|
1603
|
+
This is an experimental API that is subject to change or deletion.
|
|
1604
|
+
|
|
1605
|
+
Args:
|
|
1606
|
+
input (Tensor): The first matrix of matrix multiplication.
|
|
1607
|
+
The last dimension of `input` must be the same size as the first dimension of `mat2`.
|
|
1608
|
+
mat2 (Tensor): The second matrix of matrix multiplication.
|
|
1609
|
+
The last dimension of `input` must be the same size as the first dimension of `mat2`.
|
|
1610
|
+
|
|
1611
|
+
Returns:
|
|
1612
|
+
Tensor, the matrix product of the inputs.
|
|
1613
|
+
|
|
1614
|
+
Raises:
|
|
1615
|
+
ValueError: If the last dimension of `input` is not the same size as the
|
|
1616
|
+
second-to-last dimension of `mat2`.
|
|
1617
|
+
TypeError: If `input` or `mat2` is not a Tensor.
|
|
1618
|
+
TypeError: If dtype of `input` or `mat2` is not float16, float32 or bfloat16.
|
|
1619
|
+
|
|
1620
|
+
Supported Platforms:
|
|
1621
|
+
``Ascend``
|
|
1622
|
+
|
|
1623
|
+
Examples:
|
|
1624
|
+
>>> import mindspore as ms
|
|
1625
|
+
>>> from mindspore import ops
|
|
1626
|
+
>>> import numpy as np
|
|
1627
|
+
>>> x1 = ms.Tensor(np.random.rand(2, 3), ms.float32)
|
|
1628
|
+
>>> x2 = ms.Tensor(np.random.rand(3, 4), ms.float32)
|
|
1629
|
+
>>> out = ops.mm_ext(x1, x2)
|
|
1630
|
+
>>> print(out.shape)
|
|
1631
|
+
(2, 4)
|
|
1632
|
+
"""
|
|
1633
|
+
return mm_impl(input, mat2)
|
|
1634
|
+
|
|
1635
|
+
|
|
1189
1636
|
def mse_loss(input, target, reduction='mean'):
|
|
1190
1637
|
r"""
|
|
1191
1638
|
Calculates the mean squared error between the predicted value and the label value.
|
|
@@ -1272,34 +1719,34 @@ def outer(input, vec2):
|
|
|
1272
1719
|
return outer_impl(input, vec2)
|
|
1273
1720
|
|
|
1274
1721
|
|
|
1275
|
-
def prod(input,
|
|
1722
|
+
def prod(input, dim=None, keepdim=False, dtype=None):
|
|
1276
1723
|
r"""
|
|
1277
1724
|
Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can
|
|
1278
|
-
reduce a dimension of `input` along the `
|
|
1279
|
-
same by controlling `
|
|
1725
|
+
reduce a dimension of `input` along the `dim`. Determine whether the dimensions of the output and input are the
|
|
1726
|
+
same by controlling `keepdim`.
|
|
1280
1727
|
|
|
1281
1728
|
Args:
|
|
1282
1729
|
input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
|
|
1283
1730
|
:math:`(N, *)` where :math:`*` means, any number of additional dimensions.
|
|
1284
|
-
|
|
1731
|
+
dim (int): The dimensions to reduce. Default: ``None`` , reduce all dimensions.
|
|
1285
1732
|
Only constant value is allowed. Assume the rank of `input` is r, and the value range is [-r,r).
|
|
1286
|
-
|
|
1733
|
+
keepdim (bool): If ``True`` , keep these reduced dimensions and the length is 1.
|
|
1287
1734
|
If ``False`` , don't keep these dimensions. Default: ``False`` .
|
|
1288
1735
|
dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
|
|
1289
1736
|
|
|
1290
1737
|
Returns:
|
|
1291
1738
|
Tensor, has the same data type as input tensor.
|
|
1292
1739
|
|
|
1293
|
-
- If `
|
|
1740
|
+
- If `dim` is ``None`` , and `keepdim` is ``False`` ,
|
|
1294
1741
|
the output is a 0-D tensor representing the product of all elements in the input tensor.
|
|
1295
|
-
- If `
|
|
1742
|
+
- If `dim` is int, set as 1, and `keepdim` is ``False`` ,
|
|
1296
1743
|
the shape of output is :math:`(input_0, input_2, ..., input_R)`.
|
|
1297
1744
|
|
|
1298
1745
|
Raises:
|
|
1299
1746
|
TypeError: If `input` is not a Tensor.
|
|
1300
|
-
TypeError: If `
|
|
1301
|
-
TypeError: If `
|
|
1302
|
-
ValueError: If `
|
|
1747
|
+
TypeError: If `dim` is not one of the following: int or None.
|
|
1748
|
+
TypeError: If `keepdim` is not a bool.
|
|
1749
|
+
ValueError: If `dim` is out of range.
|
|
1303
1750
|
|
|
1304
1751
|
Supported Platforms:
|
|
1305
1752
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -1309,7 +1756,7 @@ def prod(input, axis=None, keep_dims=False, dtype=None):
|
|
|
1309
1756
|
>>> import numpy as np
|
|
1310
1757
|
>>> from mindspore import Tensor, ops
|
|
1311
1758
|
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
|
|
1312
|
-
>>> output = ops.
|
|
1759
|
+
>>> output = ops.prod_ext(x, 1, keepdim=True)
|
|
1313
1760
|
>>> result = output.shape
|
|
1314
1761
|
>>> print(result)
|
|
1315
1762
|
(3, 1, 5, 6)
|
|
@@ -1317,25 +1764,25 @@ def prod(input, axis=None, keep_dims=False, dtype=None):
|
|
|
1317
1764
|
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
|
|
1318
1765
|
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
|
|
1319
1766
|
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
|
|
1320
|
-
>>> output = ops.
|
|
1767
|
+
>>> output = ops.prod_ext(x)
|
|
1321
1768
|
>>> print(output)
|
|
1322
1769
|
2.2833798e+33
|
|
1323
1770
|
>>> print(output.shape)
|
|
1324
1771
|
()
|
|
1325
|
-
>>> # case 2: Reduces a dimension along
|
|
1326
|
-
>>> output = ops.
|
|
1772
|
+
>>> # case 2: Reduces a dimension along dim 0.
|
|
1773
|
+
>>> output = ops.prod_ext(x, 0, True)
|
|
1327
1774
|
>>> print(output)
|
|
1328
1775
|
[[[ 28. 28. 28. 28. 28. 28.]
|
|
1329
1776
|
[ 80. 80. 80. 80. 80. 80.]
|
|
1330
1777
|
[162. 162. 162. 162. 162. 162.]]]
|
|
1331
|
-
>>> # case 3: Reduces a dimension along
|
|
1332
|
-
>>> output = ops.
|
|
1778
|
+
>>> # case 3: Reduces a dimension along dim 1.
|
|
1779
|
+
>>> output = ops.prod_ext(x, 1, True)
|
|
1333
1780
|
>>> print(output)
|
|
1334
1781
|
[[[ 6. 6. 6. 6. 6. 6.]]
|
|
1335
1782
|
[[120. 120. 120. 120. 120. 120.]]
|
|
1336
1783
|
[[504. 504. 504. 504. 504. 504.]]]
|
|
1337
|
-
>>> # case 4: Reduces a dimension along
|
|
1338
|
-
>>> output = ops.
|
|
1784
|
+
>>> # case 4: Reduces a dimension along dim 2.
|
|
1785
|
+
>>> output = ops.prod_ext(x, 2, True)
|
|
1339
1786
|
>>> print(output)
|
|
1340
1787
|
[[[1.00000e+00]
|
|
1341
1788
|
[6.40000e+01]
|
|
@@ -1347,7 +1794,7 @@ def prod(input, axis=None, keep_dims=False, dtype=None):
|
|
|
1347
1794
|
[2.62144e+05]
|
|
1348
1795
|
[5.31441e+05]]]
|
|
1349
1796
|
"""
|
|
1350
|
-
return prod_impl(input,
|
|
1797
|
+
return prod_impl(input, dim, keepdim, dtype)
|
|
1351
1798
|
|
|
1352
1799
|
|
|
1353
1800
|
def select(input, dim, index):
|
|
@@ -1376,7 +1823,6 @@ def select(input, dim, index):
|
|
|
1376
1823
|
>>> from mindspore import Tensor, mint
|
|
1377
1824
|
>>> input = Tensor([[2, 3, 4, 5],[3, 2, 4, 5]])
|
|
1378
1825
|
>>> y = mint.select(input, 0, 0)
|
|
1379
|
-
>>> y = Tensor([1,2], mindspore.float32)
|
|
1380
1826
|
>>> print(y)
|
|
1381
1827
|
[2 3 4 5]
|
|
1382
1828
|
"""
|
|
@@ -1494,7 +1940,7 @@ def stack(tensors, dim=0):
|
|
|
1494
1940
|
|
|
1495
1941
|
Args:
|
|
1496
1942
|
tensors (Union[tuple, list]): A Tuple or list of Tensor objects with the same shape and type.
|
|
1497
|
-
dim (int): Dimension to stack. The range is [-(R+1), R+1). Default: ``0`` .
|
|
1943
|
+
dim (int, optional): Dimension to stack. The range is [-(R+1), R+1). Default: ``0`` .
|
|
1498
1944
|
|
|
1499
1945
|
Returns:
|
|
1500
1946
|
Tensor. A stacked Tensor with the same type as `tensors`.
|
|
@@ -1502,7 +1948,7 @@ def stack(tensors, dim=0):
|
|
|
1502
1948
|
Raises:
|
|
1503
1949
|
TypeError: If the data types of elements in `tensors` are not the same.
|
|
1504
1950
|
ValueError: If `dim` is out of the range [-(R+1), R+1);
|
|
1505
|
-
or if the shapes of elements in tensors are not the same.
|
|
1951
|
+
or if the shapes of elements in `tensors` are not the same.
|
|
1506
1952
|
|
|
1507
1953
|
Supported Platforms:
|
|
1508
1954
|
``Ascend``
|
|
@@ -1577,6 +2023,54 @@ def sub(input, other, alpha=1):
|
|
|
1577
2023
|
return sub_impl(input, other, alpha)
|
|
1578
2024
|
|
|
1579
2025
|
|
|
2026
|
+
def sum(input, dim=None, keepdim=False, dtype=None):
|
|
2027
|
+
r"""
|
|
2028
|
+
Alias for :func:`mindspore.mint.transpose` . The `input` corresponds to the `input` in the reference interface,
|
|
2029
|
+
and the parameters `axis0` and `axis1` correspond to `dim0` and `dim1` in the reference interface respectively.
|
|
2030
|
+
|
|
2031
|
+
.. warning::
|
|
2032
|
+
This is an experimental API that is subject to change or deletion.
|
|
2033
|
+
|
|
2034
|
+
Refer to :func:`mindspore.mint.transpose` for more details.
|
|
2035
|
+
"""
|
|
2036
|
+
return sum_impl(input, dim, keepdim, dtype)
|
|
2037
|
+
|
|
2038
|
+
|
|
2039
|
+
def t(input):
|
|
2040
|
+
r"""
|
|
2041
|
+
Transpose the input tensor.
|
|
2042
|
+
|
|
2043
|
+
.. warning::
|
|
2044
|
+
This is an experimental API that is subject to change or deletion.
|
|
2045
|
+
|
|
2046
|
+
Args:
|
|
2047
|
+
input (Tensor): The input tensor.
|
|
2048
|
+
|
|
2049
|
+
Returns:
|
|
2050
|
+
Tensor, transpose 2D tensor, return 1D tensor as it is.
|
|
2051
|
+
|
|
2052
|
+
Raises:
|
|
2053
|
+
ValueError: If the dimension of `input` is greater than 2.
|
|
2054
|
+
ValueError: If `input` is empty.
|
|
2055
|
+
TypeError: If `input` is not a tensor.
|
|
2056
|
+
|
|
2057
|
+
Supported Platforms:
|
|
2058
|
+
``Ascend``
|
|
2059
|
+
|
|
2060
|
+
Examples:
|
|
2061
|
+
>>> import mindspore
|
|
2062
|
+
>>> import numpy as np
|
|
2063
|
+
>>> from mindspore import Tensor, ops
|
|
2064
|
+
>>> input = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), mindspore.float32)
|
|
2065
|
+
>>> output = ops.t_ext(input)
|
|
2066
|
+
>>> print(output)
|
|
2067
|
+
[[ 1. 4.]
|
|
2068
|
+
[ 2. 5.]
|
|
2069
|
+
[ 3. 6.]]
|
|
2070
|
+
"""
|
|
2071
|
+
return t_impl(input)
|
|
2072
|
+
|
|
2073
|
+
|
|
1580
2074
|
def topk(input, k, dim=-1, largest=True, sorted=True):
|
|
1581
2075
|
r"""
|
|
1582
2076
|
Finds values and indices of the `k` largest or smallest entries along a given dimension.
|