mindspore 2.4.10__cp310-none-any.whl → 2.5.0__cp310-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic. See the package's advisory page on the registry for more details.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +39 -0
- mindspore/__init__.py +8 -3
- mindspore/_akg/akg/composite/build_module.py +6 -2
- mindspore/_akg/akg/utils/kernel_exec.py +2 -2
- mindspore/_c_dataengine.cpython-310-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-310-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-310-aarch64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +0 -5
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/compile_config.py +64 -0
- mindspore/_extends/parse/deprecated/__init__.py +0 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +375 -0
- mindspore/_extends/parse/parser.py +23 -5
- mindspore/_extends/parse/standard_method.py +123 -27
- mindspore/_extends/pijit/pijit_func_white_list.py +1 -1
- mindspore/amp.py +7 -1
- mindspore/boost/boost_cell_wrapper.py +136 -41
- mindspore/common/__init__.py +3 -1
- mindspore/common/_register_for_tensor.py +0 -1
- mindspore/common/_stub_tensor.py +25 -4
- mindspore/common/_tensor_cpp_method.py +17 -0
- mindspore/common/_tensor_docs.py +6132 -0
- mindspore/common/api.py +98 -21
- mindspore/common/dtype.py +34 -34
- mindspore/common/dump.py +2 -1
- mindspore/common/file_system.py +8 -3
- mindspore/common/generator.py +2 -0
- mindspore/common/hook_handle.py +3 -1
- mindspore/common/initializer.py +3 -4
- mindspore/common/lazy_inline.py +8 -2
- mindspore/common/mindir_util.py +10 -2
- mindspore/common/parameter.py +31 -15
- mindspore/common/tensor.py +713 -1337
- mindspore/communication/__init__.py +1 -1
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/comm_func.py +215 -173
- mindspore/communication/management.py +23 -20
- mindspore/context.py +285 -191
- mindspore/dataset/__init__.py +23 -19
- mindspore/dataset/callback/ds_callback.py +2 -1
- mindspore/dataset/core/config.py +84 -3
- mindspore/dataset/engine/cache_admin.py +3 -3
- mindspore/dataset/engine/cache_client.py +5 -4
- mindspore/dataset/engine/datasets.py +192 -149
- mindspore/dataset/engine/datasets_audio.py +14 -0
- mindspore/dataset/engine/datasets_standard_format.py +11 -11
- mindspore/dataset/engine/datasets_text.py +38 -1
- mindspore/dataset/engine/datasets_user_defined.py +100 -66
- mindspore/dataset/engine/datasets_vision.py +81 -8
- mindspore/dataset/engine/iterators.py +281 -63
- mindspore/dataset/engine/obs/util.py +8 -0
- mindspore/dataset/engine/queue.py +40 -0
- mindspore/dataset/engine/samplers.py +26 -2
- mindspore/dataset/engine/serializer_deserializer.py +1 -1
- mindspore/dataset/engine/validators.py +43 -11
- mindspore/dataset/transforms/py_transforms_util.py +17 -0
- mindspore/dataset/transforms/transforms.py +29 -12
- mindspore/dataset/vision/validators.py +1 -2
- mindspore/device_context/__init__.py +21 -0
- mindspore/device_context/ascend/__init__.py +25 -0
- mindspore/device_context/ascend/device.py +72 -0
- mindspore/device_context/ascend/op_debug.py +94 -0
- mindspore/device_context/ascend/op_precision.py +193 -0
- mindspore/device_context/ascend/op_tuning.py +127 -0
- mindspore/device_context/cpu/__init__.py +25 -0
- mindspore/device_context/cpu/device.py +62 -0
- mindspore/device_context/cpu/op_tuning.py +43 -0
- mindspore/device_context/gpu/__init__.py +21 -0
- mindspore/device_context/gpu/device.py +70 -0
- mindspore/device_context/gpu/op_precision.py +67 -0
- mindspore/device_context/gpu/op_tuning.py +175 -0
- mindspore/device_manager.py +134 -0
- mindspore/experimental/llm_boost/__init__.py +1 -0
- mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
- mindspore/experimental/llm_boost/register.py +1 -0
- mindspore/experimental/optim/adadelta.py +26 -22
- mindspore/experimental/optim/adam.py +3 -0
- mindspore/experimental/optim/lr_scheduler.py +33 -24
- mindspore/experimental/optim/radam.py +33 -30
- mindspore/hal/device.py +28 -0
- mindspore/hal/event.py +17 -0
- mindspore/hal/memory.py +94 -3
- mindspore/hal/stream.py +91 -6
- mindspore/include/api/context.h +0 -1
- mindspore/lib/libavcodec.so.59 +0 -0
- mindspore/lib/libavdevice.so.59 +0 -0
- mindspore/lib/libavfilter.so.8 +0 -0
- mindspore/lib/libavformat.so.59 +0 -0
- mindspore/lib/libavutil.so.57 +0 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_ops.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/libswresample.so.4 +0 -0
- mindspore/lib/libswscale.so.6 +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910_93/aic-ascend910_93-ops-info.json +2048 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl/dynamic/decoder_kv_cache.py +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl/dynamic/prompt_kv_cache.py +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/version.info +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/config/ascend910_93/aic-ascend910_93-ops-info.json +224 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/all_finite.py +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/decoder_kv_cache.py +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/prompt_kv_cache.py +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.json +78 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.json +78 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.json +78 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/all_finite.json +139 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/binary_info_config.json +361 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/decoder_kv_cache.json +892 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/prompt_kv_cache.json +892 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/version.info +1 -1
- mindspore/lib/plugin/ascend/custom_compiler/setup.py +1 -1
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_internal_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libms_ascend_native_boost.so +0 -0
- mindspore/lib/plugin/ascend/libms_atb_boost.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +957 -955
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/liblcal_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{acme/include/base_type.h → base_type.h} +25 -20
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{cast/cast_tiling.h → internal.h} +6 -4
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_op.h +114 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/boost_kernel.h +70 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/llama_impl.h +85 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/model_interface.h +52 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/tensor.h +81 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_creator.h +123 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +155 -110
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{acme/include/tiling_info.h → tiling_info.h} +12 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tiling_utils.h +178 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layer_norm_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_quant_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_310p_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcompare_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libllama_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_optiling.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmulti_weight_matmul_kernel_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_f16_nz/internal_pp_matmul_f16_nz.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_f16_nz/internal_pp_matmul_f16_nz_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_i8_nz_compress/internal_pp_matmul_i8_nz_compress.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_i8_nz_compress/internal_pp_matmul_i8_nz_compress_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_int8_nz/internal_pp_matmul_int8_nz.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_int8_nz/internal_pp_matmul_int8_nz_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libadd_rms_norm_quant_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libapply_rotary_pos_emb_310p_impl.so → op_kernels/ascend310p/so_kernels/libapply_rotary_pos_emb_310p_ascend310p.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libcast_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libcompare_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libgelu_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libmatmul_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libreshape_and_cache_nz_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_4b60f88cdc28b25a36bad2d8b0a88092.json +163 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_4b60f88cdc28b25a36bad2d8b0a88092.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_cde61da2bd6fededcb1ba310a6ad16ee.json +163 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_cde61da2bd6fededcb1ba310a6ad16ee.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_bf16_bf16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_bf16_fp16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_bf16_fp32.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_fp16_bf16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_fp16_fp16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_fp16_fp32.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libadd_layer_norm_impl.so → op_kernels/ascend910b/so_kernels/libadd_layer_norm_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libadd_rms_norm_impl.so → op_kernels/ascend910b/so_kernels/libadd_rms_norm_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/so_kernels/libadd_rms_norm_quant_ascend910b.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libapply_rotary_pos_emb_impl.so → op_kernels/ascend910b/so_kernels/libapply_rotary_pos_emb_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libcast_impl.so → op_kernels/ascend910b/so_kernels/libcast_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libnot_equal_impl.so → op_kernels/ascend910b/so_kernels/libcompare_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libgelu_impl.so → op_kernels/ascend910b/so_kernels/libgelu_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/so_kernels/libllama_ascend910b.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libmatmul_impl.so → op_kernels/ascend910b/so_kernels/libmatmul_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libmulti_weight_matmul_kernel_impl.so → op_kernels/ascend910b/so_kernels/libmulti_weight_matmul_kernel_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libreshape_and_cache_impl.so → op_kernels/ascend910b/so_kernels/libreshape_and_cache_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/librms_norm_impl.so → op_kernels/ascend910b/so_kernels/librms_norm_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/log.py +12 -0
- mindspore/mindrecord/__init__.py +1 -1
- mindspore/mindrecord/config.py +17 -316
- mindspore/mindrecord/filereader.py +1 -9
- mindspore/mindrecord/filewriter.py +5 -15
- mindspore/mindrecord/mindpage.py +1 -9
- mindspore/mint/__init__.py +824 -218
- mindspore/mint/distributed/__init__.py +66 -4
- mindspore/mint/distributed/distributed.py +2594 -44
- mindspore/mint/linalg/__init__.py +6 -0
- mindspore/mint/nn/__init__.py +473 -14
- mindspore/mint/nn/functional.py +486 -11
- mindspore/mint/nn/layer/__init__.py +17 -4
- mindspore/mint/nn/layer/_functions.py +330 -0
- mindspore/mint/nn/layer/activation.py +169 -1
- mindspore/mint/nn/layer/basic.py +123 -0
- mindspore/mint/nn/layer/conv.py +727 -0
- mindspore/mint/nn/layer/normalization.py +215 -19
- mindspore/mint/nn/layer/padding.py +797 -0
- mindspore/mint/nn/layer/pooling.py +170 -0
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/adam.py +223 -0
- mindspore/mint/optim/adamw.py +26 -19
- mindspore/mint/special/__init__.py +2 -1
- mindspore/multiprocessing/__init__.py +5 -0
- mindspore/nn/cell.py +126 -19
- mindspore/nn/dynamic_lr.py +2 -1
- mindspore/nn/layer/activation.py +6 -6
- mindspore/nn/layer/basic.py +35 -25
- mindspore/nn/layer/channel_shuffle.py +3 -3
- mindspore/nn/layer/embedding.py +3 -3
- mindspore/nn/layer/normalization.py +8 -7
- mindspore/nn/layer/padding.py +4 -3
- mindspore/nn/layer/pooling.py +47 -13
- mindspore/nn/layer/rnn_cells.py +1 -1
- mindspore/nn/layer/rnns.py +2 -1
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +48 -26
- mindspore/nn/learning_rate_schedule.py +5 -3
- mindspore/nn/loss/loss.py +31 -36
- mindspore/nn/optim/ada_grad.py +1 -0
- mindspore/nn/optim/adadelta.py +2 -2
- mindspore/nn/optim/adam.py +1 -1
- mindspore/nn/optim/lars.py +1 -4
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/rprop.py +2 -2
- mindspore/nn/optim/thor.py +2 -1
- mindspore/nn/utils/init.py +13 -11
- mindspore/nn/wrap/cell_wrapper.py +4 -6
- mindspore/nn/wrap/loss_scale.py +3 -4
- mindspore/numpy/array_creations.py +60 -62
- mindspore/numpy/array_ops.py +148 -143
- mindspore/numpy/logic_ops.py +41 -42
- mindspore/numpy/math_ops.py +361 -359
- mindspore/numpy/utils.py +16 -16
- mindspore/numpy/utils_const.py +4 -4
- mindspore/ops/__init__.py +2 -1
- mindspore/ops/_grad_experimental/grad_comm_ops.py +94 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +6 -1
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
- mindspore/ops/_vmap/vmap_array_ops.py +20 -19
- mindspore/ops/_vmap/vmap_base.py +0 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +19 -13
- mindspore/ops/_vmap/vmap_math_ops.py +11 -9
- mindspore/ops/_vmap/vmap_nn_ops.py +20 -34
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +149 -12
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -61
- mindspore/ops/auto_generate/gen_extend_func.py +554 -60
- mindspore/ops/auto_generate/gen_ops_def.py +1621 -115
- mindspore/ops/auto_generate/gen_ops_prim.py +8024 -3409
- mindspore/ops/auto_generate/pyboost_inner_prim.py +183 -79
- mindspore/ops/composite/base.py +1 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +229 -30
- mindspore/ops/composite/multitype_ops/pow_impl.py +0 -29
- mindspore/ops/function/__init__.py +12 -0
- mindspore/ops/function/array_func.py +561 -159
- mindspore/ops/function/clip_func.py +64 -0
- mindspore/ops/function/debug_func.py +28 -20
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +5 -4
- mindspore/ops/function/math_func.py +1659 -290
- mindspore/ops/function/nn_func.py +988 -317
- mindspore/ops/function/parameter_func.py +3 -56
- mindspore/ops/function/random_func.py +243 -33
- mindspore/ops/function/sparse_unary_func.py +1 -1
- mindspore/ops/functional.py +18 -5
- mindspore/ops/functional_overload.py +897 -0
- mindspore/ops/operations/__init__.py +3 -2
- mindspore/ops/operations/_embedding_cache_ops.py +4 -4
- mindspore/ops/operations/_grad_ops.py +2 -34
- mindspore/ops/operations/_infer_ops.py +2 -1
- mindspore/ops/operations/_inner_ops.py +38 -8
- mindspore/ops/operations/array_ops.py +45 -303
- mindspore/ops/operations/comm_ops.py +19 -16
- mindspore/ops/operations/custom_ops.py +11 -55
- mindspore/ops/operations/debug_ops.py +42 -47
- mindspore/ops/operations/inner_ops.py +6 -4
- mindspore/ops/operations/linalg_ops.py +3 -2
- mindspore/ops/operations/manually_defined/ops_def.py +185 -104
- mindspore/ops/operations/math_ops.py +11 -216
- mindspore/ops/operations/nn_ops.py +146 -308
- mindspore/ops/primitive.py +23 -21
- mindspore/ops/tensor_method.py +1669 -0
- mindspore/ops_generate/aclnn_kernel_register_auto_cc_generator.py +110 -0
- mindspore/ops_generate/add_tensor_docs_generator.py +54 -0
- mindspore/ops_generate/arg_handler.py +0 -61
- mindspore/ops_generate/auto_grad_impl_cc_generator.py +135 -0
- mindspore/ops_generate/auto_grad_reg_cc_generator.py +93 -0
- mindspore/ops_generate/base_generator.py +11 -0
- mindspore/ops_generate/cpp_create_prim_instance_helper_generator.py +108 -0
- mindspore/ops_generate/functional_map_cpp_generator.py +491 -0
- mindspore/ops_generate/functional_overload_py_generator.py +110 -0
- mindspore/ops_generate/functions_cc_generator.py +233 -0
- mindspore/ops_generate/gen_aclnn_implement.py +110 -114
- mindspore/ops_generate/gen_constants.py +157 -3
- mindspore/ops_generate/gen_ops.py +245 -990
- mindspore/ops_generate/gen_pyboost_func.py +97 -998
- mindspore/ops_generate/gen_utils.py +119 -33
- mindspore/ops_generate/lite_ops_cpp_generator.py +155 -0
- mindspore/ops_generate/op_api_proto.py +206 -0
- mindspore/ops_generate/op_def_py_generator.py +131 -0
- mindspore/ops_generate/op_prim_py_generator.py +480 -0
- mindspore/ops_generate/op_proto.py +373 -108
- mindspore/ops_generate/op_template_parser.py +436 -0
- mindspore/ops_generate/ops_def_cc_generator.py +288 -0
- mindspore/ops_generate/ops_def_h_generator.py +74 -0
- mindspore/ops_generate/ops_name_h_generator.py +68 -0
- mindspore/ops_generate/ops_primitive_h_generator.py +81 -0
- mindspore/ops_generate/pyboost_functions_cpp_generator.py +370 -0
- mindspore/ops_generate/pyboost_functions_h_generator.py +68 -0
- mindspore/ops_generate/pyboost_functions_py_generator.py +148 -0
- mindspore/ops_generate/pyboost_grad_function_cpp_generator.py +154 -0
- mindspore/ops_generate/pyboost_inner_prim_generator.py +131 -0
- mindspore/ops_generate/pyboost_native_grad_functions_generator.py +268 -0
- mindspore/ops_generate/pyboost_op_cpp_code_generator.py +851 -0
- mindspore/ops_generate/pyboost_overload_functions_cpp_generator.py +344 -0
- mindspore/ops_generate/pyboost_utils.py +92 -33
- mindspore/ops_generate/template.py +294 -44
- mindspore/ops_generate/tensor_func_reg_cpp_generator.py +422 -0
- mindspore/parallel/__init__.py +3 -3
- mindspore/parallel/_auto_parallel_context.py +24 -33
- mindspore/parallel/_parallel_serialization.py +13 -2
- mindspore/parallel/_utils.py +4 -1
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +44 -0
- mindspore/parallel/cluster/process_entity/_api.py +131 -37
- mindspore/parallel/cluster/process_entity/_utils.py +41 -6
- mindspore/parallel/cluster/run.py +20 -3
- mindspore/parallel/parameter_broadcast.py +1 -1
- mindspore/parallel/shard.py +3 -0
- mindspore/parallel/transform_safetensors.py +119 -253
- mindspore/profiler/__init__.py +17 -4
- mindspore/profiler/analysis/__init__.py +0 -0
- mindspore/profiler/analysis/parser/__init__.py +0 -0
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +166 -0
- mindspore/profiler/analysis/parser/base_parser.py +158 -0
- mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
- mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
- mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +261 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +84 -0
- mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
- mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
- mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
- mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +260 -0
- mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
- mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
- mindspore/profiler/analysis/task_manager.py +131 -0
- mindspore/profiler/analysis/time_converter.py +84 -0
- mindspore/profiler/analysis/viewer/__init__.py +0 -0
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +333 -0
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +252 -0
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +313 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +322 -0
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +265 -0
- mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
- mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +97 -0
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
- mindspore/profiler/analysis/work_flow.py +73 -0
- mindspore/profiler/common/ascend_msprof_exporter.py +138 -0
- mindspore/profiler/common/command_executor.py +90 -0
- mindspore/profiler/common/constant.py +174 -3
- mindspore/profiler/common/file_manager.py +208 -0
- mindspore/profiler/common/log.py +130 -0
- mindspore/profiler/common/msprof_cmd_tool.py +202 -0
- mindspore/profiler/common/path_manager.py +371 -0
- mindspore/profiler/common/process_bar.py +168 -0
- mindspore/profiler/common/process_pool.py +9 -3
- mindspore/profiler/common/profiler_context.py +476 -0
- mindspore/profiler/common/profiler_info.py +304 -0
- mindspore/profiler/common/profiler_output_path.py +284 -0
- mindspore/profiler/common/profiler_parameters.py +210 -0
- mindspore/profiler/common/profiler_path_manager.py +120 -0
- mindspore/profiler/common/record_function.py +76 -0
- mindspore/profiler/common/tlv_decoder.py +76 -0
- mindspore/profiler/common/util.py +75 -2
- mindspore/profiler/dynamic_profiler.py +270 -37
- mindspore/profiler/envprofiler.py +138 -0
- mindspore/profiler/mstx.py +199 -0
- mindspore/profiler/platform/__init__.py +21 -0
- mindspore/profiler/platform/base_profiler.py +40 -0
- mindspore/profiler/platform/cpu_profiler.py +124 -0
- mindspore/profiler/platform/gpu_profiler.py +74 -0
- mindspore/profiler/platform/npu_profiler.py +309 -0
- mindspore/profiler/profiler.py +580 -93
- mindspore/profiler/profiler_action_controller.py +187 -0
- mindspore/profiler/profiler_interface.py +114 -0
- mindspore/profiler/schedule.py +208 -0
- mindspore/rewrite/api/symbol_tree.py +1 -2
- mindspore/run_check/_check_version.py +2 -6
- mindspore/runtime/__init__.py +37 -0
- mindspore/runtime/device.py +27 -0
- mindspore/runtime/event.py +209 -0
- mindspore/runtime/executor.py +148 -0
- mindspore/runtime/memory.py +392 -0
- mindspore/runtime/stream.py +460 -0
- mindspore/runtime/thread_bind_core.py +401 -0
- mindspore/train/__init__.py +2 -2
- mindspore/train/_utils.py +53 -18
- mindspore/train/amp.py +8 -4
- mindspore/train/callback/_checkpoint.py +32 -18
- mindspore/train/callback/_early_stop.py +1 -1
- mindspore/train/callback/_flops_collector.py +105 -69
- mindspore/train/callback/_history.py +1 -1
- mindspore/train/callback/_summary_collector.py +44 -6
- mindspore/train/callback/_tft_register.py +31 -10
- mindspore/train/dataset_helper.py +11 -11
- mindspore/train/metrics/precision.py +4 -5
- mindspore/train/mind_ir_pb2.py +167 -46
- mindspore/train/model.py +13 -15
- mindspore/train/serialization.py +462 -76
- mindspore/train/summary/summary_record.py +1 -2
- mindspore/train/train_thor/model_thor.py +1 -1
- mindspore/utils/__init__.py +4 -2
- mindspore/utils/bin/dataset-cache +0 -0
- mindspore/utils/bin/dataset-cache-server +0 -0
- mindspore/utils/dryrun.py +138 -0
- mindspore/utils/runtime_execution_order_check.py +550 -0
- mindspore/version.py +1 -1
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/METADATA +2 -3
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/RECORD +522 -456
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/entry_points.txt +1 -1
- mindspore/_data_dump.cpython-310-aarch64-linux-gnu.so +0 -0
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/common/_tensor_overload.py +0 -139
- mindspore/lib/libmindspore_np_dtype.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -82
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -113
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -193
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/dtype_registry.h +0 -90
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_layer_norm_op.h +0 -60
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_rms_norm_op.h +0 -50
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_rms_norm_quant_op.h +0 -50
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/apply_rotary_pos_emb_nz_op.h +0 -42
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/apply_rotary_pos_emb_op.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_only_ops.h +0 -94
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_op_base.h +0 -97
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/flash_attention_score_op.h +0 -97
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/gelu_op.h +0 -44
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_add_rmsnorm_op.h +0 -73
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -108
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/multi_impls_op.h +0 -64
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/multi_weight_matmul_op.h +0 -91
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/paged_attention_op.h +0 -99
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/reshape_and_cache_nz_op.h +0 -44
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/reshape_and_cache_op.h +0 -44
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/rms_norm_op.h +0 -64
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -179
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/profiling_util.h +0 -366
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -56
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/kernel/add.h +0 -21
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/tiling/add_tiling.h +0 -43
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb.h +0 -23
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_base.h +0 -456
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_bf16.h +0 -217
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp.h +0 -391
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp16.h +0 -126
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -230
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_tiling.h +0 -43
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_value.h +0 -27
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/apply_rotary_pos_emb_nz_impl.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz.h +0 -23
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_base.h +0 -460
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_fp16.h +0 -116
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_fp32.h +0 -230
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_tiling.h +0 -43
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_value.h +0 -27
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -74
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -74
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/kernel/cast_kernel.h +0 -21
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_tiling.h +0 -27
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/kernel/compare_kernel.h +0 -23
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +0 -68
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -99
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +0 -21
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/lccl/lccl_wrapper.h +0 -58
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/ms_int_types.h +0 -91
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/ms_int_utils.h +0 -108
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +0 -64
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/add_param.h +0 -68
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/cast_param.h +0 -30
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -38
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +0 -42
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +0 -33
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -377
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/kernel/reshape_and_cache_nz.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/reshape_and_cache_nz_impl.h +0 -42
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/reshape_and_cache_nz_tiling.h +0 -27
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/kernel/sub_kernel.h +0 -20
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +0 -399
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/utils.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_tiling.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +0 -30
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_core.h +0 -43
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_entity.h +0 -38
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_sink.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_stream.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -71
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -165
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +0 -20
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -121
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -106
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_quant_acme_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_310p_old_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_old_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_old_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_bf16_bnsd_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_bf16_bsh_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_fp16_bnsd_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_fp16_bsh_mix.o +0 -0
- mindspore/profiler/envprofiling.py +0 -254
- mindspore/profiler/profiling.py +0 -1926
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/WHEEL +0 -0
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/top_level.txt +0 -0
mindspore/numpy/array_ops.py
CHANGED
|
@@ -19,8 +19,6 @@ import operator
|
|
|
19
19
|
|
|
20
20
|
from mindspore.common import dtype as mstype
|
|
21
21
|
from mindspore.common import Tensor, mutable
|
|
22
|
-
from mindspore.ops import operations as P
|
|
23
|
-
from mindspore.ops import functional as F
|
|
24
22
|
from mindspore.ops.primitive import constexpr, _primexpr
|
|
25
23
|
from mindspore.nn import Cell
|
|
26
24
|
from mindspore import ops
|
|
@@ -73,17 +71,24 @@ def expand_dims(a, axis):
|
|
|
73
71
|
if not isinstance(axis, (int, tuple, list)):
|
|
74
72
|
_raise_type_error("axis must be tuple, list or int, but got ", axis)
|
|
75
73
|
if isinstance(axis, int):
|
|
76
|
-
return
|
|
74
|
+
return ops.expand_dims(a, axis)
|
|
77
75
|
ndim = a.ndim + len(axis)
|
|
78
76
|
axis = _canonicalize_axis(axis, ndim)
|
|
79
77
|
for ax in axis:
|
|
80
|
-
a =
|
|
78
|
+
a = ops.expand_dims(a, ax)
|
|
81
79
|
return a
|
|
82
80
|
|
|
83
81
|
|
|
84
82
|
def squeeze(a, axis=None):
|
|
85
83
|
"""
|
|
86
|
-
|
|
84
|
+
Return the Tensor after deleting the dimension of size 1 in the specified `axis`.
|
|
85
|
+
|
|
86
|
+
If :math:`axis=None`, it will remove all the dimensions of size 1.
|
|
87
|
+
If `axis` is specified, it will remove the dimensions of size 1 in the given `axis`.
|
|
88
|
+
For example, if the dimension is not specified :math:`axis=None`, input shape is (A, 1, B, C, 1, D),
|
|
89
|
+
then the shape of the output Tensor is (A, B, C, D). If the dimension is specified, the squeeze operation
|
|
90
|
+
is only performed in the specified dimension. If input shape is (A, 1, B), when :math:`axis=0` or :math:`axis=2`,
|
|
91
|
+
the input tensor is not changed, while when :math:`axis=1`, the input tensor shape is changed to (A, B).
|
|
87
92
|
|
|
88
93
|
Args:
|
|
89
94
|
a (Tensor): Input tensor array.
|
|
@@ -199,14 +204,14 @@ def rollaxis(x, axis, start=0):
|
|
|
199
204
|
if not isinstance(start, int):
|
|
200
205
|
_raise_type_error("integer argument expected, but got ", start)
|
|
201
206
|
|
|
202
|
-
shape =
|
|
203
|
-
ndim =
|
|
207
|
+
shape = ops.shape(x)
|
|
208
|
+
ndim = ops.tuple_len(shape)
|
|
204
209
|
|
|
205
210
|
axis = _check_axes_range(axis, ndim)
|
|
206
211
|
start = _check_start_normalize(start, ndim)
|
|
207
212
|
if start - axis >= 0 and start - axis <= 1:
|
|
208
213
|
return x
|
|
209
|
-
perm =
|
|
214
|
+
perm = ops.make_range(0, ndim)
|
|
210
215
|
new_perm = None
|
|
211
216
|
if start < axis:
|
|
212
217
|
if axis + 1 < ndim:
|
|
@@ -222,7 +227,7 @@ def rollaxis(x, axis, start=0):
|
|
|
222
227
|
new_perm = perm[0:axis] + perm[axis + 1:start] + \
|
|
223
228
|
perm[axis:axis + 1]
|
|
224
229
|
|
|
225
|
-
return
|
|
230
|
+
return ops.transpose(x, new_perm)
|
|
226
231
|
|
|
227
232
|
|
|
228
233
|
def swapaxes(x, axis1, axis2):
|
|
@@ -409,7 +414,7 @@ def concatenate(arrays, axis=0):
|
|
|
409
414
|
# as: tuple(tensor_1(4,5), tensor_2(4,5), tensor_3(4,5))
|
|
410
415
|
if axis is None or axis >= MAX_NUMPY_DIMS:
|
|
411
416
|
return ravel(arrays)
|
|
412
|
-
arr_shape =
|
|
417
|
+
arr_shape = ops.shape(arrays)
|
|
413
418
|
_check_axes_range((axis,), len(arr_shape))
|
|
414
419
|
# move axis 0 to the disiganated position, while keep other axes' relative
|
|
415
420
|
# positions unchanged
|
|
@@ -424,12 +429,12 @@ def concatenate(arrays, axis=0):
|
|
|
424
429
|
flattened_arrays += (ravel(arr),)
|
|
425
430
|
axis = -1
|
|
426
431
|
flattened_arrays = _promote_type_for_concatenate(flattened_arrays)
|
|
427
|
-
return
|
|
432
|
+
return ops.Concat(axis)(flattened_arrays)
|
|
428
433
|
|
|
429
434
|
# convert a list of tensor to a tuple of tensor
|
|
430
435
|
arrays = _convert_list_tensor_to_tuple_tensor(arrays)
|
|
431
436
|
|
|
432
|
-
arr_shape =
|
|
437
|
+
arr_shape = ops.shape(arrays[0])
|
|
433
438
|
_check_axes_range((axis,), len(arr_shape))
|
|
434
439
|
|
|
435
440
|
# if only one tensor in the tuple/list, return the tensor itself
|
|
@@ -437,7 +442,7 @@ def concatenate(arrays, axis=0):
|
|
|
437
442
|
return arrays[0]
|
|
438
443
|
|
|
439
444
|
arrays = _promote_type_for_concatenate(arrays)
|
|
440
|
-
return
|
|
445
|
+
return ops.Concat(axis)(arrays)
|
|
441
446
|
|
|
442
447
|
|
|
443
448
|
def append(arr, values, axis=None):
|
|
@@ -476,7 +481,7 @@ def append(arr, values, axis=None):
|
|
|
476
481
|
values = values.ravel()
|
|
477
482
|
else:
|
|
478
483
|
_check_axis_in_range(axis, arr.ndim)
|
|
479
|
-
if
|
|
484
|
+
if ops.rank(arr) != ops.rank(values):
|
|
480
485
|
_raise_value_error("all tensors must have same number of dimensions")
|
|
481
486
|
return concatenate((arr, values), axis)
|
|
482
487
|
|
|
@@ -518,13 +523,13 @@ def column_stack(tup):
|
|
|
518
523
|
trans_tup = ()
|
|
519
524
|
for tensor in tup:
|
|
520
525
|
if tensor.ndim < 1:
|
|
521
|
-
tensor =
|
|
526
|
+
tensor = ops.expand_dims(tensor, 0)
|
|
522
527
|
if tensor.ndim == 1:
|
|
523
|
-
tensor =
|
|
528
|
+
tensor = ops.expand_dims(tensor, 1)
|
|
524
529
|
trans_tup += (tensor,)
|
|
525
530
|
if not trans_tup:
|
|
526
531
|
_raise_value_error("Need at least one tensor to concatenate.")
|
|
527
|
-
return
|
|
532
|
+
return ops.Concat(1)(trans_tup)
|
|
528
533
|
|
|
529
534
|
|
|
530
535
|
def vstack(tup):
|
|
@@ -568,7 +573,7 @@ def vstack(tup):
|
|
|
568
573
|
trans_tup += (tensor,)
|
|
569
574
|
if not trans_tup:
|
|
570
575
|
_raise_value_error("Need at least one tensor to concatenate.")
|
|
571
|
-
return
|
|
576
|
+
return ops.Concat(0)(trans_tup)
|
|
572
577
|
|
|
573
578
|
|
|
574
579
|
def hstack(tup):
|
|
@@ -608,13 +613,13 @@ def hstack(tup):
|
|
|
608
613
|
tuple_of_tensor = ()
|
|
609
614
|
for tensor in tup:
|
|
610
615
|
if tensor.ndim < 1:
|
|
611
|
-
tensor =
|
|
616
|
+
tensor = ops.expand_dims(tensor, 0)
|
|
612
617
|
tuple_of_tensor += (tensor,)
|
|
613
618
|
if not tuple_of_tensor:
|
|
614
619
|
_raise_value_error("Need at least one tensor to concatenate.")
|
|
615
620
|
if tuple_of_tensor[0].ndim <= 1:
|
|
616
|
-
return
|
|
617
|
-
return
|
|
621
|
+
return ops.Concat(0)(tuple_of_tensor)
|
|
622
|
+
return ops.Concat(1)(tuple_of_tensor)
|
|
618
623
|
|
|
619
624
|
|
|
620
625
|
def dstack(tup):
|
|
@@ -658,11 +663,11 @@ def dstack(tup):
|
|
|
658
663
|
if tensor.ndim <= 1:
|
|
659
664
|
tensor = _expand(tensor, 2, 0)
|
|
660
665
|
if tensor.ndim == 2:
|
|
661
|
-
tensor =
|
|
666
|
+
tensor = ops.expand_dims(tensor, 2)
|
|
662
667
|
trans_tup += (tensor,)
|
|
663
668
|
if not trans_tup:
|
|
664
669
|
_raise_value_error("Need at least one tensor to concatenate.")
|
|
665
|
-
return
|
|
670
|
+
return ops.Concat(2)(trans_tup)
|
|
666
671
|
|
|
667
672
|
|
|
668
673
|
def where(condition, x=None, y=None):
|
|
@@ -705,42 +710,42 @@ def where(condition, x=None, y=None):
|
|
|
705
710
|
"""
|
|
706
711
|
condition, x, y = _to_tensor(condition, x, y)
|
|
707
712
|
# type promotes input tensors
|
|
708
|
-
dtype1 =
|
|
709
|
-
dtype2 =
|
|
713
|
+
dtype1 = ops.dtype(x)
|
|
714
|
+
dtype2 = ops.dtype(y)
|
|
710
715
|
dtype = _promote(dtype1, dtype2)
|
|
711
716
|
if not _check_same_type(dtype1, dtype):
|
|
712
|
-
x =
|
|
717
|
+
x = ops.cast(x, dtype)
|
|
713
718
|
if not _check_same_type(dtype2, dtype):
|
|
714
|
-
y =
|
|
719
|
+
y = ops.cast(y, dtype)
|
|
715
720
|
is_bool = _check_same_type(dtype1, mstype.bool_) and _check_same_type(dtype2, mstype.bool_)
|
|
716
721
|
if is_bool:
|
|
717
722
|
# select does not support bool type for x or y
|
|
718
|
-
x =
|
|
719
|
-
y =
|
|
723
|
+
x = ops.cast(x, mstype.float32)
|
|
724
|
+
y = ops.cast(y, mstype.float32)
|
|
720
725
|
|
|
721
|
-
dynamic =
|
|
722
|
-
or
|
|
726
|
+
dynamic = ops.is_sequence_value_unknown(ops.shape(condition)) or ops.is_sequence_value_unknown(ops.shape(x))\
|
|
727
|
+
or ops.is_sequence_value_unknown(ops.shape(y))
|
|
723
728
|
# As select op currently does not support broadcast, broadcasts input tensors
|
|
724
729
|
if not dynamic:
|
|
725
|
-
shape_out = _infer_out_shape(
|
|
726
|
-
|
|
730
|
+
shape_out = _infer_out_shape(ops.shape(condition),
|
|
731
|
+
ops.shape(x), ops.shape(y))
|
|
727
732
|
condition = _broadcast_to_shape(condition, shape_out)
|
|
728
733
|
x = _broadcast_to_shape(x, shape_out)
|
|
729
734
|
y = _broadcast_to_shape(y, shape_out)
|
|
730
735
|
else:
|
|
731
736
|
# Get the broadcast shape through broadcast calculation
|
|
732
737
|
add_x_y = x + y
|
|
733
|
-
add_out = condition +
|
|
734
|
-
shape_out =
|
|
738
|
+
add_out = condition + ops.cast(add_x_y, condition.dtype)
|
|
739
|
+
shape_out = ops.Shape()(add_out)
|
|
735
740
|
condition = ops.broadcast_to(condition, shape_out)
|
|
736
741
|
x = ops.broadcast_to(x, shape_out)
|
|
737
742
|
y = ops.broadcast_to(y, shape_out)
|
|
738
743
|
|
|
739
|
-
if not _check_same_type(
|
|
740
|
-
condition =
|
|
741
|
-
res =
|
|
744
|
+
if not _check_same_type(ops.dtype(condition), mstype.bool_):
|
|
745
|
+
condition = ops.cast(condition, mstype.bool_)
|
|
746
|
+
res = ops.select(condition, x, y)
|
|
742
747
|
if is_bool:
|
|
743
|
-
res =
|
|
748
|
+
res = ops.cast(res, mstype.bool_)
|
|
744
749
|
return res
|
|
745
750
|
|
|
746
751
|
|
|
@@ -873,13 +878,13 @@ def atleast_3d(*arys):
|
|
|
873
878
|
"""
|
|
874
879
|
res = []
|
|
875
880
|
for arr in arys:
|
|
876
|
-
ndim =
|
|
881
|
+
ndim = ops.rank(arr)
|
|
877
882
|
if ndim == 0:
|
|
878
|
-
arr =
|
|
883
|
+
arr = ops.reshape(arr, (1, 1, 1))
|
|
879
884
|
elif ndim == 1:
|
|
880
|
-
arr =
|
|
885
|
+
arr = ops.reshape(arr, (1, ops.size(arr), 1))
|
|
881
886
|
elif ndim == 2:
|
|
882
|
-
arr =
|
|
887
|
+
arr = ops.reshape(arr, ops.shape(arr) + (1,))
|
|
883
888
|
res.append(arr)
|
|
884
889
|
if len(res) == 1:
|
|
885
890
|
return res[0]
|
|
@@ -927,24 +932,24 @@ def stack(arrays, axis=0):
|
|
|
927
932
|
"""
|
|
928
933
|
|
|
929
934
|
if isinstance(arrays, Tensor):
|
|
930
|
-
shape =
|
|
931
|
-
ndim =
|
|
935
|
+
shape = ops.shape(arrays)
|
|
936
|
+
ndim = ops.rank(arrays)
|
|
932
937
|
axis = axis % ndim
|
|
933
|
-
axes =
|
|
938
|
+
axes = ops.make_range(ndim)
|
|
934
939
|
perm = axes[1:axis + 1] + (0,) + axes[axis + 1:]
|
|
935
940
|
if _is_shape_empty(shape):
|
|
936
941
|
return _empty(mstype.float32, shape[1:axis + 1] + (shape[0],) + shape[axis + 1:])
|
|
937
942
|
return transpose(arrays, perm)
|
|
938
943
|
|
|
939
944
|
if isinstance(arrays, (list, tuple)):
|
|
940
|
-
shape = (len(arrays),) +
|
|
945
|
+
shape = (len(arrays),) + ops.shape(arrays[0])
|
|
941
946
|
ndim = len(shape)
|
|
942
947
|
axis = axis % ndim
|
|
943
948
|
if _is_shape_empty(shape):
|
|
944
949
|
return _empty(mstype.float32, shape[1:axis + 1] + (shape[0],) + shape[axis + 1:])
|
|
945
950
|
seq = ()
|
|
946
951
|
for arr in arrays:
|
|
947
|
-
seq += (
|
|
952
|
+
seq += (ops.expand_dims(arr, axis),)
|
|
948
953
|
return concatenate(seq, axis)
|
|
949
954
|
return _raise_value_error('input arrays must be Tensor, tuple, or list')
|
|
950
955
|
|
|
@@ -954,7 +959,7 @@ class UniqueNet(Cell):
|
|
|
954
959
|
|
|
955
960
|
def __init__(self):
|
|
956
961
|
super(UniqueNet, self).__init__()
|
|
957
|
-
self.unique =
|
|
962
|
+
self.unique = ops.Unique()
|
|
958
963
|
|
|
959
964
|
def construct(self, x):
|
|
960
965
|
return self.unique(x)
|
|
@@ -998,7 +1003,7 @@ def unique(x, return_inverse=False):
|
|
|
998
1003
|
value= [0, 1, 1, 1, 2, 3, 4]))
|
|
999
1004
|
"""
|
|
1000
1005
|
_check_input_tensor(x)
|
|
1001
|
-
if
|
|
1006
|
+
if ops.tuple_len(ops.shape(x)) > 1:
|
|
1002
1007
|
x = ravel(x)
|
|
1003
1008
|
uniq = UniqueNet()
|
|
1004
1009
|
res = uniq(x)
|
|
@@ -1032,7 +1037,7 @@ def roll_along_axis(a, shift, axis):
|
|
|
1032
1037
|
end1 = ()
|
|
1033
1038
|
end2 = ()
|
|
1034
1039
|
stride = _list_comprehensions(a.ndim, 1, True)
|
|
1035
|
-
for i in
|
|
1040
|
+
for i in ops.make_range(a.ndim):
|
|
1036
1041
|
if i != axis:
|
|
1037
1042
|
begin1 += (0,)
|
|
1038
1043
|
end1 += (a.shape[i],)
|
|
@@ -1043,8 +1048,8 @@ def roll_along_axis(a, shift, axis):
|
|
|
1043
1048
|
end1 += (a.shape[i],)
|
|
1044
1049
|
begin2 += (0,)
|
|
1045
1050
|
end2 += (shift,)
|
|
1046
|
-
return append(
|
|
1047
|
-
|
|
1051
|
+
return append(ops.strided_slice(a, begin1, end1, stride),
|
|
1052
|
+
ops.strided_slice(a, begin2, end2, stride), axis=axis)
|
|
1048
1053
|
|
|
1049
1054
|
|
|
1050
1055
|
def roll(a, shift, axis=None):
|
|
@@ -1086,7 +1091,7 @@ def roll(a, shift, axis=None):
|
|
|
1086
1091
|
original_shape = a.shape
|
|
1087
1092
|
original_dtype = a.dtype
|
|
1088
1093
|
restore_shape = False
|
|
1089
|
-
#
|
|
1094
|
+
# ops.strided_slice only supports float on cpu, this will change once more supports
|
|
1090
1095
|
# are added.
|
|
1091
1096
|
if not _check_is_float(original_dtype):
|
|
1092
1097
|
if not original_dtype in (mstype.complex64, mstype.complex128):
|
|
@@ -1181,14 +1186,14 @@ def moveaxis(a, source, destination):
|
|
|
1181
1186
|
>>> print(output.shape)
|
|
1182
1187
|
(5, 4, 3)
|
|
1183
1188
|
"""
|
|
1184
|
-
ndim =
|
|
1189
|
+
ndim = ops.rank(a)
|
|
1185
1190
|
source = _check_axis_valid(source, ndim)
|
|
1186
1191
|
destination = _check_axis_valid(destination, ndim)
|
|
1187
1192
|
if len(source) != len(destination):
|
|
1188
1193
|
_raise_value_error('`source` and `destination` arguments must have the same number of elements')
|
|
1189
1194
|
perm = _get_moved_perm(ndim, source, destination)
|
|
1190
1195
|
|
|
1191
|
-
return
|
|
1196
|
+
return ops.transpose(a, perm)
|
|
1192
1197
|
|
|
1193
1198
|
|
|
1194
1199
|
def tile(a, reps):
|
|
@@ -1233,13 +1238,13 @@ def tile(a, reps):
|
|
|
1233
1238
|
[[0 1 2 0 1 2]]]
|
|
1234
1239
|
"""
|
|
1235
1240
|
_check_input_tensor(a)
|
|
1236
|
-
ndim =
|
|
1237
|
-
shape =
|
|
1241
|
+
ndim = ops.rank(a)
|
|
1242
|
+
shape = ops.shape(a)
|
|
1238
1243
|
reps = _add_unit_axes(reps, ndim)
|
|
1239
1244
|
if _is_shape_empty(shape) or _is_shape_empty(reps):
|
|
1240
1245
|
shape = _add_unit_axes(shape, len(reps))
|
|
1241
|
-
return _empty(
|
|
1242
|
-
return
|
|
1246
|
+
return _empty(ops.dtype(a), _seq_prod(shape, reps))
|
|
1247
|
+
return ops.tile(a, reps)
|
|
1243
1248
|
|
|
1244
1249
|
|
|
1245
1250
|
@_primexpr
|
|
@@ -1284,7 +1289,7 @@ def broadcast_to(array, shape):
|
|
|
1284
1289
|
def _check(shape_a, shape):
|
|
1285
1290
|
if not _check_can_broadcast_to(shape_a, shape):
|
|
1286
1291
|
_raise_value_error('cannot broadcast with ', shape)
|
|
1287
|
-
shape_a =
|
|
1292
|
+
shape_a = ops.shape(array)
|
|
1288
1293
|
_check(shape_a, shape)
|
|
1289
1294
|
return _broadcast_to_shape(array, shape)
|
|
1290
1295
|
|
|
@@ -1322,7 +1327,7 @@ def broadcast_arrays(*args):
|
|
|
1322
1327
|
[[4, 4, 4],
|
|
1323
1328
|
[5, 5, 5]])]
|
|
1324
1329
|
"""
|
|
1325
|
-
shapes = map(
|
|
1330
|
+
shapes = map(ops.shape, args)
|
|
1326
1331
|
out_shape = _infer_out_shape(*shapes)
|
|
1327
1332
|
res = []
|
|
1328
1333
|
for arr in args:
|
|
@@ -1439,18 +1444,18 @@ def _split(x, indices_or_sections, opname, axis=0):
|
|
|
1439
1444
|
if indices_or_sections > length_along_dim:
|
|
1440
1445
|
_raise_value_error("empty tensor encountered.")
|
|
1441
1446
|
if opname == "split" or length_along_dim % indices_or_sections == 0:
|
|
1442
|
-
res =
|
|
1447
|
+
res = ops.Split(axis_new, indices_or_sections)(x)
|
|
1443
1448
|
else:
|
|
1444
1449
|
num_long_tensor = length_along_dim % indices_or_sections
|
|
1445
1450
|
num_short_tensor = indices_or_sections - num_long_tensor
|
|
1446
1451
|
length1 = num_long_tensor * (length_along_dim // indices_or_sections + 1)
|
|
1447
1452
|
length2 = length_along_dim - length1
|
|
1448
|
-
start1 = _list_comprehensions(
|
|
1453
|
+
start1 = _list_comprehensions(ops.rank(x), 0, True)
|
|
1449
1454
|
size1 = _tuple_setitem(arr_shape, axis_new, length1)
|
|
1450
1455
|
start2 = _tuple_setitem(start1, axis_new, length1)
|
|
1451
1456
|
size2 = _tuple_setitem(arr_shape, axis_new, length2)
|
|
1452
|
-
res =
|
|
1453
|
-
|
|
1457
|
+
res = ops.Split(axis_new, num_long_tensor)(ops.tensor_slice(x, start1, size1)) + \
|
|
1458
|
+
ops.Split(axis_new, num_short_tensor)(ops.tensor_slice(x, start2, size2))
|
|
1454
1459
|
|
|
1455
1460
|
elif isinstance(indices_or_sections, (list, tuple)) and _check_element_int(indices_or_sections):
|
|
1456
1461
|
res = _split_sub_tensors(x, indices_or_sections, axis_new)
|
|
@@ -1487,7 +1492,7 @@ def _split_sub_tensors(x, indices, axis):
|
|
|
1487
1492
|
end[axis] = idx
|
|
1488
1493
|
if end[axis] <= begin[axis]:
|
|
1489
1494
|
_raise_value_error("empty sub-tensor encountered.")
|
|
1490
|
-
sliced_tensor =
|
|
1495
|
+
sliced_tensor = ops.strided_slice(x, _type_convert(tuple, begin), _type_convert(tuple, end), strides)
|
|
1491
1496
|
sub_tensors.append(sliced_tensor)
|
|
1492
1497
|
return sub_tensors
|
|
1493
1498
|
|
|
@@ -1679,10 +1684,10 @@ def flip(m, axis=None):
|
|
|
1679
1684
|
[3. 2.]]]
|
|
1680
1685
|
"""
|
|
1681
1686
|
_check_input_tensor(m)
|
|
1682
|
-
ndim =
|
|
1687
|
+
ndim = ops.rank(m)
|
|
1683
1688
|
axes = _check_axis_valid(axis, ndim)
|
|
1684
|
-
shape =
|
|
1685
|
-
dtype =
|
|
1689
|
+
shape = ops.shape(m)
|
|
1690
|
+
dtype = ops.dtype(m)
|
|
1686
1691
|
if _is_shape_empty(shape):
|
|
1687
1692
|
return m
|
|
1688
1693
|
if not _check_is_float(dtype):
|
|
@@ -1690,9 +1695,9 @@ def flip(m, axis=None):
|
|
|
1690
1695
|
start = _get_flip_start(ndim, shape, axes)
|
|
1691
1696
|
end = _get_flip_end(ndim, shape, axes)
|
|
1692
1697
|
strides = _get_flip_strides(ndim, axes)
|
|
1693
|
-
res =
|
|
1694
|
-
if not _check_same_type(
|
|
1695
|
-
res =
|
|
1698
|
+
res = ops.strided_slice(m, start, end, strides)
|
|
1699
|
+
if not _check_same_type(ops.dtype(res), dtype):
|
|
1700
|
+
res = ops.cast(res, dtype)
|
|
1696
1701
|
return res
|
|
1697
1702
|
|
|
1698
1703
|
|
|
@@ -1796,49 +1801,49 @@ def take_along_axis(arr, indices, axis):
|
|
|
1796
1801
|
if axis is None:
|
|
1797
1802
|
arr = ravel(arr)
|
|
1798
1803
|
axis = 0
|
|
1799
|
-
ndim =
|
|
1800
|
-
if ndim !=
|
|
1804
|
+
ndim = ops.rank(arr)
|
|
1805
|
+
if ndim != ops.rank(indices):
|
|
1801
1806
|
_raise_value_error('`indices` and `arr` must have the same number of dimensions')
|
|
1802
1807
|
axis = _check_axis_in_range(axis, ndim)
|
|
1803
1808
|
|
|
1804
|
-
shape_arr =
|
|
1805
|
-
shape_indices =
|
|
1809
|
+
shape_arr = ops.shape(arr)
|
|
1810
|
+
shape_indices = ops.shape(indices)
|
|
1806
1811
|
# broadcasts indices against the shape of arr except at axis
|
|
1807
1812
|
indices = _broadcast_to(indices, _tuple_slice(shape_indices, None, axis),
|
|
1808
1813
|
_tuple_slice(shape_arr, None, axis), ndim)
|
|
1809
1814
|
indices = _broadcast_to(indices, _tuple_slice(shape_arr, None, axis + 1) +
|
|
1810
1815
|
_tuple_slice(shape_indices, axis + 1, None), shape_arr, ndim)
|
|
1811
1816
|
arr = _broadcast_to(arr, shape_arr, indices.shape, ndim)
|
|
1812
|
-
return
|
|
1817
|
+
return ops.gather_d(arr, axis, indices)
|
|
1813
1818
|
|
|
1814
1819
|
|
|
1815
1820
|
def _mod(x, y):
|
|
1816
1821
|
"""Computes x mod y."""
|
|
1817
|
-
quotient =
|
|
1818
|
-
prod =
|
|
1819
|
-
return
|
|
1822
|
+
quotient = ops.tensor_floordiv(x, y)
|
|
1823
|
+
prod = ops.tensor_mul(y, quotient)
|
|
1824
|
+
return ops.tensor_sub(x, prod)
|
|
1820
1825
|
|
|
1821
1826
|
|
|
1822
1827
|
def _check_indices(dims, indices, mode, allow_negative_index=True):
|
|
1823
1828
|
"""Checks whether indices are out of bounds."""
|
|
1824
|
-
shape =
|
|
1825
|
-
dtype =
|
|
1829
|
+
shape = ops.shape(indices)
|
|
1830
|
+
dtype = ops.dtype(indices)
|
|
1826
1831
|
if not allow_negative_index:
|
|
1827
|
-
lowerbounds =
|
|
1832
|
+
lowerbounds = ops.fill(dtype, shape, 0)
|
|
1828
1833
|
else:
|
|
1829
|
-
lowerbounds =
|
|
1830
|
-
upperbounds =
|
|
1831
|
-
out_of_lowerbounds =
|
|
1832
|
-
out_of_upperbounds =
|
|
1834
|
+
lowerbounds = ops.fill(dtype, shape, -dims)
|
|
1835
|
+
upperbounds = ops.fill(dtype, shape, dims - 1)
|
|
1836
|
+
out_of_lowerbounds = ops.tensor_lt(indices, lowerbounds)
|
|
1837
|
+
out_of_upperbounds = ops.tensor_gt(indices, upperbounds)
|
|
1833
1838
|
if mode == 'raise':
|
|
1834
1839
|
_raise_unimplemented_error('"raise" mode is not implemented')
|
|
1835
1840
|
if mode == 'wrap':
|
|
1836
|
-
return _mod(indices,
|
|
1841
|
+
return _mod(indices, ops.fill(mstype.float32, shape, dims)).astype(dtype)
|
|
1837
1842
|
if mode != 'clip':
|
|
1838
1843
|
_raise_value_error('invalid mode. Expected "raise", "wrap", or "clip"')
|
|
1839
|
-
zeros =
|
|
1840
|
-
clipped =
|
|
1841
|
-
clipped =
|
|
1844
|
+
zeros = ops.fill(dtype, shape, 0)
|
|
1845
|
+
clipped = ops.select(out_of_lowerbounds, zeros, indices)
|
|
1846
|
+
clipped = ops.select(out_of_upperbounds, upperbounds, clipped)
|
|
1842
1847
|
return clipped
|
|
1843
1848
|
|
|
1844
1849
|
|
|
@@ -2052,9 +2057,9 @@ def select(condlist, choicelist, default=0):
|
|
|
2052
2057
|
[ 0 1 2 0 16]
|
|
2053
2058
|
"""
|
|
2054
2059
|
condlist, choicelist = _to_tensor(condlist, choicelist)
|
|
2055
|
-
shape_cond =
|
|
2056
|
-
shape_choice =
|
|
2057
|
-
if
|
|
2060
|
+
shape_cond = ops.shape(condlist)
|
|
2061
|
+
shape_choice = ops.shape(choicelist)
|
|
2062
|
+
if ops.rank(condlist) == 0 or ops.rank(choicelist) == 0:
|
|
2058
2063
|
_raise_value_error('input cannot be scalars')
|
|
2059
2064
|
case_num = shape_cond[0]
|
|
2060
2065
|
if shape_choice[0] != case_num:
|
|
@@ -2066,25 +2071,25 @@ def select(condlist, choicelist, default=0):
|
|
|
2066
2071
|
case_size = _infer_out_shape(case_size_cond, case_size_choice)
|
|
2067
2072
|
shape_broadcasted = (case_num,) + case_size
|
|
2068
2073
|
ndim = len(shape_broadcasted)
|
|
2069
|
-
shape_cond_expanded = ((case_num,) + _list_comprehensions(ndim -
|
|
2074
|
+
shape_cond_expanded = ((case_num,) + _list_comprehensions(ndim - ops.rank(condlist), 1, True) +
|
|
2070
2075
|
case_size_cond)
|
|
2071
|
-
condlist = _broadcast_to_shape(
|
|
2072
|
-
shape_choice_expanded = ((case_num,) + _list_comprehensions(ndim -
|
|
2076
|
+
condlist = _broadcast_to_shape(ops.reshape(condlist, shape_cond_expanded), shape_broadcasted)
|
|
2077
|
+
shape_choice_expanded = ((case_num,) + _list_comprehensions(ndim - ops.rank(choicelist), 1, True) +
|
|
2073
2078
|
case_size_choice)
|
|
2074
|
-
choicelist = _broadcast_to_shape(
|
|
2079
|
+
choicelist = _broadcast_to_shape(ops.reshape(choicelist, shape_choice_expanded), shape_broadcasted)
|
|
2075
2080
|
|
|
2076
2081
|
slice_start = _list_comprehensions(ndim - 1, 0, True)
|
|
2077
2082
|
slice_size = (1,) + case_size
|
|
2078
|
-
dtype =
|
|
2083
|
+
dtype = ops.dtype(choicelist)
|
|
2079
2084
|
if isinstance(default, Tensor):
|
|
2080
|
-
default_slice = default.astype(
|
|
2085
|
+
default_slice = default.astype(ops.dtype(choicelist)).reshape(slice_size)
|
|
2081
2086
|
else:
|
|
2082
|
-
default_slice =
|
|
2087
|
+
default_slice = ops.fill(ops.dtype(choicelist), slice_size, default)
|
|
2083
2088
|
for i in range(case_num - 1, -1, -1):
|
|
2084
|
-
cond_slice =
|
|
2085
|
-
choice_slice =
|
|
2086
|
-
default_slice =
|
|
2087
|
-
return
|
|
2089
|
+
cond_slice = ops.tensor_slice(condlist.astype(mstype.float32), (i,) + slice_start, slice_size)
|
|
2090
|
+
choice_slice = ops.tensor_slice(choicelist, (i,) + slice_start, slice_size)
|
|
2091
|
+
default_slice = ops.select(cond_slice.astype(mstype.bool_), choice_slice, default_slice)
|
|
2092
|
+
return ops.reshape(default_slice, (case_size)).astype(dtype)
|
|
2088
2093
|
|
|
2089
2094
|
|
|
2090
2095
|
@_primexpr
|
|
@@ -2173,32 +2178,32 @@ def choose(a, choices, mode='clip'):
|
|
|
2173
2178
|
[ 10 -10 10]]
|
|
2174
2179
|
"""
|
|
2175
2180
|
a = _to_tensor(a)
|
|
2176
|
-
if not _check_is_int(
|
|
2181
|
+
if not _check_is_int(ops.dtype(a)):
|
|
2177
2182
|
_raise_value_error('`a` should be an int array')
|
|
2178
2183
|
if isinstance(choices, (tuple, list)):
|
|
2179
2184
|
# broadcasts choices to the same shape if choices is a sequence
|
|
2180
2185
|
choices = _to_tensor(*choices)
|
|
2181
2186
|
shapes = ()
|
|
2182
2187
|
for choice in choices:
|
|
2183
|
-
shapes += (
|
|
2184
|
-
shape_choice = _infer_out_shape(
|
|
2188
|
+
shapes += (ops.shape(choice),)
|
|
2189
|
+
shape_choice = _infer_out_shape(ops.shape(a), *shapes)
|
|
2185
2190
|
tmp = []
|
|
2186
2191
|
for choice in choices:
|
|
2187
2192
|
tmp.append(broadcast_to(choice, shape_choice))
|
|
2188
2193
|
choices = stack(tmp)
|
|
2189
2194
|
else:
|
|
2190
2195
|
choices = _to_tensor(choices)
|
|
2191
|
-
shape_choice = _infer_out_shape(
|
|
2192
|
-
choices =
|
|
2193
|
-
choices = broadcast_to(choices, (
|
|
2196
|
+
shape_choice = _infer_out_shape(ops.shape(a), ops.shape(choices)[1:])
|
|
2197
|
+
choices = ops.reshape(choices, choices.shape[:1] + _add_unit_axes(choices.shape[1:], len(shape_choice)))
|
|
2198
|
+
choices = broadcast_to(choices, (ops.shape(choices)[0],) + shape_choice)
|
|
2194
2199
|
|
|
2195
|
-
if
|
|
2200
|
+
if ops.rank(a) == 0 or ops.rank(choices) == 0:
|
|
2196
2201
|
_raise_value_error('input cannot be scalars')
|
|
2197
2202
|
a = broadcast_to(a, shape_choice)
|
|
2198
|
-
a = _check_indices(
|
|
2199
|
-
grid = _get_grid(
|
|
2200
|
-
indices = concatenate((a.reshape(
|
|
2201
|
-
return
|
|
2203
|
+
a = _check_indices(ops.shape(choices)[0], a, mode, allow_negative_index=False)
|
|
2204
|
+
grid = _get_grid(ops.shape(a))
|
|
2205
|
+
indices = concatenate((a.reshape(ops.shape(a) + (1,)), grid), -1)
|
|
2206
|
+
return ops.gather_nd(choices, indices)
|
|
2202
2207
|
|
|
2203
2208
|
|
|
2204
2209
|
def size(a, axis=None):
|
|
@@ -2312,24 +2317,24 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
|
|
|
2312
2317
|
[0 8 0]
|
|
2313
2318
|
[0 0 9]]]
|
|
2314
2319
|
"""
|
|
2315
|
-
ndim =
|
|
2316
|
-
shape =
|
|
2320
|
+
ndim = ops.rank(arr)
|
|
2321
|
+
shape = ops.shape(arr)
|
|
2317
2322
|
axis = _check_axis_in_range(axis, ndim)
|
|
2318
2323
|
arr = moveaxis(arr, axis, -1)
|
|
2319
|
-
arr =
|
|
2324
|
+
arr = ops.reshape(arr, (-1, ops.shape(arr)[-1]))
|
|
2320
2325
|
slices = []
|
|
2321
|
-
for i in range(
|
|
2326
|
+
for i in range(ops.shape(arr)[0]):
|
|
2322
2327
|
slices.append(func1d(arr[i], *args, **kwargs))
|
|
2323
2328
|
stacked_slices = stack(slices)
|
|
2324
2329
|
shape_stacked = (_tuple_slice(shape, None, axis) + _tuple_slice(shape, axis + 1, None) +
|
|
2325
|
-
_tuple_slice(
|
|
2326
|
-
res =
|
|
2330
|
+
_tuple_slice(ops.shape(stacked_slices), 1, None))
|
|
2331
|
+
res = ops.reshape(stacked_slices, shape_stacked)
|
|
2327
2332
|
|
|
2328
2333
|
# moves the dimensions returned by `func1d` back to `axis`
|
|
2329
|
-
ndim_func =
|
|
2334
|
+
ndim_func = ops.rank(res) - ndim + 1
|
|
2330
2335
|
if ndim_func >= 1:
|
|
2331
|
-
res = moveaxis(res,
|
|
2332
|
-
|
|
2336
|
+
res = moveaxis(res, ops.make_range(ndim - 1, ops.rank(res)),
|
|
2337
|
+
ops.make_range(axis, axis + ndim_func))
|
|
2333
2338
|
return res
|
|
2334
2339
|
|
|
2335
2340
|
|
|
@@ -2445,17 +2450,17 @@ def unravel_index(indices, shape, order='C'):
|
|
|
2445
2450
|
_raise_value_error('invalid order. Expected "C" or "F"')
|
|
2446
2451
|
if isinstance(shape, int):
|
|
2447
2452
|
shape = (shape,)
|
|
2448
|
-
ndim =
|
|
2453
|
+
ndim = ops.rank(indices)
|
|
2449
2454
|
if order == 'F':
|
|
2450
2455
|
sizes = _cumprod(shape)
|
|
2451
2456
|
else:
|
|
2452
2457
|
sizes = _cumprod(shape[::-1])
|
|
2453
2458
|
sizes = _to_tensor(sizes[::-1] + (1,))
|
|
2454
|
-
sizes =
|
|
2459
|
+
sizes = ops.reshape(sizes, (-1,) + _list_comprehensions(ndim, 1, True))
|
|
2455
2460
|
total_size = sizes[0]
|
|
2456
2461
|
indices = where(indices > total_size - 1, total_size - 1, indices)
|
|
2457
2462
|
if _get_device() == 'GPU':
|
|
2458
|
-
dtype =
|
|
2463
|
+
dtype = ops.dtype(total_size)
|
|
2459
2464
|
lowerbounds = (-(total_size.astype(mstype.float32))).astype(dtype)
|
|
2460
2465
|
else:
|
|
2461
2466
|
lowerbounds = -total_size
|
|
@@ -2515,7 +2520,7 @@ def apply_over_axes(func, a, axes):
|
|
|
2515
2520
|
res = a
|
|
2516
2521
|
for axis in axes:
|
|
2517
2522
|
res = func(res, axis=axis)
|
|
2518
|
-
res =
|
|
2523
|
+
res = ops.expand_dims(res, axis) if res.ndim != a.ndim else res
|
|
2519
2524
|
if res.ndim != a.ndim:
|
|
2520
2525
|
_raise_value_error("function is not returning a tensor of the correct shape")
|
|
2521
2526
|
return res
|
|
@@ -2546,7 +2551,7 @@ def argwhere(a):
|
|
|
2546
2551
|
Tensor(shape=[2, 3], dtype=Int64, value=[[0, 0, 0], [0, 1, 0]])
|
|
2547
2552
|
"""
|
|
2548
2553
|
a = _to_tensor(a)
|
|
2549
|
-
return
|
|
2554
|
+
return ops.argwhere(a)
|
|
2550
2555
|
|
|
2551
2556
|
|
|
2552
2557
|
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
|
|
@@ -2584,42 +2589,42 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
|
|
|
2584
2589
|
def unique_w_ind(arr):
|
|
2585
2590
|
array, sort_indices = arr.ravel().sort()
|
|
2586
2591
|
array_type = array.dtype
|
|
2587
|
-
cmp_array1 =
|
|
2588
|
-
cmp_array2 =
|
|
2592
|
+
cmp_array1 = ops.cat((array, Tensor([0], dtype=array_type)))
|
|
2593
|
+
cmp_array2 = ops.cat((Tensor([0], dtype=array_type), array))
|
|
2589
2594
|
mask = cmp_array1 != cmp_array2
|
|
2590
2595
|
mask[0] = True
|
|
2591
|
-
array =
|
|
2592
|
-
ind =
|
|
2596
|
+
array = ops.masked_select(array, mask[:-1])
|
|
2597
|
+
ind = ops.masked_select(sort_indices, mask[:-1])
|
|
2593
2598
|
return array, ind
|
|
2594
2599
|
|
|
2595
2600
|
if not isinstance(assume_unique, bool) or not isinstance(return_indices, bool):
|
|
2596
2601
|
_raise_type_error("assume_unique or return_indices is not bool type.")
|
|
2597
2602
|
ar1, ar2 = _to_tensor(ar1, ar2)
|
|
2598
|
-
ind1 =
|
|
2599
|
-
ind2 =
|
|
2603
|
+
ind1 = ops.fill(mstype.int32, (ar1.size,), -1)
|
|
2604
|
+
ind2 = ops.fill(mstype.int32, (ar2.size,), -1)
|
|
2600
2605
|
if not assume_unique:
|
|
2601
2606
|
if return_indices:
|
|
2602
2607
|
array1, ind1 = unique_w_ind(ar1)
|
|
2603
2608
|
array2, ind2 = unique_w_ind(ar2)
|
|
2604
2609
|
else:
|
|
2605
|
-
array1 =
|
|
2606
|
-
array2 =
|
|
2610
|
+
array1 = ops.unique(ar1)[0]
|
|
2611
|
+
array2 = ops.unique(ar2)[0]
|
|
2607
2612
|
else:
|
|
2608
2613
|
array1 = ar1.ravel()
|
|
2609
2614
|
array2 = ar2.ravel()
|
|
2610
2615
|
concat_array = concatenate((array1, array2))
|
|
2611
2616
|
if return_indices:
|
|
2612
|
-
concat_sort_indices =
|
|
2617
|
+
concat_sort_indices = ops.argsort(concat_array)
|
|
2613
2618
|
concat_array = concat_array[concat_sort_indices]
|
|
2614
2619
|
else:
|
|
2615
2620
|
concat_array, concat_sort_indices = concat_array.sort()
|
|
2616
2621
|
|
|
2617
2622
|
mask_res = concat_array[1:] == concat_array[:-1]
|
|
2618
|
-
res =
|
|
2623
|
+
res = ops.masked_select(concat_array[1:], mask_res)
|
|
2619
2624
|
|
|
2620
2625
|
if return_indices:
|
|
2621
|
-
ar1_indices =
|
|
2622
|
-
ar2_indices =
|
|
2626
|
+
ar1_indices = ops.masked_select(concat_sort_indices[:-1], mask_res)
|
|
2627
|
+
ar2_indices = ops.masked_select(concat_sort_indices[1:], mask_res)
|
|
2623
2628
|
if ar2_indices.shape[0] > 0:
|
|
2624
2629
|
ar2_indices = ar2_indices - array1.size
|
|
2625
2630
|
if not assume_unique:
|