mindspore-2.4.10-cp310-cp310-manylinux1_x86_64.whl → mindspore-2.5.0-cp310-cp310-manylinux1_x86_64.whl
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release: this version of mindspore has been flagged as possibly problematic; see the registry page for details.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +39 -0
- mindspore/__init__.py +8 -3
- mindspore/_akg/akg/composite/build_module.py +6 -2
- mindspore/_akg/akg/utils/kernel_exec.py +2 -2
- mindspore/_c_dataengine.cpython-310-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-310-x86_64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-310-x86_64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +0 -5
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/compile_config.py +64 -0
- mindspore/_extends/parse/deprecated/__init__.py +0 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +375 -0
- mindspore/_extends/parse/parser.py +23 -5
- mindspore/_extends/parse/standard_method.py +123 -27
- mindspore/_extends/pijit/pijit_func_white_list.py +1 -1
- mindspore/amp.py +7 -1
- mindspore/boost/boost_cell_wrapper.py +136 -41
- mindspore/common/__init__.py +3 -1
- mindspore/common/_register_for_tensor.py +0 -1
- mindspore/common/_stub_tensor.py +25 -4
- mindspore/common/_tensor_cpp_method.py +17 -0
- mindspore/common/_tensor_docs.py +6132 -0
- mindspore/common/api.py +98 -21
- mindspore/common/dtype.py +34 -34
- mindspore/common/dump.py +2 -1
- mindspore/common/file_system.py +8 -3
- mindspore/common/generator.py +2 -0
- mindspore/common/hook_handle.py +3 -1
- mindspore/common/initializer.py +3 -4
- mindspore/common/lazy_inline.py +8 -2
- mindspore/common/mindir_util.py +10 -2
- mindspore/common/parameter.py +31 -15
- mindspore/common/tensor.py +713 -1337
- mindspore/communication/__init__.py +1 -1
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/comm_func.py +215 -173
- mindspore/communication/management.py +23 -20
- mindspore/context.py +285 -191
- mindspore/dataset/__init__.py +23 -19
- mindspore/dataset/callback/ds_callback.py +2 -1
- mindspore/dataset/core/config.py +84 -3
- mindspore/dataset/engine/cache_admin.py +3 -3
- mindspore/dataset/engine/cache_client.py +5 -4
- mindspore/dataset/engine/datasets.py +192 -149
- mindspore/dataset/engine/datasets_audio.py +14 -0
- mindspore/dataset/engine/datasets_standard_format.py +11 -11
- mindspore/dataset/engine/datasets_text.py +38 -1
- mindspore/dataset/engine/datasets_user_defined.py +100 -66
- mindspore/dataset/engine/datasets_vision.py +81 -8
- mindspore/dataset/engine/iterators.py +281 -63
- mindspore/dataset/engine/obs/util.py +8 -0
- mindspore/dataset/engine/queue.py +40 -0
- mindspore/dataset/engine/samplers.py +26 -2
- mindspore/dataset/engine/serializer_deserializer.py +1 -1
- mindspore/dataset/engine/validators.py +43 -11
- mindspore/dataset/transforms/py_transforms_util.py +17 -0
- mindspore/dataset/transforms/transforms.py +29 -12
- mindspore/dataset/vision/validators.py +1 -2
- mindspore/device_context/__init__.py +21 -0
- mindspore/device_context/ascend/__init__.py +25 -0
- mindspore/device_context/ascend/device.py +72 -0
- mindspore/device_context/ascend/op_debug.py +94 -0
- mindspore/device_context/ascend/op_precision.py +193 -0
- mindspore/device_context/ascend/op_tuning.py +127 -0
- mindspore/device_context/cpu/__init__.py +25 -0
- mindspore/device_context/cpu/device.py +62 -0
- mindspore/device_context/cpu/op_tuning.py +43 -0
- mindspore/device_context/gpu/__init__.py +21 -0
- mindspore/device_context/gpu/device.py +70 -0
- mindspore/device_context/gpu/op_precision.py +67 -0
- mindspore/device_context/gpu/op_tuning.py +175 -0
- mindspore/device_manager.py +134 -0 (adds top-level device selection; sketch after the list)
- mindspore/experimental/llm_boost/__init__.py +1 -0
- mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
- mindspore/experimental/llm_boost/register.py +1 -0
- mindspore/experimental/optim/adadelta.py +26 -22
- mindspore/experimental/optim/adam.py +3 -0
- mindspore/experimental/optim/lr_scheduler.py +33 -24
- mindspore/experimental/optim/radam.py +33 -30
- mindspore/hal/device.py +28 -0
- mindspore/hal/event.py +17 -0
- mindspore/hal/memory.py +94 -3
- mindspore/hal/stream.py +91 -6
- mindspore/include/api/context.h +0 -1
- mindspore/lib/libavcodec.so.59 +0 -0
- mindspore/lib/libavdevice.so.59 +0 -0
- mindspore/lib/libavfilter.so.8 +0 -0
- mindspore/lib/libavformat.so.59 +0 -0
- mindspore/lib/libavutil.so.57 +0 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_ops.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/libswresample.so.4 +0 -0
- mindspore/lib/libswscale.so.6 +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910_93/aic-ascend910_93-ops-info.json +2048 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl/dynamic/decoder_kv_cache.py +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl/dynamic/prompt_kv_cache.py +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/version.info +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/config/ascend910_93/aic-ascend910_93-ops-info.json +224 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/all_finite.py +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/decoder_kv_cache.py +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/prompt_kv_cache.py +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.json +78 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.json +78 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.json +78 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/all_finite.json +139 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/binary_info_config.json +361 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/decoder_kv_cache.json +892 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/prompt_kv_cache.json +892 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/version.info +1 -1
- mindspore/lib/plugin/ascend/custom_compiler/setup.py +1 -1
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_internal_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libms_ascend_native_boost.so +0 -0
- mindspore/lib/plugin/ascend/libms_atb_boost.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +960 -958
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{acme/include/base_type.h → base_type.h} +25 -20
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{cast/cast_tiling.h → internal.h} +6 -4
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_op.h +114 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/boost_kernel.h +70 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/llama_impl.h +85 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/model_interface.h +52 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/tensor.h +81 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_creator.h +123 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +155 -110
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{acme/include/tiling_info.h → tiling_info.h} +12 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tiling_utils.h +178 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layer_norm_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_quant_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_310p_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcompare_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libllama_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_optiling.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmulti_weight_matmul_kernel_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_f16_nz/internal_pp_matmul_f16_nz.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_f16_nz/internal_pp_matmul_f16_nz_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_i8_nz_compress/internal_pp_matmul_i8_nz_compress.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_i8_nz_compress/internal_pp_matmul_i8_nz_compress_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_int8_nz/internal_pp_matmul_int8_nz.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_int8_nz/internal_pp_matmul_int8_nz_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libadd_rms_norm_quant_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libapply_rotary_pos_emb_310p_impl.so → op_kernels/ascend310p/so_kernels/libapply_rotary_pos_emb_310p_ascend310p.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libcast_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libcompare_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libgelu_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libmatmul_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libreshape_and_cache_nz_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_4b60f88cdc28b25a36bad2d8b0a88092.json +163 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_4b60f88cdc28b25a36bad2d8b0a88092.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_cde61da2bd6fededcb1ba310a6ad16ee.json +163 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_cde61da2bd6fededcb1ba310a6ad16ee.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/matmul_add_rmsnorm/matmul_add_rmsnorm_bf16_bf16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/matmul_add_rmsnorm/matmul_add_rmsnorm_bf16_fp16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/matmul_add_rmsnorm/matmul_add_rmsnorm_bf16_fp32.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/matmul_add_rmsnorm/matmul_add_rmsnorm_fp16_bf16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/matmul_add_rmsnorm/matmul_add_rmsnorm_fp16_fp16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/matmul_add_rmsnorm/matmul_add_rmsnorm_fp16_fp32.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/so_kernels/libadd_layer_norm_ascend910b.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libadd_rms_norm_impl.so → op_kernels/ascend910b/so_kernels/libadd_rms_norm_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/so_kernels/libadd_rms_norm_quant_ascend910b.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libapply_rotary_pos_emb_impl.so → op_kernels/ascend910b/so_kernels/libapply_rotary_pos_emb_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libcast_impl.so → op_kernels/ascend910b/so_kernels/libcast_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libnot_equal_impl.so → op_kernels/ascend910b/so_kernels/libcompare_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libgelu_impl.so → op_kernels/ascend910b/so_kernels/libgelu_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/so_kernels/libllama_ascend910b.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libmatmul_impl.so → op_kernels/ascend910b/so_kernels/libmatmul_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libmulti_weight_matmul_kernel_impl.so → op_kernels/ascend910b/so_kernels/libmulti_weight_matmul_kernel_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libreshape_and_cache_impl.so → op_kernels/ascend910b/so_kernels/libreshape_and_cache_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/librms_norm_impl.so → op_kernels/ascend910b/so_kernels/librms_norm_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/log.py +12 -0
- mindspore/mindrecord/__init__.py +1 -1
- mindspore/mindrecord/config.py +17 -316
- mindspore/mindrecord/filereader.py +1 -9
- mindspore/mindrecord/filewriter.py +5 -15
- mindspore/mindrecord/mindpage.py +1 -9
- mindspore/mint/__init__.py +824 -218
- mindspore/mint/distributed/__init__.py +66 -4
- mindspore/mint/distributed/distributed.py +2594 -44 (torch.distributed-style collectives; sketch after the list)
- mindspore/mint/linalg/__init__.py +6 -0
- mindspore/mint/nn/__init__.py +473 -14
- mindspore/mint/nn/functional.py +486 -11
- mindspore/mint/nn/layer/__init__.py +17 -4
- mindspore/mint/nn/layer/_functions.py +330 -0
- mindspore/mint/nn/layer/activation.py +169 -1
- mindspore/mint/nn/layer/basic.py +123 -0
- mindspore/mint/nn/layer/conv.py +727 -0
- mindspore/mint/nn/layer/normalization.py +215 -19
- mindspore/mint/nn/layer/padding.py +797 -0
- mindspore/mint/nn/layer/pooling.py +170 -0
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/adam.py +223 -0
- mindspore/mint/optim/adamw.py +26 -19
- mindspore/mint/special/__init__.py +2 -1
- mindspore/multiprocessing/__init__.py +5 -0
- mindspore/nn/cell.py +126 -19
- mindspore/nn/dynamic_lr.py +2 -1
- mindspore/nn/layer/activation.py +6 -6
- mindspore/nn/layer/basic.py +35 -25
- mindspore/nn/layer/channel_shuffle.py +3 -3
- mindspore/nn/layer/embedding.py +3 -3
- mindspore/nn/layer/normalization.py +8 -7
- mindspore/nn/layer/padding.py +4 -3
- mindspore/nn/layer/pooling.py +47 -13
- mindspore/nn/layer/rnn_cells.py +1 -1
- mindspore/nn/layer/rnns.py +2 -1
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +48 -26
- mindspore/nn/learning_rate_schedule.py +5 -3
- mindspore/nn/loss/loss.py +31 -36
- mindspore/nn/optim/ada_grad.py +1 -0
- mindspore/nn/optim/adadelta.py +2 -2
- mindspore/nn/optim/adam.py +1 -1
- mindspore/nn/optim/lars.py +1 -4
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/rprop.py +2 -2
- mindspore/nn/optim/thor.py +2 -1
- mindspore/nn/utils/init.py +13 -11
- mindspore/nn/wrap/cell_wrapper.py +4 -6
- mindspore/nn/wrap/loss_scale.py +3 -4
- mindspore/numpy/array_creations.py +60 -62
- mindspore/numpy/array_ops.py +148 -143
- mindspore/numpy/logic_ops.py +41 -42
- mindspore/numpy/math_ops.py +361 -359
- mindspore/numpy/utils.py +16 -16
- mindspore/numpy/utils_const.py +4 -4
- mindspore/ops/__init__.py +2 -1
- mindspore/ops/_grad_experimental/grad_comm_ops.py +94 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +6 -1
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
- mindspore/ops/_vmap/vmap_array_ops.py +20 -19
- mindspore/ops/_vmap/vmap_base.py +0 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +19 -13
- mindspore/ops/_vmap/vmap_math_ops.py +11 -9
- mindspore/ops/_vmap/vmap_nn_ops.py +20 -34
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +149 -12
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -61
- mindspore/ops/auto_generate/gen_extend_func.py +554 -60
- mindspore/ops/auto_generate/gen_ops_def.py +1621 -115
- mindspore/ops/auto_generate/gen_ops_prim.py +8024 -3409
- mindspore/ops/auto_generate/pyboost_inner_prim.py +183 -79
- mindspore/ops/composite/base.py +1 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +229 -30
- mindspore/ops/composite/multitype_ops/pow_impl.py +0 -29
- mindspore/ops/function/__init__.py +12 -0
- mindspore/ops/function/array_func.py +561 -159
- mindspore/ops/function/clip_func.py +64 -0
- mindspore/ops/function/debug_func.py +28 -20
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +5 -4
- mindspore/ops/function/math_func.py +1659 -290
- mindspore/ops/function/nn_func.py +988 -317
- mindspore/ops/function/parameter_func.py +3 -56
- mindspore/ops/function/random_func.py +243 -33
- mindspore/ops/function/sparse_unary_func.py +1 -1
- mindspore/ops/functional.py +18 -5
- mindspore/ops/functional_overload.py +897 -0
- mindspore/ops/operations/__init__.py +3 -2
- mindspore/ops/operations/_embedding_cache_ops.py +4 -4
- mindspore/ops/operations/_grad_ops.py +2 -34
- mindspore/ops/operations/_infer_ops.py +2 -1
- mindspore/ops/operations/_inner_ops.py +38 -8
- mindspore/ops/operations/array_ops.py +45 -303
- mindspore/ops/operations/comm_ops.py +19 -16
- mindspore/ops/operations/custom_ops.py +11 -55
- mindspore/ops/operations/debug_ops.py +42 -47
- mindspore/ops/operations/inner_ops.py +6 -4
- mindspore/ops/operations/linalg_ops.py +3 -2
- mindspore/ops/operations/manually_defined/ops_def.py +185 -104
- mindspore/ops/operations/math_ops.py +11 -216
- mindspore/ops/operations/nn_ops.py +146 -308
- mindspore/ops/primitive.py +23 -21
- mindspore/ops/tensor_method.py +1669 -0
- mindspore/ops_generate/aclnn_kernel_register_auto_cc_generator.py +110 -0
- mindspore/ops_generate/add_tensor_docs_generator.py +54 -0
- mindspore/ops_generate/arg_handler.py +0 -61
- mindspore/ops_generate/auto_grad_impl_cc_generator.py +135 -0
- mindspore/ops_generate/auto_grad_reg_cc_generator.py +93 -0
- mindspore/ops_generate/base_generator.py +11 -0
- mindspore/ops_generate/cpp_create_prim_instance_helper_generator.py +108 -0
- mindspore/ops_generate/functional_map_cpp_generator.py +491 -0
- mindspore/ops_generate/functional_overload_py_generator.py +110 -0
- mindspore/ops_generate/functions_cc_generator.py +233 -0
- mindspore/ops_generate/gen_aclnn_implement.py +110 -114
- mindspore/ops_generate/gen_constants.py +157 -3
- mindspore/ops_generate/gen_ops.py +245 -990
- mindspore/ops_generate/gen_pyboost_func.py +97 -998
- mindspore/ops_generate/gen_utils.py +119 -33
- mindspore/ops_generate/lite_ops_cpp_generator.py +155 -0
- mindspore/ops_generate/op_api_proto.py +206 -0
- mindspore/ops_generate/op_def_py_generator.py +131 -0
- mindspore/ops_generate/op_prim_py_generator.py +480 -0
- mindspore/ops_generate/op_proto.py +373 -108
- mindspore/ops_generate/op_template_parser.py +436 -0
- mindspore/ops_generate/ops_def_cc_generator.py +288 -0
- mindspore/ops_generate/ops_def_h_generator.py +74 -0
- mindspore/ops_generate/ops_name_h_generator.py +68 -0
- mindspore/ops_generate/ops_primitive_h_generator.py +81 -0
- mindspore/ops_generate/pyboost_functions_cpp_generator.py +370 -0
- mindspore/ops_generate/pyboost_functions_h_generator.py +68 -0
- mindspore/ops_generate/pyboost_functions_py_generator.py +148 -0
- mindspore/ops_generate/pyboost_grad_function_cpp_generator.py +154 -0
- mindspore/ops_generate/pyboost_inner_prim_generator.py +131 -0
- mindspore/ops_generate/pyboost_native_grad_functions_generator.py +268 -0
- mindspore/ops_generate/pyboost_op_cpp_code_generator.py +851 -0
- mindspore/ops_generate/pyboost_overload_functions_cpp_generator.py +344 -0
- mindspore/ops_generate/pyboost_utils.py +92 -33
- mindspore/ops_generate/template.py +294 -44
- mindspore/ops_generate/tensor_func_reg_cpp_generator.py +422 -0
- mindspore/parallel/__init__.py +3 -3
- mindspore/parallel/_auto_parallel_context.py +24 -33
- mindspore/parallel/_parallel_serialization.py +13 -2
- mindspore/parallel/_utils.py +4 -1
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +44 -0
- mindspore/parallel/cluster/process_entity/_api.py +131 -37
- mindspore/parallel/cluster/process_entity/_utils.py +41 -6
- mindspore/parallel/cluster/run.py +20 -3
- mindspore/parallel/parameter_broadcast.py +1 -1
- mindspore/parallel/shard.py +3 -0
- mindspore/parallel/transform_safetensors.py +119 -253
- mindspore/profiler/__init__.py +17 -4
- mindspore/profiler/analysis/__init__.py +0 -0
- mindspore/profiler/analysis/parser/__init__.py +0 -0
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +166 -0
- mindspore/profiler/analysis/parser/base_parser.py +158 -0
- mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
- mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
- mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +261 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +84 -0
- mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
- mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
- mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
- mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +260 -0
- mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
- mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
- mindspore/profiler/analysis/task_manager.py +131 -0
- mindspore/profiler/analysis/time_converter.py +84 -0
- mindspore/profiler/analysis/viewer/__init__.py +0 -0
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +333 -0
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +252 -0
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +313 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +322 -0
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +265 -0
- mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
- mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +97 -0
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
- mindspore/profiler/analysis/work_flow.py +73 -0
- mindspore/profiler/common/ascend_msprof_exporter.py +138 -0
- mindspore/profiler/common/command_executor.py +90 -0
- mindspore/profiler/common/constant.py +174 -3
- mindspore/profiler/common/file_manager.py +208 -0
- mindspore/profiler/common/log.py +130 -0
- mindspore/profiler/common/msprof_cmd_tool.py +202 -0
- mindspore/profiler/common/path_manager.py +371 -0
- mindspore/profiler/common/process_bar.py +168 -0
- mindspore/profiler/common/process_pool.py +9 -3
- mindspore/profiler/common/profiler_context.py +476 -0
- mindspore/profiler/common/profiler_info.py +304 -0
- mindspore/profiler/common/profiler_output_path.py +284 -0
- mindspore/profiler/common/profiler_parameters.py +210 -0
- mindspore/profiler/common/profiler_path_manager.py +120 -0
- mindspore/profiler/common/record_function.py +76 -0
- mindspore/profiler/common/tlv_decoder.py +76 -0
- mindspore/profiler/common/util.py +75 -2
- mindspore/profiler/dynamic_profiler.py +270 -37
- mindspore/profiler/envprofiler.py +138 -0
- mindspore/profiler/mstx.py +199 -0
- mindspore/profiler/platform/__init__.py +21 -0
- mindspore/profiler/platform/base_profiler.py +40 -0
- mindspore/profiler/platform/cpu_profiler.py +124 -0
- mindspore/profiler/platform/gpu_profiler.py +74 -0
- mindspore/profiler/platform/npu_profiler.py +309 -0
- mindspore/profiler/profiler.py +580 -93
- mindspore/profiler/profiler_action_controller.py +187 -0
- mindspore/profiler/profiler_interface.py +114 -0
- mindspore/profiler/schedule.py +208 -0 (step-based profiling schedule; sketch after the list)
- mindspore/rewrite/api/symbol_tree.py +1 -2
- mindspore/run_check/_check_version.py +2 -6
- mindspore/runtime/__init__.py +37 -0 (new mindspore.runtime package; sketch after the list)
- mindspore/runtime/device.py +27 -0
- mindspore/runtime/event.py +209 -0
- mindspore/runtime/executor.py +148 -0
- mindspore/runtime/memory.py +392 -0
- mindspore/runtime/stream.py +460 -0
- mindspore/runtime/thread_bind_core.py +401 -0
- mindspore/train/__init__.py +2 -2
- mindspore/train/_utils.py +53 -18
- mindspore/train/amp.py +8 -4
- mindspore/train/callback/_checkpoint.py +32 -18
- mindspore/train/callback/_early_stop.py +1 -1
- mindspore/train/callback/_flops_collector.py +105 -69
- mindspore/train/callback/_history.py +1 -1
- mindspore/train/callback/_summary_collector.py +44 -6
- mindspore/train/callback/_tft_register.py +31 -10
- mindspore/train/dataset_helper.py +11 -11
- mindspore/train/metrics/precision.py +4 -5
- mindspore/train/mind_ir_pb2.py +167 -46
- mindspore/train/model.py +13 -15
- mindspore/train/serialization.py +462 -76
- mindspore/train/summary/summary_record.py +1 -2
- mindspore/train/train_thor/model_thor.py +1 -1
- mindspore/utils/__init__.py +4 -2
- mindspore/utils/bin/dataset-cache +0 -0
- mindspore/utils/bin/dataset-cache-server +0 -0
- mindspore/utils/dryrun.py +138 -0 (dry-run simulation helpers; sketch after the list)
- mindspore/utils/runtime_execution_order_check.py +550 -0
- mindspore/version.py +1 -1
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/METADATA +2 -3
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/RECORD +532 -466
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/entry_points.txt +1 -1
The following entries were removed in 2.5.0:
- mindspore/_data_dump.cpython-310-x86_64-linux-gnu.so +0 -0
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/common/_tensor_overload.py +0 -139
- mindspore/lib/libmindspore_np_dtype.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -82
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -113
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -193
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/dtype_registry.h +0 -90
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_layer_norm_op.h +0 -60
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_rms_norm_op.h +0 -50
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_rms_norm_quant_op.h +0 -50
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/apply_rotary_pos_emb_nz_op.h +0 -42
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/apply_rotary_pos_emb_op.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_only_ops.h +0 -94
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_op_base.h +0 -97
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/flash_attention_score_op.h +0 -97
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/gelu_op.h +0 -44
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_add_rmsnorm_op.h +0 -73
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -108
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/multi_impls_op.h +0 -64
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/multi_weight_matmul_op.h +0 -91
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/paged_attention_op.h +0 -99
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/reshape_and_cache_nz_op.h +0 -44
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/reshape_and_cache_op.h +0 -44
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/rms_norm_op.h +0 -64
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -179
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/profiling_util.h +0 -366
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -56
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/kernel/add.h +0 -21
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/tiling/add_tiling.h +0 -43
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb.h +0 -23
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_base.h +0 -456
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_bf16.h +0 -217
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp.h +0 -391
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp16.h +0 -126
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -230
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_tiling.h +0 -43
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_value.h +0 -27
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/apply_rotary_pos_emb_nz_impl.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz.h +0 -23
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_base.h +0 -460
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_fp16.h +0 -116
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_fp32.h +0 -230
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_tiling.h +0 -43
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_value.h +0 -27
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -74
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -74
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/kernel/cast_kernel.h +0 -21
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_tiling.h +0 -27
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/kernel/compare_kernel.h +0 -23
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +0 -68
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -99
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +0 -21
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/lccl/lccl_wrapper.h +0 -58
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/ms_int_types.h +0 -91
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/ms_int_utils.h +0 -108
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +0 -64
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/add_param.h +0 -68
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/cast_param.h +0 -30
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -38
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +0 -42
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +0 -33
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -377
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/kernel/reshape_and_cache_nz.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/reshape_and_cache_nz_impl.h +0 -42
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/reshape_and_cache_nz_tiling.h +0 -27
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/kernel/sub_kernel.h +0 -20
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +0 -399
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/utils.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_tiling.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +0 -30
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_core.h +0 -43
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_entity.h +0 -38
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_sink.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_stream.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -71
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -165
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +0 -20
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -121
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -106
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layer_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_quant_acme_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_310p_old_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_old_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_old_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/matmul_add_rmsnorm/matmul_add_rmsnorm_bf16_bf16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/matmul_add_rmsnorm/matmul_add_rmsnorm_bf16_fp16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/matmul_add_rmsnorm/matmul_add_rmsnorm_bf16_fp32.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/matmul_add_rmsnorm/matmul_add_rmsnorm_fp16_bf16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/matmul_add_rmsnorm/matmul_add_rmsnorm_fp16_fp16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/matmul_add_rmsnorm/matmul_add_rmsnorm_fp16_fp32.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_bf16_bnsd_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_bf16_bsh_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_fp16_bnsd_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_fp16_bsh_mix.o +0 -0
- mindspore/profiler/envprofiling.py +0 -254
- mindspore/profiler/profiling.py +0 -1926
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/WHEEL +0 -0
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/top_level.txt +0 -0
mindspore/mint/__init__.py
CHANGED
@@ -16,16 +16,15 @@
 from __future__ import absolute_import
 import mindspore.ops as ops
 from mindspore.ops.primitive import constexpr
-from mindspore.common._register_for_tensor import tensor_operator_registry_for_mint
 from mindspore.common.tensor import Tensor
-from mindspore.ops.function.array_func import gather_ext as gather
+from mindspore.ops.function.array_func import gather_ext as gather
 from mindspore.ops.function.nn_func import conv2d_ext as conv2d
 from mindspore.mint.nn.functional import sigmoid
 from mindspore.mint.nn import functional
 from mindspore.mint import linalg
 from mindspore.mint import special
 from mindspore.mint import distributed
-from mindspore.ops import erf
+from mindspore.ops import erf
 from mindspore.ops.function.math_func import linspace_ext as linspace
 from mindspore.ops.function.math_func import median_ext as median
 from mindspore.ops.function.array_func import ones_like_ext as ones_like
@@ -33,16 +32,24 @@ from mindspore.ops.function.array_func import full_ext as full
 from mindspore.ops.function.array_func import zeros_like_ext as zeros_like
 from mindspore.ops.function.array_func import unique_ext as unique
 from mindspore.ops.function.array_func import chunk_ext as chunk
+from mindspore.ops.functional_overload import empty
+from mindspore.ops.function.array_func import empty_like
 from mindspore.ops.function.math_func import isclose
 from mindspore.ops.auto_generate import abs
+from mindspore.ops.auto_generate import clone
+from mindspore.ops.function.array_func import full_like_ext as full_like
 # 1
-from mindspore.ops.function.math_func import divide
+from mindspore.ops.function.math_func import divide
 from mindspore.ops.auto_generate import topk_ext as topk
 from mindspore.ops.function.math_func import roll
 # 2
 from mindspore.ops.function.math_func import sin
 # 3
-from mindspore.ops.
+from mindspore.ops.functional_overload import clamp, where
+from mindspore.ops.functional_overload import clip
+from mindspore.ops.functional_overload import fmod
+from mindspore.ops.functional_overload import max
+from mindspore.ops.functional_overload import min
 # 4
 from mindspore.ops.auto_generate import sinc
 from mindspore.ops.auto_generate import sinh
@@ -57,19 +64,22 @@ from mindspore.ops.auto_generate import stack_ext as stack
 from mindspore.ops.function.array_func import unsqueeze
 # 8
 from mindspore.ops.auto_generate import transpose_ext as transpose
+from mindspore.ops.auto_generate import batch_norm_elemt
+from mindspore.ops.auto_generate import batch_norm_gather_stats_with_counts
+from mindspore.ops.auto_generate import batch_norm_stats
 # 9
 from mindspore.ops.auto_generate import masked_select
 from mindspore.ops.function.math_func import cross
 # 10
 from mindspore.ops.function.math_func import ne
 # 11
-
+from mindspore.ops.function.math_func import cdist as cdist_
 # 12
-from mindspore.ops.
+from mindspore.ops.functional_overload import repeat_interleave
 # 13
 from mindspore.ops.functional import flip
 # 14
-
+from mindspore.ops.auto_generate import mv
 # 15
 from mindspore.ops.auto_generate import flatten_ext as flatten
 # 16
@@ -78,17 +88,17 @@ from mindspore.ops.auto_generate import bmm_ext as bmm
 # 17

 # 18
-
+
 # 19
 from mindspore.ops.functional import log
 # 20

 # 21
-from mindspore.ops.
+from mindspore.ops.function.math_func import mul_ext as mul
 # 22
-
+from mindspore.ops.functional import cumprod
 # 23
-
+from mindspore.ops.auto_generate import exp2
 # 24

 # 25
@@ -170,7 +180,7 @@ from mindspore.ops.functional import maximum
 # 63
 from mindspore.ops.functional import minimum
 # 64
-
+from mindspore.ops.functional import ravel
 # 65
 from mindspore.ops.functional import logical_and
 # 66
@@ -184,7 +194,7 @@ from mindspore.ops.functional import less_equal, le
 # 70
 from mindspore.ops.functional import negative, neg
 # 71
-
+
 # 72

 # 73
@@ -194,7 +204,7 @@ from mindspore.ops.function.array_func import sort_ext as sort
 # 75
 from mindspore.ops.functional import less, lt
 # 76
-from mindspore.ops.
+from mindspore.ops.function.math_func import pow_ext as pow
 # 77

 # 78
@@ -202,13 +212,13 @@ from mindspore.ops.function import arange_ext as arange
 # 79

 # 80
-
+from mindspore.ops.functional_overload import div
 # 81
 from mindspore.ops.auto_generate import index_select_ext as index_select
 # 82
 from mindspore.ops.auto_generate import cummin_ext as cummin
 # 83
-from mindspore.ops.
+from mindspore.ops.auto_generate import narrow
 # 84

 # 85
@@ -220,19 +230,19 @@ from mindspore.ops.auto_generate import trunc
 # 88

 # 89
-
+from mindspore.ops.auto_generate import argsort_ext as argsort
 # 90
-
+from mindspore.ops.auto_generate import isinf
 # 91

 # 92
-
+from mindspore.ops.function.math_func import polar
 # 93

 # 94
 from mindspore.ops.function.math_func import tanh
 # 95
-
+from mindspore.ops.function.math_func import diff_ext as diff
 # 96

 # 97
@@ -262,7 +272,7 @@ from mindspore.ops.function.math_func import tanh
 # 109
 from mindspore.ops.auto_generate import argmin_ext as argmin
 # 110
-
+from mindspore.ops.function.nn_func import softmax_ext
 # 111

 # 112
@@ -282,11 +292,14 @@ from mindspore.ops.auto_generate import argmin_ext as argmin
 # 119

 # 120
-
+from mindspore.ops.auto_generate import isneginf_ext as isneginf
 # 121

 # 122

+# 123
+from mindspore.ops.function.math_func import var_ext as var
+
 # 151
 from mindspore.ops.function.math_func import acos_ext as acos
 from mindspore.ops.function.math_func import arccos_ext as arccos
@@ -332,12 +345,14 @@ from mindspore.ops.function.random_func import randint_like_ext as randint_like
 from mindspore.ops.auto_generate import floor
 # 231
 from mindspore.ops.function.math_func import inverse_ext as inverse
+# 239
+from mindspore.ops.functional_overload import lerp
 # 244
 from mindspore.ops.auto_generate import log1p
 # 261
 from mindspore.ops.function.random_func import multinomial_ext as multinomial
 # 275
-from mindspore.ops.
+from mindspore.ops.functional_overload import remainder
 # 285
 from mindspore.ops.function.array_func import scatter_add_ext as scatter_add
 # 289
@@ -350,6 +365,7 @@ from mindspore.ops.function.math_func import tan

 # 303
 from mindspore.ops.auto_generate import trace_ext as trace
+from mindspore.ops.auto_generate import gcd

 from mindspore.ops.function.array_func import reshape

@@ -357,22 +373,93 @@ from mindspore.ops.auto_generate import outer_ext as outer

 # 304
 from mindspore.ops.function.array_func import tril_ext as tril
+# 520
+from mindspore.ops.function.math_func import bincount_ext as bincount

 # 305
 from mindspore.ops import triu

+# 308
+from mindspore.ops.auto_generate import mm_ext as mm
+
+# 382
+from mindspore.ops.function.math_func import dstack
+
+# 501
+from mindspore.ops.function.math_func import addbmm_ext as addbmm
+
+# 502
+from mindspore.ops.function.math_func import addmm_ext as addmm
+
+# 505
+from mindspore.ops.function.math_func import addmv_ext as addmv
+
+# 510
+from mindspore.ops.function.math_func import amax_ext as amax
+
+# 511
+from mindspore.ops.function.math_func import amin_ext as amin
+
+# 521
+from mindspore.ops.functional_overload import bitwise_not
+
+# 526
+from mindspore.ops.auto_generate import dot
+
+# 533
+from mindspore.ops.function.math_func import frac_ext as frac
+
 # 538
 from mindspore.ops.function.math_func import histc_ext as histc

+# 552
+from mindspore.ops.auto_generate import log10_ext as log10
+
 # 553
 from mindspore.ops.auto_generate import logaddexp_ext as logaddexp

+# 557
+from mindspore.ops.auto_generate import logsumexp_ext as logsumexp
+
+# 582
+from mindspore.ops.function.math_func import std_mean_ext as std_mean
+
+# 588
+from mindspore.ops.function.math_func import var_mean_ext as var_mean
+
 # 610
 from mindspore.ops.function.math_func import nan_to_num

+# 613
+from mindspore.ops.functional_overload import nansum
+
+# 664
+from mindspore.ops.function.array_func import meshgrid_ext as meshgrid
+
 # 695
 from mindspore.ops.auto_generate import count_nonzero

+# 697
+from mindspore.ops.function.math_func import float_power_ext as float_power
+
+# 708
+from mindspore.ops.function.math_func import std_ext as std
+
+# 887
+from mindspore.ops.auto_generate import log2_ext as log2
+
+# 889
+from mindspore.ops.function.math_func import isnan_ext as isnan
+
+# 1007
+from mindspore.ops.auto_generate import t_ext as t
+from mindspore.ops.auto_generate.pyboost_inner_prim import squeeze_impl
+from mindspore.ops.auto_generate.gen_ops_prim import equal_ext_op
+
+
+# 1023
+from mindspore.ops.function.array_func import unbind_ext as unbind
+

 def add(input, other, *, alpha=1):
     r"""
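Note: the hunks above mostly register new exports on the `mindspore.mint` namespace (`var`, `amax`, `amin`, `log2`, `isnan`, `bincount`, `meshgrid`, and so on). A minimal usage sketch, assuming a MindSpore 2.5.0 install where these exports resolve as shown; printed values are illustrative, not verified output:

    import numpy as np
    import mindspore
    from mindspore import Tensor, mint

    x = Tensor(np.array([[1.0, 4.0], [2.0, 8.0]]), mindspore.float32)

    print(mint.amax(x, 1))   # per-row maximum, backed by amax_ext
    print(mint.var(x))       # variance over all elements, backed by var_ext
    print(mint.log2(x))      # elementwise base-2 logarithm, backed by log2_ext
    print(mint.isnan(x))     # elementwise NaN test, backed by isnan_ext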
@@ -399,7 +486,7 @@ def add(input, other, *, alpha=1):
     `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.

     Keyword Args:
-        alpha (number.Number): A scaling factor applied to `other`, default 1
+        alpha (number.Number): A scaling factor applied to `other`, default: ``1``.

     Returns:
         Tensor with a shape that is the same as the broadcasted shape of the input `input` and `other`,
@@ -445,7 +532,7 @@ def any(input, dim=None, keepdim=False):
         input (Tensor): Input Tensor, has the shape :math:`(N, *)` where :math:`*` means,
             any number of additional dimensions.
         dim (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
-            Suppose the rank of `input` is r, `dim` must be in the range [-
+            Suppose the rank of `input` is r, `dim` must be in the range [-r,r).
             Default: ``None`` , all dimensions are reduced.
         keepdim (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
             If ``False`` , don't keep these dimensions. Default : ``False`` .
@@ -457,7 +544,7 @@ def any(input, dim=None, keepdim=False):
           the output is a 0-D Tensor representing the "logical OR" of all elements in the input Tensor.
         - If `dim` is int, such as 2, and `keepdim` is ``False`` ,
           the shape of output is :math:`(input_1, input_3, ..., input_R)`.
-        - If `dim` is tuple(int), such as (2, 3), and `keepdim` is ``False`` ,
+        - If `dim` is tuple(int) or list(int), such as (2, 3), and `keepdim` is ``False`` ,
           the shape of output is :math:`(input_1, input_4, ..., input_R)`.
         - If `dim` is 1-D Tensor, such as [2, 3], and `keepdim` is ``False`` ,
           the shape of output is :math:`(input_1, input_4, ..., input_R)`.
@@ -494,6 +581,35 @@ def any(input, dim=None, keepdim=False):

 def all(input, dim=None, keepdim=False):
     r"""
+    all(input) -> Tensor
+
+    Reduces all elements of `input` by the "logical AND".
+
+    Args:
+        input (Tensor): Input Tensor, has the shape :math:`(N, *)` where :math:`*` means,
+            any number of additional dimensions.
+
+    Returns:
+        Tensor, the dtype is bool.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> x = Tensor(np.array([[True, False], [True, True]]))
+        >>> # case 1: Reduces a dimension by the "logicalAND" of all elements in the dimension.
+        >>> output = mint.all(x)
+        >>> print(output)
+        False
+
+    .. function:: all(input, dim, keepdim=False) -> Tensor
+        :noindex:
+
     Reduces a dimension of `input` by the "logical AND" of all elements in the dimension, by default. And also can
     reduce a dimension of `input` along the `dim`. Determine whether the dimensions of the output and input are the
     same by controlling `keepdim`.
@@ -504,20 +620,17 @@ def all(input, dim=None, keepdim=False):
     Args:
         input (Tensor): Input Tensor, has the shape :math:`(N, *)` where :math:`*` means,
             any number of additional dimensions.
-        dim (Union[int, tuple(int), list(int), Tensor]
+        dim (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce.
             Suppose the rank of `input` is r, `dim` must be in the range [-rank(input), rank(input)).
-            Default: ``None`` , all dimensions are reduced.
         keepdim (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
             If ``False`` , don't keep these dimensions. Default : ``False`` .

     Returns:
         Tensor, the dtype is bool.

-        - If `dim` is ``None`` , and `keepdim` is ``False`` ,
-          the output is a 0-D Tensor representing the "logical AND" of all elements in the input Tensor.
         - If `dim` is int, such as 2, and `keepdim` is ``False`` ,
           the shape of output is :math:`(input_1, input_3, ..., input_R)`.
-        - If `dim` is tuple(int), such as (2, 3), and `keepdim` is ``False`` ,
+        - If `dim` is tuple(int) or list(int), such as (2, 3), and `keepdim` is ``False`` ,
           the shape of output is :math:`(input_1, input_4, ..., input_R)`.
         - If `dim` is 1-D Tensor, such as [2, 3], and `keepdim` is ``False`` ,
           the shape of output is :math:`(input_1, input_4, ..., input_R)`.
@@ -534,17 +647,11 @@ def all(input, dim=None, keepdim=False):
         >>> import numpy as np
         >>> from mindspore import Tensor, mint
         >>> x = Tensor(np.array([[True, False], [True, True]]))
-        >>> # case 1: Reduces a dimension
-        >>> output = mint.all(x, keepdim=True)
-        >>> print(output)
-        [[False]]
-        >>> print(output.shape)
-        (1, 1)
-        >>> # case 2: Reduces a dimension along axis 0.
+        >>> # case 1: Reduces a dimension along axis 0.
         >>> output = mint.all(x, dim=0)
         >>> print(output)
         [ True False]
-        >>> # case
+        >>> # case 2: Reduces a dimension along axis 1.
        >>> output = mint.all(x, dim=1)
        >>> print(output)
        [False True]
@@ -552,6 +659,53 @@ def all(input, dim=None, keepdim=False):
     return ops.function.math_func.all(input, dim, keepdim)


+def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False):
+    """
+    Returns a new Tensor with boolean elements representing if each element of `input`
+    is “close” to the corresponding element of `other`. Closeness is defined as:
+
+    .. math::
+        |input-other| ≤ atol + rtol × |other|
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): First tensor to compare.
+            Support dtype: float16, float32, float64, int8, int16, int32, int64 and uint8.
+            On Ascend, more dtypes are support: bool and bfloat16.
+        other (Tensor): Second tensor to compare. Dtype must be same as `input`.
+        rtol (Union[float, int, bool], optional): Relative tolerance. Default: ``1e-05`` .
+        atol (Union[float, int, bool], optional): Absolute tolerance. Default: ``1e-08`` .
+        equal_nan (bool, optional): If ``True`` , then two NaNs will be considered equal. Default: ``False``.
+
+    Returns:
+        A bool Scalar.
+
+    Raises:
+        TypeError: `input` or `other` is not Tensor.
+        TypeError: `input` or `other` dtype is not support.
+        TypeError: `atol` or `rtol` is not float, int or bool.
+        TypeError: `equal_nan` is not bool.
+        TypeError: `input` and `other` have different dtypes.
+        ValueError: `input` and `other` cannot broadcast.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([1.3, 2.1, 3.2, 4.1, 5.1]), mindspore.float16)
+        >>> other = Tensor(np.array([1.3, 3.3, 2.3, 3.1, 5.1]), mindspore.float16)
+        >>> output = mint.allclose(input, other)
+        >>> print(output)
+        False
+    """
+    return isclose(input, other, rtol, atol, equal_nan).all().item()
+
+
 def cat(tensors, dim=0):
     r"""
     Connect input tensors along with the given dimension.
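Note: the `allclose` added above is a thin reduction over `isclose`: it applies the |input - other| <= atol + rtol * |other| test elementwise, ANDs the results, and unwraps them to a Python bool via `.item()`. A NumPy sketch of the same semantics, as a reference model rather than MindSpore's implementation:

    import numpy as np

    def allclose_ref(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
        # Elementwise closeness test, then reduce with logical AND.
        close = np.abs(a - b) <= atol + rtol * np.abs(b)
        if equal_nan:
            # Two NaNs compare as close when equal_nan is set.
            close |= np.isnan(a) & np.isnan(b)
        return bool(close.all())

    a = np.array([1.3, 2.1, 3.2], dtype=np.float16)
    b = np.array([1.3, 3.3, 2.3], dtype=np.float16)
    print(allclose_ref(a, b))  # False: 2.1 vs 3.3 exceeds the tolerance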
@@ -572,7 +726,7 @@ def cat(tensors, dim=0):
             all other dimensions should be equal, that is,
             :math:`t1.shape[1] = t2.shape[1], t1.shape[2] = t2.shape[2], ..., t1.shape[R-1] = t2.shape[R-1]`,
             where :math:`R` represents the rank of tensor.
-        dim (int): The specified dimension, whose value is in range :math:`[-R, R)`. Default: ``0`` .
+        dim (int, optional): The specified dimension, whose value is in range :math:`[-R, R)`. Default: ``0`` .

     Returns:
         Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
@@ -611,10 +765,13 @@ def cat(tensors, dim=0):

 def concat(tensors, dim=0):
     r"""
+    Alias for :func:`mindspore.mint.cat`.
+
     .. warning::
         This is an experimental API that is subject to change or deletion.

-
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
     """
     return cat(tensors, dim)

@@ -629,6 +786,9 @@ def cummax(input, dim):
             y_{i} = \max(x_{1}, x_{2}, ... , x_{i})
         \end{array}

+    .. note::
+        O2 mode is not supported in Ascend.
+
     Args:
         input (Tensor): The input Tensor. Rank of `input` must be greater than 0.
         dim (int): The dimension to do the operation over. The value of `dim` must be in the range
@@ -643,9 +803,6 @@ def cummax(input, dim):
         TypeError: If `dim` is not an int.
         ValueError: If `dim` is out the range of `[-input.ndim, input.ndim - 1]`.

-    .. note::
-        O2 mode is not supported in Ascend.
-
     Supported Platforms:
         ``Ascend``

@@ -670,6 +827,26 @@ def cummax(input, dim):
     return ops.auto_generate.cummax(input, dim)


+def not_equal(input, other):
+    r"""
+    Alias for :func:`mindspore.mint.ne` .
+
+    Supported Platforms:
+        ``Ascend``
+    """
+    return ne(input, other)
+
+
+def softmax(input, dim, *, dtype=None):
+    r"""
+    Alias for :func:`mindspore.mint.nn.functional.softmax`.
+
+    Supported Platforms:
+        ``Ascend``
+    """
+    return softmax_ext(input, dim, dtype)
+
+
 def _einsum_convert_sublist_to_label(num, ell_num=False):
     """Convert sublist to label."""
     if num == Ellipsis or ell_num and num == 52:
@@ -678,7 +855,8 @@ def _einsum_convert_sublist_to_label(num, ell_num=False):
         return chr(num + ord('A'))
     if 26 <= num < 52:
         return chr(num + ord('a') - 26)
-    raise ValueError(
+    raise ValueError(
+        f'For einsum, the number in sublist must be in range [0, 52), but got {num}')


 def _einsum_convert_label_to_index(label):
@@ -690,7 +868,8 @@ def _einsum_convert_label_to_index(label):
         return label_num - ord('a') + 26
     if label_num == ord('.'):
         return 52
-    raise ValueError(
+    raise ValueError(
+        f'For einsum, the label in equation must be in [a-zA-Z] or ., but got {label}')


 def _einsum_convert_sublist(equation, *operands):
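Note: the two helpers above convert between sublist integers and subscript labels: 0-25 map to 'A'-'Z' via `chr(num + ord('A'))`, 26-51 map to 'a'-'z' via `chr(num + ord('a') - 26)`, and 52 (or `Ellipsis`) stands for '...'. A standalone sketch of the forward mapping, simplified from the `ell_num` handling in the real helper:

    def sublist_to_label(num):
        # 52 and Ellipsis both denote the '...' wildcard label.
        if num == Ellipsis or num == 52:
            return '...'
        if 0 <= num < 26:
            return chr(num + ord('A'))      # 0 -> 'A', 25 -> 'Z'
        if 26 <= num < 52:
            return chr(num + ord('a') - 26)  # 26 -> 'a', 51 -> 'z'
        raise ValueError(f'number must be in [0, 52), got {num}')

    print(sublist_to_label(0))    # 'A'
    print(sublist_to_label(26))   # 'a'
    print(sublist_to_label(...))  # '...'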
@@ -713,18 +892,21 @@ def _einsum_convert_sublist(equation, *operands):
         operands_tmp = list([equation]) + list(operands[1::2])
         equation = equation_tmp
         operands = tuple(operands_tmp)
-    if len(operands) == 0:
-        raise ValueError(
+    if len(operands) == 0:  # pylint: disable=len-as-condition
+        raise ValueError(
+            "For einsum, the 'operands' must have at least one operand.")
     return equation, operands


 def _einsum_check_inputargs(equation, operands):
     """Check equation and operands."""
     if not isinstance(equation, str):
-        raise TypeError(
+        raise TypeError(
+            f"For einsum, 'equation' must be a str, but got {type(equation)}.")
     for operand in operands:
         if not isinstance(operand, Tensor):
-            raise TypeError(
+            raise TypeError(
+                f"For einsum, members of 'operands' must be Tensor, but got {type(operand)}.")


 @constexpr
@@ -737,7 +919,8 @@ def _einsum_parse_equation(equation):
     if '->' in equation:
         l_equation, r_equation = equation.split('->', 1)
         if l_equation == '':
-            raise ValueError(
+            raise ValueError(
+                'For einsum, equation must contain characters to the left fo the arrow.')
     else:
         l_equation = equation

@@ -752,13 +935,15 @@ def _einsum_parse_equation(equation):
         if '.' in subequation and ('...' not in subequation or subequation.count('.') != 3):
             raise ValueError(f"For einsum, an ellipsis in the equation must include three continuous \'.\', "
                              f"and can only be found once.")
-        subequation_lst = [_einsum_convert_label_to_index(
+        subequation_lst = [_einsum_convert_label_to_index(
+            label) for label in subequation.replace('...', '.')]
         l_equationlst.append(subequation_lst)

     if "." in r_equation and ('...' not in r_equation or r_equation.count('.') != 3):
         raise ValueError(f"For einsum, an ellipsis in the equation must include three continuous \'.\', "
                          f"and can only be found once.")
-    r_equationlst = [_einsum_convert_label_to_index(
+    r_equationlst = [_einsum_convert_label_to_index(
+        label) for label in r_equation.replace('...', '.')]

     return l_equationlst, r_equationlst, ('->' in equation)
@@ -767,8 +952,8 @@ def _einsum_parse_labels(l_equationlst, operands):
     """Parse left script of equation."""
     align_rank = 0
     max_labels = 53
+    ellipsis_dimnum = 0
     labels_count = [0] * max_labels
-    labels2dimlst = [None] * max_labels

     if len(operands) != len(l_equationlst):
         raise ValueError(f"For einsum, 'operands' is not equal to specified in the 'equation', "
@@ -779,67 +964,70 @@ def _einsum_parse_labels(l_equationlst, operands):
         label_num = 0
         operand_shape = list(operands[idx].shape)
         for label in sub_equ:
+            dim_num = 1
             label_num += 1
             end_dim = start_dim + 1

             # Label is ellipsis
             if label == 52:
                 end_dim = len(operand_shape) - len(sub_equ) + label_num
-
-
-
-
-
-
-
-                                     f"represented different dimensions.")
+                dim_num = end_dim - start_dim
+                if ellipsis_dimnum != 0 and ellipsis_dimnum != dim_num:
+                    raise ValueError(f"For einsum, an ellipsis in 'equation' can only represent the same numbers of "
+                                     f"dimensions in 'operands'.")
+                ellipsis_dimnum = dim_num
+            if labels_count[label] == 0:
+                align_rank += dim_num
             labels_count[label] += 1
-            start_dim
+            start_dim += dim_num
         if label_num != len(sub_equ) or start_dim != len(operand_shape):
             raise ValueError(f"For einsum, the numbers of labels specified in the 'equation' does not match "
                              f"'operands[{idx}]'.")
-    return
+    return ellipsis_dimnum, labels_count, align_rank


-def _einsum_infer_output(r_equationlst, arrow_exist,
+def _einsum_infer_output(r_equationlst, arrow_exist, ellipsis_dimnum, labels_count):
     """Parse right script of equation and infer output shape."""
     idx = 0
     idle_idx = -1
-
+    output_rank = 0
     labels_perm_idx = [idle_idx] * 53

     if arrow_exist:
         for label in r_equationlst:
             if labels_count[label] != 0:
-                output_shape += labels2dimlst[label]
                 if labels_perm_idx[label] != idle_idx:
                     raise ValueError(f"For einsum, '{_einsum_convert_sublist_to_label(label, True)}' or {label} in "
                                      f"sublist format has appears more than once in output subscript.")
+                dimnum = 1
+                if label == 52:
+                    dimnum = ellipsis_dimnum
                 labels_perm_idx[label] = idx
-
+                output_rank += dimnum
+                idx += dimnum
             else:
                 raise ValueError(f"For einsum, the label to the right of arrow in the 'equation' must appear on "
                                  f"left, but '{_einsum_convert_sublist_to_label(label, True)}' does not.")
     else:
         if labels_count[52] != 0:
-
+            output_rank += ellipsis_dimnum
             labels_perm_idx[52] = idx
-            idx +=
+            idx += ellipsis_dimnum
         for label, count in enumerate(labels_count):
             if count == 1:
-
+                output_rank += 1
                 labels_perm_idx[label] = idx
-                idx +=
+                idx += 1

     for label, count in enumerate(labels_count):
         if count != 0 and labels_perm_idx[label] == idle_idx:
             labels_perm_idx[label] = idx
             idx += 1

-    return
+    return output_rank, labels_perm_idx


-def _einsum_adjust_operands(operands, l_equationlst,
+def _einsum_adjust_operands(operands, l_equationlst, ellipsis_dimnum, labels_perm_idx, align_rank):
     """Align operands to output as possible."""
     # Unsqueeze miss dimensions to make all operands has same rank, compute diagonal if operand has same label.
     # Then use _labels_perm_idx to transpose all operands to align dimensions with output.
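Note: in the reworked `_einsum_parse_labels` above, an ellipsis expands to `end_dim - start_dim` dimensions, where `end_dim = len(operand_shape) - len(sub_equ) + label_num`, i.e. the operand's rank minus the explicitly labeled dimensions. A worked example with assumed values:

    # Equation '...ij' applied to a rank-4 operand: sub_equ has three
    # labels ('...', 'i', 'j'), and the ellipsis is the first of them.
    operand_shape = [2, 5, 3, 4]   # rank 4
    sub_equ_len = 3                # '...', 'i', 'j'
    label_num = 1                  # ellipsis is label number 1
    start_dim = 0

    end_dim = len(operand_shape) - sub_equ_len + label_num  # 4 - 3 + 1 = 2
    dim_num = end_dim - start_dim                           # ellipsis covers 2 dims
    print(dim_num)  # 2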
@@ -865,7 +1053,7 @@ def _einsum_adjust_operands(operands, l_equationlst, labels2dimlst, labels_perm_
             else:
                 label_dims[label] = dim
                 if label == 52:
-                    for ell_idx in range(
+                    for ell_idx in range(ellipsis_dimnum):
                         align_axis[labels_perm_idx[label] + ell_idx] = dim
                         dim += 1
                 else:
@@ -886,7 +1074,7 @@ def _einsum_adjust_operands(operands, l_equationlst, labels2dimlst, labels_perm_

 def _einsum_find_dimlastop(align_rank, operands, adjust_operands):
     """Find dim last operand."""
-    dim_last_op = [0
+    dim_last_op = [0] * align_rank
     has_zero_dim = False
     for dim in range(align_rank):
         broadcast_dim = adjust_operands[0].shape[dim]
@@ -939,7 +1127,7 @@ def _einsum_multiplication(sum_dims, l_tensor, r_tensor):
         ronly_size *= r_shape[i]

     # Compute the einsum bmm operators pipeline.
-    # The whole operators
+    # The whole operators pipeline is transpose(in) -> reshape(in) -> bmm(in) -> reshape(out) -> transpose(out).
     l_reshape_shape = (batch_size, lonly_size, sum_size)
     r_reshape_shape = (batch_size, sum_size, ronly_size)

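Note: the restored comment describes the contraction pipeline: transpose to group the (batch, left-only, summed) and (batch, summed, right-only) axes, reshape each group to a single axis, batch-matmul, then reshape and transpose back. A NumPy sketch of that idea under assumed group shapes:

    import numpy as np

    # Assumed grouped dims: batch (2,), left-only (3, 2), summed (4,), right-only (5,).
    l = np.random.rand(2, 3, 2, 4)   # batch, left-only..., summed
    r = np.random.rand(2, 4, 5)      # batch, summed, right-only

    l2 = l.reshape(2, 3 * 2, 4)      # collapse left-only dims to one axis
    out = np.matmul(l2, r)           # batched matmul -> (2, 6, 5)
    out = out.reshape(2, 3, 2, 5)    # restore the left-only layout
    print(out.shape)                 # (2, 3, 2, 5)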
@@ -976,37 +1164,33 @@ def _einsum_multiplication(sum_dims, l_tensor, r_tensor):
     return reshape(output, output_squeeze_shape)


-def _einsum_squeeze(operand, dim):
-    '''Will be replaced by mint.squeeze in the future'''
-    operand_shape = operand.shape
-    squeeze_shape = []
-    for idx in range(len(operand_shape)):
-        if idx != dim:
-            squeeze_shape.append(operand_shape[idx])
-    return reshape(operand, squeeze_shape)
-
-
 def _einsum(equation, operands):
     '''Einsum main process'''
-    _l_equationlst, _r_equationlst, _arrow_exist = _einsum_parse_equation(
-
-
-
-
-
-
+    _l_equationlst, _r_equationlst, _arrow_exist = _einsum_parse_equation(
+        equation)
+    _ellipsis_dimnum, _labels_count, _align_rank = _einsum_parse_labels(
+        _l_equationlst, operands)
+    _output_rank, _labels_perm_idx = _einsum_infer_output(
+        _r_equationlst, _arrow_exist, _ellipsis_dimnum, _labels_count)
+    _adjust_operands = _einsum_adjust_operands(operands, _l_equationlst, _ellipsis_dimnum, _labels_perm_idx,
+                                               _align_rank)
+    _dim_last_op, _has_zero_dim = _einsum_find_dimlastop(
+        _align_rank, operands, _adjust_operands)
     _result = _adjust_operands[0]

     # Fast path if operands has zero dim.
     if _has_zero_dim:
-
+        output_shape = []
+        for dim in range(_output_rank):
+            output_shape.append(_adjust_operands[_dim_last_op[dim]].shape[dim])
+        return zeros(output_shape, dtype=_result.dtype)

     # Sum or squeeze dimensions that is 1 for all rest operands.
     _reduce_dim = _output_rank
     for dim in range(_output_rank, _align_rank):
         if _dim_last_op[dim] == 0:
             if _result.shape[_reduce_dim] == 1:
-                _result =
+                _result = squeeze(_result, _reduce_dim)
             else:
                 _result = sum(_result, _reduce_dim)
         else:
@@ -1019,11 +1203,11 @@ def _einsum(equation, operands):
             sum_dims = []
             for j in range(_output_rank, _align_rank):
                 if _dim_last_op[j] < i:
-                    operand =
+                    operand = squeeze(operand, dim)
                 elif _dim_last_op[j] == i:
                     if _result.shape[dim] == 1:
                         operand = sum(operand, dim)
-                        _result =
+                        _result = squeeze(_result, dim)
                     else:
                         sum_dims.append(dim)
                         dim += 1
@@ -1050,16 +1234,22 @@ def einsum(equation, *operands):
     The sublist format is also supported. For example, mint.einsum(op1, sublist1, op2, sublist2, ..., sublist_out).
     In this format, equation can be derived by the sublists which are made up of Python's Ellipsis and list of
     integers in [0, 52). Each operand is followed by a sublist and an output sublist is at the end.
+    Dynamic shape, dynamic rank input is not supported in `graph mode (mode=mindspore.GRAPH_MODE)
+    <https://www.mindspore.cn/docs/en/master/model_train/program_form/static_graph.html>`_.

     .. warning::
         This is an experimental API that is subject to change or deletion.

     Args:
         equation (str): Notation based on the Einstein summation convention, represent the operation you want to do.
-            the value can contain only letters, commas, ellipsis and arrow.
-
-
-
+            the value can contain only letters, commas, ellipsis and arrow. The letters(must be in [a-zA-Z]) represent
+            input tensor dimension, commas(,) represent separate tensors, ellipsis indicates the tensor dimension that
+            you do not care about, the left of the arrow indicates the input tensors, and the right of it indicates the
+            desired output dimension. If there are no arrows in the equation, the letters that appear exactly once in
+            the equation will be part of the output, sorted in increasing alphabetical order. The output is computed by
+            multiplying the input operands element-wise, with their dimensions aligned based on the letters, and then
+            summing out the dimensions whose letters are not part of the output. If there is one arrow in the equation,
+            the output letters must appear at least once for some input operand and at most once for the output.
         operands (Tensor): Input tensor used for calculation. The dtype of the tensor must be the same.

     Returns:
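Note: a usage sketch of the sublist format the docstring above describes, assuming a MindSpore 2.5.0 install; the integer labels play the role of subscript letters, and the trailing list gives the output subscripts:

    import numpy as np
    import mindspore
    from mindspore import Tensor, mint

    a = Tensor(np.ones((2, 3)), mindspore.float32)
    b = Tensor(np.ones((3, 4)), mindspore.float32)

    # Sublist equivalent of mint.einsum('ij,jk->ik', a, b):
    out = mint.einsum(a, [0, 1], b, [1, 2], [0, 2])
    print(out.shape)  # (2, 4)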
@@ -1138,6 +1328,81 @@ def einsum(equation, *operands):
     return _einsum(_equation, _operands)


+def equal(input, other):
+    r"""
+    Computes the equivalence between two tensors.
+
+    Note:
+        `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The first input.
+        other (Tensor): The second input.
+
+    Returns:
+        bool.
+
+    Raises:
+        TypeError: If `input` or `other` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> x = Tensor([1, 2, 3], mindspore.int32)
+        >>> y = Tensor([1, 2, 4], mindspore.int32)
+        >>> output = mint.equal(x, y)
+        >>> print(output)
+        False
+    """
+    result = equal_ext_op(input, other)
+    return result.item()
+
+
+def isfinite(input):
+    r"""
+    Determine which elements are finite for each position. If elements are not ``NaN`` , ``-INF`` , ``INF``,
+    they are finite.
+
+    .. math::
+        out_i = \begin{cases}
+          & \text{ if } input_{i} = \text{Finite},\ \ True \\
+          & \text{ if } input_{i} \ne \text{Finite},\ \ False
+        \end{cases}
+
+    Args:
+        input (Tensor): The input tensor.
+
+    Returns:
+        Tensor, has the same shape of input, and the dtype is bool.
+
+    Raises:
+        TypeError: If input is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
+        >>> output = mint.isfinite(x)
+        >>> print(output)
+        [False True False]
+        >>> x = Tensor(2.1, mindspore.float64)
+        >>> output = mint.isfinite(x)
+        >>> print(output)
+        True
+    """
+    return ops.auto_generate.isfinite(input)
+
+
 def item(input):
     r"""
     Returns the value of this tensor as a standard Python number.
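Note the contrast the new function sets up: `mint.equal` compares whole tensors and unwraps to a single Python bool via `.item()`, while the existing elementwise comparison is `mint.ne` (aliased by `not_equal` earlier in this file). A small sketch, assuming MindSpore 2.5.0 on a supported backend:

    import mindspore
    from mindspore import Tensor, mint

    x = Tensor([1, 2, 3], mindspore.int32)
    y = Tensor([1, 2, 4], mindspore.int32)

    print(mint.equal(x, y))  # False -- one Python bool for the whole tensor
    print(mint.ne(x, y))     # [False False  True] -- an elementwise Tensor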
@@ -1178,6 +1443,43 @@ def item(input):
|
|
|
1178
1443
|
|
|
1179
1444
|
def mean(input, dim=None, keepdim=False, *, dtype=None):
|
|
1180
1445
|
r"""
|
|
1446
|
+
mean(input, *, dtype=None) -> Tensor
|
|
1447
|
+
|
|
1448
|
+
Reduces all dimension of a tensor by averaging all elements.
|
|
1449
|
+
|
|
1450
|
+
Args:
|
|
1451
|
+
input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
|
|
1452
|
+
:math:`(N, *)` where :math:`*` means, any number of additional dimensions.
|
|
1453
|
+
|
|
1454
|
+
Keyword Args:
|
|
1455
|
+
dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None`` .
|
|
1456
|
+
|
|
1457
|
+
Returns:
|
|
1458
|
+
Tensor.
|
|
1459
|
+
|
|
1460
|
+
Raises:
|
|
1461
|
+
TypeError: If `input` is not a Tensor.
|
|
1462
|
+
|
|
1463
|
+
Supported Platforms:
|
|
1464
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1465
|
+
|
|
1466
|
+
Examples:
|
|
1467
|
+
>>> import mindspore
|
|
1468
|
+
>>> import numpy as np
|
|
1469
|
+
>>> from mindspore import Tensor, mint
|
|
1470
|
+
>>> x = Tensor(np.array([[[2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]],
|
|
1471
|
+
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
|
|
1472
|
+
... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
|
|
1473
|
+
... mindspore.float32)
|
|
1474
|
+
>>> output = mint.mean(x)
|
|
1475
|
+
>>> print(output)
|
|
1476
|
+
5.0
|
|
1477
|
+
>>> print(output.shape)
|
|
1478
|
+
()
|
|
1479
|
+
|
|
1480
|
+
.. function:: mean(input, dim, keepdim=False, *, dtype=None) -> Tensor
|
|
1481
|
+
:noindex:
|
|
1482
|
+
|
|
1181
1483
|
Reduces all dimension of a tensor by averaging all elements in the dimension, by default.
|
|
1182
1484
|
And reduce a dimension of `input` along the specified `dim`. `keepdim`
|
|
1183
1485
|
determines whether the dimensions of the output and input are the same.
|
|
@@ -1188,9 +1490,8 @@ def mean(input, dim=None, keepdim=False, *, dtype=None):
|
|
|
1188
1490
|
Args:
|
|
1189
1491
|
input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
|
|
1190
1492
|
:math:`(N, *)` where :math:`*` means, any number of additional dimensions.
|
|
1191
|
-
dim (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce.
|
|
1192
|
-
|
|
1193
|
-
and the value range is [-r,r).
|
|
1493
|
+
dim (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce.
|
|
1494
|
+
Only constant value is allowed. Assume the rank of `input` is r, and the value range is [-r,r).
|
|
1194
1495
|
keepdim (bool): If ``True`` , keep these reduced dimensions and the length is 1.
|
|
1195
1496
|
If ``False`` , don't keep these dimensions. Default: ``False`` .
|
|
1196
1497
|
|
|
@@ -1200,8 +1501,6 @@ def mean(input, dim=None, keepdim=False, *, dtype=None):
|
|
|
1200
1501
|
Returns:
|
|
1201
1502
|
Tensor.
|
|
1202
1503
|
|
|
1203
|
-
- If `dim` is ``None`` , and `keepdim` is ``False`` ,
|
|
1204
|
-
the output is a 0-D tensor representing the product of all elements in the input tensor.
|
|
1205
1504
|
- If `dim` is int, set as 1, and `keepdim` is ``False`` ,
|
|
1206
1505
|
the shape of output is :math:`(input_0, input_2, ..., input_R)`.
|
|
1207
1506
|
- If `dim` is tuple(int) or list(int), set as (1, 2), and `keepdim` is ``False`` ,
|
|
@@ -1222,51 +1521,57 @@ def mean(input, dim=None, keepdim=False, *, dtype=None):
|
|
|
1222
1521
|
>>> import mindspore
|
|
1223
1522
|
>>> import numpy as np
|
|
1224
1523
|
>>> from mindspore import Tensor, mint
|
|
1225
|
-
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
|
|
1226
|
-
>>> output = mint.mean(x, 1, keepdim=True)
|
|
1227
|
-
>>> result = output.shape
|
|
1228
|
-
>>> print(result)
|
|
1229
|
-
(3, 1, 5, 6)
|
|
1230
|
-
>>> # case 1: Reduces a dimension by averaging all elements in the dimension.
|
|
1231
1524
|
>>> x = Tensor(np.array([[[2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]],
|
|
1232
1525
|
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
|
|
1233
1526
|
... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
|
|
1234
1527
|
... mindspore.float32)
|
|
1235
|
-
>>> output = mint.mean(x)
|
|
1236
|
-
>>> print(output)
|
|
1237
|
-
5.0
|
|
1238
|
-
>>> print(output.shape)
|
|
1239
|
-
()
|
|
1240
|
-
>>> # case 2: Reduces a dimension along the axis 0
|
|
1241
1528
|
>>> output = mint.mean(x, 0, True)
|
|
1242
1529
|
>>> print(output)
|
|
1243
1530
|
[[[4. 4. 4. 4. 4. 4.]
|
|
1244
1531
|
[5. 5. 5. 5. 5. 5.]
|
|
1245
1532
|
[6. 6. 6. 6. 6. 6.]]]
|
|
1246
|
-
>>> # case 3: Reduces a dimension along the axis 1
|
|
1247
|
-
>>> output = mint.mean(x, 1, True)
|
|
1248
|
-
>>> print(output)
|
|
1249
|
-
[[[2. 2. 2. 2. 2. 2.]]
|
|
1250
|
-
[[5. 5. 5. 5. 5. 5.]]
|
|
1251
|
-
[[8. 8. 8. 8. 8. 8.]]]
|
|
1252
|
-
>>> # case 4: Reduces a dimension along the axis 2
|
|
1253
|
-
>>> output = mint.mean(x, 2, True)
|
|
1254
|
-
>>> print(output)
|
|
1255
|
-
[[[ 2.]
|
|
1256
|
-
[ 2.]
|
|
1257
|
-
[ 2.]]
|
|
1258
|
-
[[ 4.]
|
|
1259
|
-
[ 5.]
|
|
1260
|
-
[ 6.]]
|
|
1261
|
-
[[ 6.]
|
|
1262
|
-
[ 8.]
|
|
1263
|
-
[10.]]]
|
|
1264
1533
|
"""
|
|
1265
|
-
return ops.
|
|
1534
|
+
return ops.auto_generate.mean_ext(input, dim, keepdim, dtype)
|
|
1266
1535
|
|
|
1267
1536
|
|
|
1268
1537
|
def prod(input, dim=None, keepdim=False, *, dtype=None):
|
|
1269
1538
|
r"""
|
|
1539
|
+
prod(input, *, dtype=None) -> Tensor
|
|
1540
|
+
|
|
1541
|
+
Multiplying all elements of input.
|
|
1542
|
+
|
|
1543
|
+
Args:
|
|
1544
|
+
input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
|
|
1545
|
+
:math:`(N, *)` where :math:`*` means, any number of additional dimensions.
|
|
1546
|
+
|
|
1547
|
+
Keyword Args:
|
|
1548
|
+
dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None`` .
|
|
1549
|
+
|
|
1550
|
+
Returns:
|
|
1551
|
+
Tensor.
|
|
1552
|
+
|
|
1553
|
+
Raises:
|
|
1554
|
+
TypeError: If `input` is not a Tensor.
|
|
1555
|
+
|
|
1556
|
+
Supported Platforms:
|
|
1557
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1558
|
+
|
|
1559
|
+
Examples:
|
|
1560
|
+
>>> import mindspore
|
|
1561
|
+
>>> import numpy as np
|
|
1562
|
+
>>> from mindspore import Tensor, mint
|
|
1563
|
+
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
|
|
1564
|
+
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
|
|
1565
|
+
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
|
|
1566
|
+
>>> output = mint.prod(x)
|
|
1567
|
+
>>> print(output)
|
|
1568
|
+
2.2833798e+33
|
|
1569
|
+
>>> print(output.shape)
|
|
1570
|
+
()
|
|
1571
|
+
|
|
1572
|
+
.. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
|
|
1573
|
+
:noindex:
|
|
1574
|
+
|
|
1270
1575
|
Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can
|
|
1271
1576
|
reduce a dimension of `input` along the `dim`. Determine whether the dimensions of the output and input are the
|
|
1272
1577
|
same by controlling `keepdim`.
|
|
@@ -1274,7 +1579,7 @@ def prod(input, dim=None, keepdim=False, *, dtype=None):
|
|
|
1274
1579
|
Args:
|
|
1275
1580
|
input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
|
|
1276
1581
|
:math:`(N, *)` where :math:`*` means, any number of additional dimensions.
|
|
1277
|
-
dim (int): The dimensions to reduce.
|
|
1582
|
+
dim (int): The dimensions to reduce. Only constant value is allowed.
|
|
1278
1583
|
Assume the rank of `x` is r, and the value range is [-r,r).
|
|
1279
1584
|
keepdim (bool): If ``True`` , keep these reduced dimensions and the length is 1.
|
|
1280
1585
|
If ``False`` , don't keep these dimensions. Default: ``False`` .
|
|
@@ -1285,8 +1590,6 @@ def prod(input, dim=None, keepdim=False, *, dtype=None):
|
|
|
1285
1590
|
Returns:
|
|
1286
1591
|
Tensor.
|
|
1287
1592
|
|
|
1288
|
-
- If `dim` is ``None`` , and `keepdim` is ``False`` ,
|
|
1289
|
-
the output is a 0-D tensor representing the product of all elements in the input tensor.
|
|
1290
1593
|
- If `dim` is int, set as 1, and `keepdim` is ``False`` ,
|
|
1291
1594
|
the shape of output is :math:`(input_0, input_2, ..., input_R)`.
|
|
1292
1595
|
|
|
@@ -1303,46 +1606,112 @@ def prod(input, dim=None, keepdim=False, *, dtype=None):
|
|
|
1303
1606
|
>>> import mindspore
|
|
1304
1607
|
>>> import numpy as np
|
|
1305
1608
|
>>> from mindspore import Tensor, mint
|
|
1306
|
-
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
|
|
1307
|
-
>>> output = mint.prod(x, 1, keepdim=True)
|
|
1308
|
-
>>> result = output.shape
|
|
1309
|
-
>>> print(result)
|
|
1310
|
-
(3, 1, 5, 6)
|
|
1311
|
-
         >>> # case 1: Reduces a dimension by multiplying all elements in the dimension.
         >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
         ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
         ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
-        >>> output = mint.prod(x)
-        >>> print(output)
-        2.2833798e+33
-        >>> print(output.shape)
-        ()
-        >>> # case 2: Reduces a dimension along axis 0.
         >>> output = mint.prod(x, 0, True)
         >>> print(output)
         [[[ 28. 28. 28. 28. 28. 28.]
         [ 80. 80. 80. 80. 80. 80.]
         [162. 162. 162. 162. 162. 162.]]]
-        >>> # case 3: Reduces a dimension along axis 1.
-        >>> output = mint.prod(x, 1, True)
-        >>> print(output)
-        [[[ 6. 6. 6. 6. 6. 6.]]
-        [[120. 120. 120. 120. 120. 120.]]
-        [[504. 504. 504. 504. 504. 504.]]]
-        >>> # case 4: Reduces a dimension along axis 2.
-        >>> output = mint.prod(x, 2, True)
-        >>> print(output)
-        [[[1.00000e+00]
-        [6.40000e+01]
-        [7.29000e+02]]
-        [[4.09600e+03]
-        [1.56250e+04]
-        [4.66560e+04]]
-        [[1.17649e+05]
-        [2.62144e+05]
-        [5.31441e+05]]]
     """
-    return ops.auto_generate.prod_ext(input,
+    return ops.auto_generate.prod_ext(input, dim, keepdim, dtype)
+
+
+def sum(input, dim=None, keepdim=False, *, dtype=None):
+    r'''
+    sum(input, *, dtype=None) -> Tensor
+
+    Calculate sum of all elements in Tensor.
+
+    Args:
+        input (Tensor): The input tensor.
+
+    Keyword Args:
+        dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None`` .
+
+    Returns:
+        A Tensor, sum of all elements in `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> from mindspore import dtype as mstype
+        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
+        ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
+        ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mstype.float32)
+        >>> out = mint.sum(x)
+        >>> print(out)
+        270.0
+
+    .. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
+        :noindex:
+
+    Calculate sum of Tensor elements over a given dim.
+
+    Note:
+        The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
+
+    Args:
+        input (Tensor): The input tensor.
+        dim (Union[int, tuple(int), list(int), Tensor]): Dimensions along which a sum is performed.
+            If the `dim` is a tuple or list of ints, a sum is performed on all the dimensions specified in the tuple.
+            Must be in the range :math:`[-input.ndim, input.ndim)` .
+        keepdim (bool): Whether the output tensor has `dim` retained or not.
+            If ``True`` , keep these reduced dimensions and the length is 1.
+            If ``False`` , don't keep these dimensions. Default: ``False`` .
+
+    Keyword Args:
+        dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None`` .
+
+    Returns:
+        A Tensor, sum of elements over a given `dim` in `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `dim` is not an int, tuple(int), list(int) or Tensor.
+        ValueError: If `dim` is not in the range :math:`[-input.ndim, input.ndim)` .
+        TypeError: If `keepdim` is not a bool.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> from mindspore import dtype as mstype
+        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
+        ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
+        ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mstype.float32)
+        >>> out = mint.sum(x)
+        >>> print(out)
+        270.0
+        >>> out = mint.sum(x, dim=2)
+        >>> print(out)
+        [[ 6. 12. 18.]
+        [24. 30. 36.]
+        [42. 48. 54.]]
+        >>> out = mint.sum(x, dim=2, keepdim=True)
+        >>> print(out)
+        [[[ 6.]
+        [12.]
+        [18.]]
+        [[24.]
+        [30.]
+        [36.]]
+        [[42.]
+        [48.]
+        [54.]]]
+    '''
+    return ops.auto_generate.sum_ext(input, dim, keepdim, dtype)
 
 
 def ones(size, *, dtype=None):
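A minimal usage sketch of the two `sum` overloads added above (and the reworked `prod` forwarding), assuming a mindspore 2.5.0 install with the `mint` namespace; the expected values follow the docstrings rather than an independent run:

```python
import numpy as np
import mindspore
from mindspore import Tensor, mint

x = Tensor(np.arange(1, 7).reshape(2, 3), mindspore.float32)  # [[1 2 3], [4 5 6]]
print(mint.sum(x))                       # 21.0, sum of all elements
print(mint.sum(x, dim=1))                # [ 6. 15.], row sums
print(mint.sum(x, dim=1, keepdim=True))  # shape (2, 1), reduced dim kept
print(mint.prod(x, 0, True))             # [[ 4. 10. 18.]], prod now forwards dim/keepdim/dtype
```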
@@ -1365,7 +1734,7 @@ def ones(size, *, dtype=None):
         Tensor, whose dtype and size are defined by input.
 
     Raises:
-        TypeError: If `size` is neither an int nor
+        TypeError: If `size` is neither an int nor a tuple/list/Tensor of int.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1429,7 +1798,7 @@ def split(tensor, split_size_or_sections, dim=0):
             if `tensor.shape[dim]` is not divisible by `split_size_or_sections`.
             If `split_size_or_sections` is a list type, then `tensor` will be split into len(split_size_or_sections)
             chunks with sizes `split_size_or_sections` along the given `dim`.
-        dim (int): The dim along which to split. Default: ``0`` .
+        dim (int, optional): The dim along which to split. Default: ``0`` .
 
     Returns:
         A tuple of sub-tensors.
@@ -1437,10 +1806,10 @@ def split(tensor, split_size_or_sections, dim=0):
     Raises:
         TypeError: If argument `tensor` is not Tensor.
         TypeError: If argument `dim` is not int.
-        ValueError: If argument `dim` is out of range of
+        ValueError: If argument `dim` is out of range of [-tensor.ndim, tensor.ndim).
         TypeError: If each element in `split_size_or_sections` is not integer.
         TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
-        ValueError: The sum of `split_size_or_sections` is not equal to
+        ValueError: The sum of `split_size_or_sections` is not equal to tensor.shape[dim].
 
     Supported Platforms:
         ``Ascend``
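A short sketch of the constraints the tightened `split` Raises text spells out; this assumes mindspore 2.5.0 and, per the docstring, an Ascend device for execution:

```python
import numpy as np
import mindspore
from mindspore import Tensor, mint

t = Tensor(np.arange(6), mindspore.int64)
chunks = mint.split(t, 2)      # three sub-tensors of size 2
parts = mint.split(t, [2, 4])  # list sizes must sum to t.shape[0] == 6, else ValueError
print(len(chunks), len(parts))  # 3 2
```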
@@ -1490,6 +1859,56 @@ def sqrt(input):
     return ops.auto_generate.sqrt(input)
 
 
+def squeeze(input, dim):
+    r"""
+    Return the Tensor after deleting the dimension of size 1 in the specified `dim`.
+
+    If :math:`dim=()`, it will remove all the dimensions of size 1.
+    If `dim` is specified, it will remove the dimensions of size 1 in the given `dim`.
+    For example, if the dimension is not specified :math:`dim=()`, input shape is (A, 1, B, C, 1, D),
+    then the shape of the output Tensor is (A, B, C, D). If the dimension is specified, the squeeze operation
+    is only performed in the specified dimension. If input shape is (A, 1, B), when :math:`dim=0` or :math:`dim=2`,
+    the input tensor is not changed, while when :math:`dim=1`, the input tensor shape is changed to (A, B).
+
+    Note:
+        - Please note that in dynamic graph mode, the output Tensor will share data with the input Tensor,
+          and there is no Tensor data copy process.
+        - The dimension index starts at 0 and must be in the range `[-input.ndim, input.ndim]`.
+        - In GE mode, only removing dimensions of size 1 from the shape of the input tensor is supported.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): Used to calculate Squeeze. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
+        dim (Union[int, tuple(int)]): Specifies the dimension indexes of shape to be removed, which will
+            remove all the dimensions of size 1 in the given dim parameter. If specified, it must be int32 or int64.
+
+    Returns:
+        Tensor, the shape of tensor is :math:`(x_1, x_2, ..., x_S)`.
+
+    Raises:
+        TypeError: If `input` is not a tensor.
+        TypeError: If `dim` is not an int or tuple.
+        TypeError: If `dim` is a tuple whose elements are not all int.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> input = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
+        >>> output = mint.squeeze(input, 2)
+        >>> print(output)
+        [[1. 1.]
+        [1. 1.]
+        [1. 1.]]
+    """
+    return squeeze_impl(input, dim)
+
+
 def sub(input, other, *, alpha=1):
     r"""
     Subtracts scaled other value from input Tensor.
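A minimal sketch of the newly added `mint.squeeze`, assuming mindspore 2.5.0; it mirrors the docstring above, where only size-1 dimensions at the given positions are removed:

```python
import numpy as np
import mindspore
from mindspore import Tensor, mint

x = Tensor(np.ones((3, 1, 2, 1)), mindspore.float32)
print(mint.squeeze(x, 1).shape)       # (3, 2, 1): only dim 1 removed
print(mint.squeeze(x, (1, 3)).shape)  # (3, 2): tuple form removes both size-1 dims
```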
@@ -1515,7 +1934,7 @@ def sub(input, other, *, alpha=1):
         `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
 
     Keyword Args:
-        alpha (number.Number): A scaling factor applied to `other`, default 1
+        alpha (number.Number, optional): A scaling factor applied to `other`, default ``1``.
 
     Returns:
         Tensor with a shape that is the same as the broadcasted shape of the input `input` and `other`,
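A quick sketch of the `alpha` keyword that the hunk above re-documents as optional, assuming mindspore 2.5.0:

```python
import mindspore
from mindspore import Tensor, mint

a = Tensor([10.0, 20.0, 30.0], mindspore.float32)
b = Tensor([1.0, 2.0, 3.0], mindspore.float32)
print(mint.sub(a, b))           # a - 1 * b: [ 9. 18. 27.]
print(mint.sub(a, b, alpha=2))  # a - 2 * b: [ 8. 16. 24.]
```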
@@ -1550,7 +1969,13 @@ def sub(input, other, *, alpha=1):
 
 def swapaxes(input, axis0, axis1):
     '''
-
+    Alias for :func:`mindspore.mint.transpose` . The `input` corresponds to the `input` in the reference interface,
+    and the parameters `axis0` and `axis1` correspond to `dim0` and `dim1` in the reference interface respectively.
+
+    For more details, see :func:`mindspore.mint.transpose` .
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
 
     Examples:
         >>> import numpy as np
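A sketch of the alias relationship the new docstring text describes: `mint.swapaxes(input, axis0, axis1)` is expected to match `mint.transpose(input, dim0, dim1)`; assumes mindspore 2.5.0:

```python
import numpy as np
import mindspore
from mindspore import Tensor, mint

x = Tensor(np.zeros((2, 3, 4)), mindspore.float32)
print(mint.swapaxes(x, 0, 2).shape)   # (4, 3, 2)
print(mint.transpose(x, 0, 2).shape)  # (4, 3, 2), via the reference interface
```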
@@ -1564,6 +1989,67 @@ def swapaxes(input, axis0, axis1):
     return transpose(input, axis0, axis1)
 
 
+def unique_consecutive(input, return_inverse=False, return_counts=False, dim=None):
+    r"""
+    Returns the elements that are unique in each consecutive group of equivalent elements in the input tensor.
+
+    When `return_inverse=True` , it returns a tensor containing the indices of the elements in the input tensor
+    within the output tensor.
+
+    When `return_counts=True` , it returns a tensor representing the number of occurrences of each output element
+    in the input.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input tensor.
+        return_inverse (bool, optional): Whether to return the index of where the element in the original input
+            maps to the position in the output. Default: ``False`` .
+        return_counts (bool, optional): Whether to return the counts of each unique element. Default: ``False`` .
+        dim (int, optional): The dimension to apply unique. If ``None`` , the unique of the flattened input is
+            returned. If specified, it must be int32 or int64. Default: ``None`` .
+
+    Returns:
+        A tensor or a tuple of tensors containing tensor objects (`output`, `inverse_indices`, `counts`).
+
+        - **output** (Tensor): the output tensor has the same type as `input` and represents the output list of
+          unique scalar elements.
+        - **inverse_indices** (Tensor, optional): if `return_inverse` is True, there will be an additional returned
+          tensor `inverse_indices`. `inverse_indices` has the same shape as `input` and represents the index of where
+          the element in the original input maps to the position in the output.
+        - **counts** (Tensor, optional): if `return_counts` is True, there will be an additional returned tensor
+          `counts`. `counts` has the same shape as `output` or `output.shape[dim]` if dim was specified and represents
+          the number of occurrences for each unique value or tensor.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If dtype of `input` is not supported.
+        TypeError: If `return_inverse` is not a bool.
+        TypeError: If `return_counts` is not a bool.
+        TypeError: If `dim` is not an int.
+        ValueError: If `dim` is not in the range of :math:`[-ndim, ndim-1]`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> from mindspore import dtype as mstype
+        >>> x = Tensor(np.array([1, 1, 2, 2, 3, 1, 1, 2]), mstype.int64)
+        >>> output, inverse_indices, counts = mint.unique_consecutive(x, True, True, None)
+        >>> print(output)
+        [1 2 3 1 2]
+        >>> print(inverse_indices)
+        [0 0 1 1 2 3 3 4]
+        >>> print(counts)
+        [2 2 1 2 1]
+    """
+
+    return ops.function.array_func.unique_consecutive(input, return_inverse, return_counts, dim)
+
+
 def zeros(size, *, dtype=None):
     """
     Creates a tensor filled with 0 with shape described by `size` and fills it with value 0 in type of `dtype`.
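A keyword-argument variant of the `unique_consecutive` docstring example above, assuming mindspore 2.5.0; the printed values are the ones the docstring documents:

```python
import numpy as np
import mindspore
from mindspore import Tensor, mint

x = Tensor(np.array([1, 1, 2, 2, 3, 1, 1, 2]), mindspore.int64)
out, inv, cnt = mint.unique_consecutive(x, return_inverse=True, return_counts=True)
print(out)  # [1 2 3 1 2], only consecutive duplicates collapse
print(inv)  # [0 0 1 1 2 3 3 4]
print(cnt)  # [2 2 1 2 1]
```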
@@ -1581,7 +2067,7 @@ def zeros(size, *, dtype=None):
         Tensor, whose dtype and size are defined by input.
 
     Raises:
-        TypeError: If `size` is neither an int nor
+        TypeError: If `size` is neither an int nor a tuple/list/Tensor of int.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
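A sketch of the accepted `size` forms that the completed Raises text for `mint.zeros` (and `mint.ones` above) enumerates; the Tensor-of-int form is inferred from that error message and assumes mindspore 2.5.0:

```python
import mindspore
from mindspore import Tensor, mint

print(mint.zeros(3).shape)              # int size -> (3,)
print(mint.zeros((2, 3)).shape)         # tuple size -> (2, 3)
size = Tensor([2, 3], mindspore.int64)  # Tensor of int, per the Raises text
print(mint.ones(size, dtype=mindspore.float16).dtype)
```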
@@ -1638,9 +2124,9 @@ def scatter(input, dim, index, src):
 
     Raises:
         TypeError: If `index` is neither int32 nor int64.
-        ValueError: If rank of any of `input` , `index` and `src` less than 1.
+        ValueError: If rank of any of `input` , `index` and `src` is less than 1.
         ValueError: If the rank of `src` is not equal to the rank of `input` .
-        TypeError: If the data
+        TypeError: If the data types of `input` and `src` are different.
         RuntimeError: If `index` has negative elements.
 
     Supported Platforms:
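A sketch of the `scatter` constraints restated in the hunk above (int32/int64 indices, `src` dtype matching `input`), assuming mindspore 2.5.0:

```python
import numpy as np
import mindspore
from mindspore import Tensor, mint

x = Tensor(np.zeros((3, 3)), mindspore.float32)
index = Tensor(np.array([[0, 1], [1, 2]]), mindspore.int64)  # must be int32/int64
src = Tensor(np.ones((2, 2)), mindspore.float32)             # dtype must match x
out = mint.scatter(x, 1, index, src)  # for dim=1: out[i][index[i][j]] = src[i][j]
print(out)
```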
@@ -1680,15 +2166,71 @@ def scatter(input, dim, index, src):
     return ops.function.array_func.scatter(input, dim, index, src)
 
 
+def cdist(x1, x2, p=2.0, compute_mode='use_mm_for_euclid_dist_if_necessary'):
+    """
+    Computes p-norm distance between each pair of row vectors of two input Tensors.
+
+    .. warning::
+        This is an experimental optimizer API that is subject to change.
+
+    Note:
+        On Ascend, the supported dtypes are float16 and float32.
+        On CPU, the supported dtypes are float16 and float32.
+        On GPU, the supported dtypes are float32 and float64.
+
+    Args:
+        x1 (Tensor): Input tensor of shape :math:`(B, P, M)`.
+            Letter :math:`B` represents 0 or positive int number.
+            When :math:`B` is equal to 0, it means this dimension can be ignored,
+            i.e. shape of the tensor is :math:`(P, M)`.
+        x2 (Tensor): Input tensor of shape :math:`(B, R, M)`, has the same dtype as `x1`.
+        p (float, optional): P value for the p-norm distance to calculate between each
+            vector pair, P >= 0. Default: ``2.0`` .
+        compute_mode (string, optional): Specify the compute mode. Setting this parameter currently has no effect.
+            Default: ``'use_mm_for_euclid_dist_if_necessary'`` .
+
+    Returns:
+        Tensor, p-norm distance, has the same dtype as `x1`, its shape is :math:`(B, P, R)`.
+
+    Raises:
+        TypeError: If `x1` or `x2` is not Tensor.
+        TypeError: If dtype of `x1` or `x2` is not listed in the "Note" above.
+        TypeError: If `p` is not float32.
+        ValueError: If `p` is negative.
+        ValueError: If dimension of `x1` is not the same as `x2`.
+        ValueError: If dimension of `x1` or `x2` is neither 2 nor 3.
+        ValueError: If the batch dim of `x1` and `x2` can not broadcast.
+        ValueError: If the number of columns of `x1` is not the same as that of `x2`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
+        >>> y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
+        >>> output = ops.cdist(x, y, 2.0)
+        >>> print(output)
+        [[[2.8284273 2.8284273]
+          [1.4142137 1.4142137]]]
+    """
+    return cdist_(x1, x2, p)
+
+
 __all__ = [
     'conv2d',
     'full',
     'ones_like',
     'zeros_like',
     'abs',
+    'clone',
     'erf',
     'where',
     'isclose',
+    'empty',
+    'empty_like',
+    'full_like',
     # 1
     'div',
     'divide',
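A sketch of calling the newly added `cdist` through the `mint` namespace it is exported from in this hunk, assuming mindspore 2.5.0; values follow the docstring example, and note that `compute_mode` is documented as a no-op:

```python
import numpy as np
from mindspore import Tensor, mint

x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
print(mint.cdist(x, y, p=2.0))  # pairwise Euclidean distances, shape (1, 2, 2)
```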
@@ -1699,6 +2241,7 @@ __all__ = [
     # 3
     'clamp',
     'xlogy',
+    'fmod',
     # 4
     'sinc',
     'sinh',
@@ -1712,10 +2255,14 @@ __all__ = [
     # 8
     'transpose',
     'swapaxes',
+    "batch_norm_elemt",
+    "batch_norm_gather_stats_with_counts",
+    "batch_norm_stats",
     # 9
-
+    'squeeze',
     # 10
     'ne',
+    'not_equal',
     # 11
     'unsqueeze',
     # 12
@@ -1723,7 +2270,7 @@ __all__ = [
     # 13
     "flip",
     # 14
-
+    'mv',
     # 15
     'flatten',
     # 16
@@ -1740,11 +2287,11 @@ __all__ = [
     # 21
     'mul',
     # 22
-
+    'cumprod',
     # 23
-
+    'exp2',
     # 24
-
+    'cdist',
     # 25
     'greater',
     'gt',
@@ -1828,7 +2375,7 @@ __all__ = [
     # 63
     'minimum',
     # 64
-
+    'ravel',
     # 65
     'logical_and',
     # 66
@@ -1872,7 +2419,6 @@ __all__ = [
     # 83
     'narrow',
     # 84
-
     'masked_select',
 
     # 86
@@ -1883,13 +2429,13 @@ __all__ = [
     # 88
     'chunk',
     # 89
-
+    'argsort',
     # 90
-
+    'isinf',
     # 91
 
     # 92
-
+    'polar',
     # 93
 
     # 94
@@ -1925,7 +2471,7 @@ __all__ = [
     # 109
     'argmin',
     # 110
-
+    'softmax',
     # 111
 
     # 112
@@ -1945,11 +2491,14 @@ __all__ = [
     # 119
 
     # 120
-
+    'isneginf',
     # 121
 
     # 122
 
+    # 123
+    'var',
+
     # 151
     'acos',
     'arccos',
@@ -1994,7 +2543,8 @@ __all__ = [
     'erfc',
     # 208
     'eye',
-
+    # 239
+    'lerp',
     # 256
     'median',
     'randperm',
@@ -2022,6 +2572,7 @@ __all__ = [
     'tan',
     # 303
     'trace',
+    'gcd',
     'reshape',
     'outer',
     # 304
@@ -2030,41 +2581,96 @@ __all__ = [
     # 305
     'triu',
 
+    # 308
+    'mm',
+
+    # 382
+    'dstack',
+
+    # 406
+    'allclose',
+
+    # 501
+    'addbmm',
+
+    # 502
+    'addmm',
+
+    # 505
+    'addmv',
+
+    # 510
+    'amax',
+
+    # 511
+    'amin',
+
+    # 520
+    'bincount',
+
+    # 521
+    'bitwise_not',
+
+    # 526
+    'dot',
+
+    # 533
+    'frac',
+
     # 538
     'histc',
 
+    # 552
+    'log10',
+
     # 553
     'logaddexp',
 
+    # 557
+    'logsumexp',
+
+    # 582
+    'std_mean',
+
+    # 588
+    'var_mean',
+
+    # 586
+    'unique_consecutive',
+
     # 610
     'nan_to_num',
 
+    # 613
+    'nansum',
+
+    # 664
+    'meshgrid',
+
     # 695
     'count_nonzero',
-]
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-setattr(tensor_operator_registry_for_mint, 'sum', sum)
+    # 697
+    'float_power',
+
+    # 708
+    'std',
+
+    # 887
+    'log2',
+
+    # 889
+    'isnan',
+
+    # 1007
+    't',
+
+    # 1023
+    'unbind',
+
+    # 1100
+    'diff',
+]
 
 __all__.extend(functional.__all__)
 __all__.extend(nn.__all__)
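The final hunk extends `__all__` with the new exports and drops the trailing `setattr(tensor_operator_registry_for_mint, 'sum', sum)` registration, which appears superseded by the module-level `sum` defined earlier in this diff. A quick smoke test of a few of the newly exported names, assuming mindspore 2.5.0:

```python
from mindspore import mint

# Each of these names is added to __all__ in this release.
for name in ('squeeze', 'unique_consecutive', 'cdist', 'var', 'isnan', 'diff'):
    assert hasattr(mint, name), f'{name} missing from mindspore.mint'
print('newly exported names resolved')
```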