mindspore 2.4.1-cp39-none-any.whl → 2.5.0-cp39-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +39 -0
- mindspore/__init__.py +8 -3
- mindspore/_akg/akg/composite/build_module.py +6 -2
- mindspore/_akg/akg/utils/kernel_exec.py +2 -2
- mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +0 -5
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/compile_config.py +64 -0
- mindspore/_extends/parse/deprecated/__init__.py +0 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +375 -0
- mindspore/_extends/parse/parser.py +23 -5
- mindspore/_extends/parse/standard_method.py +123 -27
- mindspore/_extends/pijit/pijit_func_white_list.py +1 -1
- mindspore/amp.py +7 -1
- mindspore/boost/boost_cell_wrapper.py +136 -41
- mindspore/common/__init__.py +3 -1
- mindspore/common/_register_for_tensor.py +0 -1
- mindspore/common/_stub_tensor.py +25 -4
- mindspore/common/_tensor_cpp_method.py +17 -0
- mindspore/common/_tensor_docs.py +6132 -0
- mindspore/common/api.py +99 -25
- mindspore/common/dtype.py +34 -34
- mindspore/common/dump.py +2 -1
- mindspore/common/file_system.py +8 -1
- mindspore/common/generator.py +2 -0
- mindspore/common/hook_handle.py +3 -1
- mindspore/common/initializer.py +3 -4
- mindspore/common/lazy_inline.py +8 -2
- mindspore/common/mindir_util.py +10 -2
- mindspore/common/parameter.py +30 -27
- mindspore/common/tensor.py +713 -1337
- mindspore/communication/__init__.py +1 -1
- mindspore/communication/_comm_helper.py +10 -0
- mindspore/communication/comm_func.py +215 -173
- mindspore/communication/management.py +23 -20
- mindspore/context.py +292 -193
- mindspore/dataset/__init__.py +23 -19
- mindspore/dataset/callback/ds_callback.py +2 -1
- mindspore/dataset/core/config.py +84 -3
- mindspore/dataset/engine/cache_admin.py +3 -3
- mindspore/dataset/engine/cache_client.py +5 -4
- mindspore/dataset/engine/datasets.py +192 -149
- mindspore/dataset/engine/datasets_audio.py +14 -0
- mindspore/dataset/engine/datasets_standard_format.py +28 -11
- mindspore/dataset/engine/datasets_text.py +38 -1
- mindspore/dataset/engine/datasets_user_defined.py +125 -65
- mindspore/dataset/engine/datasets_vision.py +81 -8
- mindspore/dataset/engine/iterators.py +281 -63
- mindspore/dataset/engine/obs/util.py +8 -0
- mindspore/dataset/engine/queue.py +40 -0
- mindspore/dataset/engine/samplers.py +26 -2
- mindspore/dataset/engine/serializer_deserializer.py +1 -1
- mindspore/dataset/engine/validators.py +43 -11
- mindspore/dataset/transforms/py_transforms_util.py +17 -0
- mindspore/dataset/transforms/transforms.py +29 -12
- mindspore/dataset/vision/validators.py +1 -2
- mindspore/device_context/__init__.py +21 -0
- mindspore/device_context/ascend/__init__.py +25 -0
- mindspore/device_context/ascend/device.py +72 -0
- mindspore/device_context/ascend/op_debug.py +94 -0
- mindspore/device_context/ascend/op_precision.py +193 -0
- mindspore/device_context/ascend/op_tuning.py +127 -0
- mindspore/device_context/cpu/__init__.py +25 -0
- mindspore/device_context/cpu/device.py +62 -0
- mindspore/device_context/cpu/op_tuning.py +43 -0
- mindspore/device_context/gpu/__init__.py +21 -0
- mindspore/device_context/gpu/device.py +70 -0
- mindspore/device_context/gpu/op_precision.py +67 -0
- mindspore/device_context/gpu/op_tuning.py +175 -0
- mindspore/device_manager.py +134 -0
- mindspore/experimental/llm_boost/__init__.py +3 -2
- mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +239 -64
- mindspore/experimental/llm_boost/atb/llama_boost.py +52 -30
- mindspore/experimental/llm_boost/atb/qwen_boost.py +47 -24
- mindspore/experimental/llm_boost/register.py +1 -0
- mindspore/experimental/optim/adadelta.py +26 -22
- mindspore/experimental/optim/adam.py +3 -0
- mindspore/experimental/optim/lr_scheduler.py +33 -24
- mindspore/experimental/optim/radam.py +33 -30
- mindspore/hal/device.py +28 -0
- mindspore/hal/event.py +17 -0
- mindspore/hal/memory.py +94 -3
- mindspore/hal/stream.py +91 -6
- mindspore/include/api/context.h +1 -2
- mindspore/include/dataset/constants.h +2 -2
- mindspore/lib/libavcodec.so.59 +0 -0
- mindspore/lib/libavdevice.so.59 +0 -0
- mindspore/lib/libavfilter.so.8 +0 -0
- mindspore/lib/libavformat.so.59 +0 -0
- mindspore/lib/libavutil.so.57 +0 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_ops.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/libswresample.so.4 +0 -0
- mindspore/lib/libswscale.so.6 +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910_93/aic-ascend910_93-ops-info.json +2048 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_proto/libop_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/framework/npu_supported_ops.json +10 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +182 -0
- mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl}/dynamic/decoder_kv_cache.py +51 -16
- mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl}/dynamic/prompt_kv_cache.py +51 -16
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.json +158 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.json +158 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.json +158 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.json +158 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.json +158 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.json +158 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.json +158 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.json +158 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.json +167 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.json +167 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.json +167 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.json +167 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.json +167 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.json +167 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.json +167 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.json +167 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/config/ascend910/binary_info_config.json +302 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/config/ascend910/decoder_kv_cache.json +892 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/config/ascend910/prompt_kv_cache.json +892 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_proto/inc/op_proto.h +33 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910/version.info +1 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/framework/npu_supported_ops.json +14 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_api/include/aclnn_decoder_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_api/include/aclnn_prompt_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910b}/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl}/dynamic/all_finite.py +51 -16
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/decoder_kv_cache.cpp +192 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/decoder_kv_cache.py +215 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/prompt_kv_cache.cpp +274 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/prompt_kv_cache.py +215 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.json +80 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.json +80 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.json +80 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.json +158 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.json +158 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.json +158 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.json +158 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.json +158 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.json +158 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.json +158 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.json +158 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.json +167 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.json +167 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.json +167 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.json +167 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.json +167 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.json +167 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.json +167 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.json +167 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.json +78 -0
- mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o → custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.o} +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.json +78 -0
- mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o → custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.o} +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.json +78 -0
- mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o → custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.o} +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910_93/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.json +78 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.json +78 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.json +78 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.json +156 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.json +165 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend310p/all_finite.json +139 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend310p/binary_info_config.json +361 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend310p/decoder_kv_cache.json +892 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend310p/prompt_kv_cache.json +892 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/all_finite.json +139 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/binary_info_config.json +361 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/decoder_kv_cache.json +892 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910_93/prompt_kv_cache.json +892 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +139 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +361 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910b/decoder_kv_cache.json +892 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910b/prompt_kv_cache.json +892 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_910b/version.info +1 -0
- mindspore/lib/plugin/ascend/custom_compiler/setup.py +1 -1
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_internal_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libms_ascend_native_boost.so +0 -0
- mindspore/lib/plugin/ascend/libms_atb_boost.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +957 -955
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/liblcal_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{acme/include/base_type.h → base_type.h} +25 -20
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{cast/cast_tiling.h → internal.h} +6 -4
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_op.h +114 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/boost_kernel.h +70 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/llama_impl.h +85 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/model_interface.h +52 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/llm/tensor.h +81 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_creator.h +123 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +155 -110
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{acme/include/tiling_info.h → tiling_info.h} +12 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tiling_utils.h +178 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layer_norm_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_quant_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_310p_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcompare_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libllama_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_optiling.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmulti_weight_matmul_kernel_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_op.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_f16_nz/internal_pp_matmul_f16_nz.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_f16_nz/internal_pp_matmul_f16_nz_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_i8_nz_compress/internal_pp_matmul_i8_nz_compress.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_i8_nz_compress/internal_pp_matmul_i8_nz_compress_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_int8_nz/internal_pp_matmul_int8_nz.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/object_kernels/internal_pp_matmul_int8_nz/internal_pp_matmul_int8_nz_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libadd_rms_norm_quant_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libapply_rotary_pos_emb_310p_impl.so → op_kernels/ascend310p/so_kernels/libapply_rotary_pos_emb_310p_ascend310p.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libcast_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libcompare_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libgelu_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libmatmul_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend310p/so_kernels/libreshape_and_cache_nz_ascend310p.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_4b60f88cdc28b25a36bad2d8b0a88092.json +163 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_4b60f88cdc28b25a36bad2d8b0a88092.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_cde61da2bd6fededcb1ba310a6ad16ee.json +163 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/hphol_kernels/add_rms_norm_dynamic_quant/AddRmsNormDynamicQuant_cde61da2bd6fededcb1ba310a6ad16ee.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/flash_attention_score/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_matmul_postfusion_mix/internal_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/internal_multi_weight_matmul_postfusion_mix/internal_multi_weight_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_bf16_bf16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_bf16_fp16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_bf16_fp32.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_fp16_bf16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_fp16_fp16.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/{matmul_add_rmsnorm → object_kernels/matmul_add_rmsnorm}/matmul_add_rmsnorm_fp16_fp32.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/object_kernels/paged_attention_v2/paged_attention_v2_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libadd_layer_norm_impl.so → op_kernels/ascend910b/so_kernels/libadd_layer_norm_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libadd_rms_norm_impl.so → op_kernels/ascend910b/so_kernels/libadd_rms_norm_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/so_kernels/libadd_rms_norm_quant_ascend910b.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libapply_rotary_pos_emb_impl.so → op_kernels/ascend910b/so_kernels/libapply_rotary_pos_emb_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libcast_impl.so → op_kernels/ascend910b/so_kernels/libcast_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libnot_equal_impl.so → op_kernels/ascend910b/so_kernels/libcompare_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libgelu_impl.so → op_kernels/ascend910b/so_kernels/libgelu_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/so_kernels/libllama_ascend910b.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libmatmul_impl.so → op_kernels/ascend910b/so_kernels/libmatmul_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libmulti_weight_matmul_kernel_impl.so → op_kernels/ascend910b/so_kernels/libmulti_weight_matmul_kernel_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/libreshape_and_cache_impl.so → op_kernels/ascend910b/so_kernels/libreshape_and_cache_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/{lib/librms_norm_impl.so → op_kernels/ascend910b/so_kernels/librms_norm_ascend910b.so} +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/log.py +12 -0
- mindspore/mindrecord/__init__.py +1 -1
- mindspore/mindrecord/config.py +17 -316
- mindspore/mindrecord/filereader.py +1 -9
- mindspore/mindrecord/filewriter.py +5 -15
- mindspore/mindrecord/mindpage.py +1 -9
- mindspore/mint/__init__.py +824 -218
- mindspore/mint/distributed/__init__.py +66 -4
- mindspore/mint/distributed/distributed.py +2594 -44
- mindspore/mint/linalg/__init__.py +6 -0
- mindspore/mint/nn/__init__.py +473 -14
- mindspore/mint/nn/functional.py +486 -11
- mindspore/mint/nn/layer/__init__.py +17 -4
- mindspore/mint/nn/layer/_functions.py +330 -0
- mindspore/mint/nn/layer/activation.py +169 -1
- mindspore/mint/nn/layer/basic.py +123 -0
- mindspore/mint/nn/layer/conv.py +727 -0
- mindspore/mint/nn/layer/normalization.py +215 -19
- mindspore/mint/nn/layer/padding.py +797 -0
- mindspore/mint/nn/layer/pooling.py +170 -0
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/adam.py +223 -0
- mindspore/mint/optim/adamw.py +26 -19
- mindspore/mint/special/__init__.py +2 -1
- mindspore/multiprocessing/__init__.py +5 -0
- mindspore/nn/__init__.py +2 -0
- mindspore/nn/cell.py +142 -21
- mindspore/nn/dynamic_lr.py +2 -1
- mindspore/nn/layer/activation.py +6 -6
- mindspore/nn/layer/basic.py +35 -25
- mindspore/nn/layer/channel_shuffle.py +3 -3
- mindspore/nn/layer/conv.py +3 -0
- mindspore/nn/layer/embedding.py +3 -3
- mindspore/nn/layer/normalization.py +8 -7
- mindspore/nn/layer/padding.py +4 -3
- mindspore/nn/layer/pooling.py +55 -23
- mindspore/nn/layer/rnn_cells.py +1 -1
- mindspore/nn/layer/rnns.py +2 -1
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +48 -26
- mindspore/nn/learning_rate_schedule.py +5 -3
- mindspore/nn/loss/loss.py +31 -36
- mindspore/nn/optim/ada_grad.py +1 -0
- mindspore/nn/optim/adadelta.py +2 -2
- mindspore/nn/optim/adam.py +1 -1
- mindspore/nn/optim/lars.py +1 -4
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/rprop.py +2 -2
- mindspore/nn/optim/thor.py +2 -1
- mindspore/nn/utils/__init__.py +22 -0
- mindspore/nn/utils/init.py +73 -0
- mindspore/nn/wrap/cell_wrapper.py +4 -6
- mindspore/nn/wrap/loss_scale.py +3 -4
- mindspore/numpy/array_creations.py +60 -62
- mindspore/numpy/array_ops.py +148 -143
- mindspore/numpy/logic_ops.py +41 -42
- mindspore/numpy/math_ops.py +361 -359
- mindspore/numpy/utils.py +16 -16
- mindspore/numpy/utils_const.py +4 -4
- mindspore/ops/__init__.py +2 -1
- mindspore/ops/_grad_experimental/grad_comm_ops.py +107 -8
- mindspore/ops/_grad_experimental/grad_debug_ops.py +6 -1
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
- mindspore/ops/_vmap/vmap_array_ops.py +20 -19
- mindspore/ops/_vmap/vmap_base.py +0 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +19 -13
- mindspore/ops/_vmap/vmap_math_ops.py +11 -9
- mindspore/ops/_vmap/vmap_nn_ops.py +20 -34
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +149 -12
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -61
- mindspore/ops/auto_generate/gen_extend_func.py +554 -60
- mindspore/ops/auto_generate/gen_ops_def.py +1621 -115
- mindspore/ops/auto_generate/gen_ops_prim.py +8027 -3411
- mindspore/ops/auto_generate/pyboost_inner_prim.py +183 -79
- mindspore/ops/composite/base.py +1 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +229 -30
- mindspore/ops/composite/multitype_ops/pow_impl.py +0 -29
- mindspore/ops/function/__init__.py +12 -0
- mindspore/ops/function/array_func.py +561 -159
- mindspore/ops/function/clip_func.py +64 -0
- mindspore/ops/function/debug_func.py +28 -20
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +5 -4
- mindspore/ops/function/math_func.py +1664 -294
- mindspore/ops/function/nn_func.py +988 -317
- mindspore/ops/function/parameter_func.py +3 -56
- mindspore/ops/function/random_func.py +243 -33
- mindspore/ops/function/sparse_unary_func.py +1 -1
- mindspore/ops/functional.py +18 -5
- mindspore/ops/functional_overload.py +897 -0
- mindspore/ops/operations/__init__.py +3 -2
- mindspore/ops/operations/_embedding_cache_ops.py +4 -4
- mindspore/ops/operations/_grad_ops.py +2 -34
- mindspore/ops/operations/_infer_ops.py +2 -1
- mindspore/ops/operations/_inner_ops.py +38 -8
- mindspore/ops/operations/array_ops.py +45 -303
- mindspore/ops/operations/comm_ops.py +23 -17
- mindspore/ops/operations/custom_ops.py +7 -49
- mindspore/ops/operations/debug_ops.py +42 -47
- mindspore/ops/operations/inner_ops.py +6 -4
- mindspore/ops/operations/linalg_ops.py +3 -2
- mindspore/ops/operations/manually_defined/ops_def.py +185 -104
- mindspore/ops/operations/math_ops.py +11 -216
- mindspore/ops/operations/nn_ops.py +153 -310
- mindspore/ops/primitive.py +23 -21
- mindspore/ops/tensor_method.py +1669 -0
- mindspore/ops_generate/aclnn_kernel_register_auto_cc_generator.py +110 -0
- mindspore/ops_generate/add_tensor_docs_generator.py +54 -0
- mindspore/ops_generate/arg_handler.py +0 -61
- mindspore/ops_generate/auto_grad_impl_cc_generator.py +135 -0
- mindspore/ops_generate/auto_grad_reg_cc_generator.py +93 -0
- mindspore/ops_generate/base_generator.py +11 -0
- mindspore/ops_generate/cpp_create_prim_instance_helper_generator.py +108 -0
- mindspore/ops_generate/functional_map_cpp_generator.py +491 -0
- mindspore/ops_generate/functional_overload_py_generator.py +110 -0
- mindspore/ops_generate/functions_cc_generator.py +233 -0
- mindspore/ops_generate/gen_aclnn_implement.py +110 -114
- mindspore/ops_generate/gen_constants.py +157 -3
- mindspore/ops_generate/gen_ops.py +245 -990
- mindspore/ops_generate/gen_pyboost_func.py +97 -998
- mindspore/ops_generate/gen_utils.py +119 -33
- mindspore/ops_generate/lite_ops_cpp_generator.py +155 -0
- mindspore/ops_generate/op_api_proto.py +206 -0
- mindspore/ops_generate/op_def_py_generator.py +131 -0
- mindspore/ops_generate/op_prim_py_generator.py +480 -0
- mindspore/ops_generate/op_proto.py +373 -108
- mindspore/ops_generate/op_template_parser.py +436 -0
- mindspore/ops_generate/ops_def_cc_generator.py +288 -0
- mindspore/ops_generate/ops_def_h_generator.py +74 -0
- mindspore/ops_generate/ops_name_h_generator.py +68 -0
- mindspore/ops_generate/ops_primitive_h_generator.py +81 -0
- mindspore/ops_generate/pyboost_functions_cpp_generator.py +370 -0
- mindspore/ops_generate/pyboost_functions_h_generator.py +68 -0
- mindspore/ops_generate/pyboost_functions_py_generator.py +148 -0
- mindspore/ops_generate/pyboost_grad_function_cpp_generator.py +154 -0
- mindspore/ops_generate/pyboost_inner_prim_generator.py +131 -0
- mindspore/ops_generate/pyboost_native_grad_functions_generator.py +268 -0
- mindspore/ops_generate/pyboost_op_cpp_code_generator.py +851 -0
- mindspore/ops_generate/pyboost_overload_functions_cpp_generator.py +344 -0
- mindspore/ops_generate/pyboost_utils.py +92 -33
- mindspore/ops_generate/template.py +294 -44
- mindspore/ops_generate/tensor_func_reg_cpp_generator.py +422 -0
- mindspore/parallel/__init__.py +3 -3
- mindspore/parallel/_auto_parallel_context.py +44 -34
- mindspore/parallel/_cell_wrapper.py +22 -3
- mindspore/parallel/_parallel_serialization.py +13 -2
- mindspore/parallel/_utils.py +4 -2
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +44 -0
- mindspore/parallel/cluster/process_entity/_api.py +131 -37
- mindspore/parallel/cluster/process_entity/_utils.py +41 -6
- mindspore/parallel/cluster/run.py +20 -3
- mindspore/parallel/parameter_broadcast.py +1 -1
- mindspore/parallel/shard.py +3 -0
- mindspore/parallel/transform_safetensors.py +119 -253
- mindspore/profiler/__init__.py +17 -4
- mindspore/profiler/analysis/__init__.py +0 -0
- mindspore/profiler/analysis/parser/__init__.py +0 -0
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +166 -0
- mindspore/profiler/analysis/parser/base_parser.py +158 -0
- mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
- mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
- mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +261 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +84 -0
- mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
- mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
- mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
- mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +260 -0
- mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
- mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
- mindspore/profiler/analysis/task_manager.py +131 -0
- mindspore/profiler/analysis/time_converter.py +84 -0
- mindspore/profiler/analysis/viewer/__init__.py +0 -0
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +333 -0
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +252 -0
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +313 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +322 -0
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +265 -0
- mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
- mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +97 -0
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
- mindspore/profiler/analysis/work_flow.py +73 -0
- mindspore/profiler/common/ascend_msprof_exporter.py +138 -0
- mindspore/profiler/common/command_executor.py +90 -0
- mindspore/profiler/common/constant.py +174 -3
- mindspore/profiler/common/file_manager.py +208 -0
- mindspore/profiler/common/log.py +130 -0
- mindspore/profiler/common/msprof_cmd_tool.py +202 -0
- mindspore/profiler/common/path_manager.py +371 -0
- mindspore/profiler/common/process_bar.py +168 -0
- mindspore/profiler/common/process_pool.py +9 -3
- mindspore/profiler/common/profiler_context.py +476 -0
- mindspore/profiler/common/profiler_info.py +304 -0
- mindspore/profiler/common/profiler_output_path.py +284 -0
- mindspore/profiler/common/profiler_parameters.py +210 -0
- mindspore/profiler/common/profiler_path_manager.py +120 -0
- mindspore/profiler/common/record_function.py +76 -0
- mindspore/profiler/common/tlv_decoder.py +76 -0
- mindspore/profiler/common/util.py +75 -2
- mindspore/profiler/dynamic_profiler.py +270 -37
- mindspore/profiler/envprofiler.py +138 -0
- mindspore/profiler/mstx.py +199 -0
- mindspore/profiler/platform/__init__.py +21 -0
- mindspore/profiler/platform/base_profiler.py +40 -0
- mindspore/profiler/platform/cpu_profiler.py +124 -0
- mindspore/profiler/platform/gpu_profiler.py +74 -0
- mindspore/profiler/platform/npu_profiler.py +309 -0
- mindspore/profiler/profiler.py +580 -93
- mindspore/profiler/profiler_action_controller.py +187 -0
- mindspore/profiler/profiler_interface.py +114 -0
- mindspore/profiler/schedule.py +208 -0
- mindspore/rewrite/api/symbol_tree.py +1 -2
- mindspore/run_check/_check_version.py +18 -13
- mindspore/runtime/__init__.py +37 -0
- mindspore/runtime/device.py +27 -0
- mindspore/runtime/event.py +209 -0
- mindspore/runtime/executor.py +148 -0
- mindspore/runtime/memory.py +392 -0
- mindspore/runtime/stream.py +460 -0
- mindspore/runtime/thread_bind_core.py +401 -0
- mindspore/train/__init__.py +2 -2
- mindspore/train/_utils.py +53 -18
- mindspore/train/amp.py +8 -4
- mindspore/train/callback/_checkpoint.py +32 -18
- mindspore/train/callback/_early_stop.py +1 -1
- mindspore/train/callback/_flops_collector.py +105 -69
- mindspore/train/callback/_history.py +1 -1
- mindspore/train/callback/_summary_collector.py +44 -6
- mindspore/train/callback/_tft_register.py +37 -15
- mindspore/train/dataset_helper.py +11 -11
- mindspore/train/metrics/precision.py +4 -5
- mindspore/train/mind_ir_pb2.py +167 -46
- mindspore/train/model.py +13 -14
- mindspore/train/serialization.py +461 -72
- mindspore/train/summary/summary_record.py +1 -2
- mindspore/train/train_thor/model_thor.py +1 -1
- mindspore/utils/__init__.py +4 -2
- mindspore/utils/bin/dataset-cache +0 -0
- mindspore/utils/bin/dataset-cache-server +0 -0
- mindspore/utils/dryrun.py +138 -0
- mindspore/utils/runtime_execution_order_check.py +550 -0
- mindspore/version.py +1 -1
- {mindspore-2.4.1.dist-info → mindspore-2.5.0.dist-info}/METADATA +3 -4
- {mindspore-2.4.1.dist-info → mindspore-2.5.0.dist-info}/RECORD +672 -479
- {mindspore-2.4.1.dist-info → mindspore-2.5.0.dist-info}/entry_points.txt +1 -1
- mindspore/_data_dump.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/common/_tensor_overload.py +0 -139
- mindspore/lib/libmindspore_np_dtype.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/version.info +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -82
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -113
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -193
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/dtype_registry.h +0 -90
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_layer_norm_op.h +0 -60
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_rms_norm_op.h +0 -50
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_rms_norm_quant_op.h +0 -50
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/apply_rotary_pos_emb_nz_op.h +0 -42
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/apply_rotary_pos_emb_op.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_only_ops.h +0 -94
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_op_base.h +0 -97
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/flash_attention_score_op.h +0 -92
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/gelu_op.h +0 -44
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_add_rmsnorm_op.h +0 -73
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -108
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/multi_impls_op.h +0 -64
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/multi_weight_matmul_op.h +0 -91
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/paged_attention_op.h +0 -99
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/reshape_and_cache_nz_op.h +0 -44
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/reshape_and_cache_op.h +0 -44
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/rms_norm_op.h +0 -64
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -179
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/profiling_util.h +0 -366
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -56
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/kernel/add.h +0 -21
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/tiling/add_tiling.h +0 -43
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb.h +0 -23
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_base.h +0 -456
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_bf16.h +0 -217
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp.h +0 -391
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp16.h +0 -126
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -230
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_tiling.h +0 -43
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_value.h +0 -27
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/apply_rotary_pos_emb_nz_impl.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz.h +0 -23
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_base.h +0 -460
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_fp16.h +0 -116
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_fp32.h +0 -230
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_tiling.h +0 -43
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb_nz/kernel/apply_rotary_pos_emb_nz_value.h +0 -27
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -74
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -74
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/kernel/cast_kernel.h +0 -21
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_tiling.h +0 -27
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/kernel/compare_kernel.h +0 -23
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +0 -68
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -99
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +0 -21
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/lccl/lccl_wrapper.h +0 -58
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/ms_int_types.h +0 -91
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/ms_int_utils.h +0 -108
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +0 -64
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/add_param.h +0 -68
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/cast_param.h +0 -30
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -38
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +0 -42
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +0 -33
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -377
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/kernel/reshape_and_cache_nz.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/reshape_and_cache_nz_impl.h +0 -42
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache_nz/reshape_and_cache_nz_tiling.h +0 -27
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/kernel/sub_kernel.h +0 -20
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +0 -399
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/utils.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_tiling.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +0 -30
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_core.h +0 -43
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_entity.h +0 -38
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_sink.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_stream.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -71
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -165
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +0 -20
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -121
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -106
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_quant_acme_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_310p_old_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_old_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_nz_old_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMatMulPostFusionMixTactic/acme_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/AcmeMultiWeightMatMulPostFusionMixTactic/acme_multi_weight_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_bf16_bnsd_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_bf16_bsh_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_fp16_bnsd_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_fp16_bsh_mix.o +0 -0
- mindspore/profiler/envprofiling.py +0 -254
- mindspore/profiler/profiling.py +0 -1926
- /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910}/op_api/include/aclnn_decoder_kv_cache.h +0 -0
- /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910}/op_api/include/aclnn_prompt_kv_cache.h +0 -0
- /mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl}/dynamic/decoder_kv_cache.cpp +0 -0
- /mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl}/dynamic/prompt_kv_cache.cpp +0 -0
- /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910b}/op_api/include/aclnn_all_finite.h +0 -0
- /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910b}/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -0
- /mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json → custom_ascendc_910b/op_impl/ai_core/tbe/config/ascend910_93/aic-ascend910_93-ops-info.json} +0 -0
- /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910b}/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +0 -0
- /mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl}/dynamic/all_finite.cpp +0 -0
- /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910b}/op_proto/inc/op_proto.h +0 -0
- {mindspore-2.4.1.dist-info → mindspore-2.5.0.dist-info}/WHEEL +0 -0
- {mindspore-2.4.1.dist-info → mindspore-2.5.0.dist-info}/top_level.txt +0 -0
@@ -239,6 +239,13 @@ def adaptive_avg_pool2d_grad_ext(grad_output, x):
     return adaptive_avg_pool2d_grad_ext_op(grad_output, x)
 
 
+def adaptive_avg_pool3d_ext(input, output_size):
+    r"""
+
+    """
+    return adaptive_avg_pool3d_ext_op(input, output_size)
+
+
 def add_ext(input, other, alpha=1):
     r"""
     Adds scaled other value to input Tensor.
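The new `adaptive_avg_pool3d_ext` wrapper above ships with an empty docstring. As a rough usage sketch, assuming the generated function is reachable under `ops.auto_generate` like the other wrappers in this module (the call path and the `(2, 2, 2)` output size are illustrative assumptions, not taken from the diff):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    # Adaptive 3D average pooling: a (N, C, D, H, W) input is reduced so that
    # its last three dimensions match output_size, here (2, 2, 2).
    x = Tensor(np.random.randn(1, 2, 4, 4, 4), ms.float32)
    out = ops.auto_generate.adaptive_avg_pool3d_ext(x, (2, 2, 2))
    print(out.shape)  # expected: (1, 2, 2, 2, 2)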
@@ -370,11 +377,60 @@ def add(input, other):
     return add_op(input, other)
 
 
-def
+def add_rms_norm(x1, x2, gamma, epsilon=1e-6):
     r"""
-
+    AddRmsNorm is a fusion operator that fuses RmsNorm with its preceding Add operator, reducing the time spent
+    moving data in and out.
+    It computes the following expression:
+
+    .. math::
+        \begin{array}{ll} \\
+            x_i = x1_i + x2_i \\
+            y_i = RmsNorm(x_i) = \frac{x_i}{\sqrt{\frac{1}{n}\sum_{i=1}^{n}{x_i^2} + \varepsilon}}\gamma_i
+        \end{array}
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion. This API is only supported on Atlas A2
+        training series products for now.
+
+    Args:
+        x1 (Tensor): Input data of AddRmsNorm. Supported data types: float16, float32, bfloat16.
+        x2 (Tensor): Input data of AddRmsNorm. Supported data types: float16, float32, bfloat16.
+        gamma (Tensor): Learnable parameter :math:`\gamma` . Supported data types: float16, float32, bfloat16.
+        epsilon (float, optional): A float number in (0, 1] to prevent division by 0. Default value is ``1e-6`` .
+
+    Returns:
+        - Tensor, denotes the normalized result, has the same type and shape as `x1`.
+        - Tensor, with the float data type, denotes the reciprocal of the input standard deviation, used by gradient
+          calculation.
+        - Tensor, the sum of `x1` and `x2`.
+
+    Raises:
+        TypeError: If the data type of `x1` or `x2` is not one of the following: float16, float32, bfloat16.
+        TypeError: If the data type of `gamma` is not one of the following: float16, float32, bfloat16.
+        ValueError: If `epsilon` is not a float between 0 and 1.
+        ValueError: If the rank of `gamma` is greater than the rank of `x1` or `x2`.
+        RuntimeError: If the shapes of `x1` and `x2` are not the same.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x1 = Tensor(np.array([[0.5, 1.0, 1.5], [0.5, 1.0, 1.5]]), mindspore.float32)
+        >>> x2 = Tensor(np.array([[0.5, 1.0, 1.5], [0.5, 1.0, 1.5]]), mindspore.float32)
+        >>> gamma = Tensor(np.ones([3]), mindspore.float32)
+        >>> y, rstd, _ = ops.add_rms_norm(x1, x2, gamma)
+        >>> print(y)
+        [[0.46290997 0.92581993 1.3887299 ]
+         [0.46290997 0.92581993 1.3887299 ]]
+        >>> print(rstd)
+        [[0.46290997]
+         [0.46290997]]
     """
-    return
+    return add_rms_norm_op(x1, x2, gamma, epsilon)
 
 
 def addn(x):
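To make the fusion claim above concrete, here is a minimal sketch comparing the fused call against an unfused Add followed by RmsNorm. It assumes an Ascend (Atlas A2) environment and that `ops.rms_norm` is available as the standalone reference, which is the case in recent releases:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x1 = Tensor(np.random.randn(2, 3).astype(np.float32))
    x2 = Tensor(np.random.randn(2, 3).astype(np.float32))
    gamma = Tensor(np.ones([3]), ms.float32)

    # Fused path: Add + RmsNorm in one kernel, so the intermediate x1 + x2
    # never makes an extra round trip through device memory.
    y_fused, rstd, x_sum = ops.add_rms_norm(x1, x2, gamma)

    # Unfused reference path: materialize the sum, then normalize it.
    y_ref, _ = ops.rms_norm(x1 + x2, gamma)

    print(np.allclose(y_fused.asnumpy(), y_ref.asnumpy(), atol=1e-5))  # expected: True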
@@ -451,14 +507,38 @@ def apply_rotary_pos_emb_(query, key, cos, sin, position_ids, cos_format=0):
 
 def argmax_ext(input, dim=None, keepdim=False):
     r"""
+    argmax(input) -> Tensor
+
+    Return the indices of the maximum values of a tensor.
+
+    Args:
+        input (Tensor): Input tensor.
+
+    Returns:
+        Tensor.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> from mindspore import ops
+        >>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
+        >>> output = ops.auto_generate.argmax_ext(x)
+        >>> print(output)
+        6
+
+    .. function:: argmax(input, dim, keepdim=False) -> Tensor
+        :noindex:
+
     Return the indices of the maximum values of a tensor across a dimension.
 
     Args:
         input (Tensor): Input tensor.
-        dim (
-            value within the flattened input will be returned. Default: ``None`` .
+        dim (int): The dimension to reduce.
         keepdim (bool, optional): Whether the output tensor retains the specified
-            dimension.
+            dimension. Default: ``False`` .
 
     Returns:
         Tensor, indices of the maximum values across a dimension.
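The two overloads documented above correspond to calling with and without `dim`; presumably the public entry point is `mint.argmax` (an assumption, since the diff only shows the generated `argmax_ext`). A quick sketch of both forms, reusing the docstring's input:

    import numpy as np
    from mindspore import Tensor, mint

    x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
    print(mint.argmax(x))                             # 6: index of 130 in the flattened input
    print(mint.argmax(x, dim=1))                      # [1 0 0]: column of each row's maximum
    print(mint.argmax(x, dim=1, keepdim=True).shape)  # (3, 1): reduced dim kept as size 1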
@@ -515,6 +595,48 @@ def argmin_ext(input, dim=None, keepdim=False):
     return argmin_ext_op(input, dim, keepdim)
 
 
+def argsort_ext(input, dim=-1, descending=False):
+    r"""
+    Sorts the input tensor along the given dimension in the specified order and returns the sorted indices.
+
+    .. warning::
+        This is an experimental API that is subject to change.
+
+    Args:
+        input (Tensor): The input tensor to sort.
+        dim (int, optional): The dim to sort along. Default: ``-1`` , means the last dimension.
+            The Ascend backend only supports sorting the last dimension.
+        descending (bool, optional): The sort order. If `descending` is ``True`` then the elements
+            are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
+
+    Returns:
+        Tensor, the indices of the sorted input tensor. Data type is int64.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> import mindspore.mint as mint
+        >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
+        >>> sort = mint.argsort(x)
+        >>> print(sort)
+        [[2 1 0]
+         [2 0 1]
+         [0 1 2]]
+    """
+    return argsort_op(input, dim, descending)
+
+
+def as_strided(input, size, stride, storage_offset=0):
+    r"""
+
+    """
+    return as_strided_op(input, size, stride, storage_offset)
+
+
 def asin_ext(input):
     r"""
     Computes arcsine of input tensors element-wise.
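The docstring example only shows the default ascending order; a short sketch of the `descending=True` branch through the same `mint.argsort` entry point:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, mint

    x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), ms.float16)
    print(mint.argsort(x, dim=-1, descending=True))
    # [[0 1 2]
    #  [1 0 2]
    #  [2 1 0]]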
@@ -648,7 +770,7 @@ def asinh(input):
 
 def assign_add(variable, value):
     r"""
-    Updates a `Parameter` by adding a value to it.
+    Updates a `Parameter` or `Tensor` by adding a value to it.
 
     Args of `variable` and `value` comply with the implicit type conversion rules to make the data types consistent.
     If they have different data types, the lower priority data type will be converted to
@@ -657,15 +779,16 @@ def assign_add(variable, value):
     and the data type is consistent with the Tensor data type involved in the operation.
 
     Note:
-        Since `variable` is a data type Parameter, the data type cannot be changed,
+        Since `variable` is a data type Parameter or Tensor, the data type cannot be changed,
         so only the type of `value` is allowed to be promoted to the type of `variable`.
         And the conversion type supported by different devices will be different,
         it is recommended to use the same data type when using this operator.
 
     Args:
-        variable (Parameter): The `Parameter`.
+        variable (Union[Parameter, Tensor]): The `Parameter` or `Tensor`.
             :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-        value (Tensor): The value to be added to the `variable`.
+        value (Union[Tensor, Number]): The value to be added to the `variable`.
+            If `value` is a number, the number is automatically converted to Tensor.
             It must have the same shape as `variable`.
             it is recommended to use the same data type when using this operator.
 
@@ -674,8 +797,8 @@ def assign_add(variable, value):
 
     Raises:
         TypeError: If `value` is neither Number nor Tensor.
-        RuntimeError: If the data type of `variable` and `value` conversion of Parameter
-            is required when data type conversion of Parameter is not supported.
+        RuntimeError: If the data type of `variable` and `value` conversion of Parameter or Tensor
+            is required when data type conversion of Parameter or Tensor is not supported.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -696,14 +819,14 @@ def assign_add(variable, value):
 
 def assign(variable, value):
     r"""
-    Assigns `Parameter` with a value.
+    Assigns `Parameter` or `Tensor` with a value.
 
     Args of `variable` and `value` comply with the implicit type conversion rules to make the data types consistent.
     If they have different data types, the lower priority data type will be converted to
     the relatively highest priority data type.
 
     Args:
-        variable (Parameter): The `Parameter`. :math:`(N,*)` where :math:`*` means,
+        variable (Union[Parameter, Tensor]): The `Parameter` or `Tensor`. :math:`(N,*)` where :math:`*` means,
             any number of additional dimensions.
         value (Tensor): The value to be assigned, has the same shape with `variable`.
 
@@ -711,9 +834,9 @@ def assign(variable, value):
         Tensor, has the same data type and shape as original `variable`.
 
     Raises:
-        TypeError: If `variable` is
+        TypeError: If `variable` is neither a Parameter nor a Tensor.
         TypeError: If `value` is not a Tensor.
-        RuntimeError: If the data type of `variable` and `value` conversion of Parameter
+        RuntimeError: If the data type of `variable` and `value` conversion of Parameter or Tensor
             is required when data type conversion of Parameter is not supported.
 
     Supported Platforms:
@@ -731,6 +854,53 @@ def assign(variable, value):
     return assign_op(variable, value)
 
 
+def assign_sub(variable, value):
+    r"""
+    Updates a `Parameter` or `Tensor` by subtracting a value from it.
+
+    Args of `variable` and `value` comply with the implicit type conversion rules to make the data types consistent.
+    If they have different data types, the lower priority data type will be converted to
+    the relatively highest priority data type.
+    If `value` is a number, the number is automatically converted to Tensor,
+    and the data type is consistent with the Tensor data type involved in the operation.
+
+    Note:
+        Since `variable` is a data type Parameter or Tensor, the data type cannot be changed,
+        so only the type of `value` is allowed to be promoted to the type of `variable`.
+        And the conversion type supported by different devices will be different,
+        it is recommended to use the same data type when using this operator.
+
+    Args:
+        variable (Union[Parameter, Tensor]): The `Parameter` or `Tensor`.
+            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
+        value (Tensor): The value to be subtracted from the `variable`.
+            It must have the same shape as `variable`.
+            it is recommended to use the same data type when using this operator.
+
+    Returns:
+        Tensor, has the same data type and shape as `variable`.
+
+    Raises:
+        TypeError: If `value` is neither Number nor Tensor.
+        RuntimeError: If the type conversion between `variable` and `value` is not supported.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> from mindspore.common.initializer import initializer
+        >>> variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
+        >>> value = Tensor(np.ones([1]).astype(np.int32) * 100)
+        >>> ops.assign_sub(variable, value)
+        >>> print(variable.asnumpy())
+        [-99]
+    """
+    return assign_sub_op(variable, value)
+
+
 def atan2_ext(input, other):
     r"""
     Returns arctangent of input/other element-wise.
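Taken together, the changes above extend `assign`, `assign_add` and the new `assign_sub` to accept a plain Tensor as the update target, not only a Parameter. A compact sketch of the trio driving one counter:

    import numpy as np
    import mindspore as ms
    from mindspore import Parameter, Tensor, ops
    from mindspore.common.initializer import initializer

    step = Parameter(initializer(10, [1], ms.int32), name="step")
    ops.assign(step, Tensor(np.array([5], np.int32)))      # step becomes 5
    ops.assign_add(step, Tensor(np.array([2], np.int32)))  # step becomes 7
    ops.assign_sub(step, Tensor(np.array([3], np.int32)))  # step becomes 4
    print(step.asnumpy())  # [4]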
@@ -919,6 +1089,56 @@ def atanh(input):
     return atanh_op(input)
 
 
+def avg_pool1d_ext(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
+    r"""
+    Applies a 1D average pooling over an input Tensor which can be regarded as a composition of 1D input planes.
+
+    Typically the input is of shape :math:`(N_{in}, C_{in}, L_{in})`, avg_pool1d outputs regional average in the
+    :math:`(L_{in})`-dimension. Given kernel size as :math:`ks = l_{ker}` and `stride` as :math:`s = s_0`, the
+    operation is as follows.
+
+    .. math::
+        \text{output}(N_i, C_j, l) = \frac{1}{l_{ker}} \sum_{n=0}^{l_{ker}-1}
+        \text{input}(N_i, C_j, s_0 \times l + n)
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): Tensor of shape :math:`(N, C_{in}, L_{in})`.
+        kernel_size (Union(int, tuple[int])): The size of kernel window used to take the average value.
+        stride (Union(int, tuple[int]), optional): The distance of kernel moving. `stride` can either be an int
+            number or a tuple of one int number. Default: ``None``, the same value as `kernel_size`.
+        padding (Union(int, tuple[int]), optional): The pad length to be filled. `padding` can either be an integer
+            or a tuple of one integer. Default: ``0`` .
+        ceil_mode (bool, optional): If True, apply ceil instead of floor to compute the output shape. Default: ``False``.
+        count_include_pad (bool, optional): If True, include the zero-padding in the averaging calculation. Default: ``True`` .
+
+    Returns:
+        Tensor of shape :math:`(N, C_{in}, L_{out})`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `kernel_size` or `stride` is not an int.
+        TypeError: If `ceil_mode` or `count_include_pad` is not a bool.
+        ValueError: If `kernel_size` or `stride` is less than `1`.
+        ValueError: If `kernel_size`, `stride` or `padding` is neither an int nor a tuple of length `1`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> input_x = Tensor(np.random.randint(0, 10, [1, 3, 6]), mindspore.float32)
+        >>> output = mint.nn.functional.avg_pool1d(input_x, kernel_size=6, stride=1)
+        >>> print(output.shape)
+        (1, 3, 1)
+    """
+    return avg_pool1d_op(input, kernel_size, stride, padding, ceil_mode, count_include_pad)
+
+
 def baddbmm(input, batch1, batch2, beta, alpha):
     r"""
     The result is the sum of the input and a batch matrix-matrix product of matrices in batch1 and batch2.
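The `(1, 3, 1)` result in the avg_pool1d example follows from the usual pooling length arithmetic. A small sketch of that calculation (the standard formula, stated here for reference rather than lifted from the MindSpore source):

    import math

    def avg_pool1d_out_len(l_in, kernel_size, stride, padding=0, ceil_mode=False):
        # L_out = floor((L_in + 2*padding - kernel_size) / stride) + 1,
        # with ceil instead of floor when ceil_mode=True.
        span = l_in + 2 * padding - kernel_size
        return (math.ceil(span / stride) if ceil_mode else span // stride) + 1

    print(avg_pool1d_out_len(6, kernel_size=6, stride=1))  # 1, hence the (1, 3, 1) shape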
@@ -963,6 +1183,73 @@ def baddbmm(input, batch1, batch2, beta, alpha):
     return baddbmm_op(input, batch1, batch2, beta, alpha)
 
 
+def batch_norm_elemt(input, weight=None, bias=None, mean=None, invstd=None, eps=1e-5):
+    r"""
+
+    """
+    return batch_norm_elemt_op(input, weight, bias, mean, invstd, eps)
+
+
+def batch_norm_gather_stats_with_counts(input, mean, invstd, running_mean=None, running_var=None, momentum=1e-1, eps=1e-5, counts=None):
+    r"""
+
+    """
+    return batch_norm_gather_stats_with_counts_op(input, mean, invstd, running_mean, running_var, momentum, eps, counts)
+
+
+def batch_norm_stats(input, eps):
+    r"""
+
+    """
+    return batch_norm_stats_op(input, eps)
+
+
+def bincount_ext(input, weights=None, minlength=0):
+    r"""
+    Count the occurrences of each value in the input.
+
+    If `minlength` is not specified, the length of the output Tensor is the maximum value in the input plus one.
+    If `minlength` is specified, the length of the output Tensor is the maximum of `minlength` and
+    the maximum value in the input plus one.
+
+    Each value in the output Tensor represents the number of occurrences of that index value in the input.
+    If `weights` is specified, the output results are weighted,
+    i.e., :math:`out[n] += weight[i]` instead of :math:`out[n] += 1`.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): A one-dimensional Tensor.
+        weights (Tensor, optional): Weights with the same shape as the input. Default: ``None``.
+        minlength (int, optional): The minimum length of output Tensor. Should be non-negative. Default: ``0``.
+
+    Returns:
+        Tensor, if input is non-empty, the output shape is :math:`(max(max(input)+1, minlength), )`,
+        otherwise the shape is :math:`(0, )`.
+
+    Raises:
+        TypeError: If `input` or `weights` is not a Tensor.
+        ValueError: If `input` contains negative values.
+        ValueError: If `input` is not one-dimensional or `input` and `weights` do not have the same shape.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> from mindspore import Tensor, mint
+        >>> print(mint.bincount(Tensor([0, 1, 2, 3, 4])))
+        [1. 1. 1. 1. 1.]
+        >>> print(mint.bincount(Tensor([0, 1, 1, 3, 2, 1, 7])))
+        [1. 3. 1. 1. 0. 0. 0. 1.]
+        >>> w = Tensor([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
+        >>> x = Tensor([0, 1, 1, 2, 2, 2])
+        >>> print(mint.bincount(x, weights=w, minlength=5))
+        [0.3 0.7 1.1 0.0 0.0]
+    """
+    return bincount_ext_op(input, weights, minlength)
+
+
 def bmm_ext(input, mat2):
     r"""
     Performs batch matrix-matrix multiplication of two three-dimensional tensors.
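The weighted rule `out[n] += weight[i]` in the bincount docstring can be checked by hand; a plain NumPy sketch of the same computation:

    import numpy as np

    x = np.array([0, 1, 1, 2, 2, 2])
    w = np.array([0.3, 0.5, 0.2, 0.7, 1.0, -0.6])
    out = np.zeros(max(x.max() + 1, 5))   # minlength=5
    for i, n in enumerate(x):
        out[n] += w[i]                    # out[n] += weight[i] instead of out[n] += 1
    print(out)  # [0.3 0.7 1.1 0.  0. ]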
@@ -1120,6 +1407,54 @@ def cast(input, dtype):
     return cast_op(input, dtype)
 
 
+def cdist(x1, x2, p=2.0):
+    r"""
+    Computes p-norm distance between each pair of row vectors of two input Tensors.
+
+    Note:
+        On Ascend, the supported dtypes are float16 and float32.
+        On CPU, the supported dtypes are float16 and float32.
+        On GPU, the supported dtypes are float32 and float64.
+
+    Args:
+        x1 (Tensor): Input tensor of shape :math:`(B, P, M)`.
+            Letter :math:`B` represents 0 or positive int number.
+            When :math:`B` is equal to 0, it means this dimension can be ignored,
+            i.e. shape of the tensor is :math:`(P, M)`.
+        x2 (Tensor): Input tensor of shape :math:`(B, R, M)`, has the same dtype as `x1`.
+        p (float, optional): P value for the p-norm distance to calculate between each
+            vector pair, P >= 0. Default: ``2.0`` .
+
+    Returns:
+        Tensor, p-norm distance, has the same dtype as `x1`, its shape is :math:`(B, P, R)`.
+
+    Raises:
+        TypeError: If `x1` or `x2` is not Tensor.
+        TypeError: If dtype of `x1` or `x2` is not listed in the "Note" above.
+        TypeError: If `p` is not float32.
+        ValueError: If `p` is negative.
+        ValueError: If dimension of `x1` is not the same as `x2`.
+        ValueError: If dimension of `x1` or `x2` is neither 2 nor 3.
+        ValueError: If the batch dim of `x1` and `x2` can not broadcast.
+        ValueError: If the number of columns of `x1` is not the same as that of `x2`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
+        >>> y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
+        >>> output = ops.cdist(x, y, 2.0)
+        >>> print(output)
+        [[[2.8284273 2.8284273]
+          [1.4142137 1.4142137]]]
+    """
+    cdist_op = _get_cache_prim(Cdist)(p)
+    return cdist_op(x1, x2)
+
+
 def ceil(input):
     r"""
     Rounds a tensor up to the closest integer element-wise.
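One entry of the cdist example can be cross-checked against the plain Euclidean formula; a short verification sketch:

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
    y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
    out = ops.cdist(x, y, 2.0)

    # ||(1,1) - (3,3)||_2 = sqrt(8) ~ 2.8284271, matching out[0, 0, 0]
    ref = np.linalg.norm(np.array([1.0, 1.0]) - np.array([3.0, 3.0]))
    print(float(out[0, 0, 0]), ref)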
@@ -1322,6 +1657,42 @@ def clamp_tensor(input, min=None, max=None):
     return clamp_tensor_op(input, min, max)
 
 
+def clone(input):
+    r"""
+    Returns a copy of the input tensor.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Note:
+        This function is differentiable, and gradients will flow back directly from the calculation
+        result of the function to the `input`.
+
+    Args:
+        input (Tensor): A tensor to be copied.
+
+    Returns:
+        Tensor, with the same data, shape and type as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.ones((3,3)).astype("float32"))
+        >>> output = ops.auto_generate.clone(input)
+        >>> print(output)
+        [[1. 1. 1.]
+         [1. 1. 1.]
+         [1. 1. 1.]]
+    """
+    return clone_op(input)
+
+
 def fold_ext(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
     r"""
     Combines an array of sliding local blocks into a large containing tensor.
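To illustrate the differentiability note on `clone`, a minimal sketch showing gradients flowing straight through the copy:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.ones((3, 3)).astype(np.float32))

    # d/dx of sum(clone(x)) is all ones: the copy is transparent to autodiff.
    grad_fn = ms.grad(lambda t: ops.auto_generate.clone(t).sum())
    print(grad_fn(x))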
@@ -1500,11 +1871,72 @@ def contiguous(input):
     return contiguous_op(input)
 
 
-def
+def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
+    r"""
+    Applies a 2D transposed convolution operator over an input image composed of several input planes,
+    sometimes also called deconvolution (although it is not an actual deconvolution).
+
+    Refer to :class:`mindspore.mint.nn.ConvTranspose2d` for more details.
+
+    .. warning::
+        - This is an experimental API that is subject to change or deletion.
+        - In the scenario where inputs are non-contiguous, `output_padding` must be less than `stride` .
+        - For Atlas training products, when the dtype of input is float32, the `groups` only supports 1.
+
+    Args:
+        input (Tensor): input tensor of shape :math:`(minibatch, in\_channels, iH, iW)` or :math:`(in\_channels, iH, iW)` .
+        weight (Tensor): filters of shape :math:`(in\_channels, \frac{out\_channels}{\text{groups}}, kH, kW)` .
+        bias (Tensor, optional): bias of shape :math:`(out\_channels)` . Default: ``None`` .
+        stride (Union[int, tuple(int), list[int]], optional): the stride of the convolving kernel. Can be a single number or a
+            tuple :math:`(sH, sW)` . Default: ``1`` .
+        padding (Union[int, tuple(int), list[int]], optional): :math:`dilation * (kernel\_size - 1) - padding` zero-padding will
+            be added to both sides of each dimension in the input. Can be a single number or a tuple :math:`(padH, padW)` .
+            Default: ``0`` .
+        output_padding (Union[int, tuple(int), list[int]], optional): additional size added to one side of each dimension in the
+            output shape. Can be a single number or a tuple :math:`(out\_padH, out\_padW)` . The value of `output_padding` must
+            be less than `stride` or `dilation` . Default: ``0`` .
+        groups (int, optional): split input into groups, :math:`in\_channels` should be divisible by the
+            number of groups. Default: ``1`` .
+        dilation (Union[int, tuple(int), list[int]], optional): the spacing between kernel elements. Can be a single number or
+            a tuple :math:`(dH, dW)` . Default: ``1`` .
+
+    Returns:
+        Tensor of shape :math:`(minibatch, out\_channels, oH, oW)` or :math:`(out\_channels, oH, oW)` , where
+
+        .. math::
+            oH = (iH - 1) \times sH - 2 \times padH + dH \times (kH - 1) + out\_padH + 1
+        .. math::
+            oW = (iW - 1) \times sW - 2 \times padW + dW \times (kW - 1) + out\_padW + 1
+
+    Raises:
+        TypeError: If `stride`, `padding`, `output_padding` or `dilation` is neither an int nor a tuple or a list.
+        TypeError: If `groups` is not an int.
+        ValueError: If the shape of `bias` is not :math:`(out\_channels)` .
+        ValueError: If `stride` or `dilation` is less than 1.
+        ValueError: If `padding` or `output_padding` is less than 0.
+        ValueError: If `stride`, `padding`, `output_padding` or `dilation` is a tuple whose length is not equal to 2.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.ones([1, 4, 5, 5]), mindspore.float32)
+        >>> weight = Tensor(np.ones([4, 8, 3, 3]), mindspore.float32)
+        >>> output = ops.conv_transpose2d(x, weight)
+        >>> print(output.shape)
+        (1, 8, 7, 7)
+    """
+    return conv_transpose2d_op(input, weight, bias, stride, padding, output_padding, groups, dilation)
+
+
+def convolution(input, weight, bias=None, stride=1, padding=0, dilation=1, transposed=False, output_padding=0, groups=1):
     r"""
 
     """
-    return
+    return convolution_op(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups)
 
 
 def copy(input):
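The `(1, 8, 7, 7)` result in the conv_transpose2d example is just the documented shape formula with all defaults; a sketch of the arithmetic:

    # oH = (iH - 1)*sH - 2*padH + dH*(kH - 1) + out_padH + 1, with the
    # docstring example's values (defaults: sH=1, padH=0, dH=1, out_padH=0).
    iH, sH, padH, dH, kH, out_padH = 5, 1, 0, 1, 3, 0
    oH = (iH - 1) * sH - 2 * padH + dH * (kH - 1) + out_padH + 1
    print(oH)  # 7, and out_channels=8 comes from the weight's second axis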
@@ -1665,7 +2097,7 @@ def count_nonzero(input, dim=None):
     Args:
         input (Tensor): Input data is used to count non-zero numbers. With shape
             :math:`(*)` where :math:`*` means, any number of additional dimensions.
-        dim (Union[int, tuple(int), list(int)], optional): The dimension to reduce. Default value: ``None``, which indicates that the number of non-zero elements is calculated. If `dim` is ``None``, all elements in the tensor are summed up.
+        dim (Union[None, int, tuple(int), list(int)], optional): The dimension to reduce. Default value: ``None``, which indicates that the number of non-zero elements is calculated. If `dim` is ``None``, all elements in the tensor are summed up.
 
     Returns:
         Tensor, number of nonzero element across dim specified by `dim`.
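A quick sketch of the two `dim` modes described above, assuming the public entry point is `mint.count_nonzero` (an assumption; the diff only shows the docstring change):

    import numpy as np
    from mindspore import Tensor, mint

    x = Tensor(np.array([[0, 1, 0], [2, 0, 3]], np.float32))
    print(mint.count_nonzero(x))         # 3: dim=None counts over the whole tensor
    print(mint.count_nonzero(x, dim=1))  # [1 2]: non-zeros counted per row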
@@ -1764,8 +2196,11 @@ def cummin_ext(input, dim):
             y_{i} = \min(x_{1}, x_{2}, ... , x_{i})
         \end{array}
 
-
-
+    .. note::
+        O2 mode is not supported in Ascend.
+
+    Args:
+        input (Tensor): The input Tensor. The dimension must be greater than 0.
         dim (int): Operation dimension. The value of `dim` must be in the range `[-input.ndim, input.ndim - 1]`.
 
     Returns:
@@ -1778,9 +2213,6 @@ def cummin_ext(input, dim):
|
|
|
1778
2213
|
TypeError: If `dim` is not an int.
|
|
1779
2214
|
ValueError: If `dim` is out the range of `[-input.ndim, input.ndim - 1]`.
|
|
1780
2215
|
|
|
1781
|
-
.. note::
|
|
1782
|
-
O2 mode is not supported in Ascend.
|
|
1783
|
-
|
|
1784
2216
|
Supported Platforms:
|
|
1785
2217
|
``Ascend``
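A NumPy reference for the running-minimum recurrence above (a sketch; it assumes the operator returns a (values, indices) pair and uses first-occurrence tie-breaking, which may differ from the backend):

import numpy as np

def cummin_ref(x):
    values = np.minimum.accumulate(x)  # y_i = min(x_1, ..., x_i)
    indices = np.array([int(np.argmin(x[:i + 1])) for i in range(len(x))])
    return values, indices

vals, idx = cummin_ref(np.array([3., 1., 4., 1., 0.5]))
# vals -> [3.  1.  1.  1.  0.5], idx -> [0 1 1 1 4]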
|
|
1786
2218
|
|
|
@@ -1909,7 +2341,7 @@ def dense(input, weight, bias=None):
|
|
|
1909
2341
|
|
|
1910
2342
|
.. warning::
|
|
1911
2343
|
- This is an experimental API that is subject to change or deletion.
|
|
1912
|
-
- In
|
|
2344
|
+
- In PyNative mode, if `bias` is not 1D, the `input` cannot be greater than 6D.
|
|
1913
2345
|
|
|
1914
2346
|
Args:
|
|
1915
2347
|
input (Tensor): Input Tensor of shape :math:`(*, in\_channels)`,
|
|
@@ -1926,7 +2358,7 @@ def dense(input, weight, bias=None):
|
|
|
1926
2358
|
TypeError: If `input` is not Tensor.
|
|
1927
2359
|
TypeError: If `weight` is not Tensor.
|
|
1928
2360
|
TypeError: If `bias` is not Tensor.
|
|
1929
|
-
RuntimeError: If `bias` is not 1D and `input` is greater than 6D in
|
|
2361
|
+
RuntimeError: If `bias` is not 1D and `input` is greater than 6D in PyNative mode.
|
|
1930
2362
|
|
|
1931
2363
|
Supported Platforms:
|
|
1932
2364
|
``Ascend`` ``GPU`` ``CPU``
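A NumPy sketch of the computation dense performs, assuming the usual fully-connected convention y = x @ W.T + b with `weight` of shape (out_channels, in_channels):

import numpy as np

x = np.ones((2, 4), dtype=np.float32)   # (*, in_channels)
w = np.ones((3, 4), dtype=np.float32)   # (out_channels, in_channels)
b = np.zeros(3, dtype=np.float32)
y = x @ w.T + b                         # -> shape (2, 3), every entry 4.0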
|
|
@@ -2028,9 +2460,48 @@ def diagonal(input, offset=0, dim1=0, dim2=1):
|
|
|
2028
2460
|
return diagonal_op(input)
|
|
2029
2461
|
|
|
2030
2462
|
|
|
2031
|
-
def
|
|
2463
|
+
def divs(input, other):
|
|
2032
2464
|
r"""
|
|
2033
2465
|
|
|
2466
|
+
"""
|
|
2467
|
+
return divs_op(input, other)
|
|
2468
|
+
|
|
2469
|
+
|
|
2470
|
+
def dot(input, other):
|
|
2471
|
+
r"""
|
|
2472
|
+
Computes the dot product of two 1D tensors.
|
|
2473
|
+
|
|
2474
|
+
.. warning::
|
|
2475
|
+
This is an experimental API that is subject to change or deletion.
|
|
2476
|
+
|
|
2477
|
+
Args:
|
|
2478
|
+
input (Tensor): The first input in the dot product, must be 1D.
|
|
2479
|
+
other (Tensor): The second input in the dot product, must be 1D.
|
|
2480
|
+
|
|
2481
|
+
Returns:
|
|
2482
|
+
Tensor, the shape is [] and the data type is the same as `input`.
|
|
2483
|
+
|
|
2484
|
+
Raises:
|
|
2485
|
+
TypeError: If `input` or `other` is not a Tensor.
|
|
2486
|
+
TypeError: If the dtypes of `input` and `other` are not float16, float32 or bfloat16.
|
|
2487
|
+
RuntimeError: If the dtypes of `input` and `other` are not the same.
|
|
2488
|
+
RuntimeError: If the shapes of `input` and `other` are not the same.
|
|
2489
|
+
RuntimeError: If `input` or `other` is not 1D.
|
|
2490
|
+
|
|
2491
|
+
Supported Platforms:
|
|
2492
|
+
``Ascend``
|
|
2493
|
+
|
|
2494
|
+
Examples:
|
|
2495
|
+
>>> import mindspore
|
|
2496
|
+
>>> from mindspore import Tensor, mint
|
|
2497
|
+
>>> x = Tensor([2.0, 3.0], mindspore.float32)
|
|
2498
|
+
>>> y = Tensor([2.0, 1.0], mindspore.float32)
|
|
2499
|
+
>>> output = mint.dot(x, y)
|
|
2501
|
+
>>> print(output)
|
|
2502
|
+
7.0
|
|
2503
|
+
>>> print(output.dtype)
|
|
2504
|
+
Float32
|
|
2034
2505
|
"""
|
|
2035
2506
|
return dot_op(input, other)
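The example above is plain arithmetic; a one-line NumPy check:

import numpy as np
assert float(np.dot([2.0, 3.0], [2.0, 1.0])) == 7.0  # 2*2 + 3*1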
|
|
2036
2507
|
|
|
@@ -2293,14 +2764,14 @@ def erf(input):
|
|
|
2293
2764
|
|
|
2294
2765
|
.. math::
|
|
2295
2766
|
|
|
2296
|
-
erf(x)=\frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
|
|
2767
|
+
\text{erf}(x)=\frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
|
|
2297
2768
|
|
|
2298
2769
|
Args:
|
|
2299
2770
|
input (Tensor): The input tensor of Gaussian error function. :math:`x` in the following formula.
|
|
2300
2771
|
Supported dtypes:
|
|
2301
2772
|
|
|
2302
2773
|
- GPU/CPU: float16, float32, float64.
|
|
2303
|
-
- Ascend: float16, float32, float64, int64, bool.
|
|
2774
|
+
- Ascend: float16, float32, float64, int64, bool, bfloat16.
|
|
2304
2775
|
|
|
2305
2776
|
Returns:
|
|
2306
2777
|
Tensor, has the same shape as the `input`.
|
|
@@ -2310,7 +2781,7 @@ def erf(input):
|
|
|
2310
2781
|
:raise TypeError: If `input` is not a Tensor.
|
|
2311
2782
|
:raise TypeError:
|
|
2312
2783
|
* GPU/CPU: If dtype of `input` is not float16, float32, float64.
|
|
2313
|
-
* Ascend: If dtype of `input` is not float16, float32, float64, int64, bool.
|
|
2784
|
+
* Ascend: If dtype of `input` is not float16, float32, float64, int64, bool, bfloat16.
|
|
2314
2785
|
|
|
2315
2786
|
Supported Platforms:
|
|
2316
2787
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -2333,7 +2804,7 @@ def erfc(input):
|
|
|
2333
2804
|
|
|
2334
2805
|
.. math::
|
|
2335
2806
|
|
|
2336
|
-
erfc(x) = 1 - \frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
|
|
2807
|
+
\text{erfc}(x) = 1 - \frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
|
|
2337
2808
|
|
|
2338
2809
|
Args:
|
|
2339
2810
|
input (Tensor): The input tensor of the complementary error function, :math:`x` in the above formula.
|
|
@@ -2409,6 +2880,38 @@ def erfinv(input):
|
|
|
2409
2880
|
return erfinv_op(input)
|
|
2410
2881
|
|
|
2411
2882
|
|
|
2883
|
+
def exp2(input):
|
|
2884
|
+
r"""
|
|
2885
|
+
Calculates the base-2 exponent of the Tensor `input` element by element.
|
|
2886
|
+
|
|
2887
|
+
.. math::
|
|
2888
|
+
|
|
2889
|
+
out_i = 2^{input_i}
|
|
2890
|
+
|
|
2891
|
+
Args:
|
|
2892
|
+
input (Tensor): The input Tensor.
|
|
2893
|
+
|
|
2894
|
+
Returns:
|
|
2895
|
+
Tensor, which has the same shape as the `input`.
|
|
2896
|
+
|
|
2897
|
+
Raises:
|
|
2898
|
+
TypeError: If `input` is not a Tensor.
|
|
2899
|
+
|
|
2900
|
+
Supported Platforms:
|
|
2901
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
2902
|
+
|
|
2903
|
+
Examples:
|
|
2904
|
+
>>> import mindspore
|
|
2905
|
+
>>> import numpy as np
|
|
2906
|
+
>>> from mindspore import Tensor, ops
|
|
2907
|
+
>>> x = Tensor(np.array([0.0, 1.0, 2.0, 4.0]), mindspore.float32)
|
|
2908
|
+
>>> output = ops.exp2(x)
|
|
2909
|
+
>>> print(output)
|
|
2910
|
+
[ 1. 2. 4. 16.]
|
|
2911
|
+
"""
|
|
2912
|
+
return exp2_op(input)
|
|
2913
|
+
|
|
2914
|
+
|
|
2412
2915
|
def exp(input):
|
|
2413
2916
|
r"""
|
|
2414
2917
|
Returns exponential of a tensor element-wise.
|
|
@@ -2440,6 +2943,41 @@ def exp(input):
|
|
|
2440
2943
|
return exp_op(input)
|
|
2441
2944
|
|
|
2442
2945
|
|
|
2946
|
+
def expand_as(input, other):
|
|
2947
|
+
r"""
|
|
2948
|
+
Broadcasts the shape of the input tensor to match that of another input tensor. The dim of the
|
|
2949
|
+
input shape must be smaller than or equal to that of the other tensor, and the broadcast rules must be met.
|
|
2950
|
+
|
|
2951
|
+
Args:
|
|
2952
|
+
input (Tensor): The input Tensor.
|
|
2953
|
+
other (Tensor): The target Tensor. Its shape is the target shape to which the input tensor is broadcast.
|
|
2954
|
+
|
|
2955
|
+
Returns:
|
|
2956
|
+
Tensor, with the given shape of `other` and the same data type as `input`.
|
|
2957
|
+
|
|
2958
|
+
Raises:
|
|
2959
|
+
TypeError: If `other` is not a tensor.
|
|
2960
|
+
ValueError: If the shape of `other` and `input` are incompatible.
|
|
2961
|
+
|
|
2962
|
+
Supported Platforms:
|
|
2963
|
+
``Ascend``
|
|
2964
|
+
|
|
2965
|
+
Examples:
|
|
2966
|
+
>>> import numpy as np
|
|
2967
|
+
>>> from mindspore import Tensor
|
|
2968
|
+
>>> from mindspore.ops.function.array_func import expand_as
|
|
2969
|
+
>>> x = Tensor(np.array([[1, 2, 3]]).astype(np.float32))
|
|
2970
|
+
>>> other = Tensor(np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]).astype(np.float32))
|
|
2971
|
+
>>> output = expand_as(x, other)
|
|
2972
|
+
>>> print(output)
|
|
2973
|
+
[[1. 2. 3.]
|
|
2974
|
+
[1. 2. 3.]
|
|
2975
|
+
[1. 2. 3.]]
|
|
2977
|
+
"""
|
|
2978
|
+
return expand_as_op(input, other)
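A NumPy sketch of the broadcast described above (assuming expand_as follows standard broadcasting rules):

import numpy as np

x = np.array([[1., 2., 3.]], dtype=np.float32)  # shape (1, 3)
target = np.ones((3, 3), dtype=np.float32)
out = np.broadcast_to(x, target.shape)          # three rows of [1. 2. 3.]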
|
|
2979
|
+
|
|
2980
|
+
|
|
2443
2981
|
def expand_dims(input_x, axis):
|
|
2444
2982
|
r"""
|
|
2445
2983
|
Adds an additional dimension to `input_x` at the given axis, the dimension
|
|
@@ -3033,6 +3571,40 @@ def floor(input):
|
|
|
3033
3571
|
return floor_op(input)
|
|
3034
3572
|
|
|
3035
3573
|
|
|
3574
|
+
def frac_ext(input):
|
|
3575
|
+
r"""
|
|
3576
|
+
Calculates the fractional part of each element in the input.
|
|
3577
|
+
|
|
3578
|
+
.. math::
|
|
3579
|
+
out_i = input_i - \lfloor |input_i| \rfloor * sgn(input_i)
|
|
3580
|
+
|
|
3581
|
+
.. warning::
|
|
3582
|
+
This is an experimental API that is subject to change or deletion.
|
|
3583
|
+
|
|
3584
|
+
Args:
|
|
3585
|
+
input (Tensor): The input Tensor.
|
|
3586
|
+
|
|
3587
|
+
Returns:
|
|
3588
|
+
Tensor, has the same shape and type as input.
|
|
3589
|
+
|
|
3590
|
+
Raises:
|
|
3591
|
+
TypeError: If `input` is not a Tensor.
|
|
3592
|
+
|
|
3593
|
+
Supported Platforms:
|
|
3594
|
+
``Ascend``
|
|
3595
|
+
|
|
3596
|
+
Examples:
|
|
3597
|
+
>>> import mindspore
|
|
3598
|
+
>>> import numpy as np
|
|
3599
|
+
>>> from mindspore import Tensor, ops
|
|
3600
|
+
>>> x = Tensor([2, 4.2, -2.5], mindspore.float16)
|
|
3601
|
+
>>> output = ops.frac_ext(x)
|
|
3602
|
+
>>> print(output)
|
|
3603
|
+
[ 0. 0.1992 -0.5 ]
|
|
3604
|
+
"""
|
|
3605
|
+
return frac_op(input)
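A pure-Python reference check of the formula above on the example values:

import math

def frac_ref(v):
    # out = v - floor(|v|) * sgn(v)
    return v - math.floor(abs(v)) * math.copysign(1.0, v)

assert frac_ref(2.0) == 0.0
assert abs(frac_ref(4.2) - 0.2) < 1e-6
assert frac_ref(-2.5) == -0.5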
|
|
3606
|
+
|
|
3607
|
+
|
|
3036
3608
|
def gather_d(x, dim, index):
|
|
3037
3609
|
r"""
|
|
3038
3610
|
Gathers elements along an axis specified by dim.
|
|
@@ -3107,7 +3679,7 @@ def gather(input_params, input_indices, axis, batch_dims=0):
|
|
|
3107
3679
|
|
|
3108
3680
|
The following figure shows the calculation process of Gather commonly:
|
|
3109
3681
|
|
|
3110
|
-
.. image:: Gather.png
|
|
3682
|
+
.. image:: ../images/Gather.png
|
|
3111
3683
|
|
|
3112
3684
|
where params represents the input `input_params`, and indices represents the index to be sliced `input_indices`.
|
|
3113
3685
|
|
|
@@ -3188,8 +3760,7 @@ def gather(input_params, input_indices, axis, batch_dims=0):
|
|
|
3188
3760
|
def gcd(input, other):
|
|
3189
3761
|
r"""
|
|
3190
3762
|
Computes greatest common divisor of input tensors element-wise.
|
|
3191
|
-
The shape of two inputs should be broadcastable, and data
|
|
3192
|
-
one of: int32, int64.
|
|
3763
|
+
The shapes of the two inputs should be broadcastable, and their data types should be one of: int16 (supported on the Ascend backend; in Graph mode only when the graph compilation level is O0), int32, int64.
|
|
3193
3764
|
|
|
3194
3765
|
.. warning::
|
|
3195
3766
|
This is an experimental API that is subject to change or deletion.
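An element-wise NumPy reference for the broadcastable gcd described above (a sketch, not the operator itself):

import numpy as np

a = np.array([12, 18, 27], dtype=np.int64)
b = np.array([4, 6, 6], dtype=np.int64)
print(np.gcd(a, b))  # [4 6 3]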
|
|
@@ -3265,6 +3836,20 @@ def geqrf(input):
|
|
|
3265
3836
|
return geqrf_op(input)
|
|
3266
3837
|
|
|
3267
3838
|
|
|
3839
|
+
def gmm_backward(grad, x, weight, group_list=None):
|
|
3840
|
+
r"""
|
|
3841
|
+
|
|
3842
|
+
"""
|
|
3843
|
+
return gmm_backward_op(grad, x, weight, group_list)
|
|
3844
|
+
|
|
3845
|
+
|
|
3846
|
+
def gmm_v2_backward(grad, x, weight, group_list=None, group_list_type=0):
|
|
3847
|
+
r"""
|
|
3848
|
+
|
|
3849
|
+
"""
|
|
3850
|
+
return gmm_v2_backward_op(grad, x, weight, group_list, group_list_type)
|
|
3851
|
+
|
|
3852
|
+
|
|
3268
3853
|
def greater_equal(input, other):
|
|
3269
3854
|
r"""
|
|
3270
3855
|
Given two Tensors, compares them element-wise to check if each element in the first
|
|
@@ -3330,6 +3915,13 @@ def greater(input, other):
|
|
|
3330
3915
|
return greater_op(input, other)
|
|
3331
3916
|
|
|
3332
3917
|
|
|
3918
|
+
def hardtanh(input, min_val=-1, max_val=1):
|
|
3919
|
+
r"""
|
|
3920
|
+
|
|
3921
|
+
"""
|
|
3922
|
+
return hardtanh_op(input, min_val, max_val)
|
|
3923
|
+
|
|
3924
|
+
|
|
3333
3925
|
def hfft2(input, s=None, dim=(-2, -1), norm=None):
|
|
3334
3926
|
r"""
|
|
3335
3927
|
Calculates the two-dimensional discrete Fourier transform of a Hermitian symmetric `input`.
|
|
@@ -3564,9 +4156,9 @@ def hardshrink(input, lambd=0.5):
|
|
|
3564
4156
|
0, & \text{ otherwise }
|
|
3565
4157
|
\end{cases}
|
|
3566
4158
|
|
|
3567
|
-
|
|
4159
|
+
HardShrink Activation Function Graph:
|
|
3568
4160
|
|
|
3569
|
-
.. image:: ../images/
|
|
4161
|
+
.. image:: ../images/Hardshrink.png
|
|
3570
4162
|
:align: center
|
|
3571
4163
|
|
|
3572
4164
|
Args:
|
|
@@ -3608,16 +4200,16 @@ def hardsigmoid(input):
|
|
|
3608
4200
|
Hard Sigmoid is defined as:
|
|
3609
4201
|
|
|
3610
4202
|
.. math::
|
|
3611
|
-
\text{
|
|
4203
|
+
\text{HardSigmoid}(input) =
|
|
3612
4204
|
\begin{cases}
|
|
3613
4205
|
0, & \text{ if } input \leq -3, \\
|
|
3614
4206
|
1, & \text{ if } input \geq +3, \\
|
|
3615
4207
|
input/6 + 1/2, & \text{ otherwise }
|
|
3616
4208
|
\end{cases}
|
|
3617
4209
|
|
|
3618
|
-
|
|
4210
|
+
HardSigmoid Activation Function Graph:
|
|
3619
4211
|
|
|
3620
|
-
.. image:: ../images/
|
|
4212
|
+
.. image:: ../images/Hardsigmoid.png
|
|
3621
4213
|
:align: center
|
|
3622
4214
|
|
|
3623
4215
|
Args:
|
|
@@ -3652,16 +4244,16 @@ def hardswish(input):
|
|
|
3652
4244
|
Hard swish is defined as:
|
|
3653
4245
|
|
|
3654
4246
|
.. math::
|
|
3655
|
-
\text{
|
|
4247
|
+
\text{HardSwish}(input) =
|
|
3656
4248
|
\begin{cases}
|
|
3657
4249
|
0, & \text{ if } input \leq -3, \\
|
|
3658
4250
|
input, & \text{ if } input \geq +3, \\
|
|
3659
4251
|
input*(input + 3)/6, & \text{ otherwise }
|
|
3660
4252
|
\end{cases}
|
|
3661
4253
|
|
|
3662
|
-
|
|
4254
|
+
HardSwish Activation Function Graph:
|
|
3663
4255
|
|
|
3664
|
-
.. image:: ../images/
|
|
4256
|
+
.. image:: ../images/Hardswish.png
|
|
3665
4257
|
:align: center
|
|
3666
4258
|
|
|
3667
4259
|
Args:
|
|
@@ -4178,6 +4770,70 @@ def unfold_ext(input, kernel_size, dilation=1, padding=0, stride=1):
|
|
|
4178
4770
|
return im2col_ext_op(input, kernel_size, dilation, padding, stride)
|
|
4179
4771
|
|
|
4180
4772
|
|
|
4773
|
+
def index_fill_scalar(input, dim, index, value):
|
|
4774
|
+
r"""
|
|
4775
|
+
|
|
4776
|
+
"""
|
|
4777
|
+
return index_fill_scalar_op(input, dim, index, value)
|
|
4778
|
+
|
|
4779
|
+
|
|
4780
|
+
def index_fill_tensor(input, dim, index, value):
|
|
4781
|
+
r"""
|
|
4782
|
+
|
|
4783
|
+
"""
|
|
4784
|
+
return index_fill_tensor_op(input, dim, index, value)
|
|
4785
|
+
|
|
4786
|
+
|
|
4787
|
+
def index(input, indices):
|
|
4788
|
+
r"""
|
|
4789
|
+
Indexes the Tensor using the given `indices`.
|
|
4790
|
+
|
|
4791
|
+
.. warning::
|
|
4792
|
+
This is an experimental API that is subject to change.
|
|
4793
|
+
|
|
4794
|
+
Args:
|
|
4795
|
+
input (Tensor): The input Tensor.
|
|
4796
|
+
indices (tuple[Tensor], list[Tensor]): tensors of type bool, uint8, int32 or int64, used to index into `input`.
|
|
4797
|
+
The size of `indices` should be <= the rank of `input`, and the tensors in `indices` should be broadcastable.
|
|
4798
|
+
When a tensor's type is bool or uint8, its shape must match the input dimensions in turn. For example: if the first tensor of `indices` is of type bool with
|
|
4799
|
+
shape (x, y) and `input` has shape (a, b, c), then (x, y) needs to match (a, b).
|
|
4800
|
+
|
|
4801
|
+
|
|
4802
|
+
Returns:
|
|
4803
|
+
Tensor, has the same dtype as input Tensor.
|
|
4804
|
+
|
|
4805
|
+
Raises:
|
|
4806
|
+
TypeError: If `input` is not a Tensor.
|
|
4807
|
+
TypeError: If `indices` is not a tuple[Tensor] or list[Tensor].
|
|
4808
|
+
TypeError: If the dtype of tensors in `indices` is not bool, uint8, int32 or int64.
|
|
4809
|
+
ValueError: If the tensors in `indices` are not broadcastable.
|
|
4810
|
+
ValueError: If size(`indices`) > rank(`input`).
|
|
4811
|
+
ValueError: If the rank of `input` is 0.
|
|
4812
|
+
|
|
4813
|
+
Supported Platforms:
|
|
4814
|
+
``Ascend``
|
|
4815
|
+
|
|
4816
|
+
Examples:
|
|
4817
|
+
>>> import numpy as np
|
|
4818
|
+
>>> import mindspore
|
|
4819
|
+
>>> from mindspore import Tensor, ops
|
|
4820
|
+
>>> input1 = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), mindspore.int32)
|
|
4821
|
+
>>> indices1 = Tensor(np.array([0, 1, 1]), mindspore.int32)
|
|
4822
|
+
>>> indices2 = Tensor(np.array([1, 2, 1]), mindspore.int32)
|
|
4823
|
+
>>> output = ops.auto_generate.index(input1, [indices1, indices2])
|
|
4824
|
+
>>> print(output)
|
|
4825
|
+
[2 6 5]
|
|
4826
|
+
>>> input2 = Tensor(np.arange(4 * 3 * 3).reshape(4, 3, 3), mindspore.int32)
|
|
4827
|
+
>>> indices3 = Tensor(np.array([1, 0]), mindspore.int32)
|
|
4828
|
+
>>> indices4 = Tensor(np.array([1, 1, 0]), mindspore.bool_)
|
|
4829
|
+
>>> output2 = ops.auto_generate.index(input2, [indices3, indices4])
|
|
4830
|
+
>>> print(output2)
|
|
4831
|
+
[[ 9 10 11]
|
|
4832
|
+
[ 3 4 5]]
|
|
4833
|
+
"""
|
|
4834
|
+
return index_op(input, indices)
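The first example above is ordinary advanced indexing; a NumPy sketch reproduces it:

import numpy as np

inp = np.array([[1, 2, 3], [4, 5, 6]])
rows = np.array([0, 1, 1])
cols = np.array([1, 2, 1])
print(inp[rows, cols])  # [2 6 5], matching index(input1, [indices1, indices2])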
|
|
4835
|
+
|
|
4836
|
+
|
|
4181
4837
|
def index_select_ext(input, dim, index):
|
|
4182
4838
|
r"""
|
|
4183
4839
|
Generates a new Tensor that accesses the values of `input` along the specified `dim` dimension
|
|
@@ -4231,18 +4887,241 @@ def inplace_add_ext(input, other, alpha=1):
|
|
|
4231
4887
|
return inplace_add_ext_op(input, other, alpha)
|
|
4232
4888
|
|
|
4233
4889
|
|
|
4234
|
-
def
|
|
4890
|
+
def inplace_adds_ext(input, other, alpha=1):
|
|
4235
4891
|
r"""
|
|
4236
4892
|
|
|
4237
4893
|
"""
|
|
4238
|
-
return
|
|
4894
|
+
return inplace_adds_ext_op(input, other, alpha)
|
|
4239
4895
|
|
|
4240
4896
|
|
|
4241
|
-
def
|
|
4897
|
+
def inplace_clamp_scalar(input, min=None, max=None):
|
|
4242
4898
|
r"""
|
|
4243
4899
|
|
|
4244
4900
|
"""
|
|
4245
|
-
return
|
|
4901
|
+
return inplace_clamp_scalar_op(input, min, max)
|
|
4902
|
+
|
|
4903
|
+
|
|
4904
|
+
def inplace_clamp_tensor(input, min=None, max=None):
|
|
4905
|
+
r"""
|
|
4906
|
+
|
|
4907
|
+
"""
|
|
4908
|
+
return inplace_clamp_tensor_op(input, min, max)
|
|
4909
|
+
|
|
4910
|
+
|
|
4911
|
+
def inplace_copy(variable, value):
|
|
4912
|
+
r"""
|
|
4913
|
+
|
|
4914
|
+
"""
|
|
4915
|
+
return inplace_copy_op(variable, value)
|
|
4916
|
+
|
|
4917
|
+
|
|
4918
|
+
def div_tensor_(input, other):
|
|
4919
|
+
r"""
|
|
4920
|
+
|
|
4921
|
+
"""
|
|
4922
|
+
return inplace_div_op(input, other)
|
|
4923
|
+
|
|
4924
|
+
|
|
4925
|
+
def divmod_tensor_(input, other, rounding_mode=None):
|
|
4926
|
+
r"""
|
|
4927
|
+
|
|
4928
|
+
"""
|
|
4929
|
+
return inplace_divmod_op(input, other, rounding_mode)
|
|
4930
|
+
|
|
4931
|
+
|
|
4932
|
+
def divmod_scalar_(input, other, rounding_mode=None):
|
|
4933
|
+
r"""
|
|
4934
|
+
|
|
4935
|
+
"""
|
|
4936
|
+
return inplace_divmods_op(input, other, rounding_mode)
|
|
4937
|
+
|
|
4938
|
+
|
|
4939
|
+
def div_scalar_(input, other):
|
|
4940
|
+
r"""
|
|
4941
|
+
|
|
4942
|
+
"""
|
|
4943
|
+
return inplace_divs_op(input, other)
|
|
4944
|
+
|
|
4945
|
+
|
|
4946
|
+
def inplace_fill_scalar(input, value):
|
|
4947
|
+
r"""
|
|
4948
|
+
|
|
4949
|
+
"""
|
|
4950
|
+
return inplace_fill_scalar_op(input, value)
|
|
4951
|
+
|
|
4952
|
+
|
|
4953
|
+
def inplace_fill_tensor(input, value):
|
|
4954
|
+
r"""
|
|
4955
|
+
|
|
4956
|
+
"""
|
|
4957
|
+
return inplace_fill_tensor_op(input, value)
|
|
4958
|
+
|
|
4959
|
+
|
|
4960
|
+
def floor_(input):
|
|
4961
|
+
r"""
|
|
4962
|
+
|
|
4963
|
+
"""
|
|
4964
|
+
return inplace_floor_op(input)
|
|
4965
|
+
|
|
4966
|
+
|
|
4967
|
+
def inplace_hardtanh(input, min_val=-1, max_val=1):
|
|
4968
|
+
r"""
|
|
4969
|
+
Updates the `input` tensor in place by applying the hardtanh activation function to it. The activation
|
|
4970
|
+
function is defined as:
|
|
4971
|
+
|
|
4972
|
+
.. math::
|
|
4973
|
+
\text{hardtanh}(input) = \begin{cases}
|
|
4974
|
+
max\_val, & \text{ if } input > max\_val \\
|
|
4975
|
+
min\_val, & \text{ if } input < min\_val \\
|
|
4976
|
+
input, & \text{ otherwise. }
|
|
4977
|
+
\end{cases}
|
|
4978
|
+
|
|
4979
|
+
Linear region range :math:`[min\_val, max\_val]` can be adjusted using `min_val` and `max_val`.
|
|
4980
|
+
|
|
4981
|
+
Hardtanh Activation Function Graph:
|
|
4982
|
+
|
|
4983
|
+
.. image:: ../images/Hardtanh.png
|
|
4984
|
+
:align: center
|
|
4985
|
+
|
|
4986
|
+
.. warning::
|
|
4987
|
+
This is an experimental API that is subject to change.
|
|
4988
|
+
|
|
4989
|
+
Args:
|
|
4990
|
+
input (Tensor): Input Tensor.
|
|
4991
|
+
min_val (Union[bool, int, float], optional): Minimum value of the linear region range. Default: ``-1.0`` .
|
|
4992
|
+
max_val (Union[bool, int, float], optional): Maximum value of the linear region range. Default: ``1.0`` .
|
|
4993
|
+
|
|
4994
|
+
Returns:
|
|
4995
|
+
Tensor.
|
|
4996
|
+
|
|
4997
|
+
Raises:
|
|
4998
|
+
TypeError: If `input` is not a Tensor.
|
|
4999
|
+
TypeError: If dtype of `input` is not one of: int8, int16, int32, int64, uint8, float16, float32, bfloat16.
|
|
5000
|
+
TypeError: If dtype of `min_val` is neither float nor int.
|
|
5001
|
+
TypeError: If dtype of `max_val` is neither float nor int.
|
|
5002
|
+
|
|
5003
|
+
Supported Platforms:
|
|
5004
|
+
``Ascend``
|
|
5005
|
+
|
|
5006
|
+
Examples:
|
|
5007
|
+
>>> import mindspore
|
|
5008
|
+
>>> from mindspore import Tensor, mint
|
|
5009
|
+
>>> x = Tensor([-1, -2, 0, 2, 1], mindspore.float16)
|
|
5010
|
+
>>> mint.hardtanh_(x, min_val=-1.0, max_val=1.0)
|
|
5011
|
+
>>> print(x)
|
|
5012
|
+
[-1. -1. 0. 1. 1.]
|
|
5013
|
+
"""
|
|
5014
|
+
return inplace_hardtanh_op(input, min_val, max_val)
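The piecewise definition above is an in-place clamp; a NumPy sketch of the same effect on the example input:

import numpy as np

x = np.array([-1., -2., 0., 2., 1.], dtype=np.float16)
np.clip(x, -1.0, 1.0, out=x)  # x -> [-1. -1.  0.  1.  1.]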
|
|
5015
|
+
|
|
5016
|
+
|
|
5017
|
+
def index_put_(input, indices, values, accumulate=False):
|
|
5018
|
+
r"""
|
|
5019
|
+
Based on the indices in `indices`, replace the corresponding elements in Tensor `self` with the values
|
|
5020
|
+
in `values`. The expression `Tensor.index_put_(indices, values)` is equivalent to `tensor[indices] = values`.
|
|
5021
|
+
Updates and returns `self`.
|
|
5022
|
+
|
|
5023
|
+
.. warning::
|
|
5024
|
+
The behavior is unpredictable in the following scenario:
|
|
5025
|
+
|
|
5026
|
+
- If `accumulate` is `False` and `indices` contains duplicate elements.
|
|
5027
|
+
|
|
5028
|
+
Args:
|
|
5029
|
+
indices (tuple[Tensor], list[Tensor]): the indices of type is bool, uint8, int32 or int64,
|
|
5030
|
+
used to index into the `self`. The size of indices should <= the rank of `self`
|
|
5031
|
+
and the tensors in indices should be broadcastable.
|
|
5032
|
+
values (Tensor): Tensor with the same dtype as `self`. If its size is 1, it will be broadcast.
|
|
5033
|
+
accumulate (bool, optional): If `accumulate` is `True`, the elements in `values` will be added to `self`,
|
|
5034
|
+
otherwise the elements in `values` will replace the corresponding elements in the `self`.
|
|
5035
|
+
Default: ``False``.
|
|
5036
|
+
|
|
5037
|
+
Returns:
|
|
5038
|
+
Tensor `self`.
|
|
5039
|
+
|
|
5040
|
+
Raises:
|
|
5041
|
+
TypeError: If the dtype of `self` is not equal to the dtype of `values`.
|
|
5042
|
+
TypeError: If `indices` is not a tuple[Tensor] or list[Tensor].
|
|
5043
|
+
TypeError: If the dtypes of tensors in `indices` are not bool, uint8, int32 or int64.
|
|
5044
|
+
TypeError: If the dtypes of tensors in `indices` are inconsistent.
|
|
5045
|
+
TypeError: If the dtype of `accumulate` is not bool.
|
|
5046
|
+
ValueError: If size(`values`) is neither 1 nor the max size of the tensors in `indices` when
|
|
5047
|
+
rank(`self`) == size(`indices`).
|
|
5048
|
+
ValueError: If size(`values`) is neither 1 nor `self`.shape[-1] when rank(`self`) > size(`indices`).
|
|
5049
|
+
ValueError: If the tensors in `indices` are not broadcastable.
|
|
5050
|
+
ValueError: If size(`indices`) > rank(`self`).
|
|
5051
|
+
|
|
5052
|
+
Supported Platforms:
|
|
5053
|
+
``Ascend``
|
|
5054
|
+
|
|
5055
|
+
Examples:
|
|
5056
|
+
>>> import numpy as np
|
|
5057
|
+
>>> import mindspore
|
|
5058
|
+
>>> from mindspore import Tensor
|
|
5059
|
+
>>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
|
|
5060
|
+
>>> values = Tensor(np.array([3]).astype(np.int32))
|
|
5061
|
+
>>> indices = [Tensor(np.array([0, 1, 1]).astype(np.int32)), Tensor(np.array([1, 2, 1]).astype(np.int32))]
|
|
5062
|
+
>>> accumulate = True
|
|
5063
|
+
>>> output = x.index_put_(indices, values, accumulate)
|
|
5064
|
+
>>> print(output)
|
|
5065
|
+
[[1 5 3]
|
|
5066
|
+
[4 8 9]]
|
|
5067
|
+
"""
|
|
5068
|
+
return inplace_index_put_op(input, indices, values, accumulate)
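For the accumulate=True example above, duplicate index pairs each receive the added value; a NumPy sketch with np.add.at reproduces the result:

import numpy as np

x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
rows = np.array([0, 1, 1])
cols = np.array([1, 2, 1])
np.add.at(x, (rows, cols), 3)  # x -> [[1 5 3]
                               #       [4 8 9]]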
|
|
5069
|
+
|
|
5070
|
+
|
|
5071
|
+
def masked_fill_scalar_(input, mask, value):
|
|
5072
|
+
r"""
|
|
5073
|
+
|
|
5074
|
+
"""
|
|
5075
|
+
return inplace_masked_fill_scalar_op(input, mask, value)
|
|
5076
|
+
|
|
5077
|
+
|
|
5078
|
+
def masked_fill_tensor_(input, mask, value):
|
|
5079
|
+
r"""
|
|
5080
|
+
|
|
5081
|
+
"""
|
|
5082
|
+
return inplace_masked_fill_tensor_op(input, mask, value)
|
|
5083
|
+
|
|
5084
|
+
|
|
5085
|
+
def inplace_mul(input, other):
|
|
5086
|
+
r"""
|
|
5087
|
+
|
|
5088
|
+
"""
|
|
5089
|
+
return inplace_mul_op(input, other)
|
|
5090
|
+
|
|
5091
|
+
|
|
5092
|
+
def inplace_muls(input, other):
|
|
5093
|
+
r"""
|
|
5094
|
+
|
|
5095
|
+
"""
|
|
5096
|
+
return inplace_muls_op(input, other)
|
|
5097
|
+
|
|
5098
|
+
|
|
5099
|
+
def inplace_scatter_add(input, dim, index, src):
|
|
5100
|
+
r"""
|
|
5101
|
+
|
|
5102
|
+
"""
|
|
5103
|
+
return inplace_scatter_add_op(input, dim, index, src)
|
|
5104
|
+
|
|
5105
|
+
|
|
5106
|
+
def sub_tensor_(input, other, alpha=1):
|
|
5107
|
+
r"""
|
|
5108
|
+
|
|
5109
|
+
"""
|
|
5110
|
+
return inplace_sub_ext_op(input, other, alpha)
|
|
5111
|
+
|
|
5112
|
+
|
|
5113
|
+
def sub_scalar_(input, other, alpha=1):
|
|
5114
|
+
r"""
|
|
5115
|
+
|
|
5116
|
+
"""
|
|
5117
|
+
return inplace_sub_scalar_op(input, other, alpha)
|
|
5118
|
+
|
|
5119
|
+
|
|
5120
|
+
def tanh_(input):
|
|
5121
|
+
r"""
|
|
5122
|
+
|
|
5123
|
+
"""
|
|
5124
|
+
return inplace_tanh_op(input)
|
|
4246
5125
|
|
|
4247
5126
|
|
|
4248
5127
|
def zero_(input):
|
|
@@ -4418,7 +5297,7 @@ def irfftn(input, s=None, dim=None, norm=None):
|
|
|
4418
5297
|
return irfftn_op(input, s, dim, norm)
|
|
4419
5298
|
|
|
4420
5299
|
|
|
4421
|
-
def isfinite(
|
|
5300
|
+
def isfinite(input):
|
|
4422
5301
|
r"""
|
|
4423
5302
|
Determine which elements are finite for each position. If elements are not ``NaN`` , ``-INF`` , ``INF``,
|
|
4424
5303
|
they are finite.
|
|
@@ -4431,13 +5310,13 @@ def isfinite(x):
|
|
|
4431
5310
|
\end{cases}
|
|
4432
5311
|
|
|
4433
5312
|
Args:
|
|
4434
|
-
|
|
5313
|
+
input (Tensor): The input tensor.
|
|
4435
5314
|
|
|
4436
5315
|
Returns:
|
|
4437
5316
|
Tensor, has the same shape of input, and the dtype is bool.
|
|
4438
5317
|
|
|
4439
5318
|
Raises:
|
|
4440
|
-
TypeError: If
|
|
5319
|
+
TypeError: If `input` is not a Tensor.
|
|
4441
5320
|
|
|
4442
5321
|
Supported Platforms:
|
|
4443
5322
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -4455,7 +5334,83 @@ def isfinite(x):
|
|
|
4455
5334
|
>>> print(output)
|
|
4456
5335
|
True
|
|
4457
5336
|
"""
|
|
4458
|
-
return isfinite_op(
|
|
5337
|
+
return isfinite_op(input)
|
|
5338
|
+
|
|
5339
|
+
|
|
5340
|
+
def isinf(input):
|
|
5341
|
+
r"""
|
|
5342
|
+
Determines which elements are inf or -inf for each position.
|
|
5343
|
+
|
|
5344
|
+
.. math::
|
|
5345
|
+
|
|
5346
|
+
out_i = \begin{cases}
|
|
5347
|
+
& \ True,\ \text{ if } x_{i} = \text{Inf} \\
|
|
5348
|
+
& \ False,\ \text{ if } x_{i} \ne \text{Inf}
|
|
5349
|
+
\end{cases}
|
|
5350
|
+
|
|
5351
|
+
where Inf means value is infinite.
|
|
5352
|
+
|
|
5353
|
+
.. warning::
|
|
5354
|
+
- This is an experimental API that is subject to change.
|
|
5355
|
+
- For Ascend, it is only supported on platforms above Atlas A2.
|
|
5356
|
+
|
|
5357
|
+
Args:
|
|
5358
|
+
input (Tensor): The input tensor.
|
|
5359
|
+
|
|
5360
|
+
Returns:
|
|
5361
|
+
Tensor, has the same shape of input, and the dtype is bool.
|
|
5362
|
+
|
|
5363
|
+
Raises:
|
|
5364
|
+
TypeError: If `input` is not a Tensor.
|
|
5365
|
+
|
|
5366
|
+
Supported Platforms:
|
|
5367
|
+
``Ascend`` ``CPU`` ``GPU``
|
|
5368
|
+
|
|
5369
|
+
Examples:
|
|
5370
|
+
>>> import mindspore
|
|
5371
|
+
>>> import numpy as np
|
|
5372
|
+
>>> from mindspore import Tensor, ops
|
|
5373
|
+
>>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
|
|
5374
|
+
>>> output = ops.isinf(x)
|
|
5375
|
+
>>> print(output)
|
|
5376
|
+
[False False True]
|
|
5377
|
+
>>> x = Tensor(2.1, mindspore.float64)
|
|
5378
|
+
>>> output = ops.isinf(x)
|
|
5379
|
+
>>> print(output)
|
|
5380
|
+
False
|
|
5381
|
+
"""
|
|
5382
|
+
return isinf_op(input)
|
|
5383
|
+
|
|
5384
|
+
|
|
5385
|
+
def isneginf_ext(input):
|
|
5386
|
+
r"""
|
|
5387
|
+
Determines which elements are -inf for each position.
|
|
5388
|
+
|
|
5389
|
+
.. warning::
|
|
5390
|
+
- This is an experimental API that is subject to change.
|
|
5391
|
+
- This API can be used only on the Atlas A2 training series.
|
|
5392
|
+
|
|
5393
|
+
Args:
|
|
5394
|
+
input (Tensor): Input Tensor.
|
|
5395
|
+
|
|
5396
|
+
Returns:
|
|
5397
|
+
Tensor with the same shape as the input, where elements are `True` if the corresponding element in the `input` is negative infinity, and `False` otherwise.
|
|
5398
|
+
|
|
5399
|
+
Raises:
|
|
5400
|
+
TypeError: If the input is not a tensor.
|
|
5401
|
+
|
|
5402
|
+
Supported Platforms:
|
|
5403
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
5404
|
+
|
|
5405
|
+
Examples:
|
|
5406
|
+
>>> from mindspore import ops, Tensor
|
|
5407
|
+
>>> from mindspore import dtype as mstype
|
|
5408
|
+
>>> output = ops.isneginf(Tensor([[-float("inf"), float("inf")], [1, -float("inf")]], mstype.float32))
|
|
5409
|
+
>>> print(output)
|
|
5410
|
+
[[ True False]
|
|
5411
|
+
[False True]]
|
|
5412
|
+
"""
|
|
5413
|
+
return isneginf_op(input)
|
|
4459
5414
|
|
|
4460
5415
|
|
|
4461
5416
|
def l1_loss_ext(input, target, reduction='mean'):
|
|
@@ -4539,7 +5494,7 @@ def leaky_relu_ext(input, negative_slope=0.01):
|
|
|
4539
5494
|
|
|
4540
5495
|
Args:
|
|
4541
5496
|
input (Tensor): The input of leaky_relu is a Tensor of any dimension.
|
|
4542
|
-
negative_slope (Union[int, float]): Slope of the activation function when the element of `input` is less than 0.
|
|
5497
|
+
negative_slope (Union[int, float], optional): Slope of the activation function when the element of `input` is less than 0.
|
|
4543
5498
|
Default: ``0.01`` .
|
|
4544
5499
|
|
|
4545
5500
|
Returns:
|
|
@@ -4564,6 +5519,62 @@ def leaky_relu_ext(input, negative_slope=0.01):
|
|
|
4564
5519
|
return leaky_relu_ext_op(input, negative_slope)
|
|
4565
5520
|
|
|
4566
5521
|
|
|
5522
|
+
def lerp(input, end, weight):
|
|
5523
|
+
r"""
|
|
5524
|
+
Does a linear interpolation of two tensors `input` and `end` based on a float or tensor `weight`.
|
|
5525
|
+
|
|
5526
|
+
If `weight` is a tensor, the shapes of the three inputs need to be broadcastable;
|
|
5527
|
+
If `weight` is a float, the shapes of `input` and `end` need to be broadcastable.
|
|
5528
|
+
If `weight` is a float and platform is Ascend, the types of `input` and `end` need to be float32.
|
|
5529
|
+
|
|
5530
|
+
.. warning::
|
|
5531
|
+
This is an experimental API that is subject to change or deletion.
|
|
5532
|
+
|
|
5533
|
+
.. math::
|
|
5534
|
+
output_{i} = input_{i} + weight_{i} * (end_{i} - input_{i})
|
|
5535
|
+
|
|
5536
|
+
Args:
|
|
5537
|
+
input (Tensor): The tensor with the starting points. Data type must be float16 or float32.
|
|
5538
|
+
end (Tensor): The tensor with the ending points. Data type must be the same as `input`.
|
|
5539
|
+
weight (Union[float, Tensor]): The weight for the interpolation formula. Must be a float scalar
|
|
5540
|
+
or a tensor with float16 or float32 data type.
|
|
5541
|
+
|
|
5542
|
+
Returns:
|
|
5543
|
+
Tensor, has the same type and shape as input `input`.
|
|
5544
|
+
|
|
5545
|
+
Raises:
|
|
5546
|
+
TypeError: If `input` or `end` is not a tensor.
|
|
5547
|
+
TypeError: If `weight` is neither scalar(float) nor tensor.
|
|
5548
|
+
TypeError: If dtype of `input` or `end` is neither float16 nor float32.
|
|
5549
|
+
TypeError: If dtype of `weight` is neither float16 nor float32 when it is a tensor.
|
|
5550
|
+
TypeError: If `input` and `end` have different data types.
|
|
5551
|
+
TypeError: If `input`, `end` and `weight` have different data types when `weight` is a tensor.
|
|
5552
|
+
ValueError: If `end` could not be broadcast to a tensor with shape of `input`.
|
|
5553
|
+
ValueError: If `weight` could not be broadcast to tensors with shapes of `input` and `end` when it is a tensor.
|
|
5554
|
+
|
|
5555
|
+
Supported Platforms:
|
|
5556
|
+
``Ascend``
|
|
5557
|
+
|
|
5558
|
+
Examples:
|
|
5559
|
+
>>> import mindspore
|
|
5560
|
+
>>> import numpy as np
|
|
5561
|
+
>>> from mindspore import Tensor, ops
|
|
5562
|
+
>>> start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
|
|
5563
|
+
>>> end = Tensor(np.array([10., 10., 10., 10.]), mindspore.float32)
|
|
5564
|
+
>>> output = ops.lerp(start, end, 0.5)
|
|
5565
|
+
>>> print(output)
|
|
5566
|
+
[5.5 6. 6.5 7. ]
|
|
5567
|
+
"""
|
|
5568
|
+
return lerp_op(input, end, weight)
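A worked NumPy check of the interpolation formula above on the example inputs:

import numpy as np

start = np.array([1., 2., 3., 4.], dtype=np.float32)
end = np.array([10., 10., 10., 10.], dtype=np.float32)
print(start + 0.5 * (end - start))  # [5.5 6.  6.5 7. ]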
|
|
5569
|
+
|
|
5570
|
+
|
|
5571
|
+
def lerp_scalar(input, end, weight):
|
|
5572
|
+
r"""
|
|
5573
|
+
|
|
5574
|
+
"""
|
|
5575
|
+
return lerp_scalar_op(input, end, weight)
|
|
5576
|
+
|
|
5577
|
+
|
|
4567
5578
|
def less_equal(input, other):
|
|
4568
5579
|
r"""
|
|
4569
5580
|
Computes the boolean value of :math:`input <= other` element-wise.
|
|
@@ -4648,6 +5659,45 @@ def less(input, other):
|
|
|
4648
5659
|
return less_op(input, other)
|
|
4649
5660
|
|
|
4650
5661
|
|
|
5662
|
+
def log10_ext(input):
|
|
5663
|
+
r"""
|
|
5664
|
+
Returns the logarithm to the base 10 of a tensor element-wise.
|
|
5665
|
+
|
|
5666
|
+
.. math::
|
|
5667
|
+
y_i = \log_{10}(x_i)
|
|
5668
|
+
|
|
5669
|
+
.. warning::
|
|
5670
|
+
- This is an experimental API that is subject to change or deletion.
|
|
5671
|
+
- If the input value of operator Log10 is within the range (0, 0.01] or [0.95, 1.05], the output accuracy
|
|
5672
|
+
may be affected.
|
|
5673
|
+
|
|
5674
|
+
Args:
|
|
5675
|
+
input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
|
|
5676
|
+
|
|
5677
|
+
Returns:
|
|
5678
|
+
Tensor, has the same shape as the `input`, and the dtype changes according to the `input.dtype`.
|
|
5679
|
+
|
|
5680
|
+
- if `input.dtype` is in [float16, float32, float64, bfloat16], the output dtype is the same as the `input.dtype`.
|
|
5681
|
+
- if `input.dtype` is integer or boolean type, the output dtype is float32.
|
|
5682
|
+
|
|
5683
|
+
Raises:
|
|
5684
|
+
TypeError: If `input` is not a Tensor.
|
|
5685
|
+
|
|
5686
|
+
Supported Platforms:
|
|
5687
|
+
``Ascend``
|
|
5688
|
+
|
|
5689
|
+
Examples:
|
|
5690
|
+
>>> import mindspore
|
|
5691
|
+
>>> import numpy as np
|
|
5692
|
+
>>> from mindspore import Tensor, mint
|
|
5693
|
+
>>> x = Tensor(np.array([3.0, 5.0, 7.0]), mindspore.float32)
|
|
5694
|
+
>>> output = mint.log10(x)
|
|
5695
|
+
>>> print(output)
|
|
5696
|
+
[0.47712136 0.69897 0.845098 ]
|
|
5697
|
+
"""
|
|
5698
|
+
return log10_op(input)
|
|
5699
|
+
|
|
5700
|
+
|
|
4651
5701
|
def log1p(input):
|
|
4652
5702
|
r"""
|
|
4653
5703
|
Returns the natural logarithm of one plus the input tensor element-wise.
|
|
@@ -4679,6 +5729,45 @@ def log1p(input):
|
|
|
4679
5729
|
return log1p_op(input)
|
|
4680
5730
|
|
|
4681
5731
|
|
|
5732
|
+
def log2_ext(input):
|
|
5733
|
+
r"""
|
|
5734
|
+
Returns the logarithm to the base 2 of a tensor element-wise.
|
|
5735
|
+
|
|
5736
|
+
.. math::
|
|
5737
|
+
y_i = \log_2(x_i)
|
|
5738
|
+
|
|
5739
|
+
.. warning::
|
|
5740
|
+
- This is an experimental API that is subject to change or deletion.
|
|
5741
|
+
- If the input value of operator Log2 is within the range (0, 0.01] or [0.95, 1.05], the output accuracy
|
|
5742
|
+
may be affected.
|
|
5743
|
+
|
|
5744
|
+
Args:
|
|
5745
|
+
input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
|
|
5746
|
+
|
|
5747
|
+
Returns:
|
|
5748
|
+
Tensor, has the same shape as the `input`, and the dtype changes according to the `input.dtype`.
|
|
5749
|
+
|
|
5750
|
+
- if `input.dtype` is in [float16, float32, float64, bfloat16], the output dtype is the same as the `input.dtype`.
|
|
5751
|
+
- if `input.dtype` is integer or boolean type, the output dtype is float32.
|
|
5752
|
+
|
|
5753
|
+
Raises:
|
|
5754
|
+
TypeError: If `input` is not a Tensor.
|
|
5755
|
+
|
|
5756
|
+
Supported Platforms:
|
|
5757
|
+
``Ascend``
|
|
5758
|
+
|
|
5759
|
+
Examples:
|
|
5760
|
+
>>> import mindspore
|
|
5761
|
+
>>> import numpy as np
|
|
5762
|
+
>>> from mindspore import Tensor, mint
|
|
5763
|
+
>>> x = Tensor(np.array([3.0, 5.0, 7.0]), mindspore.float32)
|
|
5764
|
+
>>> output = mint.log2(x)
|
|
5765
|
+
>>> print(output)
|
|
5766
|
+
[1.5849625 2.321928 2.807355 ]
|
|
5767
|
+
"""
|
|
5768
|
+
return log2_op(input)
|
|
5769
|
+
|
|
5770
|
+
|
|
4682
5771
|
def log(input):
|
|
4683
5772
|
r"""
|
|
4684
5773
|
Returns the natural logarithm of a tensor element-wise.
|
|
@@ -4837,11 +5926,58 @@ def logaddexp_ext(input, other):
|
|
|
4837
5926
|
return logaddexp_op(input, other)
|
|
4838
5927
|
|
|
4839
5928
|
|
|
4840
|
-
def logsigmoid_grad(dy, input, buffer):
|
|
4841
|
-
r"""
|
|
4842
|
-
|
|
5929
|
+
def logsigmoid_grad(dy, input, buffer):
|
|
5930
|
+
r"""
|
|
5931
|
+
|
|
5932
|
+
"""
|
|
5933
|
+
return logsigmoid_grad_op(dy, input, buffer)
|
|
5934
|
+
|
|
5935
|
+
|
|
5936
|
+
def logsumexp_ext(input, dim, keepdim=False):
|
|
5937
|
+
r"""
|
|
5938
|
+
Computes the logarithm of the sum of exponentiations of all elements along the specified `dim` dimension of `input` (with numerical stabilization), and retains the dimension based on the `keepdim` parameter.
|
|
5939
|
+
|
|
5940
|
+
.. math::
|
|
5941
|
+
|
|
5942
|
+
logsumexp(input) = \log(\sum(e^{input-input_{max}})) + input_{max}
|
|
5943
|
+
|
|
5944
|
+
.. warning::
|
|
5945
|
+
This is an experimental API that is subject to change or deletion.
|
|
5946
|
+
|
|
5947
|
+
Args:
|
|
5948
|
+
input (Tensor): Input Tensor.
|
|
5949
|
+
dim (Union[int, tuple(int), list(int)], optional): The dimension to be reduced (the value should be within `[0, len(input.shape) - 1]`). When `dim` is `()`, all dimensions are reduced.
|
|
5950
|
+
keepdim (bool, optional): Whether the output tensor retains the dimension `dim`, default: `False`.
|
|
5951
|
+
|
|
5952
|
+
Returns:
|
|
5953
|
+
Tensor, the dtype changes according to the `input.dtype`, and the shape changes according to the values of `dim` and `keepdim`.
|
|
5954
|
+
|
|
5955
|
+
- If `input.dtype` is in [float16, float32, bfloat16], the output dtype is the same as the `input.dtype`.
|
|
5956
|
+
- If `input.dtype` is an integer or boolean type, the output dtype is float32.
|
|
5957
|
+
- If `dim` is (), and `keepdim` is False, the output is a 0-D tensor representing the logarithm of the sum of exponentiations of all elements in the `input` tensor.
|
|
5958
|
+
- If `dim` is `1`, and `keepdim` is False, the shape of output is :math:`(input.shape[0], input.shape[2], ..., input.shape[n])`.
|
|
5959
|
+
- If `dim` is `(1, 2)`, and `keepdim` is False, the shape of output is :math:`(input.shape[0], input.shape[3], ..., input.shape[n])`.
|
|
5960
|
+
|
|
5961
|
+
Raises:
|
|
5962
|
+
TypeError: If `input` is not a Tensor.
|
|
5963
|
+
TypeError: If dtype of `input` is not one of: bool, int8, int16, int32, int64, uint8, float16, float32, bfloat16.
|
|
5964
|
+
TypeError: If `dim` is not an int, tuple(int) or list(int).
|
|
5965
|
+
TypeError: If `keepdim` is not a bool.
|
|
5966
|
+
ValueError: If the value of any elements of `dim` is not in the range `[0, len(input.shape) - 1]`.
|
|
5967
|
+
RuntimeError: If any element of `dim` is repeated.
|
|
5968
|
+
|
|
5969
|
+
Supported Platforms:
|
|
5970
|
+
``Ascend``
|
|
5971
|
+
|
|
5972
|
+
Examples:
|
|
5973
|
+
>>> import numpy as np
|
|
5974
|
+
>>> from mindspore import Tensor, ops
|
|
5975
|
+
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
|
|
5976
|
+
>>> output = ops.auto_generate.logsumexp_ext(x, 1, keepdim=True)
|
|
5977
|
+
>>> print(output.shape)
|
|
5978
|
+
(3, 1, 5, 6)
|
|
4843
5979
|
"""
|
|
4844
|
-
return
|
|
5980
|
+
return logsumexp_op(input, dim, keepdim)
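A NumPy reference for the stabilized formula above (a sketch for int `dim`; the operator also accepts tuples and lists):

import numpy as np

def logsumexp_ref(x, dim, keepdim=False):
    m = x.max(axis=dim, keepdims=True)  # subtract the max for stability
    out = np.log(np.exp(x - m).sum(axis=dim, keepdims=True)) + m
    return out if keepdim else np.squeeze(out, axis=dim)

x = np.random.randn(3, 4, 5, 6).astype(np.float32)
print(logsumexp_ref(x, 1, keepdim=True).shape)  # (3, 1, 5, 6)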
|
|
4845
5981
|
|
|
4846
5982
|
|
|
4847
5983
|
def masked_fill(input_x, mask, value):
|
|
@@ -4864,7 +6000,7 @@ def masked_fill(input_x, mask, value):
|
|
|
4864
6000
|
ValueError: If the shapes of `input_x` and `mask` could not be broadcast.
|
|
4865
6001
|
TypeError: If dtype of `input_x` or `value` is not one of bool, uint8, int8, int16, int32,
|
|
4866
6002
|
int64, float16, float32, float64, complex64, complex128.
|
|
4867
|
-
TypeError: If dtype of `value` is different from that of `input_x
|
|
6003
|
+
TypeError: If dtype of `value` is different from that of `input_x` on CPU and GPU.
|
|
4868
6004
|
TypeError: If `value` is neither float number nor Tensor.
|
|
4869
6005
|
|
|
4870
6006
|
Supported Platforms:
|
|
@@ -4915,11 +6051,11 @@ def masked_select(input, mask):
|
|
|
4915
6051
|
return masked_select_op(input, mask)
|
|
4916
6052
|
|
|
4917
6053
|
|
|
4918
|
-
def matmul_ext(input,
|
|
6054
|
+
def matmul_ext(input, other):
|
|
4919
6055
|
r"""
|
|
4920
6056
|
|
|
4921
6057
|
"""
|
|
4922
|
-
return matmul_ext_op(input,
|
|
6058
|
+
return matmul_ext_op(input, other)
|
|
4923
6059
|
|
|
4924
6060
|
|
|
4925
6061
|
def matrix_exp(input):
|
|
@@ -5001,6 +6137,57 @@ def max_(input):
|
|
|
5001
6137
|
return max_op(input)
|
|
5002
6138
|
|
|
5003
6139
|
|
|
6140
|
+
def max_unpool2d_ext(input, indices, kernel_size, stride=None, padding=0, output_size=None):
|
|
6141
|
+
r"""
|
|
6142
|
+
Computes the inverse of `max_pool2d`.
|
|
6143
|
+
|
|
6144
|
+
`max_unpool2d` keeps the maximal values and sets all positions of non-maximal values to zero. Typically the input is of shape :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`, and the output is of shape :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`. The operation is as follows.
|
|
6145
|
+
|
|
6146
|
+
.. math::
|
|
6147
|
+
\begin{array}{ll} \\
|
|
6148
|
+
H_{out} = (H_{in} - 1) \times stride[0] - 2 \times padding[0] + kernel\_size[0] \\
|
|
6149
|
+
W_{out} = (W_{in} - 1) \times stride[1] - 2 \times padding[1] + kernel\_size[1] \\
|
|
6150
|
+
\end{array}
|
|
6151
|
+
|
|
6152
|
+
.. warning::
|
|
6153
|
+
This is an experimental API that is subject to change or deletion.
|
|
6154
|
+
|
|
6155
|
+
Args:
|
|
6156
|
+
input (Tensor): The input Tensor to invert. Tensor of shape :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
|
|
6157
|
+
indices (Tensor): Indices of the max values. Its shape must be the same as that of `input`. Values of `indices` must be in :math:`[0, H_{in} \times W_{in} - 1]`. Data type must be int32 or int64.
|
|
6158
|
+
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value, an int number that represents height and width of the kernel, or a tuple of two int numbers that represent height and width respectively.
|
|
6159
|
+
stride (Union[int, tuple[int]], optional): The distance of kernel moving, an int number that represents the height and width of movement are both stride, or a tuple of two int numbers that represent height and width of movement respectively. Default: ``None`` , which indicates the moving step is `kernel_size` .
|
|
6160
|
+
padding (Union[int, tuple[int]], optional): The pad value to be filled. Default: ``0`` . If `padding` is an integer, the paddings of height and width are the same, equal to padding. If `padding` is a tuple of two integers, the padding of height and width equal to padding[0] and padding[1] correspondingly.
|
|
6161
|
+
output_size (tuple[int], optional): The target output size. Default: ``None`` . If output_size == (), then the shape of the output is computed from `kernel_size`, `stride` and `padding`. If output_size != (), then output_size must be :math:`(N, C, H, W)`, :math:`(C, H, W)` or :math:`(H, W)` and output_size must belong to :math:`[(N, C, H_{out} - stride[0], W_{out} - stride[1]), (N, C, H_{out} + stride[0], W_{out} + stride[1])]`.
|
|
6162
|
+
|
|
6163
|
+
Returns:
|
|
6164
|
+
Tensor, with shape :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, with the same data type with `input`.
|
|
6165
|
+
|
|
6166
|
+
Raises:
|
|
6167
|
+
TypeError: If data type of `input` or `indices` is not supported.
|
|
6168
|
+
TypeError: If `kernel_size`, `stride` or `padding` is neither an int nor a tuple.
|
|
6169
|
+
ValueError: If numbers in `stride`, `padding` or `kernel_size` are not positive.
|
|
6170
|
+
ValueError: If the shapes of `input` and `indices` are different.
|
|
6171
|
+
ValueError: If the rank of `input` is not 3 or 4.
|
|
6172
|
+
ValueError: If the type of `output_size` is not tuple.
|
|
6173
|
+
ValueError: If `output_size` is not close to output size computed by attr `kernel_size`, `stride`, `padding`.
|
|
6174
|
+
|
|
6175
|
+
Supported Platforms:
|
|
6176
|
+
``Ascend``
|
|
6177
|
+
|
|
6178
|
+
Examples:
|
|
6179
|
+
>>> import numpy as np
|
|
6180
|
+
>>> from mindspore import Tensor, ops
|
|
6181
|
+
>>> input = Tensor(np.array([[[[0, 1], [8, 9]]]]).astype(np.float32))
|
|
6182
|
+
>>> indices = Tensor(np.array([[[[0, 1], [2, 3]]]]).astype(np.int64))
|
|
6183
|
+
>>> output = ops.max_unpool2d_ext(input, indices, 1, stride=1, padding=0)
|
|
6184
|
+
>>> print(output.asnumpy())
|
|
6185
|
+
[[[[0. 1.]
|
|
6186
|
+
[8. 9.]]]]
|
|
6187
|
+
"""
|
|
6188
|
+
return max_unpool2d_ext_op(input, indices, kernel_size, stride, padding, output_size)
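A quick check of the output-size formulas above for the docstring example (H_in = W_in = 2, kernel_size = stride = 1, padding = 0; the helper name is hypothetical):

def _unpool_out_size(i, k, s, p):
    # H_out = (H_in - 1) * stride - 2 * padding + kernel_size
    return (i - 1) * s - 2 * p + k

assert _unpool_out_size(2, 1, 1, 0) == 2  # output stays 2 x 2, as printed above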
|
|
6189
|
+
|
|
6190
|
+
|
|
5004
6191
|
def maximum(input, other):
|
|
5005
6192
|
r"""
|
|
5006
6193
|
Computes the maximum of input tensors element-wise.
|
|
@@ -5019,8 +6206,8 @@ def maximum(input, other):
|
|
|
5019
6206
|
- If one of the elements being compared is a NaN, then that element is returned.
|
|
5020
6207
|
|
|
5021
6208
|
.. warning::
|
|
5022
|
-
If all inputs are scalar of integers. In
|
|
5023
|
-
|
|
6209
|
+
If all inputs are integer scalars, then in Graph mode the output will be a Tensor of int32, while in
|
|
6210
|
+
PyNative mode it will be a Tensor of int64.
|
|
5024
6211
|
|
|
5025
6212
|
Args:
|
|
5026
6213
|
input (Union[Tensor, Number, bool]): The first input is a number or
|
|
@@ -5059,42 +6246,42 @@ def maximum(input, other):
|
|
|
5059
6246
|
return maximum_op(input, other)
|
|
5060
6247
|
|
|
5061
6248
|
|
|
5062
|
-
def mean_ext(input,
|
|
6249
|
+
def mean_ext(input, dim=None, keepdim=False, dtype=None):
|
|
5063
6250
|
r"""
|
|
5064
6251
|
Reduces all dimensions of a tensor by averaging all elements in each dimension, by default.
|
|
5065
|
-
And reduce a dimension of `input` along the specified `
|
|
6252
|
+
And reduce a dimension of `input` along the specified `dim`. `keepdim`
|
|
5066
6253
|
determines whether the dimensions of the output and input are the same.
|
|
5067
6254
|
|
|
5068
6255
|
Note:
|
|
5069
|
-
The `
|
|
6256
|
+
The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
|
|
5070
6257
|
|
|
5071
6258
|
Args:
|
|
5072
6259
|
input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
|
|
5073
6260
|
:math:`(N, *)` where :math:`*` means, any number of additional dimensions.
|
|
5074
|
-
|
|
6261
|
+
dim (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` ,
|
|
5075
6262
|
reduce all dimensions. Only constant value is allowed. Assume the rank of `input` is r,
|
|
5076
6263
|
and the value range is [-r,r).
|
|
5077
|
-
|
|
6264
|
+
keepdim (bool): If ``True`` , keep these reduced dimensions and the length is 1.
|
|
5078
6265
|
If ``False`` , don't keep these dimensions. Default: ``False`` .
|
|
5079
6266
|
dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
|
|
5080
6267
|
|
|
5081
6268
|
Returns:
|
|
5082
6269
|
Tensor, has the same data type as input tensor.
|
|
5083
6270
|
|
|
5084
|
-
- If `
|
|
6271
|
+
- If `dim` is ``None`` , and `keepdim` is ``False`` ,
|
|
5085
6272
|
the output is a 0-D tensor representing the product of all elements in the input tensor.
|
|
5086
|
-
- If `
|
|
6273
|
+
- If `dim` is int, set as 1, and `keepdim` is ``False`` ,
|
|
5087
6274
|
the shape of output is :math:`(x_0, x_2, ..., x_R)`.
|
|
5088
|
-
- If `
|
|
6275
|
+
- If `dim` is tuple(int), set as (1, 2), and `keepdim` is ``False`` ,
|
|
5089
6276
|
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
|
|
5090
|
-
- If `
|
|
6277
|
+
- If `dim` is 1-D Tensor, set as [1, 2], and `keepdim` is ``False`` ,
|
|
5091
6278
|
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
|
|
5092
6279
|
|
|
5093
6280
|
Raises:
|
|
5094
6281
|
TypeError: If `input` is not a Tensor.
|
|
5095
|
-
TypeError: If `
|
|
5096
|
-
TypeError: If `
|
|
5097
|
-
ValueError: If `
|
|
6282
|
+
TypeError: If `dim` is not one of the following: int, tuple, list or Tensor.
|
|
6283
|
+
TypeError: If `keepdim` is not a bool.
|
|
6284
|
+
ValueError: If `dim` is out of range.
|
|
5098
6285
|
|
|
5099
6286
|
Supported Platforms:
|
|
5100
6287
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -5104,7 +6291,7 @@ def mean_ext(input, axis=None, keep_dims=False, dtype=None):
|
|
|
5104
6291
|
>>> import numpy as np
|
|
5105
6292
|
>>> from mindspore import Tensor, ops
|
|
5106
6293
|
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
|
|
5107
|
-
>>> output = ops.
|
|
6294
|
+
>>> output = ops.mean_ext(x, 1, keepdim=True)
|
|
5108
6295
|
>>> result = output.shape
|
|
5109
6296
|
>>> print(result)
|
|
5110
6297
|
(3, 1, 5, 6)
|
|
@@ -5113,25 +6300,25 @@ def mean_ext(input, axis=None, keep_dims=False, dtype=None):
|
|
|
5113
6300
|
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
|
|
5114
6301
|
... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
|
|
5115
6302
|
... mindspore.float32)
|
|
5116
|
-
>>> output = ops.
|
|
6303
|
+
>>> output = ops.mean_ext(x)
|
|
5117
6304
|
>>> print(output)
|
|
5118
6305
|
5.0
|
|
5119
6306
|
>>> print(output.shape)
|
|
5120
6307
|
()
|
|
5121
|
-
>>> # case 2: Reduces a dimension along the
|
|
5122
|
-
>>> output = ops.
|
|
6308
|
+
>>> # case 2: Reduces a dimension along the dim 0
|
|
6309
|
+
>>> output = ops.mean_ext(x, 0, True)
|
|
5123
6310
|
>>> print(output)
|
|
5124
6311
|
[[[4. 4. 4. 4. 4. 4.]
|
|
5125
6312
|
[5. 5. 5. 5. 5. 5.]
|
|
5126
6313
|
[6. 6. 6. 6. 6. 6.]]]
|
|
5127
|
-
>>> # case 3: Reduces a dimension along the
|
|
5128
|
-
>>> output = ops.
|
|
6314
|
+
>>> # case 3: Reduces a dimension along the dim 1
|
|
6315
|
+
>>> output = ops.mean_ext(x, 1, True)
|
|
5129
6316
|
>>> print(output)
|
|
5130
6317
|
[[[2. 2. 2. 2. 2. 2.]]
|
|
5131
6318
|
[[5. 5. 5. 5. 5. 5.]]
|
|
5132
6319
|
[[8. 8. 8. 8. 8. 8.]]]
|
|
5133
|
-
>>> # case 4: Reduces a dimension along the
|
|
5134
|
-
>>> output = ops.
|
|
6320
|
+
>>> # case 4: Reduces a dimension along the dim 2
|
|
6321
|
+
>>> output = ops.mean_ext(x, 2, True)
|
|
5135
6322
|
>>> print(output)
|
|
5136
6323
|
[[[ 2.]
|
|
5137
6324
|
[ 2.]
|
|
@@ -5143,7 +6330,7 @@ def mean_ext(input, axis=None, keep_dims=False, dtype=None):
|
|
|
5143
6330
|
[ 8.]
|
|
5144
6331
|
[10.]]]
|
|
5145
6332
|
"""
|
|
5146
|
-
return mean_ext_op(input,
|
|
6333
|
+
return mean_ext_op(input, dim, keepdim, dtype)
|
|
5147
6334
|
|
|
5148
6335
|
|
|
5149
6336
|
def min_(input):
|
|
@@ -5159,6 +6346,9 @@ def minimum(input, other):
|
|
|
5159
6346
|
r"""
|
|
5160
6347
|
Computes the minimum of input tensors element-wise.
|
|
5161
6348
|
|
|
6349
|
+
.. math::
|
|
6350
|
+
output_i = \min(input_i, other_i)
|
|
6351
|
+
|
|
5162
6352
|
Note:
|
|
5163
6353
|
- Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
|
|
5164
6354
|
consistent.
|
|
@@ -5167,9 +6357,6 @@ def minimum(input, other):
|
|
|
5167
6357
|
- Shapes of them are supposed to be broadcast.
|
|
5168
6358
|
- If one of the elements being compared is a NaN, then that element is returned.
|
|
5169
6359
|
|
|
5170
|
-
.. math::
|
|
5171
|
-
output_i = \min(input_i, other_i)
|
|
5172
|
-
|
|
5173
6360
|
Args:
|
|
5174
6361
|
input (Union[Tensor, Number, bool]): The first input is a number or
|
|
5175
6362
|
a bool or a tensor whose data type is number or bool.
|
|
@@ -5253,6 +6440,50 @@ def mish_ext(input):
|
|
|
5253
6440
|
return mish_ext_op(input)
|
|
5254
6441
|
|
|
5255
6442
|
|
|
6443
|
+
def mm_ext(input, mat2):
|
|
6444
|
+
r"""
|
|
6445
|
+
Returns the matrix product of two arrays.
|
|
6446
|
+
If `input` is a :math:`(n \times m)` Tensor, `mat2` is a
|
|
6447
|
+
:math:`(m \times p)` Tensor, `out` will be a :math:`(n \times p)` Tensor.
|
|
6448
|
+
|
|
6449
|
+
Note:
|
|
6450
|
+
This function cannot support broadcasting.
|
|
6451
|
+
Refer to :func:`mindspore.ops.matmul` instead if you need a broadcastable function.
|
|
6452
|
+
|
|
6453
|
+
.. warning::
|
|
6454
|
+
This is an experimental API that is subject to change or deletion.
|
|
6455
|
+
|
|
6456
|
+
Args:
|
|
6457
|
+
input (Tensor): The first matrix of matrix multiplication.
|
|
6458
|
+
The last dimension of `input` must be the same size as the first dimension of `mat2`.
|
|
6459
|
+
mat2 (Tensor): The second matrix of matrix multiplication.
|
|
6460
|
+
The last dimension of `input` must be the same size as the first dimension of `mat2`.
|
|
6461
|
+
|
|
6462
|
+
Returns:
|
|
6463
|
+
Tensor, the matrix product of the inputs.
|
|
6464
|
+
|
|
6465
|
+
Raises:
|
|
6466
|
+
ValueError: If the last dimension of `input` is not the same size as the
|
|
6467
|
+
second-to-last dimension of `mat2`.
|
|
6468
|
+
TypeError: If `input` or `mat2` is not a Tensor.
|
|
6469
|
+
TypeError: If dtype of `input` or `mat2` is not float16, float32 or bfloat16.
|
|
6470
|
+
|
|
6471
|
+
Supported Platforms:
|
|
6472
|
+
``Ascend``
|
|
6473
|
+
|
|
6474
|
+
Examples:
|
|
6475
|
+
>>> import mindspore as ms
|
|
6476
|
+
>>> from mindspore import ops
|
|
6477
|
+
>>> import numpy as np
|
|
6478
|
+
>>> x1 = ms.Tensor(np.random.rand(2, 3), ms.float32)
|
|
6479
|
+
>>> x2 = ms.Tensor(np.random.rand(3, 4), ms.float32)
|
|
6480
|
+
>>> out = ops.mm_ext(x1, x2)
|
|
6481
|
+
>>> print(out.shape)
|
|
6482
|
+
(2, 4)
|
|
6483
|
+
"""
|
|
6484
|
+
return mm_ext_op(input, mat2)
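A NumPy sketch of the (n x m) @ (m x p) contract above; unlike matmul, mm does not broadcast batch dimensions:

import numpy as np

a = np.random.rand(2, 3).astype(np.float32)
b = np.random.rand(3, 4).astype(np.float32)
print((a @ b).shape)  # (2, 4)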
|
|
6485
|
+
|
|
6486
|
+
|
|
5256
6487
|
def mse_loss_ext(input, target, reduction='mean'):
|
|
5257
6488
|
r"""
|
|
5258
6489
|
Calculates the mean squared error between the predicted value and the label value.
|
|
@@ -5356,7 +6587,38 @@ def muls(input, other):
|
|
|
5356
6587
|
|
|
5357
6588
|
def mv(input, vec):
|
|
5358
6589
|
r"""
|
|
5359
|
-
|
|
6590
|
+
Multiply matrix `input` and vector `vec`.
|
|
6591
|
+
If `input` is a tensor with shape :math:`(N, M)` and `vec` is a tensor with shape :math:`(M,)`,
|
|
6592
|
+
the output is a 1-D tensor with shape :math:`(N,)`.
|
|
6593
|
+
|
|
6594
|
+
.. warning::
|
|
6595
|
+
This is an experimental API that is subject to change or deletion.
|
|
6596
|
+
|
|
6597
|
+
Args:
|
|
6598
|
+
input (Tensor): The input matrix, whose shape is :math:`(N, M)`; it must be 2-D.
|
|
6599
|
+
vec (Tensor): The input vector, whose shape is :math:`(M,)`; it must be 1-D.
|
|
6600
|
+
|
|
6601
|
+
Returns:
|
|
6602
|
+
Tensor, the shape is :math:`(N,)`.
|
|
6603
|
+
|
|
6604
|
+
Raises:
|
|
6605
|
+
TypeError: If `input` or `vec` is not a tensor.
|
|
6606
|
+
TypeError: If the dtype of `input` or `vec` is not float16 or float32.
|
|
6607
|
+
TypeError: If the dtypes of `input` and `vec` are different.
|
|
6608
|
+
ValueError: If the `input` is not a 2-D tensor or the `vec` is not a 1-D tensor.
|
|
6609
|
+
|
|
6610
|
+
Supported Platforms:
|
|
6611
|
+
``Ascend``
|
|
6612
|
+
|
|
6613
|
+
Examples:
|
|
6614
|
+
>>> import mindspore
|
|
6615
|
+
>>> import numpy as np
|
|
6616
|
+
>>> from mindspore import Tensor, mint
|
|
6617
|
+
>>> input = Tensor(np.array([[3., 4.], [1., 6.], [1., 3.]]).astype(np.float32))
|
|
6618
|
+
>>> vec = Tensor(np.array([1., 2.]).astype(np.float32))
|
|
6619
|
+
>>> output = mint.mv(input, vec)
|
|
6620
|
+
>>> print(output)
|
|
6621
|
+
[11. 13. 7.]
|
|
5360
6622
|
"""
|
|
5361
6623
|
return mv_op(input, vec)
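The example above works out row by row; a NumPy check:

import numpy as np

m = np.array([[3., 4.], [1., 6.], [1., 3.]], dtype=np.float32)
v = np.array([1., 2.], dtype=np.float32)
print(m @ v)  # [11. 13.  7.]  i.e. (3*1+4*2, 1*1+6*2, 1*1+3*2)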
|
|
5362
6624
|
|
|
@@ -5399,6 +6661,46 @@ def nan_to_num(input, nan=None, posinf=None, neginf=None):
|
|
|
5399
6661
|
return nan_to_num_impl(input, nan, posinf, neginf)
|
|
5400
6662
|
|
|
5401
6663
|
|
|
6664
|
+
def narrow(input, dim, start, length):
|
|
6665
|
+
r"""
|
|
6666
|
+
Obtains a tensor of a specified length at a specified start position along a specified axis.
|
|
6667
|
+
|
|
6668
|
+
Args:
|
|
6669
|
+
input (Tensor): the tensor to narrow.
|
|
6670
|
+
dim (int): the axis along which to narrow.
|
|
6671
|
+
start (Union[int, Tensor[int]]): the index at which to start narrowing along `dim`.
|
|
6672
|
+
length (int): the length of the narrowed slice.
|
|
6673
|
+
|
|
6674
|
+
Returns:
|
|
6675
|
+
output (Tensors) - The narrowed tensor.
|
|
6676
|
+
|
|
6677
|
+
Raises:
|
|
6678
|
+
ValueError: If the rank of `input` is 0.
|
|
6679
|
+
ValueError: If the value of `dim` is out of the range [-input.ndim, input.ndim).
|
|
6680
|
+
ValueError: If the value of `start` is out of the range [-input.shape[dim], input.shape[dim]].
|
|
6681
|
+
ValueError: If the value of `length` is out of the range [0, input.shape[dim]-start].
|
|
6682
|
+
|
|
6683
|
+
Supported Platforms:
|
|
6684
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
6685
|
+
|
|
6686
|
+
Examples:
|
|
6687
|
+
>>> import mindspore
|
|
6688
|
+
>>> from mindspore import mint
|
|
6689
|
+
>>> from mindspore import Tensor
|
|
6690
|
+
>>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mindspore.int32)
|
|
6691
|
+
>>> output = mint.narrow(x, 0, 0, 2)
|
|
6692
|
+
>>> print(output)
|
|
6693
|
+
[[ 1 2 3]
|
|
6694
|
+
[ 4 5 6]]
|
|
6695
|
+
>>> output = mint.narrow(x, 1, 1, 2)
|
|
6696
|
+
>>> print(output)
|
|
6697
|
+
[[ 2 3]
|
|
6698
|
+
[ 5 6]
|
|
6699
|
+
[ 8 9]]
|
|
6700
|
+
"""
|
|
6701
|
+
return narrow_op(input, dim, start, length)
|
|
6702
|
+
|
|
6703
|
+
|
|
5402
6704
|
def neg(input):
|
|
5403
6705
|
r"""
|
|
5404
6706
|
Returns a tensor with negative values of the input tensor element-wise.
|
|
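The semantics of the new `narrow` are those of basic slicing with an explicit length; a sketch of the equivalence, reusing the docstring's own input:

    import mindspore
    from mindspore import Tensor, mint

    x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mindspore.int32)
    a = mint.narrow(x, 0, 0, 2)   # rows 0..1, equivalent to x[0:2, :]
    b = mint.narrow(x, 1, 1, 2)   # cols 1..2, equivalent to x[:, 1:3]
    print(a.shape, b.shape)       # (2, 3) (3, 2)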
@@ -5439,10 +6741,6 @@ def nextafter(input, other):
     then the next representable of :math:`a` towards :math:`b` is :math:`a+eps`,
     the next representable of :math:`b` towards :math:`a` is :math:`b-eps`.
 
-    .. math::
-
-        out_{i} = nextafter({input_{i}, other_{i}})
-
     For more detailed information, refer to `A Self Regularized Non-Monotonic Neural Activation Function <https://arxiv.org/abs/1908.08681>`_.
 
     Args:
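The removed formula is not a loss of information: `nextafter` keeps the standard C-library meaning described in the surrounding prose. NumPy exposes the same semantics and can serve as a reference:

    import numpy as np

    # Next representable float32 after 1.0 towards +inf, and towards 0:
    print(np.nextafter(np.float32(1.0), np.float32(2.0)))  # 1.0000001
    print(np.nextafter(np.float32(1.0), np.float32(0.0)))  # 0.99999994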
@@ -5662,34 +6960,34 @@ def prelu(input, weight):
     return prelu_op(input, weight)
 
 
-def prod_ext(input, axis=None, keep_dims=False, dtype=None):
+def prod_ext(input, dim=None, keepdim=False, dtype=None):
     r"""
     Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can
-    reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
-    same by controlling `keep_dims`.
+    reduce a dimension of `input` along the `dim`. Determine whether the dimensions of the output and input are the
+    same by controlling `keepdim`.
 
     Args:
         input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
             :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
-        axis (int): The dimensions to reduce. Default: ``None`` , reduce all dimensions.
+        dim (int): The dimensions to reduce. Default: ``None`` , reduce all dimensions.
             Only constant value is allowed. Assume the rank of `input` is r, and the value range is [-r,r).
-        keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
+        keepdim (bool): If ``True`` , keep these reduced dimensions and the length is 1.
             If ``False`` , don't keep these dimensions. Default: ``False`` .
         dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
 
     Returns:
         Tensor, has the same data type as input tensor.
 
-        - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
+        - If `dim` is ``None`` , and `keepdim` is ``False`` ,
           the output is a 0-D tensor representing the product of all elements in the input tensor.
-        - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
+        - If `dim` is int, set as 1, and `keepdim` is ``False`` ,
           the shape of output is :math:`(input_0, input_2, ..., input_R)`.
 
     Raises:
         TypeError: If `input` is not a Tensor.
-        TypeError: If `axis` is not one of the following: int or None.
-        TypeError: If `keep_dims` is not a bool.
-        ValueError: If `axis` is out of range.
+        TypeError: If `dim` is not one of the following: int or None.
+        TypeError: If `keepdim` is not a bool.
+        ValueError: If `dim` is out of range.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -5699,7 +6997,7 @@ def prod_ext(input, axis=None, keep_dims=False, dtype=None):
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
         >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
-        >>> output = ops.
+        >>> output = ops.prod_ext(x, 1, keepdim=True)
         >>> result = output.shape
         >>> print(result)
         (3, 1, 5, 6)
@@ -5707,25 +7005,25 @@ def prod_ext(input, axis=None, keep_dims=False, dtype=None):
         >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
         ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
         ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
-        >>> output = ops.
+        >>> output = ops.prod_ext(x)
         >>> print(output)
         2.2833798e+33
         >>> print(output.shape)
         ()
-        >>> # case 2: Reduces a dimension along axis 0.
-        >>> output = ops.
+        >>> # case 2: Reduces a dimension along dim 0.
+        >>> output = ops.prod_ext(x, 0, True)
         >>> print(output)
         [[[ 28. 28. 28. 28. 28. 28.]
         [ 80. 80. 80. 80. 80. 80.]
         [162. 162. 162. 162. 162. 162.]]]
-        >>> # case 3: Reduces a dimension along axis 1.
-        >>> output = ops.
+        >>> # case 3: Reduces a dimension along dim 1.
+        >>> output = ops.prod_ext(x, 1, True)
         >>> print(output)
         [[[ 6. 6. 6. 6. 6. 6.]]
         [[120. 120. 120. 120. 120. 120.]]
         [[504. 504. 504. 504. 504. 504.]]]
-        >>> # case 4: Reduces a dimension along axis 2.
-        >>> output = ops.
+        >>> # case 4: Reduces a dimension along dim 2.
+        >>> output = ops.prod_ext(x, 2, True)
         >>> print(output)
         [[[1.00000e+00]
         [6.40000e+01]
@@ -5737,7 +7035,7 @@ def prod_ext(input, axis=None, keep_dims=False, dtype=None):
         [2.62144e+05]
         [5.31441e+05]]]
     """
-    return prod_ext_op(input, axis, keep_dims, dtype)
+    return prod_ext_op(input, dim, keepdim, dtype)
 
 
 def prompt_k_v_cache(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len, align_mode='LEFT'):
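Taken together, the `prod_ext` hunks amount to a keyword rename from `axis`/`keep_dims` to `dim`/`keepdim`. A migration sketch, assuming the behaviour is unchanged apart from the parameter names:

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
    # 2.4.x spelling:  ops.prod_ext(x, 1, keep_dims=True)
    # 2.5.0 spelling:
    output = ops.prod_ext(x, 1, keepdim=True)
    assert output.shape == (3, 1, 5, 6)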
@@ -6043,6 +7341,7 @@ def relu(input):
 
     Args:
         input (Tensor): The input Tensor.
+        inplace (bool, optional): Whether to use inplace mode. Default: ``False``.
 
     Returns:
         Tensor, with the same dtype and shape as the `input`.
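The only documented change to `relu` here is the new `inplace` flag. A sketch of the intended usage, assuming the call signature gained a matching keyword (the updated signature itself is not shown in this hunk):

    import mindspore
    from mindspore import Tensor, ops

    x = Tensor([-1.0, 0.0, 2.0], mindspore.float32)
    y = ops.relu(x)                # out-of-place by default: returns a new tensor
    z = ops.relu(x, inplace=True)  # documented to update and return the input buffer
    print(z)                       # [0. 0. 2.]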
@@ -6723,7 +8022,6 @@ def select_ext(input, dim, index):
         >>> from mindspore import Tensor, mint
         >>> input = Tensor([[2, 3, 4, 5],[3, 2, 4, 5]])
         >>> y = mint.select(input, 0, 0)
-        >>> y = Tensor([1,2], mindspore.float32)
         >>> print(y)
         [2 3 4 5]
 
@@ -6952,7 +8250,7 @@ def sign(input):
         >>> print(output)
         [[-1 0 1 1 1]
         [ 1 1 1 -1 0]]
-        >>> ms.
+        >>> ms.set_device(device_target="CPU")
         >>> x = ms.Tensor([[-1, 0, float('inf'), 4, float('nan')], [2, 3, float('-inf'), -6, 0]])
         >>> output = ops.sign(x)
         >>> print(output)
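This example change is part of a wider 2.5.0 docs migration towards the dedicated device-selection API; side by side, as a sketch:

    import mindspore as ms

    # Pre-2.5 examples configured the device through the context:
    # ms.set_context(device_target="CPU")
    # The updated examples use the dedicated API instead:
    ms.set_device(device_target="CPU")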
@@ -7380,7 +8678,7 @@ def stack_ext(tensors, dim=0):
 
     Args:
         tensors (Union[tuple, list]): A Tuple or list of Tensor objects with the same shape and type.
-        dim (int): Dimension to stack. The range is [-(R+1), R+1). Default: ``0`` .
+        dim (int, optional): Dimension to stack. The range is [-(R+1), R+1). Default: ``0`` .
 
     Returns:
         Tensor. A stacked Tensor with the same type as `tensors`.
@@ -7388,7 +8686,7 @@ def stack_ext(tensors, dim=0):
     Raises:
         TypeError: If the data types of elements in `tensors` are not the same.
         ValueError: If `dim` is out of the range [-(R+1), R+1);
-            or if the shapes of elements in tensors are not the same.
+            or if the shapes of elements in `tensors` are not the same.
 
     Supported Platforms:
         ``Ascend``
@@ -7670,6 +8968,19 @@ def sub(input, other):
     return sub_op(input, other)
 
 
+def sum_ext(input, dim=None, keepdim=False, dtype=None):
+    r"""
+    Alias for :func:`mindspore.mint.sum` . The `input`, `dim`, `keepdim` and `dtype` correspond to the
+    parameters of the reference interface respectively.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Refer to :func:`mindspore.mint.sum` for more details.
+    """
+    return sum_ext_op(input, dim, keepdim, dtype)
+
+
 def swiglu_grad(grad_output, input, dim=-1):
     r"""
 
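`sum_ext` follows the usual `dim`/`keepdim`/`dtype` reduction signature; a NumPy cross-check of that behaviour, assuming `ops.sum_ext` is exported like the neighbouring functions:

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.arange(6, dtype=np.float32).reshape(2, 3))
    out = ops.sum_ext(x, dim=1, keepdim=True)
    assert out.shape == (2, 1)
    assert np.allclose(out.asnumpy(), x.asnumpy().sum(axis=1, keepdims=True))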
@@ -7682,6 +8993,9 @@ def swiglu(input, dim=-1):
     Computes SwiGLU (Swish-Gated Linear Unit activation function) of input tensor.
     SwiGLU is a variant of the :class:`mindspore.ops.GLU` activation function, it is defined as:
 
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
     .. math::
         {SwiGLU}(a, b)= Swish(a) \otimes b
 
@@ -7715,6 +9029,41 @@ def swiglu(input, dim=-1):
     return swiglu_op(input, dim)
 
 
+def t_ext(input):
+    r"""
+    Transpose the input tensor.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input tensor.
+
+    Returns:
+        Tensor. A 2-D input tensor is transposed; an input with fewer than 2 dimensions is returned as is.
+
+    Raises:
+        ValueError: If the dimension of `input` is greater than 2.
+        ValueError: If `input` is empty.
+        TypeError: If `input` is not a tensor.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), mindspore.float32)
+        >>> output = ops.t_ext(input)
+        >>> print(output)
+        [[ 1. 4.]
+        [ 2. 5.]
+        [ 3. 6.]]
+    """
+    return t_ext_op(input)
+
+
 def tan(input):
     r"""
     Computes tangent of `input` element-wise.
@@ -8071,7 +9420,7 @@ def triu(input, diagonal=0):
     This is an experimental API that is subject to change or deletion.
 
     Args:
-        input (Tensor): The input tensor with shape :math:`(M, N)`.
+        input (Tensor): The input tensor with shape :math:`(*, M, N)` where * means any number of additional dimensions.
         diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: ``0``,
             indicating the main diagonal.
 
@@ -8157,6 +9506,49 @@ def tuple_to_tensor(input_tuple, dtype=None):
     """
     return tuple_to_tensor_op(input_tuple, dtype)
 
+type_as_op=TypeAs()
+
+def type_as(input, other):
+    r"""
+    Returns `input` cast to the data type of `other`.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Note:
+        When converting complex numbers to boolean type, the imaginary part of the complex number is not
+        taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.
+
+    Args:
+        input (Tensor): The shape of tensor is :math:`(x_0, x_1, ..., x_R)`. The tensor whose data type is to be converted.
+        other (Tensor): The shape of tensor is :math:`(x_0, x_1, ..., x_R)`. The tensor whose data type is specified.
+
+    Returns:
+        Tensor, the shape of tensor is the same as `input`, :math:`(x_0, x_1, ..., x_R)`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `other` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
+        >>> input = Tensor(input_np)
+        >>> other_np = np.random.randn(2, 3, 4).astype(np.int32)
+        >>> other = Tensor(other_np)
+        >>> output = ops.type_as(input, other)
+        >>> print(output.dtype)
+        Int32
+        >>> print(output.shape)
+        (2, 3, 4, 5)
+    """
+    return type_as_op(input, other)
+
 
 def unsorted_segment_sum(input_x, segment_ids, num_segments):
     r"""
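`type_as(input, other)` is a cast driven by a second tensor's dtype; a sketch of the equivalence with a plain `astype` cast:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    input = Tensor(np.random.randn(2, 3).astype(np.float32))
    other = Tensor(np.zeros((4,), dtype=np.int32))
    a = ops.type_as(input, other)
    b = input.astype(other.dtype)   # plain cast for comparison
    assert a.dtype == b.dtype == mindspore.int32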
@@ -8216,6 +9608,41 @@ def unsorted_segment_sum(input_x, segment_ids, num_segments):
     return unsorted_segment_sum_op(input_x, segment_ids, num_segments)
 
 
+def view_as(input, other):
+    r"""
+    Change the shape of the input tensor based on the shape of `other`.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input tensor.
+        other (Tensor): The returned tensor has the same shape as `other`.
+
+    Returns:
+        Tensor, which has the same shape as `other`.
+
+    Raises:
+        TypeError: If `input` is not a tensor.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([[1, 2, 3], [2, 3, 4]], dtype=np.float32))
+        >>> other = Tensor(np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32))
+        >>> output = ops.view_as(input, other)
+        >>> print(output)
+        [[1. 2.]
+        [3. 2.]
+        [3. 4.]]
+    """
+    return view_as_op(input, other)
+
+
 def view(input, shape):
     r"""
     Reshape the tensor according to the input shape. It's the same as :func:`mindspore.Tensor.reshape`,
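`view_as` reshapes by example: it should agree with a reshape to `other.shape` whenever the element counts match, as sketched here:

    import numpy as np
    from mindspore import Tensor, ops

    input = Tensor(np.array([[1, 2, 3], [2, 3, 4]], dtype=np.float32))
    other = Tensor(np.zeros((3, 2), dtype=np.float32))
    a = ops.view_as(input, other)
    b = input.reshape(other.shape)   # same elements, target shape taken from `other`
    assert a.shape == b.shape == (3, 2)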
@@ -8289,6 +9716,10 @@ def grouped_matmul(x, weight, bias=None, scale=None, offset=None, antiquant_scal
     .. math::
         y_i = x_i\times (weight_i + antiquant\_offset_i) * antiquant\_scale_i + bias_i
 
+    .. note::
+        The backward derivative is supported only when `bias` , `scale` , `offset` , `antiquant_scale` and
+        `antiquant_offset` are all None, `group_type` is 0, and `split_item` is 3.
+
     Args:
         split_item (int): Splitting input mode. Only support 0 and 3. 0 represents multiple Tensors, and 3 represents a single Tensor.
         group_type (int): The axis to be split. Only support -1 and 0. If the matrix is multiplied by A[m,k]xB[k,n]=C[m,n].
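For the single-tensor case (`split_item=3`, `group_type=0`) the computation the note refers to is a per-group matmul over row blocks of `x`. A NumPy reference of that semantics, as a sketch (the names are illustrative, not the operator's API, and `group_list` is taken here as cumulative row offsets):

    import numpy as np

    def grouped_matmul_ref(x, weights, group_list):
        # Rows of x are split at the cumulative offsets in group_list;
        # block i is multiplied by weights[i].
        out, start = [], 0
        for i, end in enumerate(group_list):
            out.append(x[start:end] @ weights[i])
            start = end
        return np.concatenate(out, axis=0)

    x = np.random.rand(6, 4).astype(np.float32)
    w = np.random.rand(3, 4, 5).astype(np.float32)      # one (k, n) weight per group
    y = grouped_matmul_ref(x, w, group_list=[2, 4, 6])  # row blocks 0:2, 2:4, 4:6
    assert y.shape == (6, 5)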
@@ -8365,7 +9796,8 @@ def grouped_matmul(x, weight, bias=None, scale=None, offset=None, antiquant_scal
         ... result = self.gmm(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list)
         ... return result
         ...
-        >>>
+        >>> ms.set_device(device_target="Ascend")
+        >>> context.set_context(mode=ms.GRAPH_MODE)
         >>> x = [ms.Tensor(np.array([[0, 0, 0, 0],
         ... [1, 1, 1, 1],
         ... [2, 2, 2, 2],
@@ -8392,6 +9824,20 @@ def grouped_matmul(x, weight, bias=None, scale=None, offset=None, antiquant_scal
     return grouped_matmul_impl(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list, split_item, group_type)
 
 
+def grouped_matmul_v2(x, weight, bias=None, scale=None, offset=None, antiquant_scale=None, antiquant_offset=None, group_list=None, split_item=0, group_type=-1):
+    r"""
+
+    """
+    return grouped_matmul_v2_op(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list, split_item, group_type)
+
+
+def grouped_matmul_v4(x, weight, bias=None, scale=None, offset=None, antiquant_scale=None, antiquant_offset=None, pre_token_scale=None, group_list=None, activation_input=None, activation_quant_scale=None, activation_quant_offset=None, split_item=0, group_type=-1, group_list_type=0, act_type=0):
+    r"""
+
+    """
+    return grouped_matmul_v4_op(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, pre_token_scale, group_list, activation_input, activation_quant_scale, activation_quant_offset, split_item, group_type, group_list_type, act_type)
+
+
 def kv_cache_scatter_update(var, indices, updates, axis, reduce='none'):
     r"""
     Update var with updates and indices along sequence axis.
@@ -8427,6 +9873,41 @@ def kv_cache_scatter_update(var, indices, updates, axis, reduce='none'):
     return kv_cache_scatter_update_op(var, indices, updates, axis, reduce)
 
 
+def moe_compute_expert_tokens(sorted_experts, num_expert):
+    r"""
+    In MoE calculation, searches for the last index processed by each expert through binary search.
+
+    .. math::
+        expert\_tokens_{i} = BinarySearch(sorted\_experts, num\_expert)
+
+    Inputs:
+        - **sorted_experts** (Tensor) - A 1-D tensor representing the sorted experts.
+          Supported type: Int32.
+        - **num_expert** (int) - The number of experts, must be greater than 0.
+
+    Outputs:
+        Tensor, with the same dtype as `sorted_experts`.
+
+    Raises:
+        TypeError: If `sorted_experts` is not a tensor.
+        ValueError: If `num_expert` is less than 0.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore as ms
+        >>> from mindspore.ops.auto_generate import MoeComputeExpertTokens
+        >>> sorted_experts = ms.Tensor([0, 0, 1, 2, 2], dtype=ms.int32)
+        >>> num_expert = 5
+        >>> net = MoeComputeExpertTokens()
+        >>> expert_tokens = net(sorted_experts, num_expert)
+        >>> print(expert_tokens)
+        [2, 3, 5]
+    """
+    return moe_compute_expert_tokens_op(sorted_experts, num_expert)
+
+
 def moe_finalize_routing(expanded_x, x1, x2=None, bias=None, scales=None, expanded_row_idx=None, expanded_expert_idx=None):
     r"""
     In MoE calculation, merge the results output by FFN and rearrange the output in time order by experts.
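The binary search described above can be reproduced with `np.searchsorted`; a cross-check against the docstring's example:

    import numpy as np

    sorted_experts = np.array([0, 0, 1, 2, 2], dtype=np.int32)
    # For each expert id, the end offset (last index + 1) of its block in
    # the sorted list; side='right' reproduces the documented output:
    print(np.searchsorted(sorted_experts, [0, 1, 2], side="right"))  # [2 3 5]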
@@ -8484,7 +9965,8 @@ def moe_finalize_routing(expanded_x, x1, x2=None, bias=None, scales=None, expand
         ... result = self.moe_finalize_routing(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
         ... return result
         ...
-        >>>
+        >>> ms.set_device(device_target="Ascend")
+        >>> context.set_context(mode=ms.GRAPH_MODE)
         >>> # E = 4, K = 2, N = 3, H = 4
         >>> expanded_x = ms.Tensor(np.array([[0.1, 0.1, 0.1, 0.1],
         ... [0.2, 0.2, 0.2, 0.2],
@@ -8517,6 +9999,30 @@ def moe_finalize_routing(expanded_x, x1, x2=None, bias=None, scales=None, expand
     return moe_finalize_routing_op(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
 
 
+def moe_init_routing(x, row_idx, expert_idx, active_num):
+    r"""
+    Performs routing on the computation result of MoeGatingTopKSoftmax.
+
+    Inputs:
+        - **x** (Tensor) - 2D tensor, which contains input feature tokens. The shape is (NUM_ROWS, H).
+        - **row_idx** (Tensor) - Original row ID of each position. The shape must be the same as that of expertForSourceRow.
+        - **expert_idx** (Tensor) - 2D tensor, indicating the k experts corresponding to each row of features in the output of aclnnMoeGatingTopKSoftmax. The shape is (NUM_ROWS, K).
+        - **active_num** (int64) - The maximum number of rows that can be processed, that is, the maximum number of valid rows in `expanded_x`.
+
+    Outputs:
+        - **expanded_x** (Tensor) - 2D tensor, indicating features extended based on `expert_idx`. The shape is (min(NUM_ROWS, active_num) * K, H).
+        - **expanded_row_idx** (Tensor) - 1D tensor, indicating the mapping between `expanded_x` and `x`. The shape is (NUM_ROWS * K,).
+        - **expanded_expert_idx** (Tensor) - The sorted result of `expert_idx`.
+
+    Raises:
+        ShapeError: If the shape of an input Tensor does not match the description in the args.
+
+    Supported Platforms:
+        ``Ascend``
+    """
+    return moe_init_routing_op(x, row_idx, expert_idx, active_num)
+
+
 def quant_batch_matmul(x1, x2, scale, offset=None, bias=None, pertokenScaleOptional=None, transpose_x1=False, transpose_x2=False, dtype=mstype.float16):
     r"""