mindspore 2.3.0__cp39-none-any.whl → 2.3.0rc2__cp39-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +0 -1512
- mindspore/__init__.py +1 -2
- mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +25 -5
- mindspore/_extends/graph_kernel/model/graph_parallel.py +1 -1
- mindspore/_extends/parse/__init__.py +2 -2
- mindspore/_extends/parse/compile_config.py +0 -29
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +5 -21
- mindspore/_extends/parse/resources.py +7 -5
- mindspore/_extends/parse/standard_method.py +59 -40
- mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +5 -26
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/less_batch_normalization.py +6 -9
- mindspore/common/__init__.py +1 -8
- mindspore/common/_register_for_tensor.py +9 -8
- mindspore/common/api.py +65 -275
- mindspore/common/dtype.py +4 -8
- mindspore/common/dump.py +5 -2
- mindspore/common/jit_config.py +1 -1
- mindspore/common/lazy_inline.py +2 -14
- mindspore/common/parameter.py +15 -14
- mindspore/common/recompute.py +5 -20
- mindspore/common/sparse_tensor.py +6 -21
- mindspore/common/tensor.py +52 -100
- mindspore/communication/__init__.py +11 -6
- mindspore/communication/management.py +94 -92
- mindspore/context.py +18 -180
- mindspore/dataset/engine/datasets.py +46 -69
- mindspore/dataset/engine/datasets_user_defined.py +53 -72
- mindspore/dataset/engine/datasets_vision.py +2 -2
- mindspore/dataset/engine/queue.py +38 -56
- mindspore/dataset/engine/validators.py +5 -11
- mindspore/dataset/vision/__init__.py +5 -5
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +1 -1
- mindspore/dataset/vision/transforms.py +46 -591
- mindspore/dataset/vision/utils.py +1 -121
- mindspore/dataset/vision/validators.py +3 -9
- mindspore/hal/__init__.py +1 -7
- mindspore/hal/device.py +1 -1
- mindspore/include/api/model.h +0 -3
- mindspore/include/dataset/vision.h +2 -54
- mindspore/include/mindapi/base/types.h +0 -1
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -35
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +0 -72
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/{aclnn_all_finite.h → aclnn_add_custom.h} +11 -9
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +12 -184
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +5 -4
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +286 -275
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_tiling.h +45 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +4 -8
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_tiling.h +4 -11
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/kernel/flash_attention_score_mix_hwsync.h +0 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -6
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +75 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/kernel/matmul.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +3 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_common_tiling.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_info.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_data.h +3 -36
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/kernel/matmul_stridedslice_fusion.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +4 -22
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +2 -16
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +3 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +4 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +4 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +2 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +4 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +12 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +1 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +2 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +1 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -17
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +7 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/mindrecord/filewriter.py +2 -2
- mindspore/mint/__init__.py +40 -720
- mindspore/mint/nn/__init__.py +7 -89
- mindspore/mint/nn/functional.py +16 -165
- mindspore/mint/optim/adamw.py +16 -15
- mindspore/nn/__init__.py +2 -0
- mindspore/nn/cell.py +98 -97
- mindspore/nn/extend/basic.py +2 -2
- mindspore/nn/extend/embedding.py +1 -1
- mindspore/nn/extend/layer/normalization.py +5 -7
- mindspore/nn/generator.py +297 -0
- mindspore/nn/layer/activation.py +3 -4
- mindspore/nn/layer/basic.py +16 -79
- mindspore/nn/layer/conv.py +8 -17
- mindspore/nn/layer/embedding.py +4 -1
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +1 -1
- mindspore/nn/layer/pooling.py +0 -5
- mindspore/nn/layer/rnn_cells.py +2 -2
- mindspore/nn/loss/loss.py +19 -19
- mindspore/nn/optim/adasum.py +1 -1
- mindspore/nn/optim/sgd.py +2 -3
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/wrap/cell_wrapper.py +1 -25
- mindspore/nn/wrap/loss_scale.py +1 -24
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/math_ops.py +8 -8
- mindspore/ops/__init__.py +1 -1
- mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -75
- mindspore/ops/_vmap/vmap_array_ops.py +0 -27
- mindspore/ops/_vmap/vmap_math_ops.py +1 -29
- mindspore/ops/_vmap/vmap_nn_ops.py +18 -19
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +8 -34
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +9 -2
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -26
- mindspore/ops/auto_generate/gen_extend_func.py +27 -603
- mindspore/ops/auto_generate/gen_ops_def.py +203 -993
- mindspore/ops/auto_generate/gen_ops_prim.py +402 -1946
- mindspore/ops/auto_generate/pyboost_inner_prim.py +20 -90
- mindspore/ops/composite/base.py +6 -3
- mindspore/ops/composite/math_ops.py +1 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +17 -24
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/extend/__init__.py +3 -2
- mindspore/ops/extend/array_func.py +51 -10
- mindspore/ops/extend/nn_func.py +78 -2
- mindspore/ops/function/__init__.py +13 -8
- mindspore/ops/function/array_func.py +179 -455
- mindspore/ops/function/clip_func.py +1 -1
- mindspore/ops/function/grad/grad_func.py +3 -3
- mindspore/ops/function/math_func.py +103 -117
- mindspore/ops/function/nn_func.py +163 -275
- mindspore/ops/function/other_func.py +2 -2
- mindspore/ops/function/random_func.py +69 -202
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/functional.py +327 -332
- mindspore/ops/operations/__init__.py +3 -13
- mindspore/ops/operations/_grad_ops.py +27 -3
- mindspore/ops/operations/_inner_ops.py +356 -53
- mindspore/ops/operations/_rl_inner_ops.py +2 -2
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +65 -82
- mindspore/ops/operations/comm_ops.py +93 -784
- mindspore/ops/operations/custom_ops.py +28 -51
- mindspore/ops/operations/debug_ops.py +4 -4
- mindspore/ops/operations/inner_ops.py +2 -2
- mindspore/ops/operations/manually_defined/ops_def.py +4 -304
- mindspore/ops/operations/math_ops.py +50 -3
- mindspore/ops/operations/nn_ops.py +247 -14
- mindspore/ops/operations/other_ops.py +3 -3
- mindspore/ops/operations/random_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +1 -1
- mindspore/ops/primitive.py +8 -9
- mindspore/ops/silent_check.py +5 -5
- mindspore/ops_generate/arg_dtype_cast.py +9 -2
- mindspore/ops_generate/arg_handler.py +0 -26
- mindspore/ops_generate/gen_aclnn_implement.py +4 -1
- mindspore/ops_generate/gen_ops.py +4 -26
- mindspore/ops_generate/gen_pyboost_func.py +12 -41
- mindspore/ops_generate/gen_utils.py +0 -21
- mindspore/ops_generate/pyboost_utils.py +2 -7
- mindspore/ops_generate/template.py +0 -1
- mindspore/parallel/_auto_parallel_context.py +1 -21
- mindspore/parallel/_tensor.py +5 -0
- mindspore/parallel/_transformer/transformer.py +1 -1
- mindspore/parallel/_utils.py +1 -15
- mindspore/parallel/algo_parameter_config.py +3 -1
- mindspore/parallel/checkpoint_transform.py +9 -12
- mindspore/parallel/cluster/process_entity/_api.py +29 -28
- mindspore/parallel/cluster/process_entity/_utils.py +3 -13
- mindspore/parallel/cluster/run.py +16 -13
- mindspore/parallel/parameter_broadcast.py +2 -2
- mindspore/parallel/shard.py +17 -31
- mindspore/profiler/__init__.py +2 -3
- mindspore/profiler/common/util.py +2 -107
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +21 -8
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -82
- mindspore/profiler/parser/ascend_analysis/function_event.py +28 -43
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +27 -49
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +10 -15
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +20 -25
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +5 -5
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +1 -10
- mindspore/profiler/parser/ascend_hccl_generator.py +1 -4
- mindspore/profiler/parser/ascend_msprof_exporter.py +22 -43
- mindspore/profiler/parser/ascend_timeline_generator.py +5 -7
- mindspore/profiler/parser/minddata_parser.py +3 -72
- mindspore/profiler/profiling.py +59 -176
- mindspore/rewrite/api/node.py +1 -1
- mindspore/rewrite/common/namespace.py +5 -5
- mindspore/rewrite/parsers/assign_parser.py +0 -2
- mindspore/rewrite/parsers/class_def_parser.py +4 -8
- mindspore/run_check/_check_version.py +1 -1
- mindspore/scipy/fft.py +3 -1
- mindspore/scipy/linalg.py +3 -2
- mindspore/scipy/ops.py +3 -5
- mindspore/scipy/optimize/__init__.py +2 -2
- mindspore/train/__init__.py +4 -4
- mindspore/train/anf_ir_pb2.py +2 -8
- mindspore/train/callback/__init__.py +2 -5
- mindspore/train/callback/_backup_and_restore.py +2 -2
- mindspore/train/callback/_checkpoint.py +16 -104
- mindspore/train/callback/_landscape.py +1 -1
- mindspore/train/callback/_time_monitor.py +1 -1
- mindspore/train/data_sink.py +4 -5
- mindspore/train/dataset_helper.py +20 -45
- mindspore/train/model.py +38 -266
- mindspore/train/serialization.py +105 -256
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/version.py +1 -1
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +303 -420
- mindspore/_extends/pijit/__init__.py +0 -23
- mindspore/_extends/pijit/pijit_func_white_list.py +0 -343
- mindspore/common/file_system.py +0 -48
- mindspore/common/generator.py +0 -260
- mindspore/common/no_inline.py +0 -54
- mindspore/common/np_dtype.py +0 -25
- mindspore/communication/comm_func.py +0 -1140
- mindspore/hal/memory.py +0 -326
- mindspore/lib/libavcodec.so.59 +0 -0
- mindspore/lib/libavdevice.so.59 +0 -0
- mindspore/lib/libavfilter.so.8 +0 -0
- mindspore/lib/libavformat.so.59 +0 -0
- mindspore/lib/libavutil.so.57 +0 -0
- mindspore/lib/libmindspore_np_dtype.so +0 -0
- mindspore/lib/libswresample.so.4 +0 -0
- mindspore/lib/libswscale.so.6 +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.cpp +0 -326
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.py +0 -180
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
- mindspore/lib/plugin/ascend/custom_compiler/OWNERS +0 -12
- mindspore/lib/plugin/ascend/custom_compiler/setup.py +0 -255
- mindspore/lib/plugin/ascend/custom_compiler/start.sh +0 -26
- mindspore/lib/plugin/ascend/custom_compiler/template.json +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/base_type.h +0 -133
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -32
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/tiling_info.h +0 -60
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -37
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_op.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_backoff_base.h +0 -62
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -33
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_ops.h +0 -88
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_pa_op.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -95
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -84
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -61
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -224
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_impl.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_tiling.h +0 -187
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul.h +0 -245
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_interface.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_utils.h +0 -111
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/tiling_data.h +0 -54
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -364
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -114
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -98
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/mint/linalg/__init__.py +0 -22
- mindspore/nn/layer/embedding_service.py +0 -531
- mindspore/nn/layer/embedding_service_layer.py +0 -393
- mindspore/ops/function/reshard_func.py +0 -102
- mindspore/ops/operations/_infer_ops.py +0 -19
- mindspore/ops/operations/reshard_ops.py +0 -53
- mindspore/profiler/common/process_pool.py +0 -41
- mindspore/profiler/common/singleton.py +0 -28
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/train/callback/_cluster_monitor.py +0 -201
- mindspore/train/callback/_flops_collector.py +0 -238
- mindspore/train/callback/_mindio_ttp.py +0 -443
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
mindspore/mint/__init__.py
CHANGED
|
@@ -14,23 +14,19 @@
|
|
|
14
14
|
# ============================================================================
|
|
15
15
|
"""mint module."""
|
|
16
16
|
from __future__ import absolute_import
|
|
17
|
-
|
|
18
|
-
from mindspore.ops.extend import gather, conv2d, max, min
|
|
17
|
+
from mindspore.ops.extend import *
|
|
19
18
|
from mindspore.ops.extend import array_func, math_func, nn_func
|
|
20
19
|
from mindspore.mint.nn.functional import *
|
|
21
20
|
from mindspore.mint.nn import functional
|
|
22
|
-
from mindspore.
|
|
23
|
-
from mindspore.ops import erf, where, triu
|
|
21
|
+
from mindspore.ops import erf, where, tril, triu
|
|
24
22
|
from mindspore.ops.function.math_func import linspace_ext as linspace
|
|
25
23
|
from mindspore.ops.function.array_func import full_ext as full
|
|
26
24
|
from mindspore.ops.function.array_func import ones_like_ext as ones_like
|
|
27
25
|
from mindspore.ops.function.array_func import zeros_like_ext as zeros_like
|
|
28
|
-
from mindspore.ops.function.array_func import unique_ext as unique
|
|
29
|
-
from mindspore.ops.function.math_func import isclose
|
|
30
26
|
from mindspore.ops.auto_generate import abs
|
|
31
27
|
# 1
|
|
32
28
|
from mindspore.ops.function.math_func import divide, div
|
|
33
|
-
from mindspore.ops.
|
|
29
|
+
from mindspore.ops.function.array_func import topk_ext as topk
|
|
34
30
|
# 2
|
|
35
31
|
from mindspore.ops.function.math_func import sin
|
|
36
32
|
# 3
|
|
@@ -38,7 +34,7 @@ from mindspore.ops.function.clip_func import clamp
|
|
|
38
34
|
# 4
|
|
39
35
|
|
|
40
36
|
# 5
|
|
41
|
-
|
|
37
|
+
|
|
42
38
|
# 6
|
|
43
39
|
from mindspore.ops.auto_generate import stack_ext as stack
|
|
44
40
|
|
|
@@ -53,16 +49,15 @@ from mindspore.ops.function.math_func import ne
|
|
|
53
49
|
# 11
|
|
54
50
|
|
|
55
51
|
# 12
|
|
56
|
-
|
|
52
|
+
from mindspore.ops.function.array_func import repeat_interleave_ext as repeat_interleave
|
|
57
53
|
# 13
|
|
58
|
-
|
|
54
|
+
|
|
59
55
|
# 14
|
|
60
56
|
|
|
61
57
|
# 15
|
|
62
58
|
from mindspore.ops.auto_generate import flatten_ext as flatten
|
|
63
59
|
# 16
|
|
64
60
|
from mindspore.ops.functional import matmul
|
|
65
|
-
from mindspore.ops.auto_generate import bmm_ext as bmm
|
|
66
61
|
# 17
|
|
67
62
|
|
|
68
63
|
# 18
|
|
@@ -70,13 +65,13 @@ from mindspore.ops.functional import sum
|
|
|
70
65
|
# 19
|
|
71
66
|
from mindspore.ops.functional import log
|
|
72
67
|
# 20
|
|
73
|
-
|
|
68
|
+
from mindspore.ops.functional import prod
|
|
74
69
|
# 21
|
|
75
70
|
from mindspore.ops.functional import mul
|
|
76
71
|
# 22
|
|
77
72
|
|
|
78
73
|
# 23
|
|
79
|
-
|
|
74
|
+
from mindspore.ops.functional import mean_ext as mean
|
|
80
75
|
# 24
|
|
81
76
|
|
|
82
77
|
# 25
|
|
@@ -88,15 +83,15 @@ from mindspore.ops.functional import reciprocal
|
|
|
88
83
|
# 28
|
|
89
84
|
from mindspore.ops.functional import exp
|
|
90
85
|
# 29
|
|
91
|
-
|
|
86
|
+
from mindspore.ops.functional import sqrt
|
|
92
87
|
# 30
|
|
93
|
-
|
|
88
|
+
|
|
94
89
|
# 31
|
|
95
90
|
|
|
96
91
|
# 32
|
|
97
92
|
|
|
98
93
|
# 33
|
|
99
|
-
|
|
94
|
+
from mindspore.ops.function.array_func import split_ext as split
|
|
100
95
|
# 34
|
|
101
96
|
|
|
102
97
|
# 35
|
|
@@ -104,7 +99,7 @@ from mindspore.ops.functional import erfinv
|
|
|
104
99
|
# 36
|
|
105
100
|
|
|
106
101
|
# 37
|
|
107
|
-
|
|
102
|
+
|
|
108
103
|
# 38
|
|
109
104
|
|
|
110
105
|
# 39
|
|
@@ -114,7 +109,7 @@ from mindspore.ops.function.array_func import nonzero
|
|
|
114
109
|
# 41
|
|
115
110
|
|
|
116
111
|
# 42
|
|
117
|
-
from mindspore.ops.
|
|
112
|
+
from mindspore.ops.functional import argmax
|
|
118
113
|
# 43
|
|
119
114
|
|
|
120
115
|
# 44
|
|
@@ -138,7 +133,7 @@ from mindspore.ops.functional import tile
|
|
|
138
133
|
# 53
|
|
139
134
|
|
|
140
135
|
# 54
|
|
141
|
-
from mindspore.ops
|
|
136
|
+
from mindspore.ops import normal_ext as normal
|
|
142
137
|
# 55
|
|
143
138
|
|
|
144
139
|
# 56
|
|
@@ -146,10 +141,11 @@ from mindspore.ops.function.random_func import normal_ext as normal
|
|
|
146
141
|
# 57
|
|
147
142
|
from mindspore.ops.functional import broadcast_to
|
|
148
143
|
# 58
|
|
149
|
-
|
|
144
|
+
|
|
150
145
|
# 59
|
|
151
146
|
from mindspore.ops.functional import square
|
|
152
147
|
# 60
|
|
148
|
+
from mindspore.ops.function.math_func import all
|
|
153
149
|
|
|
154
150
|
# 61
|
|
155
151
|
from mindspore.ops.functional import rsqrt
|
|
@@ -178,7 +174,7 @@ from mindspore.ops.functional import isfinite
|
|
|
178
174
|
# 73
|
|
179
175
|
from mindspore.ops.functional import ceil
|
|
180
176
|
# 74
|
|
181
|
-
|
|
177
|
+
|
|
182
178
|
# 75
|
|
183
179
|
from mindspore.ops.functional import less, lt
|
|
184
180
|
# 76
|
|
@@ -192,15 +188,15 @@ from mindspore.ops.function import arange_ext as arange
|
|
|
192
188
|
# 80
|
|
193
189
|
|
|
194
190
|
# 81
|
|
195
|
-
|
|
191
|
+
|
|
196
192
|
# 82
|
|
197
193
|
|
|
198
194
|
# 83
|
|
199
|
-
|
|
195
|
+
|
|
200
196
|
# 84
|
|
201
197
|
|
|
202
198
|
# 85
|
|
203
|
-
|
|
199
|
+
|
|
204
200
|
# 86
|
|
205
201
|
|
|
206
202
|
# 87
|
|
@@ -231,676 +227,16 @@ from mindspore.ops.function.math_func import tanh
|
|
|
231
227
|
|
|
232
228
|
# 100
|
|
233
229
|
|
|
234
|
-
# 122
|
|
235
|
-
|
|
236
|
-
# 176
|
|
237
|
-
from mindspore.ops.function.math_func import atan2_ext as atan2
|
|
238
|
-
from mindspore.ops.function.math_func import arctan2_ext as arctan2
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
# 208
|
|
242
|
-
from mindspore.ops.function.array_func import eye
|
|
243
|
-
from mindspore.ops.function.random_func import rand_ext as rand
|
|
244
|
-
from mindspore.ops.function.random_func import rand_like_ext as rand_like
|
|
245
|
-
# 210
|
|
246
|
-
from mindspore.ops.auto_generate import floor
|
|
247
|
-
# 231
|
|
248
|
-
from mindspore.ops.function.math_func import inverse_ext as inverse
|
|
249
|
-
|
|
250
230
|
# 285
|
|
251
231
|
from mindspore.ops.function.array_func import scatter_add_ext as scatter_add
|
|
252
232
|
|
|
253
|
-
|
|
254
|
-
def add(input, other, *, alpha=1):
|
|
255
|
-
r"""
|
|
256
|
-
Adds scaled other value to input Tensor.
|
|
257
|
-
|
|
258
|
-
.. math::
|
|
259
|
-
|
|
260
|
-
out_{i} = input_{i} + alpha \times other_{i}
|
|
261
|
-
|
|
262
|
-
Note:
|
|
263
|
-
- When the two inputs have different shapes,
|
|
264
|
-
they must be able to broadcast to a common shape.
|
|
265
|
-
- The two inputs and alpha comply with the implicit type conversion rules to make the data types
|
|
266
|
-
consistent.
|
|
267
|
-
|
|
268
|
-
Args:
|
|
269
|
-
input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
|
|
270
|
-
a bool or a tensor whose data type is
|
|
271
|
-
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
|
|
272
|
-
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
|
|
273
|
-
other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
|
|
274
|
-
a bool or a tensor whose data type is
|
|
275
|
-
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
|
|
276
|
-
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
|
|
277
|
-
|
|
278
|
-
Keyword Args:
|
|
279
|
-
alpha (number.Number): A scaling factor applied to `other`, default 1.
|
|
280
|
-
|
|
281
|
-
Returns:
|
|
282
|
-
Tensor with a shape that is the same as the broadcasted shape of the input `input` and `other`,
|
|
283
|
-
and the data type is the one with higher precision or higher digits among the two inputs and alpha.
|
|
284
|
-
|
|
285
|
-
Raises:
|
|
286
|
-
TypeError: If the type of `input`, `other`, or `alpha` is not one of the following: Tensor, number.Number, bool.
|
|
287
|
-
TypeError: If `alpha` is of type float but `input` and `other` are not of type float.
|
|
288
|
-
TypeError: If `alpha` is of type bool but `input` and `other` are not of type bool.
|
|
289
|
-
|
|
290
|
-
Supported Platforms:
|
|
291
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
292
|
-
|
|
293
|
-
Examples:
|
|
294
|
-
>>> import numpy as np
|
|
295
|
-
>>> import mindspore
|
|
296
|
-
>>> from mindspore import Tensor
|
|
297
|
-
>>> from mindspore import mint
|
|
298
|
-
>>> x = Tensor(1, mindspore.int32)
|
|
299
|
-
>>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
|
|
300
|
-
>>> alpha = 0.5
|
|
301
|
-
>>> output = mint.add(x, y, alpha=alpha)
|
|
302
|
-
>>> print(output)
|
|
303
|
-
[3. 3.5 4.]
|
|
304
|
-
>>> # the data type of x is int32, the data type of y is float32,
|
|
305
|
-
>>> # alpha is a float, and the output is the data format of higher precision float32.
|
|
306
|
-
>>> print(output.dtype)
|
|
307
|
-
Float32
|
|
308
|
-
"""
|
|
309
|
-
return ops.auto_generate.add_ext(input, other, alpha)
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
def any(input, dim=None, keepdim=False):
|
|
313
|
-
r"""
|
|
314
|
-
Reduces a dimension of `input` by the "logical OR" of all elements in the dimension, by default. And also can
|
|
315
|
-
reduce a dimension of `input` along the `dim`. Determine whether the dimensions of the output and input are the
|
|
316
|
-
same by controlling `keepdim`.
|
|
317
|
-
|
|
318
|
-
Note:
|
|
319
|
-
The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
|
|
320
|
-
|
|
321
|
-
Args:
|
|
322
|
-
input (Tensor): Input Tensor, has the shape :math:`(N, *)` where :math:`*` means,
|
|
323
|
-
any number of additional dimensions.
|
|
324
|
-
dim (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
|
|
325
|
-
Suppose the rank of `input` is r, `dim` must be in the range [-rank(input), rank(input)).
|
|
326
|
-
Default: ``None`` , all dimensions are reduced.
|
|
327
|
-
keepdim (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
|
|
328
|
-
If ``False`` , don't keep these dimensions. Default : ``False`` .
|
|
329
|
-
|
|
330
|
-
Returns:
|
|
331
|
-
Tensor, the dtype is bool.
|
|
332
|
-
|
|
333
|
-
- If `dim` is ``None`` , and `keepdim` is ``False`` ,
|
|
334
|
-
the output is a 0-D Tensor representing the "logical OR" of all elements in the input Tensor.
|
|
335
|
-
- If `dim` is int, such as 2, and `keepdim` is ``False`` ,
|
|
336
|
-
the shape of output is :math:`(input_1, input_3, ..., input_R)`.
|
|
337
|
-
- If `dim` is tuple(int), such as (2, 3), and `keepdim` is ``False`` ,
|
|
338
|
-
the shape of output is :math:`(input_1, input_4, ..., input_R)`.
|
|
339
|
-
- If `dim` is 1-D Tensor, such as [2, 3], and `keepdim` is ``False`` ,
|
|
340
|
-
the shape of output is :math:`(input_1, input_4, ..., input_R)`.
|
|
341
|
-
|
|
342
|
-
Raises:
|
|
343
|
-
TypeError: If `keepdim` is not a bool.
|
|
344
|
-
TypeError: If `input` is not a Tensor.
|
|
345
|
-
TypeError: If `dim` is not one of the following: int, tuple, list or Tensor.
|
|
346
|
-
|
|
347
|
-
Supported Platforms:
|
|
348
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
349
|
-
|
|
350
|
-
Examples:
|
|
351
|
-
>>> import numpy as np
|
|
352
|
-
>>> from mindspore import Tensor, mint
|
|
353
|
-
>>> x = Tensor(np.array([[True, False], [True, True]]))
|
|
354
|
-
>>> # case 1: Reduces a dimension by the "logical OR" of all elements in the dimension.
|
|
355
|
-
>>> output = mint.any(x, keepdim=True)
|
|
356
|
-
>>> print(output)
|
|
357
|
-
[[ True]]
|
|
358
|
-
>>> print(output.shape)
|
|
359
|
-
(1, 1)
|
|
360
|
-
>>> # case 2: Reduces a dimension along dim 0.
|
|
361
|
-
>>> output = mint.any(x, dim=0)
|
|
362
|
-
>>> print(output)
|
|
363
|
-
[ True True]
|
|
364
|
-
>>> # case 3: Reduces a dimension along dim 1.
|
|
365
|
-
>>> output = mint.any(x, dim=1)
|
|
366
|
-
>>> print(output)
|
|
367
|
-
[ True True]
|
|
368
|
-
"""
|
|
369
|
-
return ops.functional.any(input, dim, keepdim)
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
def all(input, dim=None, keepdim=False):
|
|
373
|
-
r"""
|
|
374
|
-
Reduces a dimension of `input` by the "logical AND" of all elements in the dimension, by default. And also can
|
|
375
|
-
reduce a dimension of `input` along the `dim`. Determine whether the dimensions of the output and input are the
|
|
376
|
-
same by controlling `keepdim`.
|
|
377
|
-
|
|
378
|
-
Note:
|
|
379
|
-
The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
|
|
380
|
-
|
|
381
|
-
Args:
|
|
382
|
-
input (Tensor): Input Tensor, has the shape :math:`(N, *)` where :math:`*` means,
|
|
383
|
-
any number of additional dimensions.
|
|
384
|
-
dim (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
|
|
385
|
-
Suppose the rank of `input` is r, `dim` must be in the range [-rank(input), rank(input)).
|
|
386
|
-
Default: ``None`` , all dimensions are reduced.
|
|
387
|
-
keepdim (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
|
|
388
|
-
If ``False`` , don't keep these dimensions. Default : ``False`` .
|
|
389
|
-
|
|
390
|
-
Returns:
|
|
391
|
-
Tensor, the dtype is bool.
|
|
392
|
-
|
|
393
|
-
- If `dim` is ``None`` , and `keepdim` is ``False`` ,
|
|
394
|
-
the output is a 0-D Tensor representing the "logical AND" of all elements in the input Tensor.
|
|
395
|
-
- If `dim` is int, such as 2, and `keepdim` is ``False`` ,
|
|
396
|
-
the shape of output is :math:`(input_1, input_3, ..., input_R)`.
|
|
397
|
-
- If `dim` is tuple(int), such as (2, 3), and `keepdim` is ``False`` ,
|
|
398
|
-
the shape of output is :math:`(input_1, input_4, ..., input_R)`.
|
|
399
|
-
- If `dim` is 1-D Tensor, such as [2, 3], and `keepdim` is ``False`` ,
|
|
400
|
-
the shape of output is :math:`(input_1, input_4, ..., input_R)`.
|
|
401
|
-
|
|
402
|
-
Raises:
|
|
403
|
-
TypeError: If `keepdim` is not a bool.
|
|
404
|
-
TypeError: If `input` is not a Tensor.
|
|
405
|
-
TypeError: If `dim` is not one of the following: int, tuple, list or Tensor.
|
|
406
|
-
|
|
407
|
-
Supported Platforms:
|
|
408
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
409
|
-
|
|
410
|
-
Examples:
|
|
411
|
-
>>> import numpy as np
|
|
412
|
-
>>> from mindspore import Tensor, mint
|
|
413
|
-
>>> x = Tensor(np.array([[True, False], [True, True]]))
|
|
414
|
-
>>> # case 1: Reduces a dimension by the "logicalAND" of all elements in the dimension.
|
|
415
|
-
>>> output = mint.all(x, keepdim=True)
|
|
416
|
-
>>> print(output)
|
|
417
|
-
[[False]]
|
|
418
|
-
>>> print(output.shape)
|
|
419
|
-
(1, 1)
|
|
420
|
-
>>> # case 2: Reduces a dimension along axis 0.
|
|
421
|
-
>>> output = mint.all(x, dim=0)
|
|
422
|
-
>>> print(output)
|
|
423
|
-
[ True False]
|
|
424
|
-
>>> # case 3: Reduces a dimension along axis 1.
|
|
425
|
-
>>> output = mint.all(x, dim=1)
|
|
426
|
-
>>> print(output)
|
|
427
|
-
[False True]
|
|
428
|
-
"""
|
|
429
|
-
return ops.function.math_func.all(input, dim, keepdim)
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
def cat(tensors, dim=0):
|
|
434
|
-
r"""
|
|
435
|
-
Connect input tensors along with the given dimension.
|
|
436
|
-
|
|
437
|
-
The input data is a tuple or a list of tensors. These tensors have the same rank :math:`R`.
|
|
438
|
-
Set the given dimension as :math:`m`, and :math:`0 \le m < R`. Set the number of input tensors as :math:`N`.
|
|
439
|
-
For the :math:`i`-th tensor :math:`t_i`, it has the shape of :math:`(x_1, x_2, ..., x_{mi}, ..., x_R)`.
|
|
440
|
-
:math:`x_{mi}` is the :math:`m`-th dimension of the :math:`t_i`. Then, the shape of the output tensor is
|
|
441
|
-
|
|
442
|
-
.. math::
|
|
443
|
-
|
|
444
|
-
(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)
|
|
445
|
-
|
|
446
|
-
Args:
|
|
447
|
-
tensors (Union[tuple, list]): A tuple or a list of input tensors.
|
|
448
|
-
Suppose there are two tensors in this tuple or list, namely t1 and t2.
|
|
449
|
-
To perform `concat` in the dimension 0 direction, except for the :math:`0`-th dimension,
|
|
450
|
-
all other dimensions should be equal, that is,
|
|
451
|
-
:math:`t1.shape[1] = t2.shape[1], t1.shape[2] = t2.shape[2], ..., t1.shape[R-1] = t2.shape[R-1]`,
|
|
452
|
-
where :math:`R` represents the rank of tensor.
|
|
453
|
-
dim (int): The specified dimension, whose value is in range :math:`[-R, R)`. Default: ``0`` .
|
|
454
|
-
|
|
455
|
-
Returns:
|
|
456
|
-
Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
|
|
457
|
-
The data type is the same with `tensors`.
|
|
458
|
-
|
|
459
|
-
Raises:
|
|
460
|
-
TypeError: If `dim` is not an int.
|
|
461
|
-
ValueError: If `tensors` have different dimension of tensor.
|
|
462
|
-
ValueError: If `dim` not in range :math:`[-R, R)`.
|
|
463
|
-
ValueError: If tensor's shape in `tensors` except for `dim` are different.
|
|
464
|
-
ValueError: If `tensors` is an empty tuple or list.
|
|
465
|
-
|
|
466
|
-
Supported Platforms:
|
|
467
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
468
|
-
|
|
469
|
-
Examples:
|
|
470
|
-
>>> import mindspore
|
|
471
|
-
>>> import numpy as np
|
|
472
|
-
>>> from mindspore import Tensor
|
|
473
|
-
>>> from mindspore import mint
|
|
474
|
-
>>> input_x1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
|
|
475
|
-
>>> input_x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
|
|
476
|
-
>>> output = mint.cat((input_x1, input_x2))
|
|
477
|
-
>>> print(output)
|
|
478
|
-
[[0. 1.]
|
|
479
|
-
[2. 1.]
|
|
480
|
-
[0. 1.]
|
|
481
|
-
[2. 1.]]
|
|
482
|
-
>>> output = mint.cat((input_x1, input_x2), 1)
|
|
483
|
-
>>> print(output)
|
|
484
|
-
[[0. 1. 0. 1.]
|
|
485
|
-
[2. 1. 2. 1.]]
|
|
486
|
-
"""
|
|
487
|
-
return ops.auto_generate.cat(tensors, dim)
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
def mean(input, dim=None, keepdim=False, *, dtype=None):
|
|
491
|
-
r"""
|
|
492
|
-
Reduces all dimension of a tensor by averaging all elements in the dimension, by default.
|
|
493
|
-
And reduce a dimension of `input` along the specified `dim`. `keepdim`
|
|
494
|
-
determines whether the dimensions of the output and input are the same.
|
|
495
|
-
|
|
496
|
-
Note:
|
|
497
|
-
The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
|
|
498
|
-
|
|
499
|
-
Args:
|
|
500
|
-
input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
|
|
501
|
-
:math:`(N, *)` where :math:`*` means, any number of additional dimensions.
|
|
502
|
-
dim (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` ,
|
|
503
|
-
reduce all dimensions. Only constant value is allowed. Assume the rank of `input` is r,
|
|
504
|
-
and the value range is [-r,r).
|
|
505
|
-
keepdim (bool): If ``True`` , keep these reduced dimensions and the length is 1.
|
|
506
|
-
If ``False`` , don't keep these dimensions. Default: ``False`` .
|
|
507
|
-
|
|
508
|
-
Keyword Args:
|
|
509
|
-
dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None`` .
|
|
510
|
-
|
|
511
|
-
Returns:
|
|
512
|
-
Tensor.
|
|
513
|
-
|
|
514
|
-
- If `dim` is ``None`` , and `keepdim` is ``False`` ,
|
|
515
|
-
the output is a 0-D tensor representing the product of all elements in the input tensor.
|
|
516
|
-
- If `dim` is int, set as 1, and `keepdim` is ``False`` ,
|
|
517
|
-
the shape of output is :math:`(input_0, input_2, ..., input_R)`.
|
|
518
|
-
- If `dim` is tuple(int) or list(int), set as (1, 2), and `keepdim` is ``False`` ,
|
|
519
|
-
the shape of output is :math:`(input_0, input_3, ..., input_R)`.
|
|
520
|
-
- If `dim` is 1-D Tensor, set as [1, 2], and `keepdim` is ``False`` ,
|
|
521
|
-
the shape of output is :math:`(input_0, input_3, ..., input_R)`.
|
|
522
|
-
|
|
523
|
-
Raises:
|
|
524
|
-
TypeError: If `input` is not a Tensor.
|
|
525
|
-
TypeError: If `dim` is not one of the following: int, tuple, list or Tensor.
|
|
526
|
-
TypeError: If `keepdim` is not a bool.
|
|
527
|
-
ValueError: If `dim` is out of range.
|
|
528
|
-
|
|
529
|
-
Supported Platforms:
|
|
530
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
531
|
-
|
|
532
|
-
Examples:
|
|
533
|
-
>>> import mindspore
|
|
534
|
-
>>> import numpy as np
|
|
535
|
-
>>> from mindspore import Tensor, mint
|
|
536
|
-
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
|
|
537
|
-
>>> output = mint.mean(x, 1, keepdim=True)
|
|
538
|
-
>>> result = output.shape
|
|
539
|
-
>>> print(result)
|
|
540
|
-
(3, 1, 5, 6)
|
|
541
|
-
>>> # case 1: Reduces a dimension by averaging all elements in the dimension.
|
|
542
|
-
>>> x = Tensor(np.array([[[2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]],
|
|
543
|
-
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
|
|
544
|
-
... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
|
|
545
|
-
... mindspore.float32)
|
|
546
|
-
>>> output = mint.mean(x)
|
|
547
|
-
>>> print(output)
|
|
548
|
-
5.0
|
|
549
|
-
>>> print(output.shape)
|
|
550
|
-
()
|
|
551
|
-
>>> # case 2: Reduces a dimension along the axis 0
|
|
552
|
-
>>> output = mint.mean(x, 0, True)
|
|
553
|
-
>>> print(output)
|
|
554
|
-
[[[4. 4. 4. 4. 4. 4.]
|
|
555
|
-
[5. 5. 5. 5. 5. 5.]
|
|
556
|
-
[6. 6. 6. 6. 6. 6.]]]
|
|
557
|
-
>>> # case 3: Reduces a dimension along the axis 1
|
|
558
|
-
>>> output = mint.mean(x, 1, True)
|
|
559
|
-
>>> print(output)
|
|
560
|
-
[[[2. 2. 2. 2. 2. 2.]]
|
|
561
|
-
[[5. 5. 5. 5. 5. 5.]]
|
|
562
|
-
[[8. 8. 8. 8. 8. 8.]]]
|
|
563
|
-
>>> # case 4: Reduces a dimension along the axis 2
|
|
564
|
-
>>> output = mint.mean(x, 2, True)
|
|
565
|
-
>>> print(output)
|
|
566
|
-
[[[ 2.]
|
|
567
|
-
[ 2.]
|
|
568
|
-
[ 2.]]
|
|
569
|
-
[[ 4.]
|
|
570
|
-
[ 5.]
|
|
571
|
-
[ 6.]]
|
|
572
|
-
[[ 6.]
|
|
573
|
-
[ 8.]
|
|
574
|
-
[10.]]]
|
|
575
|
-
"""
|
|
576
|
-
return ops.function.math_func.mean_ext(input, axis=dim, keep_dims=keepdim, dtype=dtype)
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
-def prod(input, dim=None, keepdim=False, *, dtype=None):
-    r"""
-    Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. It can also
-    reduce a dimension of `input` along the `dim`. Whether the output keeps the same number of dimensions
-    as the input is controlled by `keepdim`.
-
-    Args:
-        input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
-            :math:`(N, *)` where :math:`*` means any number of additional dimensions.
-        dim (int): The dimension to reduce. Default: ``None`` , reduce all dimensions. Only a constant value
-            is allowed. Assume the rank of `input` is r; the value range is [-r, r).
-        keepdim (bool): If ``True`` , keep these reduced dimensions with length 1.
-            If ``False`` , don't keep these dimensions. Default: ``False`` .
-
-    Keyword Args:
-        dtype (:class:`mindspore.dtype`, optional): The desired data type of the returned Tensor. Default: ``None`` .
-
-    Returns:
-        Tensor.
-
-        - If `dim` is ``None`` and `keepdim` is ``False`` ,
-          the output is a 0-D tensor representing the product of all elements in the input tensor.
-        - If `dim` is an int, e.g. 1, and `keepdim` is ``False`` ,
-          the shape of the output is :math:`(input_0, input_2, ..., input_R)`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If `dim` is not an int.
-        TypeError: If `keepdim` is not a bool.
-        ValueError: If `dim` is out of range.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, mint
-        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
-        >>> output = mint.prod(x, 1, keepdim=True)
-        >>> result = output.shape
-        >>> print(result)
-        (3, 1, 5, 6)
-        >>> # case 1: Reduces a dimension by multiplying all elements in the dimension.
-        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
-        ...                      [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
-        ...                      [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
-        >>> output = mint.prod(x)
-        >>> print(output)
-        2.2833798e+33
-        >>> print(output.shape)
-        ()
-        >>> # case 2: Reduces a dimension along axis 0.
-        >>> output = mint.prod(x, 0, True)
-        >>> print(output)
-        [[[ 28.  28.  28.  28.  28.  28.]
-          [ 80.  80.  80.  80.  80.  80.]
-          [162. 162. 162. 162. 162. 162.]]]
-        >>> # case 3: Reduces a dimension along axis 1.
-        >>> output = mint.prod(x, 1, True)
-        >>> print(output)
-        [[[  6.   6.   6.   6.   6.   6.]]
-         [[120. 120. 120. 120. 120. 120.]]
-         [[504. 504. 504. 504. 504. 504.]]]
-        >>> # case 4: Reduces a dimension along axis 2.
-        >>> output = mint.prod(x, 2, True)
-        >>> print(output)
-        [[[1.00000e+00]
-          [6.40000e+01]
-          [7.29000e+02]]
-         [[4.09600e+03]
-          [1.56250e+04]
-          [4.66560e+04]]
-         [[1.17649e+05]
-          [2.62144e+05]
-          [5.31441e+05]]]
-    """
-    return ops.auto_generate.prod_ext(input, axis=dim, keep_dims=keepdim, dtype=dtype)
-
-
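A quick illustrative sketch of the keepdim behaviour documented above (again assuming MindSpore 2.3.0, where mint.prod exists):

    # Sketch only: keepdim decides whether the reduced axis survives as length 1.
    import numpy as np
    from mindspore import Tensor, mint

    x = Tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))  # [[1 2 3] [4 5 6]]
    print(mint.prod(x))                  # 720.0 -- product of every element, 0-D result
    print(mint.prod(x, 1, True).shape)   # (2, 1)
    print(mint.prod(x, 1, False).shape)  # (2,)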
-def ones(size, *, dtype=None):
-    r"""
-    Creates a tensor filled with the value one.
-
-    Creates a tensor with the shape described by the first argument and fills it with ones, in the type given
-    by the second argument.
-
-    Args:
-        size (Union[tuple[int], list[int], int, Tensor]): The specified shape of the output tensor. Only positive
-            integers, or tuples or Tensors containing positive integers, are allowed. If it is a Tensor,
-            it must be a 0-D or 1-D Tensor with an int32 or int64 dtype.
-
-    Keyword Args:
-        dtype (:class:`mindspore.dtype`, optional): The specified type of the output tensor. If `dtype` is ``None`` ,
-            `mindspore.float32` will be used. Default: ``None`` .
-
-    Returns:
-        Tensor, whose dtype and size are defined by the inputs.
-
-    Raises:
-        TypeError: If `size` is neither an int nor a tuple/list/Tensor of int.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import mint
-        >>> output = mint.ones((2, 2), dtype=mindspore.float32)
-        >>> print(output)
-        [[1. 1.]
-         [1. 1.]]
-    """
-    return ops.auto_generate.ones(size, dtype)
-
-
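The `size` argument above accepts several forms; a brief sketch (assumes MindSpore 2.3.0):

    # Sketch only: int, tuple, and Tensor shapes all describe the same output.
    import mindspore
    from mindspore import Tensor, mint

    print(mint.ones(3).shape)                                # (3,)
    print(mint.ones((2, 3)).shape)                           # (2, 3)
    print(mint.ones(Tensor([2, 3], mindspore.int64)).shape)  # (2, 3)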
-def permute(input, dims):
-    """
-    Permutes the dimensions of the input tensor according to input `dims` .
-
-    Args:
-        input (Tensor): Input Tensor.
-        dims (tuple(int)): The order of the dimensions. Permute rearranges the `input` according
-            to the order of the `dims`.
-
-    Returns:
-        Tensor, has the same number of dimensions as the input tensor, with the axes permuted
-        according to `dims`.
-
-    Raises:
-        ValueError: If `dims` is None.
-        ValueError: If the number of elements of `dims` is not equal to the ndim of `input`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, mint
-        >>> input_x = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
-        >>> input_perm = (0, 2, 1)
-        >>> print(mint.permute(input_x, input_perm))
-        [[[ 1.  4.]
-          [ 2.  5.]
-          [ 3.  6.]]
-         [[ 7. 10.]
-          [ 8. 11.]
-          [ 9. 12.]]]
-    """
-    return ops.functional.permute(input, dims)
-
-
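As the Raises section above implies, `dims` must name every axis of the input exactly once; a short sketch (assumes MindSpore 2.3.0):

    # Sketch only: dims is a permutation of all axes, so shape indices are reordered 1:1.
    import numpy as np
    from mindspore import Tensor, mint

    x = Tensor(np.zeros((2, 3, 4), dtype=np.float32))
    print(mint.permute(x, (2, 0, 1)).shape)  # (4, 2, 3)
    # mint.permute(x, (0, 1)) would raise ValueError: len(dims) != x.ndim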
-def split(tensor, split_size_or_sections, dim=0):
-    """
-    Splits the Tensor into chunks along the given dim.
-
-    Args:
-        tensor (Tensor): A Tensor to be divided.
-        split_size_or_sections (Union[int, tuple(int), list(int)]):
-            If `split_size_or_sections` is an int type, `tensor` will be split into equally sized chunks,
-            each chunk with size `split_size_or_sections`. The last chunk will be smaller than
-            `split_size_or_sections` if `tensor.shape[dim]` is not divisible by `split_size_or_sections`.
-            If `split_size_or_sections` is a list type, then `tensor` will be split into len(split_size_or_sections)
-            chunks with sizes `split_size_or_sections` along the given `dim`.
-        dim (int): The dim along which to split. Default: ``0`` .
-
-    Returns:
-        A tuple of sub-tensors.
-
-    Raises:
-        TypeError: If argument `tensor` is not a Tensor.
-        TypeError: If argument `dim` is not an int.
-        ValueError: If argument `dim` is out of range of [-tensor.ndim, tensor.ndim).
-        TypeError: If each element in `split_size_or_sections` is not an integer.
-        TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
-        ValueError: If the sum of `split_size_or_sections` is not equal to tensor.shape[dim].
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import ops, Tensor
-        >>> input_x = np.arange(9).astype("float32")
-        >>> output = ops.split(Tensor(input_x), 3)
-        >>> print(output)
-        (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
-         Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
-         Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
-    """
-    return ops.function.array_func.split_ext(tensor, split_size_or_sections, dim)
-
-
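The two forms of `split_size_or_sections` behave differently; a brief sketch (assumes MindSpore 2.3.0; per the docstring, mint.split is listed for Ascend only, so this uses ops.split, the call shown in its own example):

    # Sketch only: chunk size (int) vs. explicit section sizes (list).
    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.arange(9).astype("float32"))
    a, b, c = ops.split(x, 3)         # three chunks of size 3
    p, q = ops.split(x, [4, 5])       # section sizes must sum to x.shape[0] == 9
    print(a.shape, p.shape, q.shape)  # (3,) (4,) (5,)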
-def sqrt(input):
-    r"""
-    Returns the square root of a tensor element-wise.
-
-    .. math::
-
-        out_{i} = \sqrt{input_{i}}
-
-    Args:
-        input (Tensor): The input tensor with a dtype of number.Number.
-
-    Returns:
-        Tensor, has the same shape as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, mint
-        >>> input = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32)
-        >>> output = mint.sqrt(input)
-        >>> print(output)
-        [1. 2. 3.]
-    """
-    return ops.auto_generate.sqrt(input)
-
-
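A one-liner sketch of the element-wise contract above (assumes MindSpore 2.3.0):

    # Sketch only: output shape matches input shape, element by element.
    import mindspore
    import numpy as np
    from mindspore import Tensor, mint

    x = Tensor(np.array([[1.0, 4.0], [9.0, 16.0]]), mindspore.float32)
    print(mint.sqrt(x))  # [[1. 2.] [3. 4.]]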
-def sub(input, other, *, alpha=1):
-    r"""
-    Subtracts `other`, scaled by `alpha`, from the `input` Tensor.
-
-    .. math::
-
-        out_{i} = input_{i} - alpha \times other_{i}
-
-    Note:
-        - When the two inputs have different shapes,
-          they must be able to broadcast to a common shape.
-        - The two inputs and alpha comply with the implicit type conversion rules to make the data types
-          consistent.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
-        other (Union[Tensor, number.Number, bool]): The second input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
-
-    Keyword Args:
-        alpha (number.Number): A scaling factor applied to `other`. Default: ``1`` .
-
-    Returns:
-        Tensor with a shape that is the same as the broadcasted shape of the inputs `input` and `other`,
-        and a data type that is the one with higher precision or more digits among the two inputs and alpha.
-
-    Raises:
-        TypeError: If the type of `input`, `other`, or `alpha` is not one of the following: Tensor, number.Number, bool.
-        TypeError: If `alpha` is of type float but `input` and `other` are not of type float.
-        TypeError: If `alpha` is of type bool but `input` and `other` are not of type bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> import mindspore
-        >>> from mindspore import Tensor
-        >>> from mindspore import mint
-        >>> x = Tensor(np.array([4, 5, 6]).astype(np.float32))
-        >>> y = Tensor(1, mindspore.int32)
-        >>> alpha = 0.5
-        >>> output = mint.sub(x, y, alpha=alpha)
-        >>> print(output)
-        [3.5 4.5 5.5]
-        >>> # the data type of x is float32, the data type of y is int32,
-        >>> # alpha is a float, and the output is the data format of higher precision float32.
-        >>> print(output.dtype)
-        Float32
-    """
-    return ops.auto_generate.sub_ext(input, other, alpha)
-
-
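The `alpha` keyword above defaults to 1, so plain subtraction is a special case; a short sketch (assumes MindSpore 2.3.0):

    # Sketch only: sub(input, other, alpha) computes input - alpha * other.
    import numpy as np
    from mindspore import Tensor, mint

    x = Tensor(np.array([10.0, 20.0, 30.0], dtype=np.float32))
    y = Tensor(np.array([1.0, 2.0, 3.0], dtype=np.float32))
    print(mint.sub(x, y))           # [ 9. 18. 27.] -- alpha defaults to 1
    print(mint.sub(x, y, alpha=2))  # [ 8. 16. 24.] -- x - 2 * y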
-def zeros(size, *, dtype=None):
-    """
-    Creates a tensor with the shape described by `size`, filled with the value 0 in the type given by `dtype`.
-
-    Args:
-        size (Union[tuple[int], list[int], int, Tensor]): The specified shape of the output tensor. Only positive
-            integers, or tuples or Tensors containing positive integers, are allowed. If it is a Tensor,
-            it must be a 0-D or 1-D Tensor with an int32 or int64 dtype.
-
-    Keyword Args:
-        dtype (:class:`mindspore.dtype`, optional): The specified type of the output tensor. If `dtype` is ``None`` ,
-            mindspore.float32 will be used. Default: ``None`` .
-
-    Returns:
-        Tensor, whose dtype and size are defined by the inputs.
-
-    Raises:
-        TypeError: If `size` is neither an int nor a tuple/list/Tensor of int.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import mint
-        >>> output = mint.zeros((2, 2), dtype=mindspore.float32)
-        >>> print(output)
-        [[0. 0.]
-         [0. 0.]]
-    """
-    return ops.auto_generate.zeros(size, dtype)
-
-
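`zeros` mirrors `ones` above; a compact sketch of both together (assumes MindSpore 2.3.0):

    # Sketch only: same size/dtype contract as mint.ones, with 0 as the fill value.
    import mindspore
    from mindspore import mint

    z = mint.zeros((2, 3))                        # dtype=None -> mindspore.float32
    o = mint.ones((2, 3), dtype=mindspore.int32)
    print(z.dtype, o.dtype)  # Float32 Int32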
 __all__ = [
     'full',
     'ones_like',
     'zeros_like',
-    'abs',
     'erf',
     'where',
     'linspace',
-    'isclose',
     # 1
     'div',
     'divide',
@@ -912,11 +248,11 @@ __all__ = [
     # 4

     # 5
-
+
     # 6
     'stack',
     # 7
-
+
     # 8

     # 9
@@ -926,18 +262,17 @@ __all__ = [
     # 11

     # 12
-
+    "repeat_interleave",
     # 13
-
+
     # 14

     # 15
     'flatten',
     # 16
     'matmul',
-    'bmm',
     # 17
-
+
     # 18
     'sum',
     # 19
@@ -949,7 +284,7 @@ __all__ = [
     # 22

     # 23
-
+    'mean',
     # 24

     # 25
@@ -964,11 +299,11 @@ __all__ = [
     # 29
     'sqrt',
     # 30
-
+
     # 31

     # 32
-
+
     # 33
     'split',
     # 34
@@ -978,19 +313,19 @@ __all__ = [
     # 36

     # 37
-
+
     # 38

     # 39

     # 40
-
+
     # 41
-
+
     # 42
     'argmax',
     # 43
-
+
     # 44
     'cos',
     # 45
@@ -998,15 +333,15 @@ __all__ = [
     # 46

     # 47
-
+
     # 48
-
+
     # 49

     # 50
     'tile',
     # 51
-
+
     # 52

     # 53
@@ -1020,7 +355,7 @@ __all__ = [
     # 57
     'broadcast_to',
     # 58
-
+
     # 59
     'square',
     # 60
@@ -1054,7 +389,7 @@ __all__ = [
     # 73
     'ceil',
     # 74
-
+
     # 75
     'less',
     'lt',
@@ -1070,11 +405,11 @@ __all__ = [
     # 80

     # 81
-
+
     # 82

     # 83
-
+
     # 84

     # 85
@@ -1109,22 +444,10 @@ __all__ = [

     # 100

-    # 176
-    'atan2',
-    'arctan2',
-
-    # 208
-    'eye',
-    'rand',
-    'rand_like',
-    # 210
-    'floor',
-    # 231
-    'inverse',
     # 285
     'scatter_add',
     # 304
-
+    'tril',
     # 305
     'triu',
 ]
@@ -1132,6 +455,3 @@ __all__.extend(array_func.__all__)
 __all__.extend(math_func.__all__)
 __all__.extend(nn_func.__all__)
 __all__.extend(functional.__all__)
-__all__.extend(nn.__all__)
-__all__.extend(optim.__all__)
-__all__.extend(linalg.__all__)
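Since these hunks mostly reshuffle the export list, a runtime probe is the simplest way to see which names a given build actually exposes. A hedged sketch (the name list is drawn from entries visible in this diff; adjust for the version you have installed):

    # Sketch only: probe the mint namespace for names touched by this diff.
    from mindspore import mint

    for name in ('abs', 'isclose', 'bmm', 'mean', 'repeat_interleave', 'tril'):
        print(name, hasattr(mint, name))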