mindspore 2.3.0rc1__cp39-none-any.whl → 2.3.0rc2__cp39-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +1 -1
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +13 -3
- mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +20 -0
- mindspore/_extends/parse/parser.py +1 -1
- mindspore/_extends/parse/standard_method.py +6 -5
- mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +5 -5
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +4 -2
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_stub_tensor.py +1 -0
- mindspore/common/api.py +56 -4
- mindspore/common/dtype.py +5 -3
- mindspore/common/dump.py +2 -2
- mindspore/common/hook_handle.py +51 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +17 -6
- mindspore/common/parameter.py +7 -2
- mindspore/common/recompute.py +247 -0
- mindspore/common/sparse_tensor.py +2 -2
- mindspore/common/symbol.py +1 -1
- mindspore/common/tensor.py +74 -36
- mindspore/communication/__init__.py +3 -3
- mindspore/communication/management.py +30 -30
- mindspore/context.py +28 -15
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +2 -2
- mindspore/dataset/audio/transforms.py +51 -51
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +3 -3
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +3 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +3 -3
- mindspore/dataset/engine/datasets_vision.py +68 -68
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +26 -26
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/vision/__init__.py +3 -3
- mindspore/dataset/vision/transforms.py +92 -92
- mindspore/dataset/vision/utils.py +1 -1
- mindspore/experimental/optim/adadelta.py +2 -2
- mindspore/experimental/optim/adagrad.py +2 -2
- mindspore/experimental/optim/adam.py +2 -2
- mindspore/experimental/optim/adamax.py +2 -2
- mindspore/experimental/optim/adamw.py +2 -2
- mindspore/experimental/optim/asgd.py +2 -2
- mindspore/experimental/optim/lr_scheduler.py +24 -20
- mindspore/experimental/optim/nadam.py +2 -2
- mindspore/experimental/optim/optimizer.py +1 -1
- mindspore/experimental/optim/radam.py +2 -2
- mindspore/experimental/optim/rmsprop.py +2 -2
- mindspore/experimental/optim/rprop.py +2 -2
- mindspore/experimental/optim/sgd.py +2 -2
- mindspore/hal/stream.py +2 -0
- mindspore/include/mindapi/base/types.h +5 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6 -6
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +101787 -98559
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/base/op_register.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/params/mix.h +8 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/params/norm.h +5 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/params/reduce.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/backend/backend.h +3 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/backend/rtbackend.h +3 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/base/types.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/module/module.h +3 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/svector/svector.h +3 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/tiling/add_tiling.h +9 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +2 -6
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_base.h +460 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_bf16.h +217 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp16.h +116 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_tiling.h +16 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_value.h +27 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -4
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/FlashAttentionScore_impl.h → flash_attention_score/flash_attention_score_impl.h} +2 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/bs_attention_tiling.h → flash_attention_score/flash_attention_score_tiling.h} +15 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/gelu/tiling/gelu_tiling.h +7 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/lccl/lccl_wrapper.h +58 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +19 -8
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{matmul → matmul_common}/pp_matmul_common_tiling.h +18 -8
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{matmul → matmul_common}/pp_matmul_info.h +7 -4
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{matmul → matmul_common}/tiling_data.h +44 -6
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_utils.h +65 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +10 -6
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +4 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +41 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/PagedAttention_impl.h → paged_attention/paged_attention_impl.h} +1 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +63 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/add_param.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention_param.h → param/attention_param.h} +11 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +37 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +45 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache/reshape_and_cache_tiling.h +1 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm.h +23 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm_base.h +175 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm_normal.h +276 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm_split_d.h +280 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/tiling_data.h +35 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +45 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/kernel/sub_kernel.h +20 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +47 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_tiling.h +25 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +323 -23
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/types.h +15 -4
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +8 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcal.h +22 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcal_comm.h +70 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcal_types.h +103 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lccl.h +47 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lccl_wrapper.h +58 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcoc.h +154 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/log.py +2 -2
- mindspore/mint/__init__.py +457 -0
- mindspore/mint/nn/__init__.py +430 -0
- mindspore/mint/nn/functional.py +424 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +186 -0
- mindspore/multiprocessing/__init__.py +4 -0
- mindspore/nn/__init__.py +3 -0
- mindspore/nn/cell.py +51 -47
- mindspore/nn/extend/__init__.py +29 -0
- mindspore/nn/extend/basic.py +140 -0
- mindspore/nn/extend/embedding.py +143 -0
- mindspore/nn/extend/layer/__init__.py +27 -0
- mindspore/nn/extend/layer/normalization.py +107 -0
- mindspore/nn/extend/pooling.py +117 -0
- mindspore/nn/generator.py +297 -0
- mindspore/nn/layer/basic.py +109 -1
- mindspore/nn/layer/container.py +2 -2
- mindspore/nn/layer/conv.py +6 -6
- mindspore/nn/layer/embedding.py +1 -1
- mindspore/nn/layer/normalization.py +21 -43
- mindspore/nn/layer/padding.py +4 -0
- mindspore/nn/optim/ada_grad.py +2 -2
- mindspore/nn/optim/adadelta.py +1 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +7 -7
- mindspore/nn/optim/adamax.py +2 -2
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -2
- mindspore/nn/optim/ftrl.py +1 -1
- mindspore/nn/optim/lamb.py +3 -3
- mindspore/nn/optim/lars.py +1 -1
- mindspore/nn/optim/lazyadam.py +2 -2
- mindspore/nn/optim/momentum.py +2 -2
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +2 -2
- mindspore/nn/optim/rmsprop.py +2 -2
- mindspore/nn/optim/rprop.py +2 -2
- mindspore/nn/optim/sgd.py +2 -2
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +9 -9
- mindspore/nn/wrap/grad_reducer.py +5 -5
- mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -2
- mindspore/ops/_vmap/vmap_math_ops.py +27 -8
- mindspore/ops/_vmap/vmap_nn_ops.py +66 -8
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +73 -1
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +12 -3
- mindspore/ops/auto_generate/gen_arg_handler.py +24 -0
- mindspore/ops/auto_generate/gen_extend_func.py +274 -0
- mindspore/ops/auto_generate/gen_ops_def.py +889 -22
- mindspore/ops/auto_generate/gen_ops_prim.py +3541 -253
- mindspore/ops/auto_generate/pyboost_inner_prim.py +282 -0
- mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -1
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +9 -0
- mindspore/ops/extend/__init__.py +9 -1
- mindspore/ops/extend/array_func.py +134 -27
- mindspore/ops/extend/math_func.py +3 -3
- mindspore/ops/extend/nn_func.py +363 -2
- mindspore/ops/function/__init__.py +19 -2
- mindspore/ops/function/array_func.py +463 -439
- mindspore/ops/function/clip_func.py +7 -18
- mindspore/ops/function/grad/grad_func.py +5 -5
- mindspore/ops/function/linalg_func.py +4 -4
- mindspore/ops/function/math_func.py +260 -243
- mindspore/ops/function/nn_func.py +825 -62
- mindspore/ops/function/random_func.py +73 -4
- mindspore/ops/function/sparse_unary_func.py +1 -1
- mindspore/ops/function/vmap_func.py +1 -1
- mindspore/ops/functional.py +2 -2
- mindspore/ops/op_info_register.py +1 -31
- mindspore/ops/operations/__init__.py +2 -3
- mindspore/ops/operations/_grad_ops.py +2 -107
- mindspore/ops/operations/_inner_ops.py +5 -5
- mindspore/ops/operations/_sequence_ops.py +2 -2
- mindspore/ops/operations/array_ops.py +11 -233
- mindspore/ops/operations/comm_ops.py +32 -32
- mindspore/ops/operations/custom_ops.py +7 -89
- mindspore/ops/operations/manually_defined/ops_def.py +329 -4
- mindspore/ops/operations/math_ops.py +13 -163
- mindspore/ops/operations/nn_ops.py +9 -316
- mindspore/ops/operations/random_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +3 -3
- mindspore/ops/primitive.py +2 -2
- mindspore/ops_generate/arg_dtype_cast.py +12 -3
- mindspore/ops_generate/arg_handler.py +24 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +2 -0
- mindspore/ops_generate/gen_pyboost_func.py +13 -6
- mindspore/ops_generate/pyboost_utils.py +2 -17
- mindspore/parallel/__init__.py +3 -2
- mindspore/parallel/_auto_parallel_context.py +106 -1
- mindspore/parallel/_parallel_serialization.py +34 -2
- mindspore/parallel/_utils.py +16 -0
- mindspore/parallel/algo_parameter_config.py +4 -4
- mindspore/parallel/checkpoint_transform.py +249 -77
- mindspore/parallel/cluster/process_entity/_api.py +1 -1
- mindspore/parallel/parameter_broadcast.py +1 -1
- mindspore/parallel/shard.py +1 -1
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +1 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +17 -5
- mindspore/profiler/parser/ascend_msprof_exporter.py +3 -3
- mindspore/profiler/parser/ascend_msprof_generator.py +10 -3
- mindspore/profiler/parser/ascend_op_generator.py +26 -9
- mindspore/profiler/parser/ascend_timeline_generator.py +7 -4
- mindspore/profiler/parser/profiler_info.py +11 -1
- mindspore/profiler/profiling.py +13 -5
- mindspore/rewrite/api/node.py +12 -12
- mindspore/rewrite/api/symbol_tree.py +11 -11
- mindspore/run_check/_check_version.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +2 -2
- mindspore/train/amp.py +4 -4
- mindspore/train/anf_ir_pb2.py +8 -2
- mindspore/train/callback/_backup_and_restore.py +2 -2
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +2 -2
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_landscape.py +4 -4
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +2 -2
- mindspore/train/callback/_time_monitor.py +2 -2
- mindspore/train/dataset_helper.py +8 -3
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/mind_ir_pb2.py +22 -17
- mindspore/train/model.py +15 -15
- mindspore/train/serialization.py +18 -18
- mindspore/train/summary/summary_record.py +7 -7
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/version.py +1 -1
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +1 -1
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +307 -260
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/tiling_data.h +0 -59
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_bf16_BNSD_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_bf16_BSH_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_fp16_BNSD_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_fp16_BSH_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_bf16_BNSD_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_bf16_BSH_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_fp16_BNSD_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_fp16_BSH_mix.o +0 -0
- /mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/bs_attention_mix_hwsync.h → flash_attention_score/kernel/flash_attention_score_mix_hwsync.h} +0 -0
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
@@ -31,17 +31,21 @@ from mindspore.ops import composite as C
 from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
 from mindspore.ops.primitive import constexpr, _primexpr
 from mindspore.ops.operations._inner_ops import TileSize
-from mindspore.ops.auto_generate import Cummin
+from mindspore.ops.auto_generate import Cummin, BatchMatMul, LinSpaceExt, Norm
+from mindspore.ops import auto_generate
 from mindspore.ops.operations.math_ops import STFT
 from mindspore.ops.operations.math_ops import LuUnpack
 from mindspore.ops.operations.math_ops import Roll
 from mindspore.ops.operations.math_ops import Ormqr
+from mindspore.ops.operations.math_ops import DivMod
 from mindspore.ops.operations.array_ops import MatrixSetDiagV3, Transpose
 from mindspore.ops.auto_generate import (minimum, maximum, mul, sin, sinc, sinh, cummax, real, conj, add, sub, cos, cosh,
                                          matrix_exp, sqrt, rsqrt, square, trace, nextafter, abs, acos, acosh, angle,
                                          asin, asinh, atan, atan2, atanh, ceil, equal, erf, erfc, erfinv, exp, expm1,
                                          floor, floor_divide, floor_mod, gcd, greater, greater_equal, less, less_equal,
-                                         log, log1p, neg, not_equal, pow, round
+                                         log, log1p, neg, not_equal, pow, round, isfinite, argmax, mean_ext_op,
+                                         sum_ext_op, prod_ext_op, all)
+from mindspore.ops.auto_generate import tanh
 from mindspore.nn import layer
 from mindspore._checkparam import check_is_number
 from mindspore import _checkparam as validator
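Note: this import hunk is the key to several later hunks. `isfinite`, `tanh`, `argmax`, `all`, `mean_ext_op`, `sum_ext_op` and `prod_ext_op` now come from `mindspore.ops.auto_generate`, so the hand-written Python wrappers for those functions can be deleted further down while the public API stays importable. A minimal sketch of an unchanged call site (assuming the auto-generated ops keep the old public signatures):

    import mindspore
    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([np.log(-1), 1.0, 2.0]), mindspore.float32)
    print(ops.isfinite(x))  # [False  True  True], now backed by the auto-generated op
    print(ops.tanh(x))      # likewise re-exported from mindspore.ops.auto_generate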
@@ -143,7 +147,7 @@ asinh_ = P.Asinh()
 atan2_ = P.Atan2()
 atan_ = P.Atan()
 atanh_ = P.Atanh()
-batch_matmul_ =
+batch_matmul_ = BatchMatMul()
 bessel_i0_ = BesselI0()
 bessel_i0e_ = P.BesselI0e()
 bessel_i1_ = BesselI1()
@@ -164,7 +168,6 @@ cumprod_ = P.CumProd()
 cumsum_ = P.CumSum()
 cumulative_logsumexp_ = CumulativeLogsumexp()
 digamma_ = P.Digamma()
-div_ = P.Div()
 dtype_ = P.DType()
 eps_ = P.Eps()
 erf_ = P.Erf()
@@ -180,7 +183,6 @@ igammac_ = Igammac()
 imag_ = P.Imag()
 inv_ = P.math_ops.Inv()
 invert_ = P.Invert()
-isfinite_ = P.IsFinite()
 isinf_ = P.IsInf()
 isnan_ = P.IsNan()
 lcm_ = Lcm()
@@ -692,16 +694,6 @@ def subtract(input, other, *, alpha=1):
     return tensor_sub(input, alpha * other)


-def true_divide(dividend, divisor):
-    r"""
-    Alias for :func:`mindspore.ops.div` with :math:`rounding\_mode=None`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-    """
-    return div(dividend, divisor, rounding_mode=None)
-
-
 def multiply(input, other):
     r"""
     Alias for :func:`mindspore.ops.asinh`.
@@ -767,15 +759,23 @@ def div(input, other, *, rounding_mode=None):
     """
     if rounding_mode is not None and rounding_mode not in ['floor', 'trunc']:
         raise ValueError("For ops.div, rounding_mode value should be None, 'floor' or 'trunc'.")
-
-
-
-
-    if rounding_mode == 'trunc':
-        output = trunc_(output)
+    if rounding_mode:
+        output = DivMod()(input, other, rounding_mode)
+    else:
+        output = P.Div()(input, other)
     return output


+def true_divide(dividend, divisor):
+    r"""
+    Alias for :func:`mindspore.ops.div` with :math:`rounding\_mode=None`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+    """
+    return div(dividend, divisor)
+
+
 def divide(input, other, *, rounding_mode=None):
     """
     Alias for :func:`mindspore.ops.div` .
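The rewritten `div` collapses the old Python-level `floor`/`trunc` branches into a single `DivMod` primitive that receives the rounding mode, and `true_divide` is re-added after `div` so it can delegate to the new path. A small usage sketch of the three modes (standard floor/trunc semantics, assuming behavior is otherwise unchanged):

    import mindspore
    from mindspore import Tensor, ops

    a = Tensor([7.0, -7.0], mindspore.float32)
    b = Tensor([2.0, 2.0], mindspore.float32)
    print(ops.div(a, b))                         # [ 3.5 -3.5]  true division
    print(ops.div(a, b, rounding_mode='floor'))  # [ 3.  -4. ]  round toward -inf
    print(ops.div(a, b, rounding_mode='trunc'))  # [ 3.  -3. ]  round toward zero
    print(ops.true_divide(a, b))                 # alias for div without rounding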
@@ -1652,8 +1652,8 @@ def xlogy(input, other):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/
-            `bool_ <https://www.mindspore.cn/docs/en/
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
         other (Union[Tensor, number.Number, bool]): The second input is a number.Number or
             a bool when the first input is a tensor or a tensor whose data type is number or bool\_.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -1811,45 +1811,6 @@ def arccos(input):
     return acos(input)


-def tanh(input):
-    r"""
-    Computes hyperbolic tangent of input element-wise. The Tanh function is defined as:
-
-    .. math::
-
-        tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
-
-    where :math:`x_i` is an element of the input Tensor.
-
-    Tanh Activation Function Graph:
-
-    .. image:: ../images/Tanh.png
-        :align: center
-
-    Args:
-        input (Tensor): Input of Tanh.
-
-    Returns:
-        Tensor, with the same type and shape as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
-        >>> output = ops.tanh(input)
-        >>> print(output)
-        [0.7615941 0.9640276 0.9950547 0.9993293 0.9999092]
-    """
-    return tanh_(input)
-
-
 def arcsinh(input):
     r"""
     Alias for :func:`mindspore.ops.asinh`.
@@ -2573,6 +2534,52 @@ def linspace(start, end, steps):
     return linspace_(start, end, steps)


+def linspace_ext(start, end, steps, *, dtype=None):
+    r"""
+    Returns a Tensor whose value is `steps` evenly spaced in the interval `start` and `end` (including `start` and
+    `end`), and the length of the output Tensor is `steps`.
+
+    .. math::
+        \begin{aligned}
+        &step = (end - start)/(steps - 1)\\
+        &output = [start, start+step, start+2*step, ... , end]
+        \end{aligned}
+
+    Args:
+        start (Union[Tensor, Number]): Start value of interval.
+            If `start` is Tensor, data type must be float32 or float64 and with shape of 0-D.
+        end (Union[Tensor, Number]): Last value of interval.
+            If `end` is Tensor, data type must be float32 or float64 and with shape of 0-D.
+        steps (Union[Tensor, int]): Number of ticks in the interval, inclusive of start and end.
+            Must be positive int number or 0D int32/int64 Tensor.
+
+    Keyword Args:
+        dtype (mindspore.dtype, optional): The output Tensor data type. Default: ``None`` , the data type of output
+            Tensor is float32.
+
+    Returns:
+        Tensor, has the shape of :math:`(steps,)`.
+
+    Raises:
+        TypeError: If dtype of `start` or dtype of `end` is not supported.
+        ValueError: If shape of `start` or shape of `end` is not 0-D.
+        TypeError: If `steps` is not int or 0D int32/int64 Tensor.
+        ValueError: If `steps` is not positive int number.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> start = Tensor(1, mindspore.float32)
+        >>> end = Tensor(10, mindspore.float32)
+        >>> steps = 5
+        >>> output = ops.linspace_ext(start, end, steps, dtype=mindspore.float32)
+        >>> print(output)
+        [ 1.    3.25  5.5   7.75 10.  ]
+    """
+    return _get_cache_prim(LinSpaceExt)()(start, end, steps, dtype)
+
+
 def det(input):
     r"""
     Computes the determinant of one or more square matrices.
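`linspace_ext` differs from the existing `linspace` mainly in the keyword-only `dtype` and in accepting plain numbers for `start`/`end`; both produce `steps` evenly spaced points. A short comparison (sketch, assuming the docstring's semantics hold):

    import mindspore
    from mindspore import Tensor, ops

    print(ops.linspace(Tensor(1.0), Tensor(10.0), 5))           # existing API
    print(ops.linspace_ext(1, 10, 5, dtype=mindspore.float32))  # new, with dtype keyword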
@@ -3015,8 +3022,8 @@ def le(input, other):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/
-            `bool_ <https://www.mindspore.cn/docs/en/
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
         other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
             the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -3065,8 +3072,8 @@ def gt(input, other):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/
-            `bool_ <https://www.mindspore.cn/docs/en/
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ .
         other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
             the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -3292,46 +3299,6 @@ def approximate_equal(x, y, tolerance=1e-5):
     return _get_cache_prim(P.ApproximateEqual)(tolerance)(x, y)


-def isfinite(x):
-    r"""
-    Determines which elements are finite for each position. If elements are not ``NaN`` , ``-INF`` , ``INF``,
-    they are finite.
-
-    .. math::
-
-        out_i = \begin{cases}
-          & \text{ if } x_{i} = \text{Finite},\ \ True \\
-          & \text{ if } x_{i} \ne \text{Finite},\ \ False
-        \end{cases}
-
-    Args:
-        x (Tensor): The input tensor.
-
-    Returns:
-        Tensor, has the same shape of input, and the dtype is bool.
-
-    Raises:
-        TypeError: If `x` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
-        >>> output = ops.isfinite(x)
-        >>> print(output)
-        [False  True False]
-        >>> x = Tensor(2.1, mindspore.float64)
-        >>> output = ops.isfinite(x)
-        >>> print(output)
-        True
-    """
-    return isfinite_(x)
-
-
 def isnan(input):
     r"""
     Determines which elements are NaN for each position.
@@ -3680,7 +3647,7 @@ def nanmedian(input, axis=-1, keepdims=False):

     .. warning::
         `indices` does not necessarily contain the first occurrence of each median value found in the `input`,
-
+        unless it is unique.

     Args:
         input (Tensor): The input tensor to calculate the median and indices.
@@ -6678,6 +6645,88 @@ def amax(input, axis=None, keepdims=False, *, initial=None, where=None):


 def mean(x, axis=None, keep_dims=False):
+    r"""
+    Reduces all dimension of a tensor by averaging all elements in the dimension, by default.
+    And reduce a dimension of `x` along the specified `axis`. `keep_dims`
+    determines whether the dimensions of the output and input are the same.
+
+    Args:
+        x (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
+            :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
+        axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Default: ``None`` , reduce all dimensions.
+            Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
+        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
+            If false, don't keep these dimensions. Default: ``False`` .
+
+    Returns:
+        Tensor, has the same data type as input tensor.
+
+        - If `axis` is None, and `keep_dims` is False,
+          the output is a 0-D tensor representing the product of all elements in the input tensor.
+        - If `axis` is int, set as 1, and `keep_dims` is False,
+          the shape of output is :math:`(x_0, x_2, ..., x_R)`.
+        - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
+          the shape of output is :math:`(x_0, x_3, ..., x_R)`.
+
+    Raises:
+        TypeError: If `x` is not a Tensor.
+        TypeError: If `axis` is not one of the following: int, tuple or list.
+        TypeError: If `keep_dims` is not a bool.
+        ValueError: If `axis` is out of range.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
+        >>> output = ops.mean(x, 1, keep_dims=True)
+        >>> result = output.shape
+        >>> print(result)
+        (3, 1, 5, 6)
+        >>> # case 1: Reduces a dimension by averaging all elements in the dimension.
+        >>> x = Tensor(np.array([[[2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]],
+        ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
+        ... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
+        ... mindspore.float32)
+        >>> output = ops.mean(x)
+        >>> print(output)
+        5.0
+        >>> print(output.shape)
+        ()
+        >>> # case 2: Reduces a dimension along the axis 0
+        >>> output = ops.mean(x, 0, True)
+        >>> print(output)
+        [[[4. 4. 4. 4. 4. 4.]
+          [5. 5. 5. 5. 5. 5.]
+          [6. 6. 6. 6. 6. 6.]]]
+        >>> # case 3: Reduces a dimension along the axis 1
+        >>> output = ops.mean(x, 1, True)
+        >>> print(output)
+        [[[2. 2. 2. 2. 2. 2.]]
+         [[5. 5. 5. 5. 5. 5.]]
+         [[8. 8. 8. 8. 8. 8.]]]
+        >>> # case 4: Reduces a dimension along the axis 2
+        >>> output = ops.mean(x, 2, True)
+        >>> print(output)
+        [[[ 2.]
+          [ 2.]
+          [ 2.]]
+         [[ 4.]
+          [ 5.]
+          [ 6.]]
+         [[ 6.]
+          [ 8.]
+          [10.]]]
+    """
+    if axis is None:
+        axis = ()
+    return _get_cache_prim(P.ReduceMean)(keep_dims)(x, axis)
+
+
+def mean_ext(input, axis=None, keep_dims=False, dtype=None):
     r"""
     Reduces all dimension of a tensor by averaging all elements in the dimension, by default.
     And reduce a dimension of `input` along the specified `axis`. `keep_dims`
@@ -6687,13 +6736,14 @@ def mean(x, axis=None, keep_dims=False):
         The `axis` with tensor type is only used for compatibility with older versions and is not recommended.

     Args:
-
+        input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
             :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
         axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` ,
             reduce all dimensions. Only constant value is allowed. Assume the rank of `input` is r,
             and the value range is [-r,r).
         keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
             If ``False`` , don't keep these dimensions. Default: ``False`` .
+        dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .

     Returns:
         Tensor, has the same data type as input tensor.
@@ -6759,13 +6809,11 @@ def mean(x, axis=None, keep_dims=False):
         [[ 6.]
          [ 8.]
          [10.]]]
-
-
-    axis = ()
-    return _get_cache_prim(P.ReduceMean)(keep_dims)(x, axis)
+    """
+    return mean_ext_op(input, axis, keep_dims, dtype)


-def prod(input, axis=None, keep_dims=False):
+def prod(input, axis=None, keep_dims=False, dtype=None):
     r"""
     Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can
     reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
|
|
|
6781
6829
|
dimensions. Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
|
|
6782
6830
|
keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
|
|
6783
6831
|
If ``False`` , don't keep these dimensions. Default: ``False`` .
|
|
6832
|
+
dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
|
|
6784
6833
|
|
|
6785
6834
|
Returns:
|
|
6786
6835
|
Tensor, has the same data type as input tensor.
|
|
@@ -6846,8 +6895,10 @@ def prod(input, axis=None, keep_dims=False):
         [2.62144e+05]
         [5.31441e+05]]]
     """
-    if axis
-    axis
+    if not isinstance(axis, (tuple, list, Tensor)):
+        return prod_ext_op(input, axis, keep_dims, dtype)
+    if dtype is not None:
+        input = input.astype(dtype)
     return _get_cache_prim(P.ReduceProd)(keep_dims)(input, axis)

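The new `prod` body splits dispatch on the type of `axis`: an `int` or `None` goes to the auto-generated `prod_ext_op`, which takes `dtype` directly, while a `tuple`/`list`/`Tensor` axis keeps the old `ReduceProd` path with a manual `astype`. Sketch of both paths:

    import mindspore
    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]], np.int32))
    print(ops.prod(x, 1, dtype=mindspore.int64))  # int axis -> prod_ext_op path
    print(ops.prod(x, (0, 1)))                    # tuple axis -> ReduceProd fallback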
@@ -7210,6 +7261,84 @@ def _compute_vector_norm_inf(x, dim, keepdims, norm_func):
     return ret_norm


+def norm_ext(A, ord=None, dim=None, keepdim=False, *, dtype=None):
+    r"""
+    Returns the matrix norm or vector norm of a given tensor.
+
+    `ord` is the calculation mode of norm. The following norm modes are supported.
+
+    ====================== ================================ ==========================================
+    `ord`                  norm for matrices                norm for vectors
+    ====================== ================================ ==========================================
+    `None` (default)       Frobenius norm                   `2`-norm (see below)
+    `'fro'`                Frobenius norm                   -- not supported --
+    `'nuc'`                nuclear norm                     -- not supported --
+    `inf`                  :math:`max(sum(abs(x), dim=1))`  :math:`max(abs(x))`
+    `-inf`                 :math:`min(sum(abs(x), dim=1))`  :math:`min(abs(x))`
+    `0`                    -- not supported --              :math:`sum(x != 0)`
+    `1`                    :math:`max(sum(abs(x), dim=0))`  as below
+    `-1`                   :math:`min(sum(abs(x), dim=0))`  as below
+    `2`                    largest singular value           as below
+    `-2`                   smallest singular value          as below
+    other `int` or `float` -- not supported --              :math:`sum(abs(x)^{ord})^{(1 / ord)}`
+    ====================== ================================ ==========================================
+
+    Args:
+        A (Tensor): Tensor of shape :math:`(*, n)` or :math:`(*, m, n)` where * is zero or more batch dimensions.
+        ord (Union[int, float, inf, -inf, 'fro', 'nuc'], optional): norm's mode. refer to the table above for
+            behavior. Default: ``None`` .
+        dim (Union[int, Tuple(int)], optional): calculate the dimension of vector norm or matrix norm.
+            Default: ``None`` .
+
+            - When `dim` is int, it will be calculated by vector norm.
+
+            - When `dim` is a 2-tuple, it will be calculated by matrix norm.
+
+            - If `dim` is None and `ord` is None, `A` will be flattened to 1D and the 2-norm
+              of the vector will be calculated.
+
+            - If `dim` is None and `ord` is not None, `A` must be 1D or 2D.
+
+        keepdim (bool): whether the output Tensor retains the original dimension. Default: ``False`` .
+
+    Keyword Args:
+        dtype (:class:`mindspore.dtype`, optional): When set, `A` will be converted to the specified type,
+            `dtype`, before execution, and dtype of returned Tensor will also be `dtype`. Default: ``None`` .
+
+    Returns:
+        Tensor, the result of norm calculation on the specified dimension, `dim`, has the same dtype as `A`.
+
+    Raises:
+        ValueError: If `dim` is out of range.
+        TypeError: If `dim` is neither an int nor a tuple of int.
+        TypeError: If `A` is a vector and `ord` is a str.
+        ValueError: If `A` is a matrices and `ord` is not in valid mode.
+        ValueError: If `A` is a matrices and `ord` is an integer but not in [1, -1, 2, -2].
+        ValueError: If two elements of `dim` is same after normalize.
+        ValueError: If any elements of `dim` is out of range.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Note:
+        Currently, it only support `ops.norm_ext(A)`.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> import mindspore.ops as ops
+        >>> data_range = ops.arange(-13, 13, dtype=ms.float32)
+        >>> # Exclude 0 from original data for 0 is invalid input when `ord` is negative.
+        >>> x = data_range[data_range != 0]
+        >>> y = x.reshape(5, 5)
+        >>> print(ops.norm_ext(x))
+        38.327538
+        >>> print(ops.norm(x, 0))
+        25.0
+    """
+    norm_ext_op = Norm()
+    return norm_ext_op(A, ord, dim, keepdim, dtype)
+
+
 def vector_norm(x, ord=2, axis=None, keepdims=False, *, dtype=None):
     r"""
     Returns the vector norm of the given tensor on the specified dimensions.
@@ -7908,6 +8037,7 @@ def matmul(input, other):
         On GPU, the supported dtypes are np.float16 and np.float32.
         On CPU, the supported dtypes are np.float16 and np.float32.
         The dtype of `input` and `other` must be same.
+        On Ascend, the rank of `input` or `other` must be between 1 and 6.

     Args:
         input (Tensor): Input tensor, scalar not allowed.
@@ -7925,7 +8055,8 @@ def matmul(input, other):
         TypeError: If the dtype of `input` and the dtype of `other` are not the same.
         ValueError: If the last dimension of `input` is not the same size as the
             second-to-last dimension of `other`, or if a scalar value is passed in.
-        ValueError: If the shape of `input` and `
+        ValueError: If the shape of `input` and `input` could not broadcast together.
+        RuntimeError: If the rank of `input` or `other` is less than 1 or greater than 6.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -7956,42 +8087,7 @@ def matmul(input, other):
         >>> print(output.shape)
         (1,)
     """
-
-        raise TypeError("For matmul op, inputs must be all tensors.")
-
-    input_rank, other_rank = rank_(input), rank_(other)
-    if input_rank == 2 and other_rank == 2:
-        _matmul = _get_cache_prim(P.MatMul)(False, False)
-        return _matmul(input, other)
-
-    ndim1_orig, ndim2_orig = rank_(input), rank_(other)
-    shape1_orig, shape2_orig = shape_(input), shape_(other)
-    transpose_b = ndim2_orig == 1
-    shape_backbone = _check_matmul_shapes(shape1_orig, shape2_orig, 'matmul')
-    # infers the shape of the output
-    shape_out = shape_backbone + _infer_shape_rem(shape1_orig, shape2_orig,
-                                                  ndim1_orig, ndim2_orig, transpose_b)
-
-    _matmul = _get_cache_prim(P.MatMul)(False, transpose_b)
-    _batch_matmul = _get_cache_prim(P.BatchMatMul)(False, transpose_b)
-
-    input = _expand(input, 2)
-    other = _expand(other, 2)
-    if rank_(other) == 2:
-        if rank_(input) > 2:
-            input = reshape_(input, (-1, shape1_orig[-1]))
-        res = _matmul(input, other)
-    else:
-        # broadcasts input.shape[:-2] with other.shape[:-2]
-        ndim_aligned = _max(ndim1_orig, ndim2_orig)
-        input = _expand(input, ndim_aligned)
-        other = _expand(other, ndim_aligned)
-        shape1_aligned, shape2_aligned = shape_(input), shape_(other)
-        input = _broadcast_to(input, shape1_aligned[:-2], shape_backbone, ndim_aligned)
-        other = _broadcast_to(other, shape2_aligned[:-2], shape_backbone, ndim_aligned)
-        res = _batch_matmul(input, other)
-
-    return reshape_(res, shape_out)
+    return auto_generate.matmul_ext(input, other)


 def inner(input, other):
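The deleted lines hand-rolled NumPy-style batch broadcasting (a rank-2 fast path, `_expand`/`_broadcast_to` alignment, and a final reshape); all of that now lives inside the fused `auto_generate.matmul_ext`. The broadcasting contract the fused op must honor can be checked against NumPy (reference sketch, not MindSpore code):

    import numpy as np

    a = np.ones((2, 1, 3, 4), np.float32)
    b = np.ones((5, 4, 2), np.float32)
    # batch dims (2, 1) and (5,) broadcast to (2, 5); matrix dims give (3, 2)
    print(np.matmul(a, b).shape)  # (2, 5, 3, 2)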
@@ -8108,9 +8204,6 @@ def bmm(input_x, mat2):
         [[3255. 3312. 3369.]]
         [[4362. 4428. 4494.]]]]
     """
-    if not (isinstance(input_x, Tensor) and isinstance(mat2, Tensor)):
-        raise TypeError("For bmm op, inputs input_x and mat2 must be all tensors.")
-
     return batch_matmul_(input_x, mat2)

@@ -8275,7 +8368,7 @@ def baddbmm(input, batch1, batch2, beta=1, alpha=1):
         [5. 5. 5.]
         [5. 5. 5.]]]
     """
-    bmmop = _get_cache_prim(
+    bmmop = _get_cache_prim(BatchMatMul)(False, False)
     if not (isinstance(input, Tensor) and isinstance(batch1, Tensor) and isinstance(batch2, Tensor)):
         raise TypeError("For Baddbmm, inputs must be all tensors.")
     input_dtype = dtype_(input)
@@ -8641,70 +8734,6 @@ def _check_is_tensor(param_name, input, cls_name):
         raise TypeError(f"For {cls_name}, {param_name} must be a Tensor, but got {type(input)}.")


-def all(input, axis=None, keep_dims=False):
-    r"""
-    Reduces a dimension of `input` by the "logical AND" of all elements in the dimension, by default. And also can
-    reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
-    same by controlling `keep_dims`.
-
-    Note:
-        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
-
-    Args:
-        input (Tensor): Input Tensor, has the shape :math:`(N, *)` where :math:`*` means,
-            any number of additional dimensions.
-        axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
-            Suppose the rank of `input` is r, `axis` must be in the range [-rank(input), rank(input)).
-            Default: ``None`` , all dimensions are reduced.
-        keep_dims (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
-            If ``False`` , don't keep these dimensions. Default : ``False`` .
-
-    Returns:
-        Tensor, the dtype is bool.
-
-        - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
-          the output is a 0-D Tensor representing the "logical AND" of all elements in the input Tensor.
-        - If `axis` is int, such as 2, and `keep_dims` is ``False`` ,
-          the shape of output is :math:`(input_1, input_3, ..., input_R)`.
-        - If `axis` is tuple(int), such as (2, 3), and `keep_dims` is ``False`` ,
-          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
-        - If `axis` is 1-D Tensor, such as [2, 3], and `keep_dims` is ``False`` ,
-          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
-
-    Raises:
-        TypeError: If `keep_dims` is not a bool.
-        TypeError: If `input` is not a Tensor.
-        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([[True, False], [True, True]]))
-        >>> # case 1: Reduces a dimension by the "logicalAND" of all elements in the dimension.
-        >>> output = ops.all(x, keep_dims=True)
-        >>> print(output)
-        [[False]]
-        >>> print(output.shape)
-        (1, 1)
-        >>> # case 2: Reduces a dimension along axis 0.
-        >>> output = ops.all(x, axis=0)
-        >>> print(output)
-        [ True False]
-        >>> # case 3: Reduces a dimension along axis 1.
-        >>> output = ops.all(x, axis=1)
-        >>> print(output)
-        [False  True]
-    """
-    _check_is_tensor("input", input, "all")
-    if axis is None:
-        axis = ()
-    if input.dtype != mstype.bool_:
-        input = cast_(input, mstype.bool_)
-    return _get_cache_prim(P.ReduceAll)(keep_dims)(input, axis)
-

 def any(input, axis=None, keep_dims=False):
     r"""
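As with `isfinite` and `tanh`, `ops.all` is not removed from the API: the Python implementation above is deleted because `all` is re-exported from `mindspore.ops.auto_generate` in the import hunk at the top, and it stays listed in `__all__`. Call sites such as the removed docstring example should keep working (assuming the generated op preserves the old semantics):

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([[True, False], [True, True]]))
    print(ops.all(x, axis=0))  # [ True False]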
@@ -10096,23 +10125,7 @@ def sum(input, dim=None, keepdim=False, *, dtype=None):
         [48.]
         [54.]]]
     """
-
-        raise TypeError(f"For 'sum', 'input' must be Tensor, but got{type(input)}")
-    if dim is not None and not isinstance(dim, (int, tuple, list)):
-        raise TypeError(f"For 'sum', 'dim' must be int, tuple(int), list(int) or None, but got {type(dim)}")
-    if not isinstance(keepdim, bool):
-        raise TypeError(f"For 'sum', 'keepdim' must be bool, but got {type(keepdim)}")
-
-    if input.dtype == mstype.bool_:
-        input = input.astype(mstype.int64)
-    if dtype is not None:
-        input = input.astype(dtype)
-    reduce_sum = _get_cache_prim(P.ReduceSum)(keep_dims=keepdim)
-    if dim is not None:
-        out = reduce_sum(input, dim)
-    else:
-        out = reduce_sum(input)
-    return out
+    return sum_ext_op(input, dim, keepdim, dtype)


 def tanhshrink(input):
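`sum` drops its Python-side argument validation and bool-to-int64 promotion in favor of a single `sum_ext_op` call; presumably those checks now happen inside the generated operator, though this diff does not show that side. The public call pattern is unchanged:

    import mindspore
    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.ones((2, 3), np.float32))
    print(ops.sum(x, dim=1, keepdim=True, dtype=mindspore.float32))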
@@ -10498,7 +10511,7 @@ def fft(input, n=None, dim=-1, norm=None): # pylint: disable=redefined-outer-name
             Default: -1.
         norm (string, optional): Normalization mode. Three modes are defined as,
             ``"forward"`` (normalize by :math `1/n`), ``"backward"``(no normalization),
-            ``"ortho"`` (normalize by :math
+            ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
             Default: ``None`` that means ``"backward"``.

     Returns:
@@ -10577,7 +10590,7 @@ def fft2(input, s=None, dim=(-2, -1), norm=None): # pylint: disable=redefined-outer-name
             Default: last two dimensions.
         norm (string, optional): Normalization mode. Three modes are defined as,
             ``"forward"``(normalize by :math `1/n`), ``"backward"``(no normalization),
-            ``"ortho"``(normalize by :math
+            ``"ortho"``(normalize by :math:`1/\sqrt{n}`). Where :math `n = prod(s)` is the logical FFT size.
             Default: ``None`` that means ``"backward"``.

     Returns:
@@ -10620,7 +10633,7 @@ def fftn(input, s=None, dim=None, norm=None): # pylint: disable=redefined-outer-name
             Default: all dimensions, or the last `len(s)` dimensions if `s` is given.
         norm (string, optional): Normalization mode. Three modes are defined as,
             ``"forward"``(normalize by :math `1/n`), ``"backward"``(no normalization),
-            ``"ortho"``(normalize by :math
+            ``"ortho"``(normalize by :math:`1/\sqrt{n}`). Where :math `n = prod(s)` is the logical FFT size.
             Default: ``None`` that means ``"backward"``.

     Returns:
@@ -10669,7 +10682,7 @@ def ifft(input, n=None, dim=-1, norm=None): # pylint: disable=redefined-outer-name
             Default: -1.
         norm (string, optional): Normalization mode. Three modes are defined as,
             ``"forward"``(normalize by :math `1/n`), ``"backward"``(no normalization),
-            ``"ortho"``(normalize by :math
+            ``"ortho"``(normalize by :math:`1/\sqrt{n}`).
             Default: ``None`` that means ``"backward"``.

     Returns:
@@ -10749,7 +10762,7 @@ def ifft2(input, s=None, dim=(-2, -1), norm=None): # pylint: disable=redefined-outer-name
             Default: (-2, -1).
         norm (string, optional): Normalization mode. Three modes are defined as,
             ``"forward"``(normalize by :math `1/n`), ``"backward"``(no normalization),
-            ``"ortho"``(normalize by :math
+            ``"ortho"``(normalize by :math:`1/\sqrt{n}`). Where :math `n = prod(s)` is the logical IFFT size.
             Default: ``None`` that means ``"backward"``.

     Returns:
@@ -10792,7 +10805,7 @@ def ifftn(input, s=None, dim=None, norm=None): # pylint: disable=redefined-outer-name
             Default: all dimensions, or the last `len(s)` dimensions if `s` is given.
         norm (string, optional): Normalization mode. Three modes are defined as,
             ``"forward"``(normalize by :math `1/n`), ``"backward"``(no normalization),
-            ``"ortho"``(normalize by :math
+            ``"ortho"``(normalize by :math:`1/\sqrt{n}`). Where :math `n = prod(s)` is the logical IFFT size.
             Default: ``None`` that means ``"backward"``.

     Returns:
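The six fft-family hunks all repair the same truncated docstring line describing the `norm` conventions: ``backward`` applies no scaling, ``forward`` scales by 1/n, and ``ortho`` by 1/sqrt(n). These are the usual conventions and can be sanity-checked with NumPy:

    import numpy as np

    x = np.ones(4)
    print(np.fft.fft(x)[0])                  # (4+0j)  "backward": no scaling
    print(np.fft.fft(x, norm="forward")[0])  # (1+0j)  scaled by 1/n
    print(np.fft.fft(x, norm="ortho")[0])    # (2+0j)  scaled by 1/sqrt(n)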
@@ -11578,6 +11591,7 @@ __all__ = [
     'addcdiv',
     'addcmul',
     'angle',
+    'argmax',
     'argmin',
     'arccosh',
     'arcsin',
@@ -11595,6 +11609,7 @@ __all__ = [
     'le',
     'lerp',
     'norm',
+    'norm_ext',
     'vector_norm',
     'matrix_norm',
     'tensor_gt',
@@ -11664,6 +11679,7 @@ __all__ = [
     'matrix_determinant',
     'det',
     'linspace',
+    'linspace_ext',
     'logspace',
     'lu_solve',
     'matrix_solve',
@@ -11751,6 +11767,7 @@ __all__ = [
     'amin',
     'amax',
     'mean',
+    'mean_ext',
     'prod',
     'all',
     'any',