mindspore 2.3.0__cp39-none-any.whl → 2.3.0rc2__cp39-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this release of mindspore has been flagged as potentially problematic.
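The arrow in the title runs from the 2.3.0 wheel to the 2.3.0rc2 wheel, so in the hunks further down `-` lines come from 2.3.0 and `+` lines from 2.3.0rc2. A rough sketch (not part of either package; the wheel filenames and paths are placeholders) of how a per-file comparison between two wheels could be reproduced locally:

```python
# Hypothetical sketch of reproducing a wheel-to-wheel file diff like the one below.
import difflib
import zipfile

OLD_WHEEL = "mindspore-2.3.0-cp39-none-any.whl"     # left-hand side of the arrow above
NEW_WHEEL = "mindspore-2.3.0rc2-cp39-none-any.whl"  # right-hand side of the arrow above

with zipfile.ZipFile(OLD_WHEEL) as old, zipfile.ZipFile(NEW_WHEEL) as new:
    common = sorted(set(old.namelist()) & set(new.namelist()))
    for name in common:
        if not name.endswith(".py"):
            continue  # binary members (.so, .bin, .o) would be compared by size or hash instead
        a = old.read(name).decode("utf-8", errors="replace").splitlines()
        b = new.read(name).decode("utf-8", errors="replace").splitlines()
        diff = list(difflib.unified_diff(a, b, fromfile=name, tofile=name, lineterm=""))
        if diff:
            print("\n".join(diff[:20]))  # show the first hunk lines of each changed file
```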
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +0 -1512
- mindspore/__init__.py +1 -2
- mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +25 -5
- mindspore/_extends/graph_kernel/model/graph_parallel.py +1 -1
- mindspore/_extends/parse/__init__.py +2 -2
- mindspore/_extends/parse/compile_config.py +0 -29
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +5 -21
- mindspore/_extends/parse/resources.py +7 -5
- mindspore/_extends/parse/standard_method.py +59 -40
- mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +5 -26
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/less_batch_normalization.py +6 -9
- mindspore/common/__init__.py +1 -8
- mindspore/common/_register_for_tensor.py +9 -8
- mindspore/common/api.py +65 -275
- mindspore/common/dtype.py +4 -8
- mindspore/common/dump.py +5 -2
- mindspore/common/jit_config.py +1 -1
- mindspore/common/lazy_inline.py +2 -14
- mindspore/common/parameter.py +15 -14
- mindspore/common/recompute.py +5 -20
- mindspore/common/sparse_tensor.py +6 -21
- mindspore/common/tensor.py +52 -100
- mindspore/communication/__init__.py +11 -6
- mindspore/communication/management.py +94 -92
- mindspore/context.py +18 -180
- mindspore/dataset/engine/datasets.py +46 -69
- mindspore/dataset/engine/datasets_user_defined.py +53 -72
- mindspore/dataset/engine/datasets_vision.py +2 -2
- mindspore/dataset/engine/queue.py +38 -56
- mindspore/dataset/engine/validators.py +5 -11
- mindspore/dataset/vision/__init__.py +5 -5
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +1 -1
- mindspore/dataset/vision/transforms.py +46 -591
- mindspore/dataset/vision/utils.py +1 -121
- mindspore/dataset/vision/validators.py +3 -9
- mindspore/hal/__init__.py +1 -7
- mindspore/hal/device.py +1 -1
- mindspore/include/api/model.h +0 -3
- mindspore/include/dataset/vision.h +2 -54
- mindspore/include/mindapi/base/types.h +0 -1
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -35
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +0 -72
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/{aclnn_all_finite.h → aclnn_add_custom.h} +11 -9
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +12 -184
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +5 -4
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +286 -275
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_tiling.h +45 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +4 -8
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_tiling.h +4 -11
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/kernel/flash_attention_score_mix_hwsync.h +0 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -6
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +75 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/kernel/matmul.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +3 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_common_tiling.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_info.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_data.h +3 -36
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/kernel/matmul_stridedslice_fusion.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +4 -22
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +2 -16
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +3 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +4 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +4 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +2 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +4 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +12 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +1 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +2 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +1 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -17
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +7 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/mindrecord/filewriter.py +2 -2
- mindspore/mint/__init__.py +40 -720
- mindspore/mint/nn/__init__.py +7 -89
- mindspore/mint/nn/functional.py +16 -165
- mindspore/mint/optim/adamw.py +16 -15
- mindspore/nn/__init__.py +2 -0
- mindspore/nn/cell.py +98 -97
- mindspore/nn/extend/basic.py +2 -2
- mindspore/nn/extend/embedding.py +1 -1
- mindspore/nn/extend/layer/normalization.py +5 -7
- mindspore/nn/generator.py +297 -0
- mindspore/nn/layer/activation.py +3 -4
- mindspore/nn/layer/basic.py +16 -79
- mindspore/nn/layer/conv.py +8 -17
- mindspore/nn/layer/embedding.py +4 -1
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +1 -1
- mindspore/nn/layer/pooling.py +0 -5
- mindspore/nn/layer/rnn_cells.py +2 -2
- mindspore/nn/loss/loss.py +19 -19
- mindspore/nn/optim/adasum.py +1 -1
- mindspore/nn/optim/sgd.py +2 -3
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/wrap/cell_wrapper.py +1 -25
- mindspore/nn/wrap/loss_scale.py +1 -24
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/math_ops.py +8 -8
- mindspore/ops/__init__.py +1 -1
- mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -75
- mindspore/ops/_vmap/vmap_array_ops.py +0 -27
- mindspore/ops/_vmap/vmap_math_ops.py +1 -29
- mindspore/ops/_vmap/vmap_nn_ops.py +18 -19
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +8 -34
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +9 -2
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -26
- mindspore/ops/auto_generate/gen_extend_func.py +27 -603
- mindspore/ops/auto_generate/gen_ops_def.py +203 -993
- mindspore/ops/auto_generate/gen_ops_prim.py +402 -1946
- mindspore/ops/auto_generate/pyboost_inner_prim.py +20 -90
- mindspore/ops/composite/base.py +6 -3
- mindspore/ops/composite/math_ops.py +1 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +17 -24
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/extend/__init__.py +3 -2
- mindspore/ops/extend/array_func.py +51 -10
- mindspore/ops/extend/nn_func.py +78 -2
- mindspore/ops/function/__init__.py +13 -8
- mindspore/ops/function/array_func.py +179 -455
- mindspore/ops/function/clip_func.py +1 -1
- mindspore/ops/function/grad/grad_func.py +3 -3
- mindspore/ops/function/math_func.py +103 -117
- mindspore/ops/function/nn_func.py +163 -275
- mindspore/ops/function/other_func.py +2 -2
- mindspore/ops/function/random_func.py +69 -202
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/functional.py +327 -332
- mindspore/ops/operations/__init__.py +3 -13
- mindspore/ops/operations/_grad_ops.py +27 -3
- mindspore/ops/operations/_inner_ops.py +356 -53
- mindspore/ops/operations/_rl_inner_ops.py +2 -2
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +65 -82
- mindspore/ops/operations/comm_ops.py +93 -784
- mindspore/ops/operations/custom_ops.py +28 -51
- mindspore/ops/operations/debug_ops.py +4 -4
- mindspore/ops/operations/inner_ops.py +2 -2
- mindspore/ops/operations/manually_defined/ops_def.py +4 -304
- mindspore/ops/operations/math_ops.py +50 -3
- mindspore/ops/operations/nn_ops.py +247 -14
- mindspore/ops/operations/other_ops.py +3 -3
- mindspore/ops/operations/random_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +1 -1
- mindspore/ops/primitive.py +8 -9
- mindspore/ops/silent_check.py +5 -5
- mindspore/ops_generate/arg_dtype_cast.py +9 -2
- mindspore/ops_generate/arg_handler.py +0 -26
- mindspore/ops_generate/gen_aclnn_implement.py +4 -1
- mindspore/ops_generate/gen_ops.py +4 -26
- mindspore/ops_generate/gen_pyboost_func.py +12 -41
- mindspore/ops_generate/gen_utils.py +0 -21
- mindspore/ops_generate/pyboost_utils.py +2 -7
- mindspore/ops_generate/template.py +0 -1
- mindspore/parallel/_auto_parallel_context.py +1 -21
- mindspore/parallel/_tensor.py +5 -0
- mindspore/parallel/_transformer/transformer.py +1 -1
- mindspore/parallel/_utils.py +1 -15
- mindspore/parallel/algo_parameter_config.py +3 -1
- mindspore/parallel/checkpoint_transform.py +9 -12
- mindspore/parallel/cluster/process_entity/_api.py +29 -28
- mindspore/parallel/cluster/process_entity/_utils.py +3 -13
- mindspore/parallel/cluster/run.py +16 -13
- mindspore/parallel/parameter_broadcast.py +2 -2
- mindspore/parallel/shard.py +17 -31
- mindspore/profiler/__init__.py +2 -3
- mindspore/profiler/common/util.py +2 -107
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +21 -8
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -82
- mindspore/profiler/parser/ascend_analysis/function_event.py +28 -43
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +27 -49
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +10 -15
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +20 -25
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +5 -5
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +1 -10
- mindspore/profiler/parser/ascend_hccl_generator.py +1 -4
- mindspore/profiler/parser/ascend_msprof_exporter.py +22 -43
- mindspore/profiler/parser/ascend_timeline_generator.py +5 -7
- mindspore/profiler/parser/minddata_parser.py +3 -72
- mindspore/profiler/profiling.py +59 -176
- mindspore/rewrite/api/node.py +1 -1
- mindspore/rewrite/common/namespace.py +5 -5
- mindspore/rewrite/parsers/assign_parser.py +0 -2
- mindspore/rewrite/parsers/class_def_parser.py +4 -8
- mindspore/run_check/_check_version.py +1 -1
- mindspore/scipy/fft.py +3 -1
- mindspore/scipy/linalg.py +3 -2
- mindspore/scipy/ops.py +3 -5
- mindspore/scipy/optimize/__init__.py +2 -2
- mindspore/train/__init__.py +4 -4
- mindspore/train/anf_ir_pb2.py +2 -8
- mindspore/train/callback/__init__.py +2 -5
- mindspore/train/callback/_backup_and_restore.py +2 -2
- mindspore/train/callback/_checkpoint.py +16 -104
- mindspore/train/callback/_landscape.py +1 -1
- mindspore/train/callback/_time_monitor.py +1 -1
- mindspore/train/data_sink.py +4 -5
- mindspore/train/dataset_helper.py +20 -45
- mindspore/train/model.py +38 -266
- mindspore/train/serialization.py +105 -256
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/version.py +1 -1
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +303 -420
- mindspore/_extends/pijit/__init__.py +0 -23
- mindspore/_extends/pijit/pijit_func_white_list.py +0 -343
- mindspore/common/file_system.py +0 -48
- mindspore/common/generator.py +0 -260
- mindspore/common/no_inline.py +0 -54
- mindspore/common/np_dtype.py +0 -25
- mindspore/communication/comm_func.py +0 -1140
- mindspore/hal/memory.py +0 -326
- mindspore/lib/libavcodec.so.59 +0 -0
- mindspore/lib/libavdevice.so.59 +0 -0
- mindspore/lib/libavfilter.so.8 +0 -0
- mindspore/lib/libavformat.so.59 +0 -0
- mindspore/lib/libavutil.so.57 +0 -0
- mindspore/lib/libmindspore_np_dtype.so +0 -0
- mindspore/lib/libswresample.so.4 +0 -0
- mindspore/lib/libswscale.so.6 +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.cpp +0 -326
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.py +0 -180
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
- mindspore/lib/plugin/ascend/custom_compiler/OWNERS +0 -12
- mindspore/lib/plugin/ascend/custom_compiler/setup.py +0 -255
- mindspore/lib/plugin/ascend/custom_compiler/start.sh +0 -26
- mindspore/lib/plugin/ascend/custom_compiler/template.json +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/base_type.h +0 -133
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -32
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/tiling_info.h +0 -60
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -37
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_op.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_backoff_base.h +0 -62
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -33
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_ops.h +0 -88
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_pa_op.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -95
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -84
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -61
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -224
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_impl.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_tiling.h +0 -187
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul.h +0 -245
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_interface.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_utils.h +0 -111
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/tiling_data.h +0 -54
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -364
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -114
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -98
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/mint/linalg/__init__.py +0 -22
- mindspore/nn/layer/embedding_service.py +0 -531
- mindspore/nn/layer/embedding_service_layer.py +0 -393
- mindspore/ops/function/reshard_func.py +0 -102
- mindspore/ops/operations/_infer_ops.py +0 -19
- mindspore/ops/operations/reshard_ops.py +0 -53
- mindspore/profiler/common/process_pool.py +0 -41
- mindspore/profiler/common/singleton.py +0 -28
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/train/callback/_cluster_monitor.py +0 -201
- mindspore/train/callback/_flops_collector.py +0 -238
- mindspore/train/callback/_mindio_ttp.py +0 -443
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
```diff
@@ -39,15 +39,16 @@ from mindspore.ops.operations.nn_ops import FractionalMaxPoolWithFixedKsize, Fra
 from mindspore.ops.operations.nn_ops import PadV3
 from mindspore.ops.operations.nn_ops import ChannelShuffle
 from mindspore.ops.operations.nn_ops import TripletMarginLoss
+from mindspore.ops.operations.nn_ops import LayerNormExt
 from mindspore.ops.operations._sequence_ops import TupleToTensor, TensorToTuple, ListToTensor
 from mindspore.common.api import _function_forbid_reuse
 from mindspore.ops.auto_generate import log_softmax, dense, prelu, celu, relu, fast_gelu, silu, elu, sigmoid, relu6
-from mindspore.ops.auto_generate import
-from mindspore.ops.auto_generate import (reflection_pad_1d_op, reflection_pad_2d_op, reflection_pad_3d_op,
+from mindspore.ops.auto_generate.gen_ops_prim import GroupNorm
+from mindspore.ops.auto_generate import (reflection_pad_1d_op, reflection_pad_2d_op, reflection_pad_3d_op,
                                          replication_pad_1d_op, replication_pad_2d_op, replication_pad_3d_op,
-                                         constant_pad_nd_op, dropout_ext_op
+                                         constant_pad_nd_op, dropout_ext_op)
 from mindspore.ops.auto_generate.gen_ops_prim import embedding_op, Convolution
-from mindspore.
+from mindspore.nn.generator import default_generator
 
 abs_ = P.Abs()
 add_ = P.Add()
@@ -104,7 +105,6 @@ check_int_const = validator.check_is_int
 check_non_negative_float_const = validator.check_non_negative_float
 check_string_const = constexpr(validator.check_string)
 
-generator_step_ = Tensor(1, mstype.int64)
 
 def adaptive_avg_pool2d(input, output_size):
     r"""
@@ -540,23 +540,22 @@ def avg_pool2d_ext(input, kernel_size, stride=None, padding=0, ceil_mode=False,
         count_include_pad (bool): If True, include the zero-padding in the averaging calculation.
             Default: ``True`` .
         divisor_override (int): If specified, it will be used as divisor in the averaging calculation,
-            otherwise
+            otherwise `kernel_size` will be used. Default: ``None``.
 
     Returns:
         Tensor, with shape :math:`(N, C, H_{out}, W_{out})`.
 
-
-
-
-
-        \end{array}
+    .. math::
+
+        H_{out} = \frac{H_{in} + 2 \times padding[0] - kernel_size[0]}{stride[0]} + 1
+        W_{out} = \frac{W_{in} + 2 \times padding[1] - kernel_size[1]}{stride[1]} + 1
 
     Raises:
         TypeError: If `input` is not a Tensor.
         TypeError: If `kernel_size` or `stride` is neither int nor tuple.
         TypeError: If `ceil_mode` or `count_include_pad` is not a bool.
-        TypeError: If `divisor_override` is not an int
-        ValueError: If
+        TypeError: If `divisor_override` is not an int.
+        ValueError: If length of shape of `input` is not equal to `4` or `3`.
         ValueError: If `kernel_size` or `stride` is less than 1.
         ValueError: If `kernel_size` or `stride` is a tuple whose length is not equal to `2` or `1`.
         ValueError: If `padding` is neither a int nor a tuple whose length is equal to `2` or `1`.
```
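The rc2 docstring above replaces the old array-style shape block with two output-size equations. A small arithmetic check (plain Python, not package code; assumes `ceil_mode=False` so the division floors) against the docstring's own example input of shape (1, 3, 3, 4):

```python
# Arithmetic check of the avg_pool2d_ext output-shape formulas quoted above
# (hypothetical helper; the docstring example uses kernel_size=2, stride=1, padding=0).
def avg_pool2d_out_hw(h_in, w_in, kernel_size=2, stride=1, padding=0):
    h_out = (h_in + 2 * padding - kernel_size) // stride + 1
    w_out = (w_in + 2 * padding - kernel_size) // stride + 1
    return h_out, w_out

# Input of shape (1, 3, 3, 4) -> spatial dims H=3, W=4
print(avg_pool2d_out_hw(3, 4))  # (2, 3), matching the 2x3 block printed in the example
```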
```diff
@@ -568,9 +567,9 @@ def avg_pool2d_ext(input, kernel_size, stride=None, padding=0, ceil_mode=False,
     Examples:
         >>> import mindspore
         >>> import numpy as np
-        >>> from mindspore import Tensor,
+        >>> from mindspore import Tensor, mint
         >>> x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mindspore.float32)
-        >>> output =
+        >>> output = mint.avg_pool2d(x, kernel_size=2, stride=1)
         >>> print(output)
         [[[[ 2.5 3.5 4.5]
            [ 6.5 7.5 8.5]]
@@ -1222,12 +1221,12 @@ def max_unpool3d(x, indices, kernel_size, stride=None, padding=0, output_size=No
     return out
 
 
-def binary_cross_entropy_with_logits(
+def binary_cross_entropy_with_logits(logits, label, weight=None, pos_weight=None, reduction='mean'):
     r"""
-    Adds sigmoid activation function to input `
-
+    Adds sigmoid activation function to input `logits`, and uses the given logits to compute binary cross entropy
+    between the logits and the label.
 
-    Sets input
+    Sets input logits as :math:`X`, input label as :math:`Y`, input weight as :math:`W`, output as :math:`L`. Then,
 
     .. math::
 
@@ -1268,14 +1267,14 @@ def binary_cross_entropy_with_logits(input, target, weight=None, pos_weight=None
     :math:`P_c>1` increases the recall, :math:`P_c<1` increases the precision.
 
     Args:
-
-
+        logits (Tensor): Input logits. Data type must be float16 or float32.
+        label (Tensor): Ground truth label, has the same shape as `logits`.
             Data type must be float16 or float32.
         weight (Tensor, optional): A rescaling weight applied to the loss of each batch element. It can be
-            broadcast to a tensor with shape of `
+            broadcast to a tensor with shape of `logits`. Data type must be float16 or float32.
             Default: ``None``, `weight` is a Tensor whose value is ``1``.
         pos_weight (Tensor, optional): A weight of positive examples. Must be a vector with length equal to the
-            number of classes. It can be broadcast to a tensor with shape of `
+            number of classes. It can be broadcast to a tensor with shape of `logits`.
            Data type must be float16 or float32. Default: ``None``, `pos_weight` is a Tensor whose value is ``1``.
         reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
            ``'sum'`` . Default: ``'mean'`` .
@@ -1285,14 +1284,14 @@ def binary_cross_entropy_with_logits(input, target, weight=None, pos_weight=None
         - ``'sum'``: the output elements will be summed.
 
     Returns:
-        Tensor or Scalar, if `reduction` is ``'none'``, it's a tensor with the same shape and type as input `
+        Tensor or Scalar, if `reduction` is ``'none'``, it's a tensor with the same shape and type as input `logits`.
         Otherwise, the output is a scalar.
 
     Raises:
-        TypeError: If input `
-        TypeError: If data type of input `
+        TypeError: If input `logits`, `label`, `weight`, `pos_weight` is not Tensor.
+        TypeError: If data type of input `logits`, `label`, `weight`, `pos_weight` is neither float16 nor float32.
         TypeError: If data type of input `reduction` is not string.
-        ValueError: If `weight` or `pos_weight` can not be broadcast to a tensor with shape of `
+        ValueError: If `weight` or `pos_weight` can not be broadcast to a tensor with shape of `logits`.
         ValueError: If `reduction` is not one of ``'none'``, ``'mean'`` or ``'sum'``.
 
     Supported Platforms:
@@ -1302,17 +1301,21 @@ def binary_cross_entropy_with_logits(input, target, weight=None, pos_weight=None
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>>
-        >>>
+        >>> logits = Tensor(np.array([[-0.8, 1.2, 0.7], [-0.1, -0.4, 0.7]]), mindspore.float32)
+        >>> label = Tensor(np.array([[0.3, 0.8, 1.2], [-0.6, 0.1, 2.2]]), mindspore.float32)
         >>> weight = Tensor(np.array([1.0, 1.0, 1.0]), mindspore.float32)
         >>> pos_weight = Tensor(np.array([1.0, 1.0, 1.0]), mindspore.float32)
-        >>> output = ops.binary_cross_entropy_with_logits(
+        >>> output = ops.binary_cross_entropy_with_logits(logits, label, weight, pos_weight)
         >>> print(output)
         0.3463612
     """
 
+    if weight is None:
+        weight = ops.ones_like(logits)
+    if pos_weight is None:
+        pos_weight = ops.ones_like(logits)
     bce_with_logits_loss_op = _get_cache_prim(NN_OPS.BCEWithLogitsLoss)(reduction)
-    return bce_with_logits_loss_op(
+    return bce_with_logits_loss_op(logits, label, weight, pos_weight)
 
 
 @_function_forbid_reuse
```
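On the rc2 side, `binary_cross_entropy_with_logits` takes `logits`/`label` and fills in all-ones `weight`/`pos_weight` when they are omitted. A minimal NumPy sketch (illustrative only, not the package kernel) of the weighted BCE-with-logits computation the docstring describes, checked against the docstring example:

```python
import numpy as np

def bce_with_logits(logits, label, weight=None, pos_weight=None, reduction="mean"):
    # Same fallback as the rc2 body above: missing weights default to ones.
    weight = np.ones_like(logits) if weight is None else weight
    pos_weight = np.ones_like(logits) if pos_weight is None else pos_weight
    p = 1.0 / (1.0 + np.exp(-logits))  # sigmoid(logits)
    loss = -weight * (pos_weight * label * np.log(p) + (1.0 - label) * np.log(1.0 - p))
    return loss.mean() if reduction == "mean" else loss

logits = np.array([[-0.8, 1.2, 0.7], [-0.1, -0.4, 0.7]], dtype=np.float32)
label = np.array([[0.3, 0.8, 1.2], [-0.6, 0.1, 2.2]], dtype=np.float32)
print(bce_with_logits(logits, label))  # ~0.34636, close to the 0.3463612 in the docstring example
```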
```diff
@@ -1362,25 +1365,27 @@ def dropout(input, p=0.5, training=True, seed=None):
 
 
 @_function_forbid_reuse
-def dropout_ext(input, p=0.5, training=True):
+def dropout_ext(input, p=0.5, training=True, seed=None):
     r"""
     During training, randomly zeroes some of the elements of the input tensor
     with probability `p` from a Bernoulli distribution. It plays the role of reducing neuron correlation and
     avoid overfitting. And the return will be multiplied by :math:`\frac{1}{1-p}` during training.
-    During the reasoning, this operation returns the same Tensor as the `
+    During the reasoning, this operation returns the same Tensor as the `x`.
 
     Args:
-        input (Tensor): The input Tensor of shape :math:`(*, N)
-        p (float): The dropping rate
-        means dropping out 10% of input
-        training (bool): Apply
-
+        input (Tensor): The input Tensor of shape :math:`(*, N)`, with data type of float16, float32 or float64.
+        p (float, optional): The dropping rate, between 0 and 1, e.g. p = 0.1,
+            means dropping out 10% of input units. Default: ``0.5`` .
+        training (bool): Apply dropout_ext if is True. Default: ``True``.
+        seed (int, optional): Seed is used as entropy source for Random number engines generating pseudo-random numbers.
+            Default: ``None`` , which will be treated as ``0`` .
 
     Returns:
         - **output** (Tensor) - Zeroed tensor, with the same shape and data type as `input`.
 
     Raises:
         TypeError: If `p` is not a float.
+        TypeError: If dtype of `input` is not float16, float32 or float64.
         TypeError: If `input` is not a Tensor.
 
     Supported Platforms:
@@ -1390,14 +1395,15 @@ def dropout_ext(input, p=0.5, training=True):
         >>> import mindspore
         >>> from mindspore import Tensor, ops
         >>> input = Tensor(((20, 16), (50, 50)), mindspore.float32)
-        >>> output = ops.
+        >>> output = ops.dropout_ext(input, p=0.5)
         >>> print(output.shape)
         (2, 2)
     """
     check_bool_const(training, "training", "dropout_ext")
     if training is False:
         return input
-
+    generator = default_generator()
+    seed, offset = generator(1)
     out, _ = dropout_ext_op(input, p, seed, offset)
     return out
 
```
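The rc2 body draws a `(seed, offset)` pair from `default_generator()` before calling `dropout_ext_op`. A rough NumPy illustration (not the package kernel; the RNG and mask below are stand-ins) of inverted dropout with the 1/(1-p) rescaling the docstring mentions:

```python
import numpy as np

def dropout_sketch(x, p=0.5, training=True, seed=0):
    # Inference path returns the input unchanged, as in the docstring.
    if not training:
        return x
    rng = np.random.default_rng(seed)          # stand-in for the generator's (seed, offset) state
    keep = rng.random(x.shape) >= p            # Bernoulli keep-mask, keep probability 1 - p
    return np.where(keep, x / (1.0 - p), 0.0)  # survivors are rescaled by 1/(1-p)

x = np.array([[20.0, 16.0], [50.0, 50.0]], dtype=np.float32)
print(dropout_sketch(x, p=0.5).shape)  # (2, 2), the same shape as in the docstring example
```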
```diff
@@ -2041,10 +2047,10 @@ def flip(input, dims):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> import mindspore
-        >>>
+        >>> import mindspore as ms
+        >>> import mindspore.ops as ops
         >>> import numpy as np
-        >>> input =
+        >>> input = ms.Tensor(np.arange(1, 9).reshape((2, 2, 2)))
         >>> output = ops.flip(input, (0, 2))
         >>> print(output)
         [[[6 5]
@@ -2052,7 +2058,7 @@ def flip(input, dims):
          [[2 1]
           [4 3]]]
     """
-    res =
+    res = _get_cache_prim(ops.ReverseV2)(axis=dims)(input)
     return res
 
 
```
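On the rc2 side, `flip` is implemented through a cached `ops.ReverseV2` primitive. The reversal on axes (0, 2) used in the docstring example can be sanity-checked with NumPy (illustrative only):

```python
import numpy as np

x = np.arange(1, 9).reshape((2, 2, 2))
print(np.flip(x, axis=(0, 2)))  # reverse along the first and last axes, like ops.flip(input, (0, 2))
# [[[6 5]
#   [8 7]]
#  [[2 1]
#   [4 3]]]
```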
```diff
@@ -2074,7 +2080,7 @@ def flipud(input):
 
     Examples:
         >>> import mindspore as ms
-        >>>
+        >>> import mindspore.ops as ops
         >>> import numpy as np
         >>> input = ms.Tensor(np.arange(1, 9).reshape((2, 2, 2)))
         >>> output = ops.flipud(input)
@@ -2105,7 +2111,7 @@ def fliplr(input):
 
     Examples:
         >>> import mindspore as ms
-        >>>
+        >>> import mindspore.ops as ops
         >>> import numpy as np
         >>> input = ms.Tensor(np.arange(1, 9).reshape((2, 2, 2)))
         >>> output = ops.fliplr(input)
@@ -2134,7 +2140,7 @@ def is_floating_point(input):
 
     Examples:
         >>> import mindspore as ms
-        >>>
+        >>> import mindspore.ops as ops
         >>> from mindspore import Tensor
         >>> x = ms.Tensor([1, 2, 3], ms.float32)
         >>> y = ms.Tensor([1, 2, 3], ms.int64)
@@ -2582,9 +2588,8 @@ def interpolate(input,
                 "For 'interpolate', it is incorrect to set 'recompute_scale_factor' to True"
                 " after specifying an explicit 'size'.")
         if F.isconstant(shape) and F.isconstant(scale_factor):
-
-
-                          for i in range(tuple_len)])
+            size = tuple([floor(shape[i + 2] * scale_factor[i]) for i in
+                          range(min(len(shape) - 2), len(scale_factor))])
         else:
             size = _interpolate_scale_factor_convert_size(shape, scale_factor)
             scale_factor = None
@@ -2634,9 +2639,8 @@ def _interpolate_ext_scale_factor_convert_size(input, scale_factor):
     shape = F.shape(input)
     size = None
     if F.isconstant(shape) and F.isconstant(scale_factor):
-
-
-                      for i in range(tuple_len)])
+        size = tuple([floor(shape[i + 2] * scale_factor[i]) for i in
+                      range(min(len(shape) - 2), len(scale_factor))])
     else:
         x = tuple_to_tensor_(shape[2:], mstype.int64)
         y = tuple_to_tensor_(scale_factor, mstype.float32)
```
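Both hunks compute an explicit `size` from `scale_factor` by flooring `shape[i + 2] * scale_factor[i]` over the spatial dimensions. A standalone sketch (plain Python with illustrative names, written as a straightforward loop over the scale factors) of that conversion:

```python
from math import floor

def scale_factor_to_size(shape, scale_factor):
    # shape is (N, C, *spatial); spatial dim i becomes floor(shape[i + 2] * scale_factor[i]).
    return tuple(floor(shape[i + 2] * scale_factor[i]) for i in range(len(scale_factor)))

print(scale_factor_to_size((1, 3, 10, 17), (2.0, 0.5)))  # (20, 8)
```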
```diff
@@ -3038,54 +3042,6 @@ def softmax(input, axis=-1, *, dtype=None):
     return softmax_(input)
 
 
-def softmax_ext(input, dim=None, dtype=None):
-    r"""
-    Applies the Softmax operation to the input tensor on the specified axis.
-    Suppose a slice in the given axis :math:`dim`, then for each element :math:`input_i`,
-    the Softmax function is shown as follows:
-
-    .. math::
-        \text{output}(input_i) = \frac{\exp(input_i)}{\sum_{j = 0}^{N-1}\exp(input_j)},
-
-    where :math:`N` is the length of the tensor.
-
-    Args:
-        input (Tensor): Tensor of shape :math:`(N, *)`, where :math:`*` means, any number of
-            additional dimensions.
-        dim (int, optional): The dim to perform the Softmax operation. Default: ``None`` .
-
-    Keyword Args:
-        dtype (:class:`mindspore.dtype`, optional): When set, `input` will be converted to the specified type,
-            `dtype`, before execution, and dtype of returned Tensor will also be `dtype`. Default: ``None`` .
-
-    Returns:
-        Tensor, with the same type and shape as the `input`.
-
-    Raises:
-        TypeError: If `dim` is not an int.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
-        >>> output = ops.function.nn_func.softmax_ext(input)
-        >>> print(output)
-        [0.01165623 0.03168492 0.08612854 0.23412167 0.6364086 ]
-    """
-    dim = -1 if dim is None else dim
-    if not isinstance(dim, int):
-        type_dim = type(dim).__name__
-        raise TypeError(f" the type of 'dim' must be 'int', but got '{dim}' with type '{type_dim}'.")
-    if dtype is not None:
-        input = ops.cast(input, dtype)
-    softmax_ = _get_cache_prim(P.Softmax)(dim)
-    return softmax_(input)
-
-
 def softmin(x, axis=-1, *, dtype=None):
     r"""
     Applies the Softmin operation to the input tensor on the specified axis.
```
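The removed `softmax_ext` helper documented the standard softmax formula. A short NumPy check (illustrative, not package code) that reproduces the values from its removed example:

```python
import numpy as np

def softmax(x, dim=-1):
    # exp(x_i) / sum_j exp(x_j), shifted by the max for numerical stability
    e = np.exp(x - x.max(axis=dim, keepdims=True))
    return e / e.sum(axis=dim, keepdims=True)

print(softmax(np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)))
# ~[0.01165623 0.03168492 0.08612854 0.23412167 0.6364086 ]
```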
```diff
@@ -3239,6 +3195,51 @@ def softplus(input, beta=1, threshold=20): # pylint:disable=redefined-outer-name
     return ops.select(input * beta > threshold, input, op_output)
 
 
+def softplus_ext(input, beta=1, threshold=20): # pylint:disable=redefined-outer-name
+    r"""
+    Applies softplus function to `input` element-wise.
+
+    The softplus function is shown as follows, x is the element of `input` :
+
+    .. math::
+
+        \text{output} = \frac{1}{beta}\log(1 + \exp(\text{beta * x}))
+
+    When :math:`input * beta > threshold`, the implementation converts to the linear function
+    to ensure numerical stability.
+
+    Args:
+        input (Tensor) - Tensor of any dimension.
+            Supported dtypes:
+
+            - Ascend: float16, float32, bfloat16
+
+        beta (number, optional) - The :math:`\beta` value in softplus function. Default: ``1`` .
+        threshold (number, optional) - When :math:`input * beta > threshold`, converting softplus to a linear function.
+            Default: ``20`` .
+
+    Returns:
+        Tensor, with the same type and shape as the `input` .
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If the dtype of `input` is not float16, float32, bfloat16.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> input = Tensor(np.array([0.1, 0.2, 30, 25]), mindspore.float32)
+        >>> output = mint.softplus(input)
+        >>> print(output)
+        [0.74439657 0.7981388 30. 25.]
+    """
+    return _get_cache_prim(ops.auto_generate.SoftplusExt)()(input, beta, threshold)
+
+
 def selu(input_x):
     r"""
     Activation function SeLU (Scaled exponential Linear Unit).
```
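A quick NumPy check (illustrative only) of the softplus formula and the threshold cut-over documented in the `softplus_ext` docstring added above:

```python
import numpy as np

def softplus(x, beta=1.0, threshold=20.0):
    # (1/beta) * log(1 + exp(beta * x)), switching to the identity once beta * x > threshold
    return np.where(beta * x > threshold, x, np.log1p(np.exp(beta * x)) / beta)

print(softplus(np.array([0.1, 0.2, 30.0, 25.0], dtype=np.float32)))
# ~[0.7444 0.7981 30. 25.], matching the docstring example
```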
@@ -3576,10 +3577,7 @@ def pdist(input, p=2.0):
|
|
|
3576
3577
|
|
|
3577
3578
|
def _circular_pad(input_x, padding):
|
|
3578
3579
|
"""circular pad"""
|
|
3579
|
-
|
|
3580
|
-
padding = tuple_to_tensor_(padding, mstype.int64)
|
|
3581
|
-
elif isinstance(padding, list):
|
|
3582
|
-
padding = list_to_tensor_(padding, mstype.int64)
|
|
3580
|
+
padding = scalar_to_tensor_(padding, const_arg=True)
|
|
3583
3581
|
is_expand = False
|
|
3584
3582
|
if padding.shape[0] // 2 + 1 == input_x.ndim:
|
|
3585
3583
|
input_x = input_x.expand_dims(0)
|
|
@@ -3590,85 +3588,24 @@ def _circular_pad(input_x, padding):
|
|
|
3590
3588
|
return out
|
|
3591
3589
|
|
|
3592
3590
|
|
|
3593
|
-
def
|
|
3594
|
-
"""reflection pad"""
|
|
3595
|
-
out = input
|
|
3596
|
-
if len(pad) == 2:
|
|
3597
|
-
out = reflection_pad_1d_op(input, pad)
|
|
3598
|
-
elif len(pad) == 4:
|
|
3599
|
-
out = reflection_pad_2d_op(input, pad)
|
|
3600
|
-
else:
|
|
3601
|
-
out = reflection_pad_3d_op(input, pad)
|
|
3602
|
-
return out
|
|
3603
|
-
|
|
3604
|
-
|
|
3605
|
-
def _replication_pad(input, pad):
|
|
3606
|
-
"""replication pad"""
|
|
3607
|
-
out = input
|
|
3608
|
-
if len(pad) == 2:
|
|
3609
|
-
out = replication_pad_1d_op(input, pad)
|
|
3610
|
-
elif len(pad) == 4:
|
|
3611
|
-
out = replication_pad_2d_op(input, pad)
|
|
3612
|
-
else:
|
|
3613
|
-
out = replication_pad_3d_op(input, pad)
|
|
3614
|
-
return out
-
-
-def pad_ext(input, pad, mode='constant', value=0.0):
+def pad_ext(input, pad, mode='constant', value=None):
     r"""
     Pads the input tensor according to the pad.
 
-    .. warning::
-        `circular` mode has poor performance and is not recommended.
-
     Args:
         input (Tensor): Tensor of shape :math:`(N, *)`, where :math:`*` means, any number of additional dimensions.
-        pad (
-            :math:`\left\lfloor\frac{\text{len(pad)}}{2}\right\rfloor` dimensions
-            of `input` will be padded.
-
-            Example: to pad only the last dimension of the input tensor, then
-            :attr:`pad` has the form
-            :math:`(\text{padding_left}, \text{padding_right})`;
-
-            Example: to pad the last 2 dimensions of the input tensor, then use
-            :math:`(\text{padding_left}, \text{padding_right}, \text{padding_top}, \text{padding_bottom})`;
-
-            Example: to pad the last 3 dimensions, use
-            :math:`(\text{padding_left}, \text{padding_right}, \text{padding_top}, \text{padding_bottom},
-            \text{padding_front}, \text{padding_back})` and so on.
-
+        pad (tuple[int]): Filling position of pad.
         mode (str, optional): Pad filling mode, ``'constant'`` , ``'reflect'`` , ``'replicate'`` or ``'circular'`` .
             Default: ``'constant'`` .
-
-            For ``'constant'`` mode, please refer to :class:`mindspore.nn.ConstantPad1d` as an example to understand
-            this filling pattern and extend the padding pattern to n dimensions.
-
-            For ``'reflect'`` mode, please refer to :class:`mindspore.nn.ReflectionPad1d` as an example to understand
-            this filling pattern.
-            The reflect mode is used to pad the last three dimensions of 4D or 5D input, the last two dimensions of 3D
-            or 4D input, or the last dimension of 2D or 3D input.
-
-            For ``'replicate'`` mode, please refer to :class:`mindspore.nn.ReplicationPad1d` as an example to understand
-            this filling pattern.
-            The replicate mode is used to pad the last three dimensions of 4D or 5D input, the last two dimensions of 3D
-            or 4D input, or the last dimension of 2D or 3D input.
-
-            For ``'circular'`` mode, the pixels from one edge of the image are wrapped around to the opposite edge,
-            such that the pixel on the right edge of the image is replaced with the pixel on the left edge,
-            and the pixel on the bottom edge is replaced with the pixel on the top edge.
-            The circular mode is used to pad the last three dimensions of 4D or 5D input, the last two dimensions of 3D
-            or 4D input, or the last dimension of 2D or 3D input.
-
         value (Union[int, float, None], optional): Valid only in ``'constant'`` mode.
-            Set the
-            Default: ``
+            Set the pad value in ``'constant'`` mode. If the value is None, 0 is used as the default pad value.
+            Default: ``None`` .
 
     Returns:
-        Tensor, the tensor after
+        Tensor, the tensor after pad.
 
     Raises:
-        TypeError: If `pad` is not an int of tuple
+        TypeError: If `pad` is not an int of tuple.
         TypeError: If `input` is not a Tensor.
         ValueError: If length of `pad` is not even.
         ValueError: If length of `pad` is greater than 6.
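The paragraphs removed above spelled out how the flat `pad` tuple maps onto trailing dimensions: `len(pad) // 2` trailing dimensions are padded, starting from the last one. A small NumPy sketch of that mapping, using `np.pad` as a stand-in for the MindSpore operator (the helper `to_np_pad_width` is ours, not part of either package version):

    import numpy as np

    def to_np_pad_width(ndim, pad):
        """Convert a flat (left, right, top, bottom, ...) pad tuple, which pads the
        trailing len(pad)//2 dimensions starting from the last one, into the
        per-axis pad_width list that np.pad expects."""
        pad_width = [(0, 0)] * ndim
        for i in range(len(pad) // 2):
            pad_width[ndim - 1 - i] = (pad[2 * i], pad[2 * i + 1])
        return pad_width

    x = np.arange(8).reshape(1, 2, 2, 2)
    # (1, 0, 0, 1): last dim gets (1, 0), second-to-last gets (0, 1).
    print(to_np_pad_width(x.ndim, (1, 0, 0, 1)))  # [(0, 0), (0, 0), (0, 1), (1, 0)]
    print(np.pad(x, to_np_pad_width(x.ndim, (1, 0, 0, 1)), constant_values=6))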
@@ -3678,10 +3615,11 @@ def pad_ext(input, pad, mode='constant', value=0.0):
         ``Ascend``
 
     Examples:
-        >>>
+        >>> import mindspore as ms
+        >>> from mindspore.mint.nn.functional import pad
         >>> import numpy as np
         >>> x = ms.Tensor(np.arange(1 * 2 * 2 * 2).reshape((1, 2, 2, 2)), dtype=ms.float64)
-        >>> output =
+        >>> output = pad(x, [1, 0, 0, 1], mode='constant', value=6.0)
         >>> print(output)
         [[[[6. 0. 1.]
            [6. 2. 3.]
@@ -3689,6 +3627,14 @@ def pad_ext(input, pad, mode='constant', value=0.0):
           [[6. 4. 5.]
            [6. 6. 7.]
            [6. 6. 6.]]]]
+        >>> output1 = ops.pad(x, (1, 0, 0, 1), mode='reflect')
+        >>> print(output1)
+        [[[[1. 0. 1.]
+           [3. 2. 3.]
+           [1. 0. 1.]]
+          [[5. 4. 5.]
+           [7. 6. 7.]
+           [5. 4. 5.]]]]
     """
     if not isinstance(input, Tensor):
         raise TypeError(f"For 'pad', the type of 'input' must be Tensor, but got {type(input)}.")
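The two example outputs in this hunk can be cross-checked with NumPy, which supports the same 'constant' and 'reflect' edge behaviours. This is only an independent check of the printed values, not of the MindSpore call path:

    import numpy as np

    x = np.arange(8, dtype=np.float64).reshape(1, 2, 2, 2)
    pad_width = ((0, 0), (0, 0), (0, 1), (1, 0))  # pad=(1, 0, 0, 1) on the last two dims

    constant = np.pad(x, pad_width, mode="constant", constant_values=6.0)
    reflect = np.pad(x, pad_width, mode="reflect")
    print(constant)  # matches the 'constant' output above, e.g. [[[[6. 0. 1.] ...
    print(reflect)   # matches the 'reflect' output above, e.g. [[[[1. 0. 1.] ...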
@@ -3698,17 +3644,30 @@ def pad_ext(input, pad, mode='constant', value=0.0):
     if mode == "constant":
         value = 0 if value is None else value
         out = constant_pad_nd_op(input, pad, value)
+    elif mode == "circular":
+        out = _circular_pad(input, pad)
     else:
-        if
-
-
-
-
-
-
-
+        if len(pad) == 2:
+            if mode == "reflect":
+                out = reflection_pad_1d_op(input, pad)
+            elif mode == "replicate":
+                out = replication_pad_1d_op(input, pad)
+            else:
+                raise ValueError(f"Pad filling mode must be 'constant' 'circular' 'reflect' or 'replicate'.")
+        elif len(pad) == 4:
+            if mode == "reflect":
+                out = reflection_pad_2d_op(input, pad)
+            elif mode == "replicate":
+                out = replication_pad_2d_op(input, pad)
+            else:
+                raise ValueError(f"Pad filling mode must be 'constant' 'circular' 'reflect' or 'replicate'.")
         else:
-
+            if mode == "reflect":
+                out = reflection_pad_3d_op(input, pad)
+            elif mode == "replicate":
+                out = replication_pad_3d_op(input, pad)
+            else:
+                raise ValueError(f"Pad filling mode must be 'constant' 'circular' 'reflect' or 'replicate'.")
     return out
 
 
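Read as a table, the added control flow picks a backend primitive from the mode and the pad length. The dictionary below is only our restatement of that routing for readability; the op names are the identifiers visible in the hunk, not public API:

    # len(pad) // 2 trailing dimensions are padded; 'constant' and 'circular'
    # use a single op, the other modes dispatch on dimensionality.
    ROUTING = {
        ("constant", 1): "constant_pad_nd_op",
        ("constant", 2): "constant_pad_nd_op",
        ("constant", 3): "constant_pad_nd_op",
        ("circular", 1): "_circular_pad",
        ("circular", 2): "_circular_pad",
        ("circular", 3): "_circular_pad",
        ("reflect", 1): "reflection_pad_1d_op",
        ("reflect", 2): "reflection_pad_2d_op",
        ("reflect", 3): "reflection_pad_3d_op",
        ("replicate", 1): "replication_pad_1d_op",
        ("replicate", 2): "replication_pad_2d_op",
        ("replicate", 3): "replication_pad_3d_op",
    }

    def route(mode, pad):
        # Any other mode string falls through to the ValueError branch in the real code.
        return ROUTING[(mode, len(pad) // 2)]

    print(route("reflect", (1, 0, 0, 1)))  # reflection_pad_2d_op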
@@ -3790,7 +3749,7 @@ def pad(input_x, padding, mode='constant', value=None):
 
     Examples:
         >>> import mindspore as ms
-        >>>
+        >>> import mindspore.ops as ops
         >>> import numpy as np
         >>> x = ms.Tensor(np.arange(1 * 2 * 2 * 2).reshape((1, 2, 2, 2)), dtype=ms.float64)
         >>> output = ops.pad(x, [1, 0, 0, 1], mode='constant', value=6.0)
@@ -5267,7 +5226,7 @@ def hinge_embedding_loss(inputs, targets, margin=1.0, reduction='mean'):
     Examples:
         >>> import numpy as np
         >>> import mindspore.common.dtype as mstype
-        >>>
+        >>> import mindspore.ops as ops
         >>> from mindspore import Tensor
         >>> arr1 = np.array([0.9, -1.2, 2, 0.8, 3.9, 2, 1, 0, -1]).reshape((3, 3))
         >>> arr2 = np.array([1, 1, -1, 1, -1, 1, -1, 1, 1]).reshape((3, 3))
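For reference, the hinge embedding loss this example feeds is elementwise x when the target is 1 and max(0, margin - x) when the target is -1, averaged under reduction='mean'. A NumPy sketch with the same arr1 and arr2, as a check of the arithmetic rather than of the MindSpore call:

    import numpy as np

    margin = 1.0
    arr1 = np.array([0.9, -1.2, 2, 0.8, 3.9, 2, 1, 0, -1]).reshape((3, 3))
    arr2 = np.array([1, 1, -1, 1, -1, 1, -1, 1, 1]).reshape((3, 3))

    loss = np.where(arr2 == 1, arr1, np.maximum(0.0, margin - arr1))
    print(loss.mean())  # 0.1666..., i.e. (0.9 - 1.2 + 0 + 0.8 + 0 + 2 + 0 + 0 - 1) / 9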
@@ -6102,23 +6061,18 @@ def adaptive_avg_pool1d(input, output_size):
 
 
 def layer_norm(input, normalized_shape, weight=None, bias=None, eps=1e-5):
-    r"""Applies the Layer Normalization
+    r"""Applies the Layer Normalization to the input tensor.
 
-
-    input of a single training case. LayerNorm is described in the paper
+    This operator will normalize the input tensor on given axis. LayerNorm is described in the paper
     `Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
 
-    Unlike batch normalization, layer normalization performs the exact same calculations at training and
-    test time. Applies to all channels and pixels, even batch_size=1. The formula is as follows:
-
     .. math::
-        y = \frac{x -
+        y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
 
-    where :math:`\gamma` is
-    learned through training.
+    where :math:`\gamma` is weight, :math:`\beta` is bias, :math:`\epsilon` is eps.
 
     Args:
-        input (Tensor):
+        input (Tensor): Tensor of shape :math:`(N, \ldots)`. The input of LayerNorm.
         normalized_shape (Union(int, tuple[int], list[int])): The normalized shape of `input` for LayerNorm.
             `normalized_shape` equal to `input_shape[begin_norm_axis:]`, where `begin_norm_axis` represents the axis
             where normalization begins.
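The restored formula can be exercised directly in NumPy: statistics are taken over the trailing `normalized_shape` axes, then scaled by gamma and shifted by beta. A minimal sketch in plain NumPy, not the LayerNormExt primitive used by the real function:

    import numpy as np

    def layer_norm_np(x, normalized_shape, gamma=None, beta=None, eps=1e-5):
        # Normalize over the trailing axes covered by normalized_shape.
        axes = tuple(range(x.ndim - len(normalized_shape), x.ndim))
        mean = x.mean(axis=axes, keepdims=True)
        var = x.var(axis=axes, keepdims=True)
        y = (x - mean) / np.sqrt(var + eps)
        if gamma is not None:
            y = y * gamma
        if beta is not None:
            y = y + beta
        return y

    x = np.arange(12, dtype=np.float32).reshape(2, 6)
    print(layer_norm_np(x, (6,)))  # each row ends up with ~zero mean and ~unit variance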
@@ -6130,7 +6084,7 @@ def layer_norm(input, normalized_shape, weight=None, bias=None, eps=1e-5):
             Default: ``1e-5`` .
 
     Returns:
-        Tensor
+        - **output** (Tensor) - The normalized input, has the same type and shape as the `input`.
 
     Raises:
         TypeError: If `input` is not a Tensor.
@@ -6158,6 +6112,7 @@ def layer_norm(input, normalized_shape, weight=None, bias=None, eps=1e-5):
         weight = ops.ones(normalized_shape, dtype=input.dtype)
     if bias is None:
         bias = ops.zeros(normalized_shape, dtype=input.dtype)
+    layer_norm_ext_op = LayerNormExt()
     return layer_norm_ext_op(input, normalized_shape, weight, bias, eps)[0]
 
 
@@ -6178,7 +6133,7 @@ def group_norm(input, num_groups, weight=None, bias=None, eps=1e-5):
     where :math:`\gamma` is `weight`, :math:`\beta` is `bias`, :math:`\epsilon` is `eps`.
 
     Args:
-        input (Tensor): The input feature with shape :math:`(N, C, *)` where :math:`*` means, any number of
+        input (Tensor) : The input feature with shape :math:`(N, C, *)` where :math:`*` means, any number of
             additional dimensions.
         num_groups (int): The number of groups to be divided along the channel dimension.
         weight (Tensor, optional): The shape :math:`(C,)`, Default: ``None``, has the same data type with `input`.
@@ -6200,9 +6155,9 @@ def group_norm(input, num_groups, weight=None, bias=None, eps=1e-5):
     Examples:
         >>> import mindspore as ms
         >>> import numpy as np
-        >>> from mindspore import
+        >>> from mindspore.ops import group_norm
         >>> x = ms.Tensor(np.ones([1, 2, 4, 4], np.float32))
-        >>> output =
+        >>> output = group_norm(x, 2)
         >>> print(output)
         [[[[0. 0. 0. 0.]
            [0. 0. 0. 0.]
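The all-zero output in this example follows from the formula: with an all-ones input every group has zero variance, so the normalized value is 0 everywhere, and the default gamma=1, beta=0 leave it there. A NumPy sketch of group normalization that reproduces it (not the GroupNorm primitive itself):

    import numpy as np

    def group_norm_np(x, num_groups, eps=1e-5):
        n = x.shape[0]
        g = x.reshape(n, num_groups, -1)          # split channels into groups
        mean = g.mean(axis=2, keepdims=True)
        var = g.var(axis=2, keepdims=True)
        return ((g - mean) / np.sqrt(var + eps)).reshape(x.shape)

    x = np.ones([1, 2, 4, 4], np.float32)
    print(group_norm_np(x, 2))  # all zeros, matching the printed output above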
@@ -6213,78 +6168,9 @@ def group_norm(input, num_groups, weight=None, bias=None, eps=1e-5):
            [0. 0. 0. 0.]
            [0. 0. 0. 0.]]]]
     """
-
-        weight = ops.ones([input.shape[1]], dtype=input.dtype)
-    if bias is None:
-        bias = ops.zeros([input.shape[1]], dtype=input.dtype)
+    group_norm_op = GroupNorm()
     return group_norm_op(input, num_groups, weight, bias, eps)[0]
 
-
-def batch_norm_ext(input, running_mean, running_var, weight=None, bias=None, training=False, momentum=0.1, eps=1e-5):
-    r"""
-    Batch Normalization for input data and updated parameters.
-
-    Batch Normalization is widely used in convolutional neural networks. This operation
-    applies Batch Normalization over inputs to avoid internal covariate shift as described
-    in the paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
-    Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
-    features using a mini-batch of data and the learned parameters can be described
-    in the following formula,
-
-    .. math::
-
-        y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
-
-    where :math:`\gamma` is `weight`, :math:`\beta` is `bias`, :math:`\epsilon` is `eps`, :math:`mean` is the
-    mean of :math:`x`, :math:`variance` is the variance of :math:`x`.
-
-    Args:
-        input (Tensor): Tensor of shape :math:`(N, C, *)`, with bfloat16, float16 or float32 data type.
-            For Atlas training products, the shape must be 2-4 dimensions currently.
-        running_mean (Tensor): The shape :math:`(C,)`, with bfloat, float16 or float32 data type.
-        running_var (Tensor): The shape :math:`(C,)`, with bfloat, float16 or float32 data type.
-        weight (Tensor, optional): The shape :math:`(C,)`, with bfloat, float16 or float32 data type, Default: ``None``.
-            Initialized to ``1`` when `weight` is None.
-        bias (Tensor, optional): The shape :math:`(C,)`, with bfloat, float16 or float32 data type. Default: ``None``.
-            Initialized to ``0`` when `weight` is None.
-        training (bool, optional): If `training` is `True`, `mean` and `variance` are computed during training.
-            If `training` is `False`, they're loaded from checkpoint during inference. Default: ``False`` .
-        momentum (float, optional): The hyper parameter to compute moving average for `running_mean` and `running_var`
-            (e.g. :math:`new\_running\_mean = (1 - momentum) * running\_mean + momentum * current\_mean`).
-            Default: ``0.1`` .
-        eps (float, optional): A small value added for numerical stability. Default: ``1e-5``.
-
-    Returns:
-        Tensor, has the same type and shape as `input`. The shape is :math:`(N, C, *)`.
-
-    Raises:
-        TypeError: If `training` is not a bool.
-        TypeError: If dtype of `eps` or `momentum` is not float.
-        TypeError: If `input`, `weight`, `bias`, `running_mean` or `running_var` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor([[1.0, 2.0], [3.0, 4.0]], mindspore.float32)
-        >>> running_mean = Tensor([0.5, 1.5], mindspore.float32)
-        >>> running_var = Tensor([0.1, 0.2], mindspore.float32)
-        >>> weight = Tensor([2.0, 2.0], mindspore.float32)
-        >>> bias = Tensor([-1.0, -1.0], mindspore.float32)
-        >>> output = ops.function.nn_func.batch_norm_ext(input_x, running_mean, running_var, weight, bias)
-        >>> print(output)
-        [[ 2.1621194 1.2360122]
-        [14.810596 10.180061 ]]
-    """
-    if weight is None:
-        weight = ops.ones([input.shape[1]], dtype=input.dtype)
-    if bias is None:
-        bias = ops.zeros([input.shape[1]], dtype=input.dtype)
-    output = batch_norm_ext_op(input, weight, bias, running_mean, running_var, training, momentum, eps)
-    return output[0]
-
 def batch_norm(input_x, running_mean, running_var, weight, bias, training=False, momentum=0.1, eps=1e-5):
     r"""
     Batch Normalization for input data and updated parameters.
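The removed example is easy to sanity-check by hand: with training=False the op uses the running statistics, so each entry is (x - running_mean) / sqrt(running_var + eps) * weight + bias. A NumPy reproduction of the deleted docstring's numbers, purely illustrative since batch_norm_ext only exists on the 2.3.0 side of this diff:

    import numpy as np

    x = np.array([[1.0, 2.0], [3.0, 4.0]], np.float32)
    running_mean = np.array([0.5, 1.5], np.float32)
    running_var = np.array([0.1, 0.2], np.float32)
    weight = np.array([2.0, 2.0], np.float32)
    bias = np.array([-1.0, -1.0], np.float32)
    eps = 1e-5

    y = (x - running_mean) / np.sqrt(running_var + eps) * weight + bias
    print(y)  # [[ 2.1621...  1.2360...] [14.810...  10.180...]], as in the removed example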
@@ -6422,7 +6308,6 @@ def binary_cross_entropy(logits, labels, weight=None, reduction='mean'):
     Args:
         logits (Tensor): The predictive value whose data type must be float16 or float32.
         labels (Tensor): The target value which has the same shape and data type as `logits`.
-            And the data type is float16 or float32.
         weight (Tensor, optional): A rescaling weight applied to the loss of each batch element.
             Its shape must be able to broadcast to that of `logits` and `labels`.
             And it must have the same shape and data type as `logits`. Default: ``None`` . If set to ``None`` ,
@@ -7045,6 +6930,8 @@ def gelu(input, approximate='none'):
     .. math::
         GELU(x_i) = 0.5 * x_i * (1 + \tanh(\sqrt(2 / \pi) * (x_i + 0.044715 * x_i^3)))
 
+    For the related GELU graph, refer to `GELU <https://en.wikipedia.org/wiki/Activation_function#/media/File:Activation_gelu.png>`_ .
+
     GELU Activation Function Graph:
 
     .. image:: ../images/GELU.png
@@ -7072,7 +6959,7 @@ def gelu(input, approximate='none'):
         >>> x = Tensor([1.0, 2.0, 3.0], mindspore.float32)
         >>> result = ops.gelu(x)
         >>> print(result)
-        [0.
+        [0.841192 1.9545976 2.9963627]
     """
     if approximate not in ['none', 'tanh']:
         raise ValueError("For ops.gelu, approximate value should be either 'none' or 'tanh'.")
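Both GELU variants accepted by this function can be evaluated with NumPy and the standard library. The exact erf-based form and the tanh approximation shown in the formula above agree to roughly three decimals on these inputs, which is the scale of the printed result. This is a cross-check of the math, not the MindSpore kernel:

    import numpy as np
    from math import erf, sqrt, pi

    x = np.array([1.0, 2.0, 3.0])
    # approximate='none': exact form based on the Gaussian CDF.
    gelu_erf = np.array([0.5 * v * (1 + erf(v / sqrt(2))) for v in x])
    # approximate='tanh': the formula quoted in the docstring.
    gelu_tanh = 0.5 * x * (1 + np.tanh(sqrt(2 / pi) * (x + 0.044715 * x ** 3)))
    print(gelu_erf)   # [0.8413... 1.9545... 2.9959...]
    print(gelu_tanh)  # [0.8411... 1.9546... 2.9963...]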
@@ -7185,7 +7072,7 @@ def lp_pool1d(x, norm_type, kernel_size, stride=None, ceil_mode=False):
 
     Examples:
         >>> import mindspore as ms
-        >>>
+        >>> import mindspore.ops as ops
         >>> from mindspore import Tensor
         >>> import numpy as np
         >>> x = Tensor(np.arange(2 * 3 * 4).reshape((2, 3, 4)), dtype=ms.float32)
@@ -7270,7 +7157,7 @@ def lp_pool2d(x, norm_type, kernel_size, stride=None, ceil_mode=False):
 
     Examples:
         >>> import mindspore as ms
-        >>>
+        >>> import mindspore.ops as ops
         >>> from mindspore import Tensor
         >>> import numpy as np
         >>> x = Tensor(np.arange(2 * 3 * 4 * 5).reshape((2, 3, 4, 5)), dtype=ms.float32)
@@ -7396,7 +7283,7 @@ def msort(input):
 
     Examples:
         >>> import mindspore as ms
-        >>>
+        >>> import mindspore.ops as ops
         >>> import numpy as np
         >>> input = ms.Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), ms.float16)
         >>> output = ops.msort(input)
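msort sorts along the first axis, so the example can be cross-checked with np.sort(..., axis=0). A NumPy analogue of the call above, not the MindSpore op itself:

    import numpy as np

    data = np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]], np.float16)
    print(np.sort(data, axis=0))  # column-wise sort, the same result ops.msort prints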
@@ -8128,6 +8015,7 @@ __all__ = [
     'softmin',
     'pdist',
     'pad',
+    'pad_ext',
     'prelu',
     'mirror_pad',
     'cross_entropy',
@@ -8170,6 +8058,6 @@ __all__ = [
     'channel_shuffle',
     'hardsigmoid',
     'group_norm',
-    '
+    'dropout_ext',
 ]
 __all__.sort()