mindspore 2.3.0__cp39-none-any.whl → 2.3.0rc2__cp39-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +0 -1512
- mindspore/__init__.py +1 -2
- mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +25 -5
- mindspore/_extends/graph_kernel/model/graph_parallel.py +1 -1
- mindspore/_extends/parse/__init__.py +2 -2
- mindspore/_extends/parse/compile_config.py +0 -29
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +5 -21
- mindspore/_extends/parse/resources.py +7 -5
- mindspore/_extends/parse/standard_method.py +59 -40
- mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +5 -26
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/less_batch_normalization.py +6 -9
- mindspore/common/__init__.py +1 -8
- mindspore/common/_register_for_tensor.py +9 -8
- mindspore/common/api.py +65 -275
- mindspore/common/dtype.py +4 -8
- mindspore/common/dump.py +5 -2
- mindspore/common/jit_config.py +1 -1
- mindspore/common/lazy_inline.py +2 -14
- mindspore/common/parameter.py +15 -14
- mindspore/common/recompute.py +5 -20
- mindspore/common/sparse_tensor.py +6 -21
- mindspore/common/tensor.py +52 -100
- mindspore/communication/__init__.py +11 -6
- mindspore/communication/management.py +94 -92
- mindspore/context.py +18 -180
- mindspore/dataset/engine/datasets.py +46 -69
- mindspore/dataset/engine/datasets_user_defined.py +53 -72
- mindspore/dataset/engine/datasets_vision.py +2 -2
- mindspore/dataset/engine/queue.py +38 -56
- mindspore/dataset/engine/validators.py +5 -11
- mindspore/dataset/vision/__init__.py +5 -5
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +1 -1
- mindspore/dataset/vision/transforms.py +46 -591
- mindspore/dataset/vision/utils.py +1 -121
- mindspore/dataset/vision/validators.py +3 -9
- mindspore/hal/__init__.py +1 -7
- mindspore/hal/device.py +1 -1
- mindspore/include/api/model.h +0 -3
- mindspore/include/dataset/vision.h +2 -54
- mindspore/include/mindapi/base/types.h +0 -1
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -35
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +0 -72
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/{aclnn_all_finite.h → aclnn_add_custom.h} +11 -9
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +12 -184
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +5 -4
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +286 -275
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_tiling.h +45 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +4 -8
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_tiling.h +4 -11
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/kernel/flash_attention_score_mix_hwsync.h +0 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -6
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +75 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/kernel/matmul.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +3 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_common_tiling.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_info.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_data.h +3 -36
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/kernel/matmul_stridedslice_fusion.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +4 -22
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +2 -16
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +3 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +4 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +4 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +2 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +4 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +12 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +1 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +2 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +1 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -17
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +7 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/mindrecord/filewriter.py +2 -2
- mindspore/mint/__init__.py +40 -720
- mindspore/mint/nn/__init__.py +7 -89
- mindspore/mint/nn/functional.py +16 -165
- mindspore/mint/optim/adamw.py +16 -15
- mindspore/nn/__init__.py +2 -0
- mindspore/nn/cell.py +98 -97
- mindspore/nn/extend/basic.py +2 -2
- mindspore/nn/extend/embedding.py +1 -1
- mindspore/nn/extend/layer/normalization.py +5 -7
- mindspore/nn/generator.py +297 -0
- mindspore/nn/layer/activation.py +3 -4
- mindspore/nn/layer/basic.py +16 -79
- mindspore/nn/layer/conv.py +8 -17
- mindspore/nn/layer/embedding.py +4 -1
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +1 -1
- mindspore/nn/layer/pooling.py +0 -5
- mindspore/nn/layer/rnn_cells.py +2 -2
- mindspore/nn/loss/loss.py +19 -19
- mindspore/nn/optim/adasum.py +1 -1
- mindspore/nn/optim/sgd.py +2 -3
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/wrap/cell_wrapper.py +1 -25
- mindspore/nn/wrap/loss_scale.py +1 -24
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/math_ops.py +8 -8
- mindspore/ops/__init__.py +1 -1
- mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -75
- mindspore/ops/_vmap/vmap_array_ops.py +0 -27
- mindspore/ops/_vmap/vmap_math_ops.py +1 -29
- mindspore/ops/_vmap/vmap_nn_ops.py +18 -19
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +8 -34
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +9 -2
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -26
- mindspore/ops/auto_generate/gen_extend_func.py +27 -603
- mindspore/ops/auto_generate/gen_ops_def.py +203 -993
- mindspore/ops/auto_generate/gen_ops_prim.py +402 -1946
- mindspore/ops/auto_generate/pyboost_inner_prim.py +20 -90
- mindspore/ops/composite/base.py +6 -3
- mindspore/ops/composite/math_ops.py +1 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +17 -24
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/extend/__init__.py +3 -2
- mindspore/ops/extend/array_func.py +51 -10
- mindspore/ops/extend/nn_func.py +78 -2
- mindspore/ops/function/__init__.py +13 -8
- mindspore/ops/function/array_func.py +179 -455
- mindspore/ops/function/clip_func.py +1 -1
- mindspore/ops/function/grad/grad_func.py +3 -3
- mindspore/ops/function/math_func.py +103 -117
- mindspore/ops/function/nn_func.py +163 -275
- mindspore/ops/function/other_func.py +2 -2
- mindspore/ops/function/random_func.py +69 -202
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/functional.py +327 -332
- mindspore/ops/operations/__init__.py +3 -13
- mindspore/ops/operations/_grad_ops.py +27 -3
- mindspore/ops/operations/_inner_ops.py +356 -53
- mindspore/ops/operations/_rl_inner_ops.py +2 -2
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +65 -82
- mindspore/ops/operations/comm_ops.py +93 -784
- mindspore/ops/operations/custom_ops.py +28 -51
- mindspore/ops/operations/debug_ops.py +4 -4
- mindspore/ops/operations/inner_ops.py +2 -2
- mindspore/ops/operations/manually_defined/ops_def.py +4 -304
- mindspore/ops/operations/math_ops.py +50 -3
- mindspore/ops/operations/nn_ops.py +247 -14
- mindspore/ops/operations/other_ops.py +3 -3
- mindspore/ops/operations/random_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +1 -1
- mindspore/ops/primitive.py +8 -9
- mindspore/ops/silent_check.py +5 -5
- mindspore/ops_generate/arg_dtype_cast.py +9 -2
- mindspore/ops_generate/arg_handler.py +0 -26
- mindspore/ops_generate/gen_aclnn_implement.py +4 -1
- mindspore/ops_generate/gen_ops.py +4 -26
- mindspore/ops_generate/gen_pyboost_func.py +12 -41
- mindspore/ops_generate/gen_utils.py +0 -21
- mindspore/ops_generate/pyboost_utils.py +2 -7
- mindspore/ops_generate/template.py +0 -1
- mindspore/parallel/_auto_parallel_context.py +1 -21
- mindspore/parallel/_tensor.py +5 -0
- mindspore/parallel/_transformer/transformer.py +1 -1
- mindspore/parallel/_utils.py +1 -15
- mindspore/parallel/algo_parameter_config.py +3 -1
- mindspore/parallel/checkpoint_transform.py +9 -12
- mindspore/parallel/cluster/process_entity/_api.py +29 -28
- mindspore/parallel/cluster/process_entity/_utils.py +3 -13
- mindspore/parallel/cluster/run.py +16 -13
- mindspore/parallel/parameter_broadcast.py +2 -2
- mindspore/parallel/shard.py +17 -31
- mindspore/profiler/__init__.py +2 -3
- mindspore/profiler/common/util.py +2 -107
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +21 -8
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -82
- mindspore/profiler/parser/ascend_analysis/function_event.py +28 -43
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +27 -49
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +10 -15
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +20 -25
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +5 -5
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +1 -10
- mindspore/profiler/parser/ascend_hccl_generator.py +1 -4
- mindspore/profiler/parser/ascend_msprof_exporter.py +22 -43
- mindspore/profiler/parser/ascend_timeline_generator.py +5 -7
- mindspore/profiler/parser/minddata_parser.py +3 -72
- mindspore/profiler/profiling.py +59 -176
- mindspore/rewrite/api/node.py +1 -1
- mindspore/rewrite/common/namespace.py +5 -5
- mindspore/rewrite/parsers/assign_parser.py +0 -2
- mindspore/rewrite/parsers/class_def_parser.py +4 -8
- mindspore/run_check/_check_version.py +1 -1
- mindspore/scipy/fft.py +3 -1
- mindspore/scipy/linalg.py +3 -2
- mindspore/scipy/ops.py +3 -5
- mindspore/scipy/optimize/__init__.py +2 -2
- mindspore/train/__init__.py +4 -4
- mindspore/train/anf_ir_pb2.py +2 -8
- mindspore/train/callback/__init__.py +2 -5
- mindspore/train/callback/_backup_and_restore.py +2 -2
- mindspore/train/callback/_checkpoint.py +16 -104
- mindspore/train/callback/_landscape.py +1 -1
- mindspore/train/callback/_time_monitor.py +1 -1
- mindspore/train/data_sink.py +4 -5
- mindspore/train/dataset_helper.py +20 -45
- mindspore/train/model.py +38 -266
- mindspore/train/serialization.py +105 -256
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/version.py +1 -1
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +303 -420
- mindspore/_extends/pijit/__init__.py +0 -23
- mindspore/_extends/pijit/pijit_func_white_list.py +0 -343
- mindspore/common/file_system.py +0 -48
- mindspore/common/generator.py +0 -260
- mindspore/common/no_inline.py +0 -54
- mindspore/common/np_dtype.py +0 -25
- mindspore/communication/comm_func.py +0 -1140
- mindspore/hal/memory.py +0 -326
- mindspore/lib/libavcodec.so.59 +0 -0
- mindspore/lib/libavdevice.so.59 +0 -0
- mindspore/lib/libavfilter.so.8 +0 -0
- mindspore/lib/libavformat.so.59 +0 -0
- mindspore/lib/libavutil.so.57 +0 -0
- mindspore/lib/libmindspore_np_dtype.so +0 -0
- mindspore/lib/libswresample.so.4 +0 -0
- mindspore/lib/libswscale.so.6 +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.cpp +0 -326
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.py +0 -180
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
- mindspore/lib/plugin/ascend/custom_compiler/OWNERS +0 -12
- mindspore/lib/plugin/ascend/custom_compiler/setup.py +0 -255
- mindspore/lib/plugin/ascend/custom_compiler/start.sh +0 -26
- mindspore/lib/plugin/ascend/custom_compiler/template.json +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/base_type.h +0 -133
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -32
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/tiling_info.h +0 -60
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -37
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_op.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_backoff_base.h +0 -62
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -33
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_ops.h +0 -88
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_pa_op.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -95
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -84
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -61
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -224
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_impl.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_tiling.h +0 -187
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul.h +0 -245
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_interface.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_utils.h +0 -111
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/tiling_data.h +0 -54
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -364
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -114
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -98
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/mint/linalg/__init__.py +0 -22
- mindspore/nn/layer/embedding_service.py +0 -531
- mindspore/nn/layer/embedding_service_layer.py +0 -393
- mindspore/ops/function/reshard_func.py +0 -102
- mindspore/ops/operations/_infer_ops.py +0 -19
- mindspore/ops/operations/reshard_ops.py +0 -53
- mindspore/profiler/common/process_pool.py +0 -41
- mindspore/profiler/common/singleton.py +0 -28
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/train/callback/_cluster_monitor.py +0 -201
- mindspore/train/callback/_flops_collector.py +0 -238
- mindspore/train/callback/_mindio_ttp.py +0 -443
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
mindspore/ops/operations/nn_ops.py
CHANGED

@@ -30,15 +30,14 @@ from mindspore.ops.primitive import Primitive
 from mindspore.ops.primitive import PrimitiveWithInfer
 from mindspore.ops.primitive import PrimitiveWithCheck
 from mindspore.ops.primitive import prim_attr_register
-from ..auto_generate import (CeLU, Flatten, LogSoftmax, ReLU, ReLU6, Dense,
+from ..auto_generate import (CeLU, Flatten, LogSoftmax, ReLU, ReLU6, Dense,
                              Elu, Sigmoid, Softmax, SoftplusExt, HSwish, HSigmoid, AvgPool, BiasAdd,
-                             NLLLoss, OneHot, GeLU, FastGeLU, PReLU,
+                             NLLLoss, OneHot, GeLU, FastGeLU, PReLU,
                              GridSampler3D, GridSampler2D, LayerNorm, LayerNormExt, HShrink, AdamWeightDecay, Dropout,
                              ApplyRotaryPosEmb, PagedAttention, PagedAttentionMask, ReshapeAndCache,
                              FlashAttentionScore, Embedding, UpsampleNearest1D, UpsampleNearest2D,
                              UpsampleNearest3D, UpsampleTrilinear3D,
-                             UpsampleBilinear2D, UpsampleLinear1D,
-                             BinaryCrossEntropy, BCEWithLogitsLoss)
+                             UpsampleBilinear2D, UpsampleLinear1D)
 from .manually_defined import BatchNorm
 
 
@@ -569,6 +568,36 @@ class SeLU(Primitive):
         self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
 
 
+class Tanh(Primitive):
+    r"""
+    Computes hyperbolic tangent of input element-wise.
+
+    Refer to :func:`mindspore.ops.tanh` for more details.
+
+    Inputs:
+        - **input_x** (Tensor) - Input Tensor of any dimension.
+
+    Outputs:
+        Tensor, with the same type and shape as the `input_x`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
+        >>> tanh = ops.Tanh()
+        >>> output = tanh(input_x)
+        >>> print(output)
+        [0.7615941 0.9640276 0.9950547 0.9993293 0.9999092]
+    """
+
+    @prim_attr_register
+    def __init__(self):
+        """Initialize Tanh"""
+        self.init_prim_io_names(inputs=['x'], outputs=['y'])
 
 
 class FusedBatchNorm(Primitive):
@@ -3207,6 +3236,105 @@ class SigmoidCrossEntropyWithLogits(Primitive):
         self.init_prim_io_names(inputs=['predict', 'target'], outputs=['loss'])
 
 
+class BCEWithLogitsLoss(PrimitiveWithInfer):
+    r"""
+    Adds sigmoid activation function to input `logits`, and uses the given logits to compute binary cross entropy
+    between the logits and the label.
+
+    Sets input logits as :math:`X`, input label as :math:`Y`, input weight as :math:`W`, output as :math:`L`. Then,
+
+    .. math::
+
+        \begin{array}{ll} \\
+            p_{ij} = sigmoid(X_{ij}) = \frac{1}{1 + e^{-X_{ij}}} \\
+            L_{ij} = -[Y_{ij}log(p_{ij}) + (1 - Y_{ij})log(1 - p_{ij})]
+        \end{array}
+
+    :math:`i` indicates the :math:`i^{th}` sample, :math:`j` indicates the category. Then,
+
+    .. math::
+        \ell(x, y) = \begin{cases}
+        L, & \text{if reduction} = \text{'none';}\\
+        \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\
+        \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.}
+        \end{cases}
+
+    :math:`\ell` indicates the method of calculating the loss. There are three methods:
+    the first method is to provide the loss value directly,
+    the second method is to calculate the average value of all losses,
+    and the third method is to calculate the sum of all losses.
+
+    This operator will multiply the output by the corresponding weight.
+    The tensor `weight` assigns different weights to each piece of data in the batch,
+    and the tensor `pos_weight` adds corresponding weights to the positive examples of each category.
+
+    In addition, it can trade off recall and precision by adding weights to positive examples.
+    In the case of multi-label classification the loss can be described as:
+
+    .. math::
+        \begin{array}{ll} \\
+            p_{ij,c} = sigmoid(X_{ij,c}) = \frac{1}{1 + e^{-X_{ij,c}}} \\
+            L_{ij,c} = -[P_{c}Y_{ij,c} * log(p_{ij,c}) + (1 - Y_{ij,c})log(1 - p_{ij,c})]
+        \end{array}
+
+    where c is the class number (c>1 for multi-label binary classification, c=1 for single-label binary classification),
+    n is the number of the sample in the batch and :math:`P_c` is the weight of the positive answer for the class c.
+    :math:`P_c>1` increases the recall, :math:`P_c<1` increases the precision.
+
+    Args:
+        reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
+            ``'sum'`` . Default: ``'mean'`` .
+
+            - ``'none'``: no reduction will be applied.
+            - ``'mean'``: compute and return the weighted mean of elements in the output.
+            - ``'sum'``: the output elements will be summed.
+
+    Inputs:
+        - **logits** (Tensor) - Input logits. Data type must be float16 or float32.
+          Tensor of shape :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
+        - **label** (Tensor) - Ground truth label, has the same shape as `logits`.
+          Data type must be float16 or float32.
+        - **weight** (Tensor) - A rescaling weight applied to the loss of each batch element. It can be
+          broadcast to a tensor with shape of `logits`. Data type must be float16 or float32.
+        - **pos_weight** (Tensor) - A weight of positive examples. Must be a vector with length equal to the
+          number of classes. It can be broadcast to a tensor with shape of `logits`.
+          Data type must be float16 or float32.
+
+    Outputs:
+        Tensor or Scalar, if `reduction` is ``'none'``, it's a tensor with the same shape and type as input `logits`.
+        Otherwise, the output is a scalar.
+
+    Raises:
+        TypeError: If any input is not Tensor.
+        TypeError: If data type of any input is neither float16 nor float32.
+        TypeError: If data type of `reduction` is not string.
+        ValueError: If `weight` or `pos_weight` can not be broadcast to a tensor with shape of `logits`.
+        ValueError: If `reduction` is not one of ``'none'``, ``'mean'`` or ``'sum'``.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> logits = Tensor(np.array([[-0.8, 1.2, 0.7], [-0.1, -0.4, 0.7]]), mindspore.float32)
+        >>> label = Tensor(np.array([[0.3, 0.8, 1.2], [-0.6, 0.1, 2.2]]), mindspore.float32)
+        >>> weight = Tensor(np.array([1.0, 1.0, 1.0]), mindspore.float32)
+        >>> pos_weight = Tensor(np.array([1.0, 1.0, 1.0]), mindspore.float32)
+        >>> loss = ops.BCEWithLogitsLoss()
+        >>> output = loss(logits, label, weight, pos_weight)
+        >>> print(output)
+        0.3463612
+    """
+
+    @prim_attr_register
+    def __init__(self, reduction='mean'):
+        """Initialize BCEWithLogitsLoss"""
+        super().__init__("BCEWithLogitsLoss")
+        self.reduction = validator.check_string(reduction, ['none', 'sum', 'mean'], 'reduction', self.name)
+
+
 class Pad(Primitive):
     r"""
     Pads the input tensor according to the paddings.
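As a sanity check on the formula above, the docstring's sample output can be reproduced with plain NumPy. This is an illustrative reference for the reduction='mean' case only, not the operator's actual kernel:

import numpy as np

# Sample values from the BCEWithLogitsLoss docstring example above.
logits = np.array([[-0.8, 1.2, 0.7], [-0.1, -0.4, 0.7]])
label = np.array([[0.3, 0.8, 1.2], [-0.6, 0.1, 2.2]])
weight = np.ones(3)      # broadcast over the batch dimension
pos_weight = np.ones(3)  # per-class weight P_c for positive examples

p = 1.0 / (1.0 + np.exp(-logits))  # sigmoid(X)
loss = -weight * (pos_weight * label * np.log(p) + (1 - label) * np.log(1 - p))
print(loss.mean())  # ~0.3463612, matching the docstring output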
@@ -3520,9 +3648,8 @@ class ROIAlign(Primitive):
 
 
     Inputs:
-        - **features** (Tensor) - The input features, whose shape must be :math:`(N, C, H, W)
-
-        - **rois** (Tensor) - The shape is :math:`(rois\_n, 5)`, with data type of float16 or float32.
+        - **features** (Tensor) - The input features, whose shape must be :math:`(N, C, H, W)`.
+        - **rois** (Tensor) - The shape is :math:`(rois\_n, 5)`. With data type of float16 or float32.
           `rois_n` represents the number of RoI. The size of the second dimension must be `5` and the `5` colunms
           are :math:`(image\_index, top\_left\_x, top\_left\_y, bottom\_right\_x, bottom\_right\_y)`.
           `image_index` represents the index of image. `top_left_x` and `top_left_y` represent the `x, y`
@@ -4313,6 +4440,86 @@ class KLDivLoss(Primitive):
         self.reduction = validator.check_string(reduction, support_mode, 'reduction', self.name)
 
 
+class BinaryCrossEntropy(Primitive):
+    r"""
+    Computes the binary cross entropy between the logits and the labels.
+
+    Sets logits as :math:`x`, labels as :math:`y`, output as :math:`\ell(x, y)`.
+    Let,
+
+    .. math::
+        L = \{l_1,\dots,l_N\}^\top, \quad
+        l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right]
+
+    In which, :math:`L` indicates the loss of all batch_sizes, :math:`l` indicates the loss of one batch_size,
+    and n indicates one batch_size in the 1-N range, :math:`w_n` indicates the
+    weight of :math:`n`-th batch of binary cross entropy. Then,
+
+    .. math::
+        \ell(x, y) = \begin{cases}
+        L, & \text{if reduction} = \text{'none';}\\
+        \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\
+        \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.}
+        \end{cases}
+
+    .. warning::
+        - The value of :math:`x` must range from 0 to 1.
+
+    Args:
+        reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
+            ``'sum'`` . Default: ``'mean'`` .
+
+            - ``'none'``: no reduction will be applied.
+            - ``'mean'``: compute and return the weighted mean of elements in the output.
+            - ``'sum'``: the output elements will be summed.
+
+    Inputs:
+        - **logits** (Tensor) - The predictive value whose data type must be float16 or float32,
+          The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
+        - **labels** (Tensor) - The target value which has the same shape and data type as `logits`.
+        - **weight** (Tensor, optional) - A rescaling weight applied to the loss of each batch element.
+          And it must have the same shape and data type as `logits`. Default: ``None`` .
+
+    Outputs:
+        Tensor or Scalar. Returns Tensor that has the same dtype and shape as `logits` if `reduction` is 'none'.
+        Otherwise, returns a scalar Tensor.
+
+    Raises:
+        TypeError: If dtype of `logits`, `labels` or `weight` (if given) is neither float16 nor float32.
+        ValueError: If `reduction` is not one of ``'none'``, ``'mean'`` or ``'sum'``.
+        ValueError: If shape of `labels` is not the same as `logits` or `weight` (if given).
+        TypeError: If `logits`, `labels` or `weight` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, nn, ops
+        >>> class Net(nn.Cell):
+        ...     def __init__(self):
+        ...         super(Net, self).__init__()
+        ...         self.binary_cross_entropy = ops.BinaryCrossEntropy()
+        ...     def construct(self, logits, labels, weight):
+        ...         result = self.binary_cross_entropy(logits, labels, weight)
+        ...         return result
+        ...
+        >>> net = Net()
+        >>> logits = Tensor(np.array([0.2, 0.7, 0.1]), mindspore.float32)
+        >>> labels = Tensor(np.array([0., 1., 0.]), mindspore.float32)
+        >>> weight = Tensor(np.array([1, 2, 2]), mindspore.float32)
+        >>> output = net(logits, labels, weight)
+        >>> print(output)
+        0.38240486
+    """
+
+    @prim_attr_register
+    def __init__(self, reduction='mean'):
+        """Initialize BinaryCrossEntropy."""
+        self.reduction = validator.check_string(reduction, ['none', 'mean', 'sum'], 'reduction', self.name)
+
+
 class ApplyAdaMax(Primitive):
     r"""
     Updates relevant entries according to the adamax scheme.
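The docstring value above can likewise be reproduced directly from the weighted formula; an illustrative NumPy reference, not the kernel itself:

import numpy as np

# Values from the BinaryCrossEntropy docstring example above.
x = np.array([0.2, 0.7, 0.1])   # logits, already in (0, 1)
y = np.array([0.0, 1.0, 0.0])   # labels
w = np.array([1.0, 2.0, 2.0])   # per-element rescaling weight w_n

l = -w * (y * np.log(x) + (1 - y) * np.log(1 - x))
print(l.mean())  # ~0.38240486, matching the docstring output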
@@ -8163,7 +8370,7 @@ class ApplyAdamWithAmsgradV2(Primitive):
     ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>>
+        >>> import mindspore.ops as ops
         >>> import mindspore.nn as nn
         >>> from mindspore import Tensor, Parameter
         >>> import numpy as np
@@ -9658,12 +9865,38 @@ class IncreFlashAttention(Primitive):
                                 outputs=["attention_out"])
 
 
-class
+class RmsNorm(Primitive):
     r"""
-
+    The RmsNorm operator is a normalization operation, and its formula is:
+
+    .. math::
+        y = \frac{x_i}{\sqrt{\frac{1}{n}\sum_{i=1}^{n}x_i^2 + \varepsilon}}\gamma_i
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        epsilon (float): prevent division by 0, default value is `1e-6`
+
+    Inputs:
+        - **input_x** (Tensor) - Input data of RmsNorm, support data type: float16, float32, bfloat16.
+        - **gamma** (Tensor) - Support data type: float16, float32, bfloat16.
+
+    Outputs:
+        - **y** (Tensor) - Has the same type and shape with `input_x`.
+        - **rstd** (Tensor) - Has the same type with `input_x`, used by gradient calculation.
+
+    Raises:
+        TypeError: If data type of `input_x` is not one of the following: float16, float32, bfloat16.
+        TypeError: If data type of `gamma` is not one of the following: float16, float32, bfloat16.
+        TypeError: If data type of `input_x` is not the same with the data type of `gamma`.
+
+    Supported Platforms:
+        ``Ascend``
     """
+
     @prim_attr_register
-    def __init__(self):
-        """Initialize"""
-
-
+    def __init__(self, epsilon=1e-6):
+        """Initialize Dense."""
+        validator.check_value_type("epsilon", epsilon, [float], self.name)
+        self.init_prim_io_names(inputs=['x', 'gamma'], outputs=["y", "rstd"])
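The RmsNorm formula above normalizes each element by the root mean square of its row. A minimal NumPy reference of that math, as an illustrative sketch with a hypothetical function name, not the Ascend kernel; it also returns the reciprocal standard deviation `rstd` that the operator exposes for gradients:

import numpy as np

def rms_norm_reference(x, gamma, epsilon=1e-6):
    # rstd = 1 / sqrt(mean(x^2) + eps), reduced over the normalized (last) axis
    rstd = 1.0 / np.sqrt(np.mean(x * x, axis=-1, keepdims=True) + epsilon)
    return x * rstd * gamma, rstd

x = np.array([[1.0, 2.0, 3.0]], dtype=np.float32)
gamma = np.ones(3, dtype=np.float32)
y, rstd = rms_norm_reference(x, gamma)
print(y)  # each element of x divided by the row's root mean square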
mindspore/ops/operations/other_ops.py
CHANGED

@@ -430,7 +430,7 @@ class Partial(Primitive):
 
     Examples:
         >>> from mindspore import Tensor
-        >>>
+        >>> import mindspore.ops as ops
         >>> def show_input(x, y, z):
         ...     return x, y, z
         >>> partial = ops.Partial()
@@ -487,7 +487,7 @@ class Depend(Primitive):
         >>> import numpy as np
         >>> import mindspore
         >>> import mindspore.nn as nn
-        >>>
+        >>> import mindspore.ops as ops
         >>> from mindspore import Tensor
         >>> class Net(nn.Cell):
         ...     def __init__(self):
@@ -561,7 +561,7 @@ class StopGradient(Primitive):
     ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>>
+        >>> import mindspore.ops as ops
         >>> from mindspore import Tensor
         >>> from mindspore import dtype as mstype
         >>> def net(x, y):
mindspore/ops/operations/random_ops.py
CHANGED

@@ -813,7 +813,7 @@ class RandomChoiceWithMask(Primitive):
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
         >>> rnd_choice_mask = ops.RandomChoiceWithMask()
-        >>> input_x = Tensor(np.ones(shape=[240000, 4]).astype(np.bool_))
+        >>> input_x = Tensor(np.ones(shape=[240000, 4]).astype(np.bool))
         >>> output_y, output_mask = rnd_choice_mask(input_x)
         >>> result = output_y.shape
         >>> print(result)
mindspore/ops/operations/sparse_ops.py
CHANGED

@@ -584,7 +584,7 @@ class SparseTensorDenseAdd(Primitive):
 
     Examples:
         >>> from mindspore import Tensor
-        >>>
+        >>> import mindspore.ops as ops
         >>> from mindspore import dtype as mstype
         >>> x1_indices = Tensor([[0, 0], [0, 1]], dtype=mstype.int64)
         >>> x1_values = Tensor([1, 1], dtype=mstype.float32)
mindspore/ops/primitive.py
CHANGED

@@ -21,8 +21,7 @@ import numpy as np
 from mindspore.common.api import _wrap_func
 from mindspore.log import _LogActionOnce
 from mindspore import context, log as logger
-from mindspore.parallel._utils import _is_in_auto_parallel_mode, _is_in_data_parallel_mode, \
-    _is_in_hybrid_parallel_mode, SUPPORTED_TUPLE_IN_TUPLE_STRATEGY
+from mindspore.parallel._utils import _is_in_auto_parallel_mode, _is_in_data_parallel_mode, _is_in_hybrid_parallel_mode
 from mindspore.parallel._ps_context import _is_ps_mode, _is_role_sched
 from mindspore.parallel.shard import Layout
 from mindspore.common.api import _pynative_executor
@@ -74,7 +73,7 @@ class Primitive(Primitive_):
         value (Any): Attribute value.
 
     Examples:
-        >>>
+        >>> import mindspore.ops as ops
         >>> a = ops.Add()
         >>> a = a.add_prim_attr("attr",1)
         >>> out = a.attrs["attr"]
@@ -112,7 +111,7 @@ class Primitive(Primitive_):
         device_target (str): The target device to run, support "Ascend", "GPU", and "CPU".
 
     Examples:
-        >>>
+        >>> import mindspore.ops as ops
         >>> a = ops.Add()
         >>> a = a.set_device("GPU")
         >>> print(a.primitive_target)
@@ -168,7 +167,7 @@ class Primitive(Primitive_):
                 raise TypeError(f'The element of strategy must be tuple/Layout type, but got:{type(in_ele)}')
             if isinstance(in_ele, tuple):
                 for in_value in in_ele:
-                    if not isinstance(in_value, int)
+                    if not isinstance(in_value, int):
                         raise TypeError(f'The {log_info}: {strategy} of {self.name} is not valid,'
                                         f' the value of strategy must be int type, but got:{type(in_value)}')
             is_layout.append(False)
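For context on what this loop validates: a shard strategy is a tuple holding one tuple of ints per operator input, where each int is the partition number of the corresponding dimension. A typical call, assuming an 8-device setup (illustrative values, not taken from this diff):

import mindspore.ops as ops

# Split the first MatMul input 2x4 and the second 4x1 across 8 devices;
# every element the loop above inspects must be an int.
matmul = ops.MatMul()
matmul.shard(in_strategy=((2, 4), (4, 1)))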
@@ -222,7 +221,7 @@ class Primitive(Primitive_):
     Args:
         name (str): Attribute Name.
     Examples:
-        >>>
+        >>> import mindspore.ops as ops
         >>> a = ops.Add()
         >>> a = a.add_prim_attr("attr",1)
         >>> a = a.del_prim_attr("attr")
@@ -331,7 +330,7 @@ class Primitive(Primitive_):
         instance_name (str): Instance name of primitive operator set by user.
 
     Examples:
-        >>>
+        >>> import mindspore.ops as ops
         >>> a = ops.Add()
         >>> a = a.set_prim_instance_name("add")
         >>> print(a.instance_name)
@@ -416,7 +415,7 @@ class Primitive(Primitive_):
         inputs (list[str]): list of inputs names.
         outputs (list[str]): list of outputs names.
     Examples:
-        >>>
+        >>> import mindspore.ops as ops
         >>> a = ops.Add()
         >>> a.init_prim_io_names(["x","y"],["sum"])
         >>> print(a.input_names)
@@ -504,7 +503,7 @@ class Primitive(Primitive_):
 
     Examples:
         >>> from mindspore import context
-        >>>
+        >>> import mindspore.ops as ops
         >>> context.set_context(mode=context.GRAPH_MODE)
         >>> matmul = ops.MatMul()
         >>> matmul.place('MS_WORKER', 0)
mindspore/ops/silent_check.py
CHANGED

@@ -31,10 +31,10 @@ NPU_ASD_ENABLE = 'NPU_ASD_ENABLE'
 
 class ASDBase:
     """
-    ASDBase is the base class of operator with
+    ASDBase is the base class of operator with accuracy-sensitive detection feature in python.
 
     Args:
-        cls (Primitive): Original operator requiring
+        cls (Primitive): Original operator requiring accuracy-sensitive detection feature.
         args (tuple): A variable parameter tuple to the original operator.
         kwargs (dict): A variable parameter dictionary passed the original operator.
 
@@ -90,11 +90,11 @@ class ASDBase:
 
     def generate_params(self):
         """
-        Generate support params for
+        Generate support params for accuracy-sensitive detection.
 
         Returns:
             tuple consisting of four elements.
-            The derived class initializes the parameters required for
+            The derived class initializes the parameters required for accuracy-sensitive detection by calling
            this function.
 
         Examples:
@@ -103,7 +103,7 @@ class ASDBase:
         >>> class LayerNormASD(ASDBase):
         ...     def __init__(self, *args, **kwargs):
         ...         super().__init__(OriginLayerNorm, *args, **kwargs)
-        ...         # init parameters for
+        ...         # init parameters for accuracy-sensitive detection by calling the base class function
         ...         self.pre_val, self.min_val, self.max_val, self.cnt = self.generate_params()
         """
         pre_val = Parameter(Tensor(0, mstype.float32),
mindspore/ops_generate/arg_dtype_cast.py
CHANGED

@@ -20,18 +20,25 @@ import mindspore as ms
 from mindspore import ops
 from mindspore.common.tensor import Tensor
 from mindspore.ops.operations._sequence_ops import TensorToScalar, TensorToTuple
-from mindspore.ops_generate.gen_ops_inner_prim import TupleToList, ListToTuple
+from mindspore.ops_generate.gen_ops_inner_prim import TupleToList
 from mindspore._c_expression import OpDtype
 
 tensor_to_tuple_ = TensorToTuple()
 tuple_to_list = TupleToList()
-list_to_tuple = ListToTuple()
 
 
 def int_to_float(data):
     return float(data)
 
 
+def list_to_tuple(data):
+    # tuple() currently does not support Any from JIT Fallback.
+    res = ()
+    for element in data:
+        res += (element,)
+    return res
+
+
 def scalar_to_tuple(data):
     return (data,)
 
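The rc2 side builds the tuple by repeated concatenation because, per its comment, tuple() could not consume an Any value produced by JIT Fallback at this point; outside the compiler the two spellings agree, as this plain-Python check of the workaround's semantics shows:

# Plain Python, no JIT involved; mirrors the function added in the hunk above.
def list_to_tuple(data):
    res = ()
    for element in data:
        res += (element,)
    return res

assert list_to_tuple([1, 2, 3]) == tuple([1, 2, 3]) == (1, 2, 3)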
mindspore/ops_generate/arg_handler.py
CHANGED

@@ -18,8 +18,6 @@ from mindspore.ops_generate.gen_ops_inner_prim import DtypeToEnum, StringToEnum
 # Enum Class:
 from mindspore._c_expression import FormatEnum as Format
 from mindspore._c_expression import ReductionEnum as Reduction
-from mindspore.common import Tensor
-from mindspore.common import dtype as mstype
 
 
 def arg_invalid_info(op_name, arg_name, arg_val):
@@ -29,17 +27,6 @@ def arg_invalid_info(op_name, arg_name, arg_val):
     return f"For '{op_name}', the value of '{arg_name}' is invalid: '{arg_val}'."
 
 
-def to_pair(op_name, arg_name, arg_val):
-    """
-    convert arg_val: int/tuple[int*2] -> tuple[int*2].
-    """
-    if isinstance(arg_val, (int, float)):
-        return (arg_val, arg_val)
-    if isinstance(arg_val, (list, tuple)):
-        return arg_val
-    raise ValueError(arg_invalid_info(op_name, arg_name, arg_val))
-
-
 def to_kernel_size(op_name, arg_name, kernel_size):
     """
     convert kernel_size: int/tuple[int*4] -> tuple[int*2].
@@ -176,19 +163,6 @@ def to_3d_paddings(op_name, arg_name, pad):
         return pad
     raise ValueError(arg_invalid_info(op_name, arg_name, pad))
 
-
-def generator_handler(op_name, arg_name, inputs):
-    """
-    convert constant value in tuple to tensor
-    """
-    new_inputs = []
-    for input_ in inputs:
-        if isinstance(input_, int):
-            new_inputs.append(Tensor(input_, mstype.int64))
-        else:
-            new_inputs.append(input_)
-    return tuple(new_inputs)
-
 dtype_to_type_id = DtypeToEnum()
 
 # string to enum
mindspore/ops_generate/gen_aclnn_implement.py
CHANGED

@@ -172,7 +172,7 @@ def generate(op_name, class_name, op_yaml, h_and_cc, need_update_shape):
     gen_cc(op_name, class_name, op_yaml, h_and_cc, need_update_shape)
 
 
-def gen_aclnn_kernel(op_name, yaml_str, need_update_shape=False, auto=False):
+def gen_aclnn_kernel(op_name, need_update_shape=False, auto=False):
     """gen_aclnn_kernel function"""
     if check_op_registed(op_name) and not auto:
         logging.warning("Kernel {%s} is already registered.", op_name)
@@ -180,7 +180,10 @@ def gen_aclnn_kernel(op_name, yaml_str, need_update_shape=False, auto=False):
     current_path = os.path.dirname(os.path.abspath(__file__))
     work_path = os.path.join(current_path, '../../../../')
 
+    # get ops yaml
+    ops_yaml_path = os.path.join(work_path, 'mindspore/python/mindspore/ops_generate/ops.yaml')
     aclnn_path = 'mindspore/ccsrc/plugin/device/ascend/kernel/opapi/aclnn/'
+    yaml_str = gen_utils.safe_load_yaml(ops_yaml_path)
     # merge inner ops
     op_yaml = yaml_str.get(op_name)
     class_name = ''.join(word.capitalize() for word in op_name.split('_'))
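Per this hunk, the rc2 side resolves and parses the merged ops.yaml inside gen_aclnn_kernel via gen_utils.safe_load_yaml instead of receiving the parsed yaml_str from the caller. A plausible minimal sketch of such a helper, assuming it simply wraps yaml.safe_load (the real gen_utils implementation is not shown in this diff):

import yaml

def safe_load_yaml(yaml_path):
    # Assumed behavior: parse the merged ops.yaml into a dict keyed by op name.
    with open(yaml_path, 'r') as f:
        return yaml.safe_load(f)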
mindspore/ops_generate/gen_ops.py
CHANGED

@@ -22,7 +22,7 @@ import pathlib
 import logging
 import gen_utils
 from gen_utils import (py_licence_str, cc_license_str, check_change_and_replace_file, merge_files,
-                       merge_files_append, safe_load_yaml, convert_dtype_str, write_file)
+                       safe_load_yaml, convert_dtype_str, write_file)
 from pyboost_utils import get_pyboost_name, is_pyboost_enable, AclnnUtils, get_dtypes
 import template
 from template import CppTemplate
@@ -707,14 +707,8 @@ namespace mindspore::ops {
         dtype = get_dtype(arg_info)
         if dtype == "str":
             dtype = "std::string"
-        if dtype
-            dtype = "std::vector<std::string>"
-        if dtype in ("tuple[int]", "list[int]"):
+        if dtype == "tuple[int]":
             dtype = "std::vector<int64_t>"
-        if dtype in ("tuple[float]", "list[float]"):
-            dtype = "std::vector<float>"
-        if dtype in ("tuple[bool]", "list[bool]"):
-            dtype = "std::vector<bool>"
         if dtype == "int":
             dtype = "int64_t"
         lite_ops_h_gen += f"""  void set_{arg_name}(const {dtype} &{arg_name});\n"""
@@ -762,14 +756,6 @@ std::unordered_map<std::string, OpDefPtr> gOpDefTable = {{"""
         dispatch = "true"
     enable_dispatch_str = f"""{dispatch}"""
 
-    is_view = operator_data.get('view')
-    if is_view:
-        is_view = "true"
-    else:
-        is_view = "false"
-    is_view_str = f"""{is_view}"""
-
-
     gen_include += f"""\n#include "ops/ops_func_impl/{operator_name}.h\""""
     cc_index_str = ''
     gen_opdef_map += f"""\n    {{"{class_name}", &g{class_name}}},"""
@@ -811,8 +797,7 @@ std::unordered_map<std::string, OpDefPtr> gOpDefTable = {{"""
 
     op_def_cc = template.OP_PROTO_TEMPLATE.replace(class_name=class_name, input_args=input_args_str,
                                                    return_args=return_args_str, signatures=signature_code,
-                                                   indexes=cc_index_str, enable_dispatch=enable_dispatch_str,
-                                                   is_view=is_view_str)
+                                                   indexes=cc_index_str, enable_dispatch=enable_dispatch_str)
     gen_cc_code += op_def_cc
     gen_opdef_map += f"""\n}};"""
     gen_cc_code += gen_opdef_map
@@ -969,11 +954,6 @@ def generate_create_instance_helper_file(work_path, yaml_str):
 
 def generate_aclnn_reg_code(yaml_data):
     """generate aclnn register code"""
-    current_path = os.path.dirname(os.path.abspath(__file__))
-    work_path = os.path.join(current_path, '../../../../')
-    ops_yaml_path = os.path.join(work_path, 'mindspore/python/mindspore/ops_generate/ops.yaml')
-    yaml_str = gen_utils.safe_load_yaml(ops_yaml_path)
-
     reg_code = f"""
 #include "plugin/device/ascend/kernel/opapi/aclnn_kernel_mod.h"
@@ -989,7 +969,7 @@ namespace kernel {{
             continue
         _, _, none_tensor_exist = get_dtypes(operator_data)
         if none_tensor_exist:
-            gen_aclnn_kernel(operator_name, yaml_str, auto=True)
+            gen_aclnn_kernel(operator_name, auto=True)
             continue
         class_name = ''.join(word.capitalize() for word in operator_name.split('_'))
         op_class = operator_data.get("class")
@@ -1046,10 +1026,8 @@ def main():
     doc_yaml_path = os.path.join(work_path, 'mindspore/python/mindspore/ops_generate/ops_doc.yaml')
 
     ops_yaml_dir_path = os.path.join(work_path, 'mindspore/core/ops/ops_def/')
-    infer_ops_yaml_dir_path = os.path.join(work_path, 'mindspore/core/ops/ops_def/infer/')
     doc_yaml_dir_path = os.path.join(work_path, 'mindspore/core/ops/ops_def/doc/')
     merge_files(ops_yaml_dir_path, ops_yaml_path, '*op.yaml')
-    merge_files_append(infer_ops_yaml_dir_path, ops_yaml_path, '*op.yaml')
     merge_files(doc_yaml_dir_path, doc_yaml_path, '*doc.yaml')
 
     # make auto_generate dir