mindspore 2.3.0__cp39-none-any.whl → 2.3.0rc2__cp39-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +0 -1512
- mindspore/__init__.py +1 -2
- mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +25 -5
- mindspore/_extends/graph_kernel/model/graph_parallel.py +1 -1
- mindspore/_extends/parse/__init__.py +2 -2
- mindspore/_extends/parse/compile_config.py +0 -29
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +5 -21
- mindspore/_extends/parse/resources.py +7 -5
- mindspore/_extends/parse/standard_method.py +59 -40
- mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +5 -26
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/less_batch_normalization.py +6 -9
- mindspore/common/__init__.py +1 -8
- mindspore/common/_register_for_tensor.py +9 -8
- mindspore/common/api.py +65 -275
- mindspore/common/dtype.py +4 -8
- mindspore/common/dump.py +5 -2
- mindspore/common/jit_config.py +1 -1
- mindspore/common/lazy_inline.py +2 -14
- mindspore/common/parameter.py +15 -14
- mindspore/common/recompute.py +5 -20
- mindspore/common/sparse_tensor.py +6 -21
- mindspore/common/tensor.py +52 -100
- mindspore/communication/__init__.py +11 -6
- mindspore/communication/management.py +94 -92
- mindspore/context.py +18 -180
- mindspore/dataset/engine/datasets.py +46 -69
- mindspore/dataset/engine/datasets_user_defined.py +53 -72
- mindspore/dataset/engine/datasets_vision.py +2 -2
- mindspore/dataset/engine/queue.py +38 -56
- mindspore/dataset/engine/validators.py +5 -11
- mindspore/dataset/vision/__init__.py +5 -5
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +1 -1
- mindspore/dataset/vision/transforms.py +46 -591
- mindspore/dataset/vision/utils.py +1 -121
- mindspore/dataset/vision/validators.py +3 -9
- mindspore/hal/__init__.py +1 -7
- mindspore/hal/device.py +1 -1
- mindspore/include/api/model.h +0 -3
- mindspore/include/dataset/vision.h +2 -54
- mindspore/include/mindapi/base/types.h +0 -1
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -35
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +0 -72
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/{aclnn_all_finite.h → aclnn_add_custom.h} +11 -9
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +12 -184
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +5 -4
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +286 -275
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_tiling.h +45 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +4 -8
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_tiling.h +4 -11
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/kernel/flash_attention_score_mix_hwsync.h +0 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -6
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +75 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/kernel/matmul.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +3 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_common_tiling.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_info.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_data.h +3 -36
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/kernel/matmul_stridedslice_fusion.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +4 -22
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +2 -16
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +3 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +4 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +4 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +2 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +4 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +12 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +1 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +2 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +1 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -17
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +7 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/mindrecord/filewriter.py +2 -2
- mindspore/mint/__init__.py +40 -720
- mindspore/mint/nn/__init__.py +7 -89
- mindspore/mint/nn/functional.py +16 -165
- mindspore/mint/optim/adamw.py +16 -15
- mindspore/nn/__init__.py +2 -0
- mindspore/nn/cell.py +98 -97
- mindspore/nn/extend/basic.py +2 -2
- mindspore/nn/extend/embedding.py +1 -1
- mindspore/nn/extend/layer/normalization.py +5 -7
- mindspore/nn/generator.py +297 -0
- mindspore/nn/layer/activation.py +3 -4
- mindspore/nn/layer/basic.py +16 -79
- mindspore/nn/layer/conv.py +8 -17
- mindspore/nn/layer/embedding.py +4 -1
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +1 -1
- mindspore/nn/layer/pooling.py +0 -5
- mindspore/nn/layer/rnn_cells.py +2 -2
- mindspore/nn/loss/loss.py +19 -19
- mindspore/nn/optim/adasum.py +1 -1
- mindspore/nn/optim/sgd.py +2 -3
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/wrap/cell_wrapper.py +1 -25
- mindspore/nn/wrap/loss_scale.py +1 -24
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/math_ops.py +8 -8
- mindspore/ops/__init__.py +1 -1
- mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -75
- mindspore/ops/_vmap/vmap_array_ops.py +0 -27
- mindspore/ops/_vmap/vmap_math_ops.py +1 -29
- mindspore/ops/_vmap/vmap_nn_ops.py +18 -19
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +8 -34
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +9 -2
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -26
- mindspore/ops/auto_generate/gen_extend_func.py +27 -603
- mindspore/ops/auto_generate/gen_ops_def.py +203 -993
- mindspore/ops/auto_generate/gen_ops_prim.py +402 -1946
- mindspore/ops/auto_generate/pyboost_inner_prim.py +20 -90
- mindspore/ops/composite/base.py +6 -3
- mindspore/ops/composite/math_ops.py +1 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +17 -24
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/extend/__init__.py +3 -2
- mindspore/ops/extend/array_func.py +51 -10
- mindspore/ops/extend/nn_func.py +78 -2
- mindspore/ops/function/__init__.py +13 -8
- mindspore/ops/function/array_func.py +179 -455
- mindspore/ops/function/clip_func.py +1 -1
- mindspore/ops/function/grad/grad_func.py +3 -3
- mindspore/ops/function/math_func.py +103 -117
- mindspore/ops/function/nn_func.py +163 -275
- mindspore/ops/function/other_func.py +2 -2
- mindspore/ops/function/random_func.py +69 -202
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/functional.py +327 -332
- mindspore/ops/operations/__init__.py +3 -13
- mindspore/ops/operations/_grad_ops.py +27 -3
- mindspore/ops/operations/_inner_ops.py +356 -53
- mindspore/ops/operations/_rl_inner_ops.py +2 -2
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +65 -82
- mindspore/ops/operations/comm_ops.py +93 -784
- mindspore/ops/operations/custom_ops.py +28 -51
- mindspore/ops/operations/debug_ops.py +4 -4
- mindspore/ops/operations/inner_ops.py +2 -2
- mindspore/ops/operations/manually_defined/ops_def.py +4 -304
- mindspore/ops/operations/math_ops.py +50 -3
- mindspore/ops/operations/nn_ops.py +247 -14
- mindspore/ops/operations/other_ops.py +3 -3
- mindspore/ops/operations/random_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +1 -1
- mindspore/ops/primitive.py +8 -9
- mindspore/ops/silent_check.py +5 -5
- mindspore/ops_generate/arg_dtype_cast.py +9 -2
- mindspore/ops_generate/arg_handler.py +0 -26
- mindspore/ops_generate/gen_aclnn_implement.py +4 -1
- mindspore/ops_generate/gen_ops.py +4 -26
- mindspore/ops_generate/gen_pyboost_func.py +12 -41
- mindspore/ops_generate/gen_utils.py +0 -21
- mindspore/ops_generate/pyboost_utils.py +2 -7
- mindspore/ops_generate/template.py +0 -1
- mindspore/parallel/_auto_parallel_context.py +1 -21
- mindspore/parallel/_tensor.py +5 -0
- mindspore/parallel/_transformer/transformer.py +1 -1
- mindspore/parallel/_utils.py +1 -15
- mindspore/parallel/algo_parameter_config.py +3 -1
- mindspore/parallel/checkpoint_transform.py +9 -12
- mindspore/parallel/cluster/process_entity/_api.py +29 -28
- mindspore/parallel/cluster/process_entity/_utils.py +3 -13
- mindspore/parallel/cluster/run.py +16 -13
- mindspore/parallel/parameter_broadcast.py +2 -2
- mindspore/parallel/shard.py +17 -31
- mindspore/profiler/__init__.py +2 -3
- mindspore/profiler/common/util.py +2 -107
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +21 -8
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -82
- mindspore/profiler/parser/ascend_analysis/function_event.py +28 -43
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +27 -49
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +10 -15
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +20 -25
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +5 -5
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +1 -10
- mindspore/profiler/parser/ascend_hccl_generator.py +1 -4
- mindspore/profiler/parser/ascend_msprof_exporter.py +22 -43
- mindspore/profiler/parser/ascend_timeline_generator.py +5 -7
- mindspore/profiler/parser/minddata_parser.py +3 -72
- mindspore/profiler/profiling.py +59 -176
- mindspore/rewrite/api/node.py +1 -1
- mindspore/rewrite/common/namespace.py +5 -5
- mindspore/rewrite/parsers/assign_parser.py +0 -2
- mindspore/rewrite/parsers/class_def_parser.py +4 -8
- mindspore/run_check/_check_version.py +1 -1
- mindspore/scipy/fft.py +3 -1
- mindspore/scipy/linalg.py +3 -2
- mindspore/scipy/ops.py +3 -5
- mindspore/scipy/optimize/__init__.py +2 -2
- mindspore/train/__init__.py +4 -4
- mindspore/train/anf_ir_pb2.py +2 -8
- mindspore/train/callback/__init__.py +2 -5
- mindspore/train/callback/_backup_and_restore.py +2 -2
- mindspore/train/callback/_checkpoint.py +16 -104
- mindspore/train/callback/_landscape.py +1 -1
- mindspore/train/callback/_time_monitor.py +1 -1
- mindspore/train/data_sink.py +4 -5
- mindspore/train/dataset_helper.py +20 -45
- mindspore/train/model.py +38 -266
- mindspore/train/serialization.py +105 -256
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/version.py +1 -1
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +303 -420
- mindspore/_extends/pijit/__init__.py +0 -23
- mindspore/_extends/pijit/pijit_func_white_list.py +0 -343
- mindspore/common/file_system.py +0 -48
- mindspore/common/generator.py +0 -260
- mindspore/common/no_inline.py +0 -54
- mindspore/common/np_dtype.py +0 -25
- mindspore/communication/comm_func.py +0 -1140
- mindspore/hal/memory.py +0 -326
- mindspore/lib/libavcodec.so.59 +0 -0
- mindspore/lib/libavdevice.so.59 +0 -0
- mindspore/lib/libavfilter.so.8 +0 -0
- mindspore/lib/libavformat.so.59 +0 -0
- mindspore/lib/libavutil.so.57 +0 -0
- mindspore/lib/libmindspore_np_dtype.so +0 -0
- mindspore/lib/libswresample.so.4 +0 -0
- mindspore/lib/libswscale.so.6 +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.cpp +0 -326
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.py +0 -180
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
- mindspore/lib/plugin/ascend/custom_compiler/OWNERS +0 -12
- mindspore/lib/plugin/ascend/custom_compiler/setup.py +0 -255
- mindspore/lib/plugin/ascend/custom_compiler/start.sh +0 -26
- mindspore/lib/plugin/ascend/custom_compiler/template.json +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/base_type.h +0 -133
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -32
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/tiling_info.h +0 -60
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -37
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_op.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_backoff_base.h +0 -62
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -33
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_ops.h +0 -88
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_pa_op.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -95
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -84
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -61
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -224
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_impl.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_tiling.h +0 -187
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul.h +0 -245
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_interface.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_utils.h +0 -111
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/tiling_data.h +0 -54
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -364
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -114
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -98
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/mint/linalg/__init__.py +0 -22
- mindspore/nn/layer/embedding_service.py +0 -531
- mindspore/nn/layer/embedding_service_layer.py +0 -393
- mindspore/ops/function/reshard_func.py +0 -102
- mindspore/ops/operations/_infer_ops.py +0 -19
- mindspore/ops/operations/reshard_ops.py +0 -53
- mindspore/profiler/common/process_pool.py +0 -41
- mindspore/profiler/common/singleton.py +0 -28
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/train/callback/_cluster_monitor.py +0 -201
- mindspore/train/callback/_flops_collector.py +0 -238
- mindspore/train/callback/_mindio_ttp.py +0 -443
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
mindspore/nn/cell.py
CHANGED
@@ -34,7 +34,6 @@ from mindspore import _checkparam as Validator
 from mindspore.common import dtype as mstype
 from mindspore.common.api import _cell_graph_executor, _pynative_executor, _get_args_for_run, cells_compile_cache
 from mindspore.common.api import _generate_branch_control_input, _convert_python_data, _get_args_for_run_predict
-from mindspore.common.api import _process_dyn_args, _generate_dyn_compile_args
 from mindspore.common.parameter import Parameter, ParameterTuple
 from mindspore.common.tensor import Tensor
 from mindspore.ops.operations import Cast
@@ -81,7 +80,7 @@ class Cell(Cell_):
 
 Examples:
 >>> import mindspore.nn as nn
->>>
+>>> import mindspore.ops as ops
 >>> class MyCell(nn.Cell):
 ... def __init__(self, forward_net):
 ... super(MyCell, self).__init__(auto_prefix=False)
@@ -105,12 +104,9 @@ class Cell(Cell_):
 '_forward_pre_hook', '_forward_hook', '_enable_forward_pre_hook', '_enable_forward_hook',
 '_bprop_debug', '_enable_backward_hook', '_cell_backward_hook', '_is_run', '_param_prefix',
 '_attr_synced', 'pynative', 'requires_grad', 'cell_type']
-total_instance_count = 0
 
 def __init__(self, auto_prefix=True, flags=None):
 Cell_.__init__(self, self._cell_tag)
-Cell.total_instance_count += 1
-self.instance_count = Cell.total_instance_count
 self._params = OrderedDict()
 self._cells = OrderedDict()
 self._params_list = OrderedDict()
@@ -136,7 +132,6 @@ class Cell(Cell_):
 self.exist_names = set("")
 self.exist_objs = set()
 self.recompute_cell = None
-self.sig = inspect.signature(self.construct)
 init_pipeline()
 
 # call gc to release GE session resources used by non-used cell objects
@@ -401,9 +396,6 @@ class Cell(Cell_):
 cells_compile_cache.pop(id(self), None)
 if hasattr(self, "compile_cache") and self.compile_cache:
 _cell_graph_executor.del_net_res(self, self.compile_cache)
-if isinstance(self, GraphCell):
-_cell_graph_executor.dec_graph_cell_count()
-Cell.total_instance_count -= 1
 
 def __delattr__(self, name):
 if name in self._params:
@@ -574,9 +566,8 @@ class Cell(Cell_):
 def shard(self, in_strategy, out_strategy=None, parameter_plan=None, device="Ascend", level=0):
 """
 Defining the input and output layouts of this cell and the parallel strategies of remaining ops will be
-generated by sharding propagation. In PyNative mode, use this method
-
-strategy for others will be set by sharding propagation.
+generated by sharding propagation. In PyNative mode, use this method
+to specify a Cell for distributed execution in graph mode.
 in_strategy and out_strategy define the input and output layout respectively.
 in_strategy/out_strategy should be a tuple, each element of which corresponds to the desired layout of
 this input/output, and None represents data_parallel,
@@ -584,8 +575,8 @@ class Cell(Cell_):
 The parallel strategies of remaining operators are derived from the strategy specified by the input and output.
 
 Note:
-
-
+Only effective in PYNATIVE_MODE and in either ParallelMode.AUTO_PARALLEL with
+search_mode in auto_parallel_context set as sharding_propagation.
 If the input contain Parameter, its strategy should be set in `in_strategy`.
 
 Args:
@@ -607,7 +598,7 @@ class Cell(Cell_):
 use right now. Support [ ``"0"`` , ``"1"`` , ``"2"`` ]. Default: ``0`` .
 
 Returns:
-
+Cell, the cell itself.
 
 Examples:
 >>> import mindspore.nn as nn
@@ -625,21 +616,22 @@ class Cell(Cell_):
 ... def __init__(self):
 ... self.block1 = Block()
 ... self.block2 = Block()
-... self.
-...
+... self.block2.shard(in_strategy=((2, 1),), out_strategy=(None,),
+... parameter_plan={'self.block2.shard.dense1.weight': (4, 1)})
 ... def construct(self, x):
 ... x = self.block1(x)
-... x = self.
+... x = self.block2(x)
 ... return x
 """
-if context.
-
-
+if context.get_context("mode") != context.PYNATIVE_MODE or \
+context.get_auto_parallel_context("parallel_mode") not in ["auto_parallel"]:
+raise AssertionError(f"Cell shard only supports auto parallel under PyNative mode. "
+f"Please check if you call Cell.shard in the script.")
 
 shard_fn = Shard()
 fn = shard_fn(self, in_strategy, out_strategy, parameter_plan, device, level)
 object.__setattr__(self, "_shard_fn", fn)
-return
+return self
 
 def auto_cast_inputs(self, inputs):
 """
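For orientation, a minimal sketch of how the rc2-side `shard` shown above is meant to be called, under the conditions the restored check enforces (PyNative mode plus `auto_parallel` with sharding propagation). The `Block` network, the strategy values, and the launch setup are illustrative assumptions, not code from the package; running it for real needs a multi-device launch (e.g. via msrun).

```python
import mindspore as ms
import mindspore.nn as nn

# Assumed environment: PyNative mode + auto_parallel with sharding_propagation,
# which is what the rc2-side check in shard() requires.
ms.set_context(mode=ms.PYNATIVE_MODE)
ms.set_auto_parallel_context(parallel_mode="auto_parallel",
                             search_mode="sharding_propagation")


class Block(nn.Cell):
    """Illustrative two-layer block; not taken from the MindSpore sources."""

    def __init__(self):
        super().__init__()
        self.dense1 = nn.Dense(8, 8)
        self.relu = nn.ReLU()

    def construct(self, x):
        return self.relu(self.dense1(x))


net = Block()
# in_strategy splits the first input across 2 devices along dim 0;
# on the rc2 side shard() returns the cell itself ("return self").
sharded_net = net.shard(in_strategy=((2, 1),), out_strategy=(None,))
```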
@@ -686,7 +678,7 @@ class Cell(Cell_):
 # Run in Graph mode.
 if os.getenv("MS_JIT") != '0' and context._get_mode() == context.GRAPH_MODE:
 if kwargs:
-bound_arguments = self.
+bound_arguments = inspect.signature(self.construct).bind(*args, **kwargs)
 bound_arguments.apply_defaults()
 args = bound_arguments.args
 kwargs = bound_arguments.kwargs
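The rc2-side line above leans on `inspect.signature(...).bind` to fold keyword arguments back into positional form before graph compilation. A small standalone illustration in plain Python (no MindSpore needed):

```python
import inspect


def construct(x, y, scale=1.0):
    return (x + y) * scale


# bind() maps positional and keyword arguments onto the signature;
# apply_defaults() fills in anything still at its default value.
bound = inspect.signature(construct).bind(1, y=2)
bound.apply_defaults()
print(bound.args, bound.kwargs)   # (1, 2, 1.0) {}
```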
@@ -907,25 +899,14 @@ class Cell(Cell_):
 """
 logger.warning("'set_parallel_input_with_inputs' function is deprecated.")
 
-def set_inputs(self, *inputs
+def set_inputs(self, *inputs):
 """
 Save set inputs for computation graph. The number of inputs should be the same with that of the datasets. When
 using Model for dynamic shape, please make sure that all networks and loss functions passed to the Model are
-configured with set_inputs. The
-
-.. note::
-There are two mode:
-
-- Full mode: arguments will be used as all compile inputs for graph-compiling.
-- Incremental mode: arguments will set to some of the Cell inputs, which will be substituted into the input
-at the corresponding position for graph-compiling.
-
-Only one of inputs or kwargs can be set. Inputs for full mode and kwargs for incremental mode.
+configured with set_inputs. The inputs can be Tensor of either dynamic or static shape.
 
 Args:
-inputs (tuple):
-kwargs (dict): Incremental mode arguments. The acceptable key is the name of parameter defined
-in `self.construct`.
+inputs (tuple): Inputs of the Cell object.
 
 .. warning::
 This is an experimental API that is subject to change or deletion.
@@ -945,27 +926,16 @@ class Cell(Cell_):
 >>> net = ReluNet()
 >>> input_dyn = Tensor(shape=[3, None], dtype=ms.float32)
 >>> net.set_inputs(input_dyn)
->>>
->>> output = net(
->>>
->>> net2 = ReluNet()
->>> net2.set_inputs(x=input_dyn)
->>> output = net2(input)
+>>> input1 = Tensor(np.random.random([3, 10]), dtype=ms.float32)
+>>> output = net(input1)
 """
 if self.grad_ops_label:
 logger.warning(f'For Cell, set_inputs must be set before the gradient function of the network is '
 f'generated.')
-
-
-
-
-if not kwargs:
-self._dynamic_shape_inputs = inputs
-self._check_construct_args(*inputs)
-if context._get_mode() == context.PYNATIVE_MODE:
-_pynative_executor.set_dynamic_input(self, *self._dynamic_shape_inputs)
-else:
-self._dynamic_shape_inputs = _process_dyn_args(self.construct, kwargs)
+self._dynamic_shape_inputs = inputs
+self._check_construct_args(*inputs)
+if context._get_mode() == context.PYNATIVE_MODE:
+_pynative_executor.set_dynamic_input(self, *self._dynamic_shape_inputs)
 
 def get_inputs(self):
 """
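Putting the rc2-side `set_inputs` docstring example above into one runnable sketch (PyNative assumed; `ReluNet` mirrors the docstring network, and only positional inputs exist on that side):

```python
import numpy as np
import mindspore as ms
from mindspore import nn, ops, Tensor

ms.set_context(mode=ms.PYNATIVE_MODE)


class ReluNet(nn.Cell):
    def __init__(self):
        super().__init__()
        self.relu = ops.ReLU()

    def construct(self, x):
        return self.relu(x)


net = ReluNet()
# Second dimension left dynamic, exactly as in the docstring example.
input_dyn = Tensor(shape=[3, None], dtype=ms.float32)
net.set_inputs(input_dyn)
output = net(Tensor(np.random.random([3, 10]), dtype=ms.float32))
print(output.shape)   # (3, 10)
```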
@@ -1000,46 +970,18 @@ class Cell(Cell_):
 
 return self._dynamic_shape_inputs
 
-def _check_parameter_consistency(self, set_inputs, net_inputs):
-"""Check consistency for parameter."""
-for index, (set_input, net_input) in enumerate(zip(set_inputs, net_inputs)):
-if isinstance(set_input, Tensor):
-if not isinstance(net_input, Tensor):
-raise TypeError(
-f"For 'set_inputs' and tuple(list) in 'set_inputs',the type of {index + 1}th input must "
-f"be Tensor, but got {type(net_input)}.")
-if isinstance(set_input, Parameter) != isinstance(net_input, Parameter):
-raise TypeError(
-f"For 'set_inputs' and tuple(list) in 'set_inputs', the {index + 1}th input must be the same "
-f"as expected, but got expected: {type(set_input)} and input: {type(net_input)}.")
-elif isinstance(set_input, (tuple, list)):
-if not isinstance(net_input, (tuple, list)):
-raise TypeError(
-f"The {index + 1}th input type of 'set_inputs' or tuple(list) in "
-f"'set_inputs' must be tuple or list, but got {type(net_input)}.")
-self._check_parameter_consistency(set_input, net_input)
-
 def _get_compile_args(self, args):
 """Get compile arguments."""
 # this is used only for test
-
-if is_auto_dynamic():
-if self._dynamic_shape_inputs is None:
-set_by_auto_dynamic = True
-else:
-if isinstance(self._dynamic_shape_inputs, (list, tuple)) and self._dynamic_shape_inputs[0] is None:
-set_by_auto_dynamic = True
-if set_by_auto_dynamic:
+if is_auto_dynamic() and (self._dynamic_shape_inputs is None or self._dynamic_shape_inputs[0] is None):
 self._dynamic_shape_inputs = convert_inputs_to_dynamic(*args)
 
 if self._dynamic_shape_inputs is not None:
 logger.debug("Compiled Graph with dynamic shape")
-
-
-self.
-
-self.saved_dynamic_shape = compile_args
-return compile_args
+self._check_compile_dynamic_shape(self._dynamic_shape_inputs, args)
+Validator.check_symbolic_shape(self._dynamic_shape_inputs, args)
+self.saved_dynamic_shape = self._dynamic_shape_inputs
+return self._dynamic_shape_inputs
 return args
 
 def compile(self, *args, **kwargs):
@@ -1084,7 +1026,6 @@ class Cell(Cell_):
 
 def exec_checkpoint_graph(self):
 """Executes GE saving checkpoint graph operation."""
-logger.warning("'exec_checkpoint_graph' function is deprecated.")
 self.add_flags(ge_sync_data=True)
 _cell_graph_executor(self, phase='save')
 
@@ -2000,11 +1941,11 @@ class Cell(Cell_):
 Note:
 - The `register_forward_pre_hook(hook_fn)` does not work in graph mode or functions decorated with 'jit'.
 - 'hook_fn' must be defined as the following code.
-`
+`cell_id` is the information of registered Cell object, including name and ID. `inputs` is the forward
 input objects passed to the Cell. The 'hook_fn' can modify the forward input objects by returning new
 forward input objects.
 - It should have the following signature:
-hook_fn(
+hook_fn(cell_id, inputs) -> new input objects or none.
 - In order to prevent running failed when switching to graph mode, it is not recommended to write it in the
 `construct` function of Cell object. In the pynative mode, if the `register_forward_pre_hook` function is
 called in the `construct` function of the Cell object, a hook function will be added at each run time of
@@ -2028,7 +1969,7 @@ class Cell(Cell_):
 >>> import mindspore as ms
 >>> from mindspore import Tensor, nn, ops
 >>> ms.set_context(mode=ms.PYNATIVE_MODE)
->>> def forward_pre_hook_fn(
+>>> def forward_pre_hook_fn(cell_id, inputs):
 ... print("forward inputs: ", inputs)
 ...
 >>> class Net(nn.Cell):
@@ -2074,8 +2015,9 @@ class Cell(Cell_):
 Supported Platforms:
 ``Ascend`` ``GPU`` ``CPU``
 """
+cell_id = self.cls_name + "(" + str(id(self)) + ")"
 for fn in self._forward_pre_hook.values():
-ret = fn(
+ret = fn(cell_id, inputs)
 if ret is not None:
 if not isinstance(ret, tuple):
 inputs = (ret,)
@@ -2090,11 +2032,11 @@ class Cell(Cell_):
 Note:
 - The `register_forward_hook(hook_fn)` does not work in graph mode or functions decorated with 'jit'.
 - 'hook_fn' must be defined as the following code.
-`
+`cell_id` is the information of registered Cell object, including name and ID. `inputs` is the forward
 input objects passed to the Cell. `output` is the forward output object of the Cell. The 'hook_fn' can
 modify the forward output object by returning new forward output object.
 - It should have the following signature:
-hook_fn(
+hook_fn(cell_id, inputs, output) -> new output object or none.
 - In order to prevent running failed when switching to graph mode, it is not recommended to write it in the
 `construct` function of Cell object. In the pynative mode, if the `register_forward_hook` function is
 called in the `construct` function of the Cell object, a hook function will be added at each run time of
@@ -2118,7 +2060,7 @@ class Cell(Cell_):
 >>> import mindspore as ms
 >>> from mindspore import Tensor, nn, ops
 >>> ms.set_context(mode=ms.PYNATIVE_MODE)
->>> def forward_hook_fn(
+>>> def forward_hook_fn(cell_id, inputs, output):
 ... print("forward inputs: ", inputs)
 ... print("forward output: ", output)
 ...
@@ -2167,8 +2109,9 @@ class Cell(Cell_):
 Supported Platforms:
 ``Ascend`` ``GPU`` ``CPU``
 """
+cell_id = self.cls_name + "(" + str(id(self)) + ")"
 for fn in self._forward_hook.values():
-ret = fn(
+ret = fn(cell_id, inputs, output)
 if ret is not None:
 output = ret
 return output
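A hedged sketch of the rc2-side hook signatures restored by the hunks above: on that side the framework builds `cell_id` as `cls_name + "(" + str(id(cell)) + ")"` and passes it as the first hook argument. The toy `Net` is illustrative, not from the MindSpore sources.

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, nn

ms.set_context(mode=ms.PYNATIVE_MODE)


def forward_pre_hook_fn(cell_id, inputs):
    # rc2-side signature: (cell_id, inputs)
    print("pre-hook on", cell_id, "inputs:", inputs)


def forward_hook_fn(cell_id, inputs, output):
    # rc2-side signature: (cell_id, inputs, output)
    print("hook on", cell_id, "output shape:", output.shape)


class Net(nn.Cell):
    def __init__(self):
        super().__init__()
        self.relu = nn.ReLU()

    def construct(self, x):
        return self.relu(x)


net = Net()
pre_handle = net.relu.register_forward_pre_hook(forward_pre_hook_fn)
handle = net.relu.register_forward_hook(forward_hook_fn)
out = net(Tensor(np.ones([1, 2]), ms.float32))
pre_handle.remove()
handle.remove()
```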
@@ -2483,6 +2426,65 @@ class Cell(Cell_):
 for op in all_ops:
 op.place(role, rank_id)
 
+def _check_dynamic_tensor(self, set_input, net_input, index):
+"""
+Check if tensor is correctly set for dynamic shape.
+
+Args:
+set_input (Tensor): Tensor set for dynamic shape.
+net_input (Tensor): Input tensor of the Cell object.
+index (int): Tensor index for set inputs.
+"""
+if not isinstance(net_input, Tensor):
+raise TypeError(
+f"For 'set_inputs' and tuple(list) in 'set_inputs',the type of {index + 1}th input must be Tensor, "
+f"but got {type(net_input)}.")
+is_param_set_input = isinstance(set_input, Parameter)
+is_param_net_input = isinstance(net_input, Parameter)
+if (is_param_set_input and not is_param_net_input) or (is_param_net_input and not is_param_set_input):
+raise TypeError(
+f"For 'set_inputs' and tuple(list) in 'set_inputs', the {index + 1}th input must be the same "
+f"as network's input, but got 'set_inputs': {type(set_input)} and network's input: {type(net_input)}.")
+if set_input.dtype != net_input.dtype:
+raise TypeError(
+f"For 'set_inputs' and tuple(list) in 'set_inputs',the dtype of {index + 1}th input must be the same "
+f"as network's input, but got 'set_inputs': {set_input.dtype} and network's input: {net_input.dtype}.")
+if -2 not in set_input.shape:
+if net_input.dim() != 0 and set_input.dim() != net_input.dim():
+raise ValueError(
+f"For 'set_inputs' and tuple(list) in 'set_inputs',the dims of {index + 1}th input must be the "
+f"same as network's input, but got 'set_inputs': {set_input.dim()} and network's input: "
+f"{net_input.dim()}.")
+if not all([ele1 in (-1, ele2) for ele1, ele2 in zip(set_input.shape, net_input.shape)]):
+raise ValueError(
+f"For 'set_inputs' and tuple(list) in 'set_inputs',the shape of {index + 1}th input must be the "
+f"same as network's input, but got 'set_inputs': {set_input.shape} and network's input: "
+f"{net_input.shape}.")
+
+def _check_compile_dynamic_shape(self, set_inputs, net_inputs):
+"""
+Check if graph has been compiled with dynamic shape.
+
+Args:
+net_inputs (tuple): Inputs of the Cell object.
+"""
+if not getattr(set_inputs, '__ms_dynamic_len__', False):
+set_inputs_len = len(set_inputs)
+net_inputs_len = len(net_inputs)
+if set_inputs_len != net_inputs_len:
+raise ValueError(f"The length of 'set_inputs' or tuple(list) in 'set_inputs' "
+f"must be equal to network's inputs, but got 'set_inputs': "
+f"{set_inputs_len} and network's input: {net_inputs_len}.")
+for index, (set_input, net_input) in enumerate(zip(set_inputs, net_inputs)):
+if isinstance(set_input, Tensor):
+self._check_dynamic_tensor(set_input, net_input, index)
+elif isinstance(set_input, (tuple, list)):
+if not isinstance(net_input, (tuple, list)):
+raise TypeError(
+f"The {index + 1}th input type of 'set_inputs' or tuple(list) in "
+f"'set_inputs' must be tuple or list, but got {type(net_input)}.")
+self._check_compile_dynamic_shape(set_input, net_input)
+
 def _mixed_precision_cast(self, inputs):
 mixed_type = self.get_mixed_precision_type()
 if mixed_type == MixedPrecisionType.NOTSET:
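The `_check_compile_dynamic_shape`/`_check_dynamic_tensor` pair added above enforces, per dimension, that a `set_inputs` dim is either `-1` (dynamic) or exactly the runtime dim, with matching dtype and rank. The core rule in plain Python:

```python
# Per-dimension rule from the rc2-side check above: -1 is a wildcard,
# anything else must match the concrete runtime dimension exactly.
set_shape = (3, -1)    # e.g. what Tensor(shape=[3, None], ...) records
net_shape = (3, 7)     # a concrete runtime input

compatible = all(s in (-1, n) for s, n in zip(set_shape, net_shape))
print(compatible)      # True

mismatched = all(s in (-1, n) for s, n in zip((4, -1), net_shape))
print(mismatched)      # False: dim 0 is fixed to 4 but the input has 3
```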
@@ -2579,7 +2581,6 @@ class GraphCell(Cell):
 params_dict = update_func_graph_hyper_params(self.graph, params_init)
 for name, param in params_dict.items():
 self._params[name] = param
-_cell_graph_executor.inc_graph_cell_count()
 
 def construct(self, *inputs):
 return self.graph(*inputs)
mindspore/nn/extend/basic.py
CHANGED
@@ -76,10 +76,10 @@ class Linear(Cell):
 Examples:
 >>> import mindspore
 >>> from mindspore import Tensor
->>> from mindspore import
+>>> from mindspore.nn.extend import Linear
 >>> import numpy as np
 >>> x = Tensor(np.array([[180, 234, 154], [244, 48, 247]]), mindspore.float32)
->>> net =
+>>> net = Linear(3, 4)
 >>> output = net(x)
 >>> print(output.shape)
 (2, 4)
mindspore/nn/extend/embedding.py
CHANGED
@@ -57,7 +57,7 @@ class Embedding(Cell):
 mindspore.int32 or mindspore.int64, and the value should be in range `[0, num_embeddings)`.
 
 Outputs:
-Tensor, has the same data type as weight, the shape is :math:`(*input.shape,
+Tensor, has the same data type as weight, the shape is :math:`(*input.shape, embedding_dim)`.
 
 Raises:
 TypeError: If `num_embeddings` is not an int.
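A quick shape check for the `Outputs` line fixed above: the result appends `embedding_dim` to the index tensor's shape. The import path and constructor arguments are assumed from the file list entry `mindspore/nn/extend/embedding.py`, so treat this as a sketch rather than the package's documented API.

```python
import numpy as np
import mindspore as ms
from mindspore.nn.extend import Embedding   # assumed export path

emb = Embedding(num_embeddings=10, embedding_dim=3)
idx = ms.Tensor(np.array([[1, 2], [3, 4]]), ms.int32)
out = emb(idx)
print(out.shape)   # (2, 2, 3) == (*idx.shape, embedding_dim)
```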
mindspore/nn/extend/layer/normalization.py
CHANGED
@@ -48,8 +48,7 @@ class LayerNorm(Cell):
 beta_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the :math:`\beta` weight.
 The values of str refer to the function `initializer` including ``'zeros'`` , ``'ones'`` ,
 ``'xavier_uniform'`` , ``'he_uniform'`` , etc. Default: ``'zeros'`` .
-
-elementwise_affine (bool): A bool value, When set to True, gamma and beta can be learned. Default: True.
+epsilon (float): A value added to the denominator for numerical stability(:math:`\epsilon`). Default: ``1e-5`` .
 dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
 
 Inputs:
@@ -79,8 +78,7 @@ class LayerNorm(Cell):
 normalized_shape,
 gamma_init='ones',
 beta_init='zeros',
-
-elementwise_affine=True,
+epsilon=1e-5,
 dtype=mstype.float32
 ):
 """Initialize LayerNorm."""
@@ -94,11 +92,11 @@ class LayerNorm(Cell):
 f"least one element, but got normalized_shape = {normalized_shape}"
 )
 self.normalized_shape = normalized_shape
-self.epsilon =
+self.epsilon = epsilon
 self.gamma = Parameter(initializer(
-gamma_init, normalized_shape, dtype=dtype), name="gamma"
+gamma_init, normalized_shape, dtype=dtype), name="gamma")
 self.beta = Parameter(initializer(
-beta_init, normalized_shape, dtype=dtype), name="beta"
+beta_init, normalized_shape, dtype=dtype), name="beta")
 
 def construct(self, input_x):
 y = F.layer_norm(input_x, self.normalized_shape, self.gamma.astype(input_x.dtype),
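Finally, a sketch of the rc2-side `LayerNorm` constructor after the change above, where `epsilon` (rather than 2.3.0's `elementwise_affine`) is the fourth keyword. The module import path is taken from the file list and the input sizes are illustrative.

```python
import numpy as np
import mindspore as ms
from mindspore.nn.extend.layer.normalization import LayerNorm   # path from the file list

x = ms.Tensor(np.ones([2, 4]), ms.float32)
# gamma_init/beta_init keep their defaults; epsilon is the rc2-side keyword.
ln = LayerNorm(normalized_shape=(4,), epsilon=1e-5)
y = ln(x)
print(y.shape)   # (2, 4): layer norm preserves the input shape
```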