mindspore 2.3.0__cp39-none-any.whl → 2.3.0rc2__cp39-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +0 -1512
- mindspore/__init__.py +1 -2
- mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +25 -5
- mindspore/_extends/graph_kernel/model/graph_parallel.py +1 -1
- mindspore/_extends/parse/__init__.py +2 -2
- mindspore/_extends/parse/compile_config.py +0 -29
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +5 -21
- mindspore/_extends/parse/resources.py +7 -5
- mindspore/_extends/parse/standard_method.py +59 -40
- mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +5 -26
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/less_batch_normalization.py +6 -9
- mindspore/common/__init__.py +1 -8
- mindspore/common/_register_for_tensor.py +9 -8
- mindspore/common/api.py +65 -275
- mindspore/common/dtype.py +4 -8
- mindspore/common/dump.py +5 -2
- mindspore/common/jit_config.py +1 -1
- mindspore/common/lazy_inline.py +2 -14
- mindspore/common/parameter.py +15 -14
- mindspore/common/recompute.py +5 -20
- mindspore/common/sparse_tensor.py +6 -21
- mindspore/common/tensor.py +52 -100
- mindspore/communication/__init__.py +11 -6
- mindspore/communication/management.py +94 -92
- mindspore/context.py +18 -180
- mindspore/dataset/engine/datasets.py +46 -69
- mindspore/dataset/engine/datasets_user_defined.py +53 -72
- mindspore/dataset/engine/datasets_vision.py +2 -2
- mindspore/dataset/engine/queue.py +38 -56
- mindspore/dataset/engine/validators.py +5 -11
- mindspore/dataset/vision/__init__.py +5 -5
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +1 -1
- mindspore/dataset/vision/transforms.py +46 -591
- mindspore/dataset/vision/utils.py +1 -121
- mindspore/dataset/vision/validators.py +3 -9
- mindspore/hal/__init__.py +1 -7
- mindspore/hal/device.py +1 -1
- mindspore/include/api/model.h +0 -3
- mindspore/include/dataset/vision.h +2 -54
- mindspore/include/mindapi/base/types.h +0 -1
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -35
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +0 -72
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/{aclnn_all_finite.h → aclnn_add_custom.h} +11 -9
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +12 -184
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +5 -4
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +286 -275
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_tiling.h +45 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +4 -8
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_tiling.h +4 -11
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/kernel/flash_attention_score_mix_hwsync.h +0 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -6
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +75 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/kernel/matmul.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +3 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_common_tiling.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_info.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_data.h +3 -36
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/kernel/matmul_stridedslice_fusion.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +4 -22
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +2 -16
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +3 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +4 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +4 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +2 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +4 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +12 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +1 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +2 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +1 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -17
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +7 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/mindrecord/filewriter.py +2 -2
- mindspore/mint/__init__.py +40 -720
- mindspore/mint/nn/__init__.py +7 -89
- mindspore/mint/nn/functional.py +16 -165
- mindspore/mint/optim/adamw.py +16 -15
- mindspore/nn/__init__.py +2 -0
- mindspore/nn/cell.py +98 -97
- mindspore/nn/extend/basic.py +2 -2
- mindspore/nn/extend/embedding.py +1 -1
- mindspore/nn/extend/layer/normalization.py +5 -7
- mindspore/nn/generator.py +297 -0
- mindspore/nn/layer/activation.py +3 -4
- mindspore/nn/layer/basic.py +16 -79
- mindspore/nn/layer/conv.py +8 -17
- mindspore/nn/layer/embedding.py +4 -1
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +1 -1
- mindspore/nn/layer/pooling.py +0 -5
- mindspore/nn/layer/rnn_cells.py +2 -2
- mindspore/nn/loss/loss.py +19 -19
- mindspore/nn/optim/adasum.py +1 -1
- mindspore/nn/optim/sgd.py +2 -3
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/wrap/cell_wrapper.py +1 -25
- mindspore/nn/wrap/loss_scale.py +1 -24
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/math_ops.py +8 -8
- mindspore/ops/__init__.py +1 -1
- mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -75
- mindspore/ops/_vmap/vmap_array_ops.py +0 -27
- mindspore/ops/_vmap/vmap_math_ops.py +1 -29
- mindspore/ops/_vmap/vmap_nn_ops.py +18 -19
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +8 -34
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +9 -2
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -26
- mindspore/ops/auto_generate/gen_extend_func.py +27 -603
- mindspore/ops/auto_generate/gen_ops_def.py +203 -993
- mindspore/ops/auto_generate/gen_ops_prim.py +402 -1946
- mindspore/ops/auto_generate/pyboost_inner_prim.py +20 -90
- mindspore/ops/composite/base.py +6 -3
- mindspore/ops/composite/math_ops.py +1 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +17 -24
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/extend/__init__.py +3 -2
- mindspore/ops/extend/array_func.py +51 -10
- mindspore/ops/extend/nn_func.py +78 -2
- mindspore/ops/function/__init__.py +13 -8
- mindspore/ops/function/array_func.py +179 -455
- mindspore/ops/function/clip_func.py +1 -1
- mindspore/ops/function/grad/grad_func.py +3 -3
- mindspore/ops/function/math_func.py +103 -117
- mindspore/ops/function/nn_func.py +163 -275
- mindspore/ops/function/other_func.py +2 -2
- mindspore/ops/function/random_func.py +69 -202
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/functional.py +327 -332
- mindspore/ops/operations/__init__.py +3 -13
- mindspore/ops/operations/_grad_ops.py +27 -3
- mindspore/ops/operations/_inner_ops.py +356 -53
- mindspore/ops/operations/_rl_inner_ops.py +2 -2
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +65 -82
- mindspore/ops/operations/comm_ops.py +93 -784
- mindspore/ops/operations/custom_ops.py +28 -51
- mindspore/ops/operations/debug_ops.py +4 -4
- mindspore/ops/operations/inner_ops.py +2 -2
- mindspore/ops/operations/manually_defined/ops_def.py +4 -304
- mindspore/ops/operations/math_ops.py +50 -3
- mindspore/ops/operations/nn_ops.py +247 -14
- mindspore/ops/operations/other_ops.py +3 -3
- mindspore/ops/operations/random_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +1 -1
- mindspore/ops/primitive.py +8 -9
- mindspore/ops/silent_check.py +5 -5
- mindspore/ops_generate/arg_dtype_cast.py +9 -2
- mindspore/ops_generate/arg_handler.py +0 -26
- mindspore/ops_generate/gen_aclnn_implement.py +4 -1
- mindspore/ops_generate/gen_ops.py +4 -26
- mindspore/ops_generate/gen_pyboost_func.py +12 -41
- mindspore/ops_generate/gen_utils.py +0 -21
- mindspore/ops_generate/pyboost_utils.py +2 -7
- mindspore/ops_generate/template.py +0 -1
- mindspore/parallel/_auto_parallel_context.py +1 -21
- mindspore/parallel/_tensor.py +5 -0
- mindspore/parallel/_transformer/transformer.py +1 -1
- mindspore/parallel/_utils.py +1 -15
- mindspore/parallel/algo_parameter_config.py +3 -1
- mindspore/parallel/checkpoint_transform.py +9 -12
- mindspore/parallel/cluster/process_entity/_api.py +29 -28
- mindspore/parallel/cluster/process_entity/_utils.py +3 -13
- mindspore/parallel/cluster/run.py +16 -13
- mindspore/parallel/parameter_broadcast.py +2 -2
- mindspore/parallel/shard.py +17 -31
- mindspore/profiler/__init__.py +2 -3
- mindspore/profiler/common/util.py +2 -107
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +21 -8
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -82
- mindspore/profiler/parser/ascend_analysis/function_event.py +28 -43
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +27 -49
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +10 -15
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +20 -25
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +5 -5
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +1 -10
- mindspore/profiler/parser/ascend_hccl_generator.py +1 -4
- mindspore/profiler/parser/ascend_msprof_exporter.py +22 -43
- mindspore/profiler/parser/ascend_timeline_generator.py +5 -7
- mindspore/profiler/parser/minddata_parser.py +3 -72
- mindspore/profiler/profiling.py +59 -176
- mindspore/rewrite/api/node.py +1 -1
- mindspore/rewrite/common/namespace.py +5 -5
- mindspore/rewrite/parsers/assign_parser.py +0 -2
- mindspore/rewrite/parsers/class_def_parser.py +4 -8
- mindspore/run_check/_check_version.py +1 -1
- mindspore/scipy/fft.py +3 -1
- mindspore/scipy/linalg.py +3 -2
- mindspore/scipy/ops.py +3 -5
- mindspore/scipy/optimize/__init__.py +2 -2
- mindspore/train/__init__.py +4 -4
- mindspore/train/anf_ir_pb2.py +2 -8
- mindspore/train/callback/__init__.py +2 -5
- mindspore/train/callback/_backup_and_restore.py +2 -2
- mindspore/train/callback/_checkpoint.py +16 -104
- mindspore/train/callback/_landscape.py +1 -1
- mindspore/train/callback/_time_monitor.py +1 -1
- mindspore/train/data_sink.py +4 -5
- mindspore/train/dataset_helper.py +20 -45
- mindspore/train/model.py +38 -266
- mindspore/train/serialization.py +105 -256
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/version.py +1 -1
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +303 -420
- mindspore/_extends/pijit/__init__.py +0 -23
- mindspore/_extends/pijit/pijit_func_white_list.py +0 -343
- mindspore/common/file_system.py +0 -48
- mindspore/common/generator.py +0 -260
- mindspore/common/no_inline.py +0 -54
- mindspore/common/np_dtype.py +0 -25
- mindspore/communication/comm_func.py +0 -1140
- mindspore/hal/memory.py +0 -326
- mindspore/lib/libavcodec.so.59 +0 -0
- mindspore/lib/libavdevice.so.59 +0 -0
- mindspore/lib/libavfilter.so.8 +0 -0
- mindspore/lib/libavformat.so.59 +0 -0
- mindspore/lib/libavutil.so.57 +0 -0
- mindspore/lib/libmindspore_np_dtype.so +0 -0
- mindspore/lib/libswresample.so.4 +0 -0
- mindspore/lib/libswscale.so.6 +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.cpp +0 -326
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.py +0 -180
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
- mindspore/lib/plugin/ascend/custom_compiler/OWNERS +0 -12
- mindspore/lib/plugin/ascend/custom_compiler/setup.py +0 -255
- mindspore/lib/plugin/ascend/custom_compiler/start.sh +0 -26
- mindspore/lib/plugin/ascend/custom_compiler/template.json +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/base_type.h +0 -133
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -32
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/tiling_info.h +0 -60
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -37
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_op.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_backoff_base.h +0 -62
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -33
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_ops.h +0 -88
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_pa_op.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -95
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -84
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -61
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -224
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_impl.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_tiling.h +0 -187
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul.h +0 -245
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_interface.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_utils.h +0 -111
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/tiling_data.h +0 -54
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -364
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -114
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -98
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/mint/linalg/__init__.py +0 -22
- mindspore/nn/layer/embedding_service.py +0 -531
- mindspore/nn/layer/embedding_service_layer.py +0 -393
- mindspore/ops/function/reshard_func.py +0 -102
- mindspore/ops/operations/_infer_ops.py +0 -19
- mindspore/ops/operations/reshard_ops.py +0 -53
- mindspore/profiler/common/process_pool.py +0 -41
- mindspore/profiler/common/singleton.py +0 -28
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/train/callback/_cluster_monitor.py +0 -201
- mindspore/train/callback/_flops_collector.py +0 -238
- mindspore/train/callback/_mindio_ttp.py +0 -443
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
mindspore/common/parameter.py
CHANGED

@@ -1,4 +1,4 @@
-# Copyright 2020-
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -188,9 +188,6 @@ class Parameter(Tensor_):
         parallel_optimizer (bool): It is used to filter the weight shard operation in `SEMI_AUTO_PARALLEL` or
             `AUTO_PARALLEL` mode. It works only when enable parallel optimizer in
             `mindspore.set_auto_parallel_context()`. Default: ``True`` .
-        storage_format (str): Only Ascend device target is supported. It is used to specify the format of the weight
-            loaded to the device. By default, the format is not changed. The optional values are ``"FRACTAL_NZ"`` ,
-            ``"NC1HWC0"`` , ``"FRACTAL_Z"`` , etc. Default: ``""`` .

     Examples:
         >>> import numpy as np
@@ -226,7 +223,6 @@ class Parameter(Tensor_):
         # it's better to make the Initializer a kind of tensor.
         obj.init_mode = None
         obj.is_default_input_init = init_data_flag
-        obj.from_ckpt = False
         if obj.has_init:
             obj.init_mode = default_input
         else:
@@ -243,8 +239,7 @@ class Parameter(Tensor_):
         return (
             Parameter, (data, self.name, self.requires_grad, self.layerwise_parallel))

-    def __init__(self, default_input, name=None, requires_grad=True, layerwise_parallel=False, parallel_optimizer=True,
-                 storage_format=""):
+    def __init__(self, default_input, name=None, requires_grad=True, layerwise_parallel=False, parallel_optimizer=True):
         self.param_info = ParamInfo()
         self.init_in_server = False
         self.name = name
@@ -267,7 +262,6 @@ class Parameter(Tensor_):
         self.is_in_shard = False
         self._pipeline_stage_list = []
         self.slice_num = 1
-        self.from_ckpt = False
         if -1 in self.shape:
             raise ValueError(f"All shape elements of the Parameter must be positive. But got None.")
         if isinstance(default_input, (Tensor_, Tensor)):
@@ -295,7 +289,6 @@ class Parameter(Tensor_):
             raise TypeError(f"The type of the argument 'default_input' must be in ['Tensor', 'int', 'float',"
                             f" 'numpy.ndarray', 'list']. But got type {type(default_input)}.")
         self.param_info.parameter_shape = self.shape
-        self.param_info.storage_format = storage_format

         import mindspore.ops.operations.other_ops as other_ops
         self.load = other_ops.Load()
@@ -338,6 +331,11 @@ class Parameter(Tensor_):
             # in other place, so we can make a Tensor without copy data.
             return (Tensor, data)
         # make a copy of Tensor to init the parameter.
+        if data.dtype == mstype.bfloat16:
+            from mindspore.ops.operations import Cast
+            cpu_cast = Cast().set_device("CPU")
+            data = cpu_cast(data, mstype.float32)
+            return (Tensor, data.asnumpy(), mstype.bfloat16)
         if data.dtype == mstype.qint4x2:
             return (Tensor, data.asnumpy(), mstype.qint4x2)
         return (Tensor, data.asnumpy())
@@ -750,7 +748,6 @@ class Parameter(Tensor_):
     def requires_grad(self, value=True):
         if not isinstance(value, bool):
             raise TypeError("The argument `requires_grad` must be bool type")
-        Tensor_.wait_pipeline(self)
         self.param_info.requires_grad = value

     @property
@@ -792,10 +789,14 @@ class Parameter(Tensor_):
         return new_param

     @_LogActionOnce(logger=logger, key='add_pipeline_stage')
+    @deprecated("2.3", "add_pipeline_stage")
     def add_pipeline_stage(self, stage):
         """
         Add a pipeline stage to the parameter.

+        Note:
+            This interface is deprecated in 2.3, and will be deleted in the future.
+
         Args:
             stage(int): The pipeline stage to be added.

@@ -813,9 +814,9 @@ class Parameter(Tensor_):
                             f"Use .set_dtype(xxx) to change the dtype.")

     @staticmethod
-    def _set_data_check_input_valid(current_shape, data_shape, current_tensor_is_init,
-
-        if
+    def _set_data_check_input_valid(current_shape, data_shape, current_tensor_is_init,
+                                    incoming_tensor_is_init, slice_shape=False, slice_num=1):
+        if incoming_tensor_is_init and not current_tensor_is_init:
             raise TypeError("The original tensor data is initialized, but the argument 'data' is not initialized."
                             "Please initialize 'data' before call this method.")
         if tuple(current_shape) != tuple(data_shape):
@@ -870,7 +871,7 @@ class Parameter(Tensor_):
         incoming_tensor_is_init = isinstance(data, Tensor) and not data.has_init
         current_tensor_is_init = isinstance(self, Tensor) and not self.has_init
         Parameter._set_data_check_input_valid(self.shape, data.shape, current_tensor_is_init, incoming_tensor_is_init,
-
+                                              slice_shape, self.slice_num)
         if self.dtype != data.dtype:
             if mstype.implicit_conversion_seq.get(self.dtype) < mstype.implicit_conversion_seq.get(data.dtype):
                 self._raise_type_error(data.dtype)
mindspore/common/recompute.py
CHANGED

@@ -20,11 +20,10 @@ from mindspore import log as logger
 from mindspore.nn.cell import Cell
 from mindspore import context
 from mindspore.common.tensor import Tensor
-from mindspore import ops
 from mindspore.ops.composite import GradOperation
 from mindspore.common._register_for_recompute import recompute_registry
 from mindspore.common.api import _pynative_executor
-from mindspore.
+from mindspore.nn.generator import get_rng_state, set_rng_state


 class _WrapCell(Cell):
@@ -52,11 +51,10 @@ class _RecomputeCell(Cell):

     def __init__(self, block):
         """Initialize Recompute cell."""
-        super(_RecomputeCell, self).__init__(
+        super(_RecomputeCell, self).__init__()
         self.args = []
         self.kwargs = []
         self.wrap_cell = _WrapCell(block)
-
         self.net = block
         self.internal_params = []
         self.save_rng_state = False
@@ -87,15 +85,13 @@ class _RecomputeCell(Cell):
         self.kwargs.pop()
         if kwargs:
             input_args = list(input_args) + list(kwargs.values())
-        # To detach inputs to avoid erasing auto grad meta info of origin inputs.
-        input_args = _detach_input(input_args)
         try:
             pre_rng_state = get_rng_state()
-            set_rng_state(self.cpu_rng_state)
+            set_rng_state(*self.cpu_rng_state)
             _pynative_executor.set_is_run_recompute(True)
             grads = self.grad(self.net, self.internal_params)(*input_args, grad_input)
             _pynative_executor.set_is_run_recompute(False)
-            set_rng_state(pre_rng_state)
+            set_rng_state(*pre_rng_state)
         except Exception as err:
             _pynative_executor.clear_res()
             raise err
@@ -167,17 +163,6 @@ def _padding_input_grads(args, input_grads):
                         "but got {}".format(len(args), len(input_grads)))


-def _detach_input(input_arg):
-    if isinstance(input_arg, Tensor):
-        return ops.stop_gradient(input_arg)
-    if isinstance(input_arg, (list, tuple)):
-        detach_inputs = []
-        for arg in input_arg:
-            detach_inputs.append(_detach_input(arg))
-        return detach_inputs if isinstance(input_arg, list) else tuple(detach_inputs)
-    return input_arg
-
-
 def _check_validation(block):
     if not isinstance(block, Cell):
         raise TypeError("Recompute function now only support block which inherited from Cell!")
@@ -218,7 +203,7 @@ def recompute(block, *args, **kwargs):
     Examples:
         >>> import numpy as np
         >>> import mindspore.nn as nn
-        >>>
+        >>> import mindspore.ops as ops
         >>> from mindspore import Tensor, recompute
         >>> class MyCell(nn.Cell):
         ...     def __init__(self):
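The backward hook above snapshots the CPU RNG state before re-running the block and restores it afterwards, so stochastic ops (dropout, for example) replay the same mask during recomputation; the two sides of the diff differ only in whether the captured state is passed whole or unpacked with `*`. The save/replay/restore pattern itself, sketched with the standard-library `random` module as a stand-in for MindSpore's generator state (`run_with_rng_state` is an illustrative helper, not a MindSpore API):

```python
import random

def run_with_rng_state(captured, fn):
    """Re-run fn under a previously captured RNG state, then restore."""
    previous = random.getstate()   # remember the caller's stream
    random.setstate(captured)      # rewind to the forward-pass state
    try:
        return fn()                # recomputation sees identical randomness
    finally:
        random.setstate(previous)  # hand the original stream back

captured = random.getstate()
first = [random.random() for _ in range(3)]
replayed = run_with_rng_state(captured, lambda: [random.random() for _ in range(3)])
assert first == replayed  # the recomputed draws match the original ones
```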
mindspore/common/sparse_tensor.py
CHANGED

@@ -642,29 +642,14 @@ class CSRTensor(CSRTensor_):
     values indicated by `values` and row and column positions indicated by `indptr`
     and `indices`.

-    For example, if indptr is [0,
-
+    For example, if indptr is [0, 1, 2, 2], indices is [1, 2], values is [1., 2.], shape is
+    (3, 4), then the dense representation of the sparse tensor will be:

     .. code-block::
-
-
-        [0., 0.,
-
-    The length of `indptr` should equal to `shape[0]+1`, where the elements should be equal or monotonically
-    increasing and the maximum value should be equal to the number of non-zero values in the tensor. The length
-    of `indices` and `values` should be equal to the number of non-zero values in the tensor. To be concrete, get
-    the query indices of none-zero elements in every line according to `indptr`. Then get the column positions of
-    none-zero elements in every line by looking up query indices in `indices`. Finally, get the actual values of
-    none-zero elements in every line by looking up query indices in `values`. In the former example, 'indptr' of
-    [0, 2, 5, 6] represents that the indices of 0th row of the tensor origins from [0, 2), the indices of
-    the 1st row of the tensor origins from [2, 5) and the 2nd row of the tensor origins from [5, 6). For example,
-    the column positions of the non-zero elements of the 0th row in the tensor are provided by the [0, 2) elements in
-    `indices` (i.e. [0, 3]) and the corresponding values are provided by the [0, 2) elements in `values`
-    (i.e. [1., 2.]). The column positions of the non-zero elements of the 1st row in the tensor are provided by the
-    [2, 5) elements in `indices` (i.e. [1, 2, 4]) and the corresponding values are provided by the [2, 5) elements in
-    `values` (i.e. [3., 4., 5.]). The column positions of the non-zero elements of the 2nd row in the tensor are
-    provided by the [5, 6) elements in `indices` (i.e. [2]) and the corresponding values are provided by the [5, 6)
-    elements in `values` (i.e. [6.]).
+
+        [[0., 1., 0., 0.],
+         [0., 0., 2., 0.],
+         [0., 0., 0., 0.]]

     Common arithmetic operations include: addition (+), subtraction (-), multiplication (*),
     and division (/). For details about operations supported by `CSRTensor`, see
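The rewritten docstring above can be checked directly with the public `CSRTensor` API; a short script, assuming a working MindSpore install where `CSRTensor.to_dense()` is available (it is documented in recent releases):

```python
import mindspore as ms
from mindspore import Tensor, CSRTensor

indptr = Tensor([0, 1, 2, 2], ms.int32)   # row i stores entries indptr[i]:indptr[i+1]
indices = Tensor([1, 2], ms.int32)        # column index of each stored value
values = Tensor([1., 2.], ms.float32)     # the stored non-zero values
csr = CSRTensor(indptr, indices, values, (3, 4))
print(csr.to_dense())
# [[0. 1. 0. 0.]
#  [0. 0. 2. 0.]
#  [0. 0. 0. 0.]]
```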
mindspore/common/tensor.py
CHANGED

@@ -50,8 +50,7 @@ def _check_input_data_type(input_data):
     valid_dtypes = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64,
                     np.float16, np.float32, np.float64, np.bool_, np.str_, np.complex64, np.complex128)
     if isinstance(input_data, np.ndarray) and input_data.dtype not in valid_dtypes and \
-            input_data.dtype.kind != 'U' and input_data.dtype.kind != 'S'
-            input_data.dtype.kind != 'T':  # Support dtype np.str_ and npy_bfloat16
+            input_data.dtype.kind != 'U' and input_data.dtype.kind != 'S':  # Support dtype np.str_
         new_line = '\n'
         for index, x in np.ndenumerate(input_data):
             if np.array(x).dtype not in valid_dtypes:
@@ -344,7 +343,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         return out

     def __bool__(self):
-        data = self.asnumpy()
+        if self.dtype == mstype.bfloat16:
+            data = self.float().asnumpy()
+        else:
+            data = self.asnumpy()
         if data.shape == ():
             return bool(data)
         if data.shape == (1,):
@@ -360,15 +362,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             raise ValueError(message)

     def __int__(self):
-        data = self.asnumpy()
+        if self.dtype == mstype.bfloat16:
+            data = self.float().asnumpy()
+        else:
+            data = self.asnumpy()
         return self._convert_scalar_(data, int, "Only one element tensors can be converted to Python scalars")

     def __float__(self):
-        data = self.asnumpy()
+        if self.dtype == mstype.bfloat16:
+            data = self.float().asnumpy()
+        else:
+            data = self.asnumpy()
         return self._convert_scalar_(data, float, "Only one element tensors can be converted to Python scalars")

     def __index__(self):
-        data = self.asnumpy()
+        if self.dtype == mstype.bfloat16:
+            data = self.float().asnumpy()
+        else:
+            data = self.asnumpy()
         if data.dtype not in ["int8", "int16", "int32", "int64", "bool"]:
             raise ValueError("Only integer tensors of a single element can be converted to an index.")
         return self._convert_scalar_(data, int,
@@ -505,6 +516,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def __str__(self):
         if self.dtype == mstype.type_none:
             return "Unknown Tensor type!"
+        if self.dtype == mstype.bfloat16:
+            return str(self.float().asnumpy())
         return str(self.asnumpy())

     def __getstate__(self):
@@ -524,13 +537,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return self._shape

-    @shape.setter
-    def shape(self, shape_value):
-        r"""
-        Set the shape value.
-        """
-        self._shape = shape_value
-
     @property
     def dtype(self):
         """Return the dtype of the tensor (:class:`mindspore.dtype`)."""
@@ -2841,7 +2847,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         if slice_num_of_persistent_data > 1:
             self.assign_value(Tensor_.persistent_data_from_numpy(data, slice_num_of_persistent_data))
         else:
-            self.assign_value(Tensor_.from_numpy(data))
+            if self.dtype == mstype.bfloat16:
+                # The dtype of data is np.float32 when mstype is bfloat16,
+                # so we create tensor_ by init func instead of asnumpy
+                self.assign_value(Tensor_(data, self.dtype))
+            else:
+                self.assign_value(Tensor_.from_numpy(data))
         return self

     def resize(self, *new_shape):
@@ -3161,9 +3172,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
                 location found is given. If 'right', return the last such index. If there is
                 no suitable index, return either 0 or N (where N is the length of the tensor).
                 Default: ``left`` .
-            sorter (Union[int, list, tuple, Tensor]): optional tensor of
-                integer indices that sort the tensor into ascending order
-
+            sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional tensor of
+                integer indices that sort the tensor into ascending order. They are typically
+                the result of argsort. Default: ``None`` .

         Returns:
             Tensor, array of insertion points with the same shape as `v`.
@@ -3184,21 +3195,31 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         if side not in ('left', 'right'):
             raise ValueError(f"For 'Tensor.searchsorted', the argument 'side' should be one of in "
                              f"['left', 'right'], but got {side}.")
+        a = self.astype(mstype.float32)
         if not isinstance(v, Tensor):
             v = tensor_operator_registry.get('make_tensor')(v)
+        shape = v.shape
         if sorter is not None:
-            if not isinstance(sorter, (int, list, tuple, Tensor)):
+            if not isinstance(sorter, (int, float, bool, list, tuple, Tensor)):
                 raise TypeError("For Tensor.searchsorted, the type of the argument 'sorter' must be one of 'int', "
-                                "'list', 'tuple', 'Tensor', but got {}.".format(type(sorter)))
+                                "'float', 'bool', 'list', 'tuple', 'Tensor', but got {}.".format(type(sorter)))
             if not isinstance(sorter, Tensor):
                 sorter = tensor_operator_registry.get('make_tensor')(sorter)
-            if sorter.size !=
-                raise ValueError('
-
-
-
-
-
+            if sorter.ndim != 1 or sorter.size != a.size:
+                raise ValueError('sorter must be 1-D array with the same size as the Tensor')
+            sorter = sorter.reshape(sorter.shape + (1,))
+            a = tensor_operator_registry.get('gather_nd')(a, sorter)
+        less_op = tensor_operator_registry.get('__le__') if side == 'left' else tensor_operator_registry.get('__lt__')
+        i = tensor_operator_registry.get('fill')(mstype.int32, shape, 0)
+        j = tensor_operator_registry.get('fill')(mstype.int32, shape, a.size)
+
+        sort_range = tuple(range(math.ceil(math.log2(tensor_operator_registry.get('shape_mul')(a.shape) + 1))))
+        for _ in sort_range:
+            mid = (i - -j) // 2
+            mask = less_op(v, tensor_operator_registry.get('gather_nd')(a, mid.reshape(mid.shape + (1,))))
+            i = tensor_operator_registry.get('select')(mask, i, mid)
+            j = tensor_operator_registry.get('select')(mask, mid, j)
+        return j

     def gather_nd(self, indices):
         r"""
@@ -3215,35 +3236,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         validator.check_is_int(batch_dims, "batch_dims")
         return tensor_operator_registry.get('gather')(self, input_indices, axis, batch_dims)

-    def uniform(self, from_=0., to=1., generator=None):
-        r"""
-        Generates random numbers in the half-open interval [from_, to).
-
-        Args:
-            from_ (number): The lower bound of the interval.
-            to (number): The upper bound of the interval.
-            generator (Generator, optional): The random seed. Default: None.
-
-        Returns:
-            Tensor, with the same shape as tensor.
-
-        Raises:
-            TypeError: If `from_` is larger than `to`.
-
-        Supported Platforms:
-            ``Ascend``
-
-        Examples:
-            >>> import mindspore
-            >>> x = mindspore.ops.ones((4, 2))
-            >>> generator = mindspore.Generator()
-            >>> generator.manual_seed(100)
-            >>> output = x.uniform(1., 2., generator)
-            >>> print(output.shape)
-            (4, 2)
-        """
-        return tensor_operator_registry.get('uniform')(self, from_, to, generator)
-
     def var(self, axis=None, ddof=0, keepdims=False):
         """
         Compute the variance along the specified axis.
@@ -3364,12 +3356,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [10. 35.]
         """
         if initial is None:
-
-
-            res = tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype) + initial
-        if dtype is not None and (dtype == mstype.bool_):
-            res = res.astype(mstype.bool_)
-        return res
+            return tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype)
+        return tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype) + initial

     def sum_to_size(self, *size):
         r"""
@@ -3539,11 +3527,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         validator.check_value_type('index', index, (Tensor, Tensor_,), 'Tensor.gather_elements')
         return tensor_operator_registry.get('gather_elements')(self, dim, index)

-    def nonzero(self
+    def nonzero(self):
         """
         For details, please refer to :func:`mindspore.ops.nonzero`.
         """
-        return tensor_operator_registry.get('nonzero')(self
+        return tensor_operator_registry.get('nonzero')(self)

     def svd(self, full_matrices=False, compute_uv=True):
         """
@@ -4725,42 +4713,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         return _index_put(self, values, indices)


-    def move_to(self, to, blocking=True):
-        r"""
-        Copy Tensor to target device synchronously or asynchronously, default synchronously. only support PyNative mode.
-
-        Args:
-            to (str): a string type value, one of ``"Ascend"``, ``"GPU"``, ``"CPU"``.
-            blocking (bool): a bool type value, using synchronous copy or asynchronous copy.
-                Default: ``True`` , synchronous copy.
-
-        Returns:
-            New Tensor, storged on target device which with the same type and shape as the "self Tensor".
-
-        Raises:
-            ValueError: If the type of `blocking` is not bool type.
-            ValueError: If the value of `to` is not one of ``"Ascend"``, ``"GPU"``, ``"CPU"``.
-            ValueError: If the run mode is not PyNative mode.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import mindspore as ms
-            >>> from mindspore import Tensor
-            >>> x = ms.Tensor([1, 2, 3], ms.int64)
-            >>> new_tensor = x.move_to("CPU")
-        """
-        if not isinstance(blocking, bool):
-            raise ValueError(f"The type of 'blocking' must be bool, but got {blocking}")
-        if to not in ("Ascend", "GPU", "CPU"):
-            raise ValueError(f"The value of 'to' must be one of ['Ascend', 'GPU', 'CPU'], but got {to}")
-        mode = context.get_context("mode")
-        if mode != context.PYNATIVE_MODE:
-            raise ValueError(f"The method of 'move_to' only supported in pynative mode, but got: {mode}.")
-        return Tensor(Tensor_.move_to(self, to, blocking))
-
-
     def _offload(self):
         r"""
         Offload tensor parameter to host. Currently, only support for pynative mode.
@@ -4856,4 +4808,4 @@ def _check_astype_and_convert(dtype):
     return dtype


-
+tensor_operator_registry.register('vm_compare', _vm_compare)
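The `searchsorted` fallback reconstructed above is a branch-free bisection: `i` and `j` hold per-query lower and upper bounds, and ceil(log2(n+1)) rounds of `select` shrink the bracket until `j` is the insertion point (the original writes the midpoint as `(i - -j) // 2`, which is just `(i + j) // 2`). The same loop in NumPy terms, as an illustration of the algorithm rather than the MindSpore code path (`searchsorted_left` is a hypothetical helper name):

```python
import math
import numpy as np

def searchsorted_left(a, v):
    """Branch-free bisection over a sorted 1-D array a for queries v."""
    i = np.zeros(np.shape(v), np.int32)          # lower bound per query
    j = np.full(np.shape(v), a.size, np.int32)   # upper bound per query
    for _ in range(math.ceil(math.log2(a.size + 1))):
        mid = (i + j) // 2
        mask = v <= a[mid]                       # side='left' uses <=
        i = np.where(mask, i, mid)               # keep or raise the lower bound
        j = np.where(mask, mid, j)               # lower or keep the upper bound
    return j

a = np.array([1., 3., 5., 7.])
print(searchsorted_left(a, np.array([0., 4., 7., 9.])))  # [0 2 3 4]
```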
mindspore/communication/__init__.py
CHANGED

@@ -17,21 +17,26 @@ Collective communication interface.

 Note that the APIs in the following list need to preset communication environment variables.

-For Ascend
-
-
-<https://www.mindspore.cn/tutorials/experts/zh-CN/master/parallel/msrun_launcher.html>`_
+For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
+Please see the `rank table Startup
+<https://www.mindspore.cn/tutorials/experts/en/master/parallel/rank_table.html>`_
 for more details.
+
+For the GPU devices, users need to prepare the host file and mpi, please see the `mpirun Startup
+<https://www.mindspore.cn/tutorials/experts/en/master/parallel/mpirun.html>`_ .
+
+For the CPU device, users need to write a dynamic cluster startup script, please see the `Dynamic Cluster Startup
+<https://www.mindspore.cn/tutorials/experts/en/master/parallel/dynamic_cluster.html>`_ .
 """

 from mindspore.communication.management import GlobalComm, init, release, get_rank, \
     get_group_size, get_world_rank_from_group_rank, \
     get_group_rank_from_world_rank, create_group, HCCL_WORLD_COMM_GROUP, NCCL_WORLD_COMM_GROUP, \
-    MCCL_WORLD_COMM_GROUP, get_local_rank, get_local_rank_size, destroy_group
+    MCCL_WORLD_COMM_GROUP, get_local_rank, get_local_rank_size, destroy_group


 __all__ = [
     "GlobalComm", "init", "release", "get_rank", "get_group_size", "get_world_rank_from_group_rank",
     "get_group_rank_from_world_rank", "create_group", "HCCL_WORLD_COMM_GROUP", "NCCL_WORLD_COMM_GROUP",
-    "MCCL_WORLD_COMM_GROUP", "get_local_rank", "get_local_rank_size", "destroy_group"
+    "MCCL_WORLD_COMM_GROUP", "get_local_rank", "get_local_rank_size", "destroy_group"
 ]
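For reference, minimal use of the APIs exported above, once one of the launch methods described in the docstring has prepared the communication environment (a sketch; device and backend availability are assumed):

```python
import mindspore as ms
from mindspore.communication import init, get_rank, get_group_size

ms.set_context(mode=ms.GRAPH_MODE)
init()  # selects HCCL/NCCL/MCCL according to the device target
print(f"rank {get_rank()} of {get_group_size()}")
```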