mindspore-2.3.0-cp39-none-any.whl → mindspore-2.3.0rc2-cp39-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +0 -1512
- mindspore/__init__.py +1 -2
- mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +25 -5
- mindspore/_extends/graph_kernel/model/graph_parallel.py +1 -1
- mindspore/_extends/parse/__init__.py +2 -2
- mindspore/_extends/parse/compile_config.py +0 -29
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +5 -21
- mindspore/_extends/parse/resources.py +7 -5
- mindspore/_extends/parse/standard_method.py +59 -40
- mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +5 -26
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/less_batch_normalization.py +6 -9
- mindspore/common/__init__.py +1 -8
- mindspore/common/_register_for_tensor.py +9 -8
- mindspore/common/api.py +65 -275
- mindspore/common/dtype.py +4 -8
- mindspore/common/dump.py +5 -2
- mindspore/common/jit_config.py +1 -1
- mindspore/common/lazy_inline.py +2 -14
- mindspore/common/parameter.py +15 -14
- mindspore/common/recompute.py +5 -20
- mindspore/common/sparse_tensor.py +6 -21
- mindspore/common/tensor.py +52 -100
- mindspore/communication/__init__.py +11 -6
- mindspore/communication/management.py +94 -92
- mindspore/context.py +18 -180
- mindspore/dataset/engine/datasets.py +46 -69
- mindspore/dataset/engine/datasets_user_defined.py +53 -72
- mindspore/dataset/engine/datasets_vision.py +2 -2
- mindspore/dataset/engine/queue.py +38 -56
- mindspore/dataset/engine/validators.py +5 -11
- mindspore/dataset/vision/__init__.py +5 -5
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +1 -1
- mindspore/dataset/vision/transforms.py +46 -591
- mindspore/dataset/vision/utils.py +1 -121
- mindspore/dataset/vision/validators.py +3 -9
- mindspore/hal/__init__.py +1 -7
- mindspore/hal/device.py +1 -1
- mindspore/include/api/model.h +0 -3
- mindspore/include/dataset/vision.h +2 -54
- mindspore/include/mindapi/base/types.h +0 -1
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -35
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +0 -72
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/{aclnn_all_finite.h → aclnn_add_custom.h} +11 -9
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +12 -184
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +5 -4
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +286 -275
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_tiling.h +45 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +4 -8
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_tiling.h +4 -11
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/kernel/flash_attention_score_mix_hwsync.h +0 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -6
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +75 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/kernel/matmul.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +3 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_common_tiling.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_info.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_data.h +3 -36
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/kernel/matmul_stridedslice_fusion.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +4 -22
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +2 -16
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +3 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +4 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +4 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +2 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +4 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +12 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +1 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +2 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +1 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -17
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +7 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/mindrecord/filewriter.py +2 -2
- mindspore/mint/__init__.py +40 -720
- mindspore/mint/nn/__init__.py +7 -89
- mindspore/mint/nn/functional.py +16 -165
- mindspore/mint/optim/adamw.py +16 -15
- mindspore/nn/__init__.py +2 -0
- mindspore/nn/cell.py +98 -97
- mindspore/nn/extend/basic.py +2 -2
- mindspore/nn/extend/embedding.py +1 -1
- mindspore/nn/extend/layer/normalization.py +5 -7
- mindspore/nn/generator.py +297 -0
- mindspore/nn/layer/activation.py +3 -4
- mindspore/nn/layer/basic.py +16 -79
- mindspore/nn/layer/conv.py +8 -17
- mindspore/nn/layer/embedding.py +4 -1
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +1 -1
- mindspore/nn/layer/pooling.py +0 -5
- mindspore/nn/layer/rnn_cells.py +2 -2
- mindspore/nn/loss/loss.py +19 -19
- mindspore/nn/optim/adasum.py +1 -1
- mindspore/nn/optim/sgd.py +2 -3
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/wrap/cell_wrapper.py +1 -25
- mindspore/nn/wrap/loss_scale.py +1 -24
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/math_ops.py +8 -8
- mindspore/ops/__init__.py +1 -1
- mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -75
- mindspore/ops/_vmap/vmap_array_ops.py +0 -27
- mindspore/ops/_vmap/vmap_math_ops.py +1 -29
- mindspore/ops/_vmap/vmap_nn_ops.py +18 -19
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +8 -34
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +9 -2
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -26
- mindspore/ops/auto_generate/gen_extend_func.py +27 -603
- mindspore/ops/auto_generate/gen_ops_def.py +203 -993
- mindspore/ops/auto_generate/gen_ops_prim.py +402 -1946
- mindspore/ops/auto_generate/pyboost_inner_prim.py +20 -90
- mindspore/ops/composite/base.py +6 -3
- mindspore/ops/composite/math_ops.py +1 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +17 -24
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/extend/__init__.py +3 -2
- mindspore/ops/extend/array_func.py +51 -10
- mindspore/ops/extend/nn_func.py +78 -2
- mindspore/ops/function/__init__.py +13 -8
- mindspore/ops/function/array_func.py +179 -455
- mindspore/ops/function/clip_func.py +1 -1
- mindspore/ops/function/grad/grad_func.py +3 -3
- mindspore/ops/function/math_func.py +103 -117
- mindspore/ops/function/nn_func.py +163 -275
- mindspore/ops/function/other_func.py +2 -2
- mindspore/ops/function/random_func.py +69 -202
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/functional.py +327 -332
- mindspore/ops/operations/__init__.py +3 -13
- mindspore/ops/operations/_grad_ops.py +27 -3
- mindspore/ops/operations/_inner_ops.py +356 -53
- mindspore/ops/operations/_rl_inner_ops.py +2 -2
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +65 -82
- mindspore/ops/operations/comm_ops.py +93 -784
- mindspore/ops/operations/custom_ops.py +28 -51
- mindspore/ops/operations/debug_ops.py +4 -4
- mindspore/ops/operations/inner_ops.py +2 -2
- mindspore/ops/operations/manually_defined/ops_def.py +4 -304
- mindspore/ops/operations/math_ops.py +50 -3
- mindspore/ops/operations/nn_ops.py +247 -14
- mindspore/ops/operations/other_ops.py +3 -3
- mindspore/ops/operations/random_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +1 -1
- mindspore/ops/primitive.py +8 -9
- mindspore/ops/silent_check.py +5 -5
- mindspore/ops_generate/arg_dtype_cast.py +9 -2
- mindspore/ops_generate/arg_handler.py +0 -26
- mindspore/ops_generate/gen_aclnn_implement.py +4 -1
- mindspore/ops_generate/gen_ops.py +4 -26
- mindspore/ops_generate/gen_pyboost_func.py +12 -41
- mindspore/ops_generate/gen_utils.py +0 -21
- mindspore/ops_generate/pyboost_utils.py +2 -7
- mindspore/ops_generate/template.py +0 -1
- mindspore/parallel/_auto_parallel_context.py +1 -21
- mindspore/parallel/_tensor.py +5 -0
- mindspore/parallel/_transformer/transformer.py +1 -1
- mindspore/parallel/_utils.py +1 -15
- mindspore/parallel/algo_parameter_config.py +3 -1
- mindspore/parallel/checkpoint_transform.py +9 -12
- mindspore/parallel/cluster/process_entity/_api.py +29 -28
- mindspore/parallel/cluster/process_entity/_utils.py +3 -13
- mindspore/parallel/cluster/run.py +16 -13
- mindspore/parallel/parameter_broadcast.py +2 -2
- mindspore/parallel/shard.py +17 -31
- mindspore/profiler/__init__.py +2 -3
- mindspore/profiler/common/util.py +2 -107
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +21 -8
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -82
- mindspore/profiler/parser/ascend_analysis/function_event.py +28 -43
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +27 -49
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +10 -15
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +20 -25
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +5 -5
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +1 -10
- mindspore/profiler/parser/ascend_hccl_generator.py +1 -4
- mindspore/profiler/parser/ascend_msprof_exporter.py +22 -43
- mindspore/profiler/parser/ascend_timeline_generator.py +5 -7
- mindspore/profiler/parser/minddata_parser.py +3 -72
- mindspore/profiler/profiling.py +59 -176
- mindspore/rewrite/api/node.py +1 -1
- mindspore/rewrite/common/namespace.py +5 -5
- mindspore/rewrite/parsers/assign_parser.py +0 -2
- mindspore/rewrite/parsers/class_def_parser.py +4 -8
- mindspore/run_check/_check_version.py +1 -1
- mindspore/scipy/fft.py +3 -1
- mindspore/scipy/linalg.py +3 -2
- mindspore/scipy/ops.py +3 -5
- mindspore/scipy/optimize/__init__.py +2 -2
- mindspore/train/__init__.py +4 -4
- mindspore/train/anf_ir_pb2.py +2 -8
- mindspore/train/callback/__init__.py +2 -5
- mindspore/train/callback/_backup_and_restore.py +2 -2
- mindspore/train/callback/_checkpoint.py +16 -104
- mindspore/train/callback/_landscape.py +1 -1
- mindspore/train/callback/_time_monitor.py +1 -1
- mindspore/train/data_sink.py +4 -5
- mindspore/train/dataset_helper.py +20 -45
- mindspore/train/model.py +38 -266
- mindspore/train/serialization.py +105 -256
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/version.py +1 -1
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +303 -420
- mindspore/_extends/pijit/__init__.py +0 -23
- mindspore/_extends/pijit/pijit_func_white_list.py +0 -343
- mindspore/common/file_system.py +0 -48
- mindspore/common/generator.py +0 -260
- mindspore/common/no_inline.py +0 -54
- mindspore/common/np_dtype.py +0 -25
- mindspore/communication/comm_func.py +0 -1140
- mindspore/hal/memory.py +0 -326
- mindspore/lib/libavcodec.so.59 +0 -0
- mindspore/lib/libavdevice.so.59 +0 -0
- mindspore/lib/libavfilter.so.8 +0 -0
- mindspore/lib/libavformat.so.59 +0 -0
- mindspore/lib/libavutil.so.57 +0 -0
- mindspore/lib/libmindspore_np_dtype.so +0 -0
- mindspore/lib/libswresample.so.4 +0 -0
- mindspore/lib/libswscale.so.6 +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.cpp +0 -326
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.py +0 -180
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
- mindspore/lib/plugin/ascend/custom_compiler/OWNERS +0 -12
- mindspore/lib/plugin/ascend/custom_compiler/setup.py +0 -255
- mindspore/lib/plugin/ascend/custom_compiler/start.sh +0 -26
- mindspore/lib/plugin/ascend/custom_compiler/template.json +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/base_type.h +0 -133
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -32
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/tiling_info.h +0 -60
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -37
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_op.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_backoff_base.h +0 -62
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -33
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_ops.h +0 -88
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_pa_op.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -95
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -84
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -61
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -224
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_impl.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_tiling.h +0 -187
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul.h +0 -245
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_interface.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_utils.h +0 -111
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/tiling_data.h +0 -54
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -364
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -114
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -98
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/mint/linalg/__init__.py +0 -22
- mindspore/nn/layer/embedding_service.py +0 -531
- mindspore/nn/layer/embedding_service_layer.py +0 -393
- mindspore/ops/function/reshard_func.py +0 -102
- mindspore/ops/operations/_infer_ops.py +0 -19
- mindspore/ops/operations/reshard_ops.py +0 -53
- mindspore/profiler/common/process_pool.py +0 -41
- mindspore/profiler/common/singleton.py +0 -28
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/train/callback/_cluster_monitor.py +0 -201
- mindspore/train/callback/_flops_collector.py +0 -238
- mindspore/train/callback/_mindio_ttp.py +0 -443
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
mindspore/nn/layer/embedding_service_layer.py
@@ -1,393 +0,0 @@
-# Copyright 2024 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""embedding service layer"""
-import numpy as np
-
-import mindspore as ms
-from mindspore import nn, ops, Tensor, Parameter
-from mindspore.ops.auto_generate import init_partition_map, init_embedding_hashmap, embedding_table_find_and_init,\
-    embedding_table_find, fake_remote_lookup_uniqued
-from mindspore.ops.operations.manually_defined import EmbeddingTableImport, EmbeddingTableExport, \
-    EmbeddingComputeVarImport, EmbeddingComputeVarExport
-
-
-class CounterFilter:
-    """ Counter filter for embedding table. """
-    def __init__(self, filter_freq, default_key_or_value, default_key=None, default_value=None):
-        self.filter_freq = filter_freq
-        self.default_key = default_key
-        self.default_value = default_value
-        self.default_key_or_value = default_key_or_value
-
-
-def _get_slot_var_num(optimizer_mode):
-    """ get slot var num by diff optimizer. """
-    # adam, adamw, rmsprop include m and v, 2 slots; adagrad include accumulator, 1 slot; sgd include 0 slot
-    if optimizer_mode == "adagrad":
-        return 1
-    if optimizer_mode == "sgd":
-        return 0
-    if optimizer_mode == "":
-        return 0
-    return 2
-
-
-def _get_backward_float_params(optimizer_mode):
-    """
-    backward_float_params (Union[tuple[float], list[float]]):
-        - when the backward_mode is 'adam', it means:
-          [beta1_power, beta2_power, lr, beta1, beta2, epsilon]
-        - when the backward_mode is 'ftrl', it means:
-          [lr, lr_power, lambda1, lambda2]
-        - when the backward_mode is 'adamw', it means:
-          [beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon]
-        - when the backward_mode is 'adagrad', it means [lr,]
-    """
-    if optimizer_mode == "adagrad":
-        return [0.001]
-    if optimizer_mode == "adam":
-        return [0.9, 0.99, 0.001, 0.9, 0.999, 1e-08]
-    if optimizer_mode == "ftrl":
-        return [0.001, -0.5, 0.0, 0.0]
-    # adamw
-    return [0.9, 0.99, 0.001, 0.01, 0.9, 0.999, 1e-08]
-
-
-class ESInitLayer(nn.Cell):
-    """
-    ESInitLayer.
-    """
-    def __init__(self, ps_num, ps_ids, train_mode, train_level, table_id, bucket_size, embedding_dim, slot_var_num,
-                 es_initializer, filter_mode, optimizer, optimizer_params, max_feature_count, mode="train"):
-        super(ESInitLayer, self).__init__()
-        self.ps_num = ps_num
-        self.ps_ids = ps_ids
-        self.train_mode = train_mode
-        self.train_level = train_level
-        self.table_id = table_id
-        self.bucket_size = bucket_size
-        self.embedding_dim = embedding_dim
-        self.es_initializer = es_initializer
-        self.filter_mode = filter_mode
-        self.optimizer_mode = optimizer if optimizer else ''
-        self.optimizer_params = optimizer_params if optimizer_params else ()
-        self.max_feature_count = max_feature_count
-
-        self.ps_num_tensor = Tensor(self.ps_num, ms.int32)
-        self.ps_ids_tensor = Tensor(self.ps_ids, ms.int32)
-        self.table_id_tensor = Tensor(self.table_id, ms.int32)
-        self.depend = ops.Depend()
-        self.slot_var_num = _get_slot_var_num(self.optimizer_mode)
-        if mode == "train":
-            self.value_total_len = self.embedding_dim * (self.slot_var_num + 1) + 2
-        else:
-            self.value_total_len = self.embedding_dim * (self.slot_var_num + 1)
-        self.filter_freq = None
-        self.default_key = None
-        self.default_value = None
-
-    def construct(self):
-        init_partition = init_partition_map(self.ps_num_tensor,
-                                            self.ps_ids_tensor,
-                                            _embedding_dim=self.embedding_dim,
-                                            _max_key_num=self.max_feature_count,
-                                            _ps_num=self.ps_num)
-        depend = self.depend(self.table_id_tensor, init_partition)
-        if self.train_mode:
-            if self.train_level:
-                return init_embedding_hashmap(table_id=depend,
-                                              bucket_size=self.bucket_size,
-                                              value_total_len=self.value_total_len,
-                                              embedding_dim=self.embedding_dim,
-                                              initializer_mode=self.es_initializer.initializer_mode,
-                                              constant_value=self.es_initializer.constant_value,
-                                              min=self.es_initializer.min,
-                                              max=self.es_initializer.max,
-                                              mu=self.es_initializer.mu,
-                                              sigma=self.es_initializer.sigma,
-                                              seed=self.es_initializer.seed,
-                                              seed2=self.es_initializer.seed,
-                                              filter_mode=self.filter_mode,
-                                              optimizer_mode=self.optimizer_mode,
-                                              optimizer_params=self.optimizer_params,
-                                              _table_id=self.table_id)
-            return init_embedding_hashmap(table_id=depend,
-                                          bucket_size=self.bucket_size,
-                                          value_total_len=self.value_total_len,
-                                          embedding_dim=self.embedding_dim,
-                                          initializer_mode=None, constant_value=None,
-                                          min=None, max=None, mu=None, sigma=None,
-                                          seed=None, seed2=None, filter_mode=self.filter_mode,
-                                          optimizer_mode=self.optimizer_mode,
-                                          optimizer_params=self.optimizer_params,
-                                          _table_id=self.table_id)
-
-        return init_embedding_hashmap(table_id=depend,
-                                      value_total_len=self.value_total_len,
-                                      embedding_dim=self.embedding_dim,
-                                      bucket_size=self.bucket_size,
-                                      filter_mode=self.filter_mode,
-                                      optimizer_mode=self.optimizer_mode,
-                                      optimizer_params=self.optimizer_params,
-                                      _table_id=self.table_id)
-
-
-class EsEmbeddingLookup(nn.Cell):
-    """
-    EsEmbeddingLookup.
-    """
-    def __init__(self, table_id, es_initializer, embedding_dim, max_key_num, optimizer_mode=None,
-                 optimizer_params=None, es_filter=None, es_padding_key=None, es_completion_key=None):
-        super(EsEmbeddingLookup, self).__init__()
-        self.cast = ops.cast
-        self.reshape = ops.Reshape()
-
-        self.table_id = Tensor(table_id, ms.int32)
-        self._table_id = table_id
-        self.es_initializer = es_initializer
-        self.embedding_dim = embedding_dim
-        self.optimizer_mode = optimizer_mode
-        self.backward_float_params = _get_backward_float_params(self.optimizer_mode)
-        self.max_key_num = max_key_num
-        self.es_filter = es_filter
-
-        self.slot_var_num = _get_slot_var_num(self.optimizer_mode)
-        self.value_total_len = [self.embedding_dim[table_id] * (self.slot_var_num + 1) + 2] * len(embedding_dim)
-
-        self.default_key_or_value = 1
-        self.filter_freq = 0
-        self.default_key = 0
-        self.optimizer_params = optimizer_params
-
-        if es_filter is not None:
-            self.filter_mode = "counter"
-            self.filter_freq = es_filter.filter_freq
-            self.default_key_or_value = es_filter.default_key_or_value
-            self.default_key = 0 if es_filter.default_key is None else es_filter.default_key
-            self.default_value = 0.0
-        else:
-            self.filter_mode = "no_filter"
-            self.filter_freq = 1
-            self.default_key_or_value = 1
-            self.default_key = 0
-            self.default_value = -1.0
-
-        self.global_step = 1
-        if es_padding_key is not None:
-            self.mask_zero = 0 if es_padding_key.mask_zero is None else int(es_padding_key.mask_zero)
-            self.padding_key = es_padding_key.padding_key
-            self.padding_key_mask = int(es_padding_key.mask)
-        else:
-            self.mask_zero = 0
-            self.padding_key = 0
-            self.padding_key_mask = 1
-        if self.optimizer_mode in ["adam", "ftrl", "adagrad"]:
-            self.backward_int_params = ([self.global_step], [self.mask_zero],
-                                        [self.padding_key], [self.padding_key_mask])
-        else:
-            self.backward_int_params = ([self.global_step], [0], [0], [self.mask_zero],
-                                        [self.padding_key], [self.padding_key_mask])
-
-        if es_completion_key is not None:
-            self.completion_key = es_completion_key.completion_key
-            self.completion_key_mask = int(es_completion_key.mask)
-        else:
-            self.completion_key = 0
-            self.completion_key_mask = 1
-
-        self.b = Parameter(Tensor(0, ms.float32), name="b", requires_grad=True)
-        self.max_grad_norm = Tensor([1.0], ms.float32)
-
-    def construct(self, keys, actual_keys_input=None, unique_indices=None, key_count=None):
-        origin_shape = None
-        if len(keys.shape) != 1:
-            origin_shape = keys.shape
-            keys = self.reshape(keys, (-1,))
-        keys = self.cast(keys, ms.int64)
-        use_host_unique = False
-        use_counter_filter = 1 if self.filter_mode == "counter" else 0
-        if (actual_keys_input is not None) and (unique_indices is not None):
-            use_host_unique = True
-            actual_keys_input = self.cast(actual_keys_input, ms.int64)
-            unique_indices = self.cast(unique_indices, ms.int32)
-        if use_host_unique:
-            if not use_counter_filter:
-                key_count = keys
-        if self.training:
-            if use_host_unique:
-                output = fake_remote_lookup_uniqued(table_id=self.table_id,
-                                                    keys=keys,
-                                                    actual_keys_num=actual_keys_input,
-                                                    unique_indices=unique_indices,
-                                                    key_count=key_count,
-                                                    max_grad_norm=self.max_grad_norm,
-                                                    embedding_dim=self.embedding_dim,
-                                                    initializer_mode=self.es_initializer.initializer_mode,
-                                                    constant_value=self.es_initializer.constant_value,
-                                                    min=self.es_initializer.min,
-                                                    max=self.es_initializer.max,
-                                                    mu=self.es_initializer.mu,
-                                                    sigma=self.es_initializer.sigma,
-                                                    seed=self.es_initializer.seed,
-                                                    seed2=self.es_initializer.seed,
-                                                    value_total_len=self.value_total_len,
-                                                    filter_mode=self.filter_mode,
-                                                    filter_freq=self.filter_freq,
-                                                    default_key_or_value=self.default_key_or_value,
-                                                    default_key=self.default_key,
-                                                    default_value=self.default_value,
-                                                    optimizer_mode=self.optimizer_mode,
-                                                    optimizer_params=self.optimizer_params,
-                                                    _max_key_num=self.max_key_num,
-                                                    _table_id=self._table_id,
-                                                    _use_counter_filter=use_counter_filter,
-                                                    backward_mode=self.optimizer_mode,
-                                                    backward_int_params=self.backward_int_params,
-                                                    backward_float_params=self.backward_float_params,
-                                                    completion_key=self.completion_key,
-                                                    completion_key_mask=self.completion_key_mask,
-                                                    parameter=self.b
-                                                    )
-            else:
-                output = embedding_table_find_and_init(self.table_id, keys,
-                                                       max_grad_norm=self.max_grad_norm,
-                                                       embedding_dim=self.embedding_dim,
-                                                       initializer_mode=self.es_initializer.initializer_mode,
-                                                       constant_value=self.es_initializer.constant_value,
-                                                       min=self.es_initializer.min,
-                                                       max=self.es_initializer.max,
-                                                       mu=self.es_initializer.mu,
-                                                       sigma=self.es_initializer.sigma,
-                                                       seed=self.es_initializer.seed,
-                                                       seed2=self.es_initializer.seed,
-                                                       value_total_len=self.value_total_len,
-                                                       filter_mode=self.filter_mode,
-                                                       filter_freq=self.filter_freq,
-                                                       default_key_or_value=self.default_key_or_value,
-                                                       default_key=self.default_key,
-                                                       default_value=self.default_value,
-                                                       optimizer_mode=self.optimizer_mode,
-                                                       optimizer_params=self.optimizer_params,
-                                                       _max_key_num=self.max_key_num,
-                                                       _table_id=self._table_id,
-                                                       _use_counter_filter=use_counter_filter,
-                                                       backward_mode=self.optimizer_mode,
-                                                       backward_int_params=self.backward_int_params,
-                                                       backward_float_params=self.backward_float_params,
-                                                       completion_key=self.completion_key,
-                                                       completion_key_mask=self.completion_key_mask,
-                                                       parameter=self.b)
-        else:
-            output = embedding_table_find(self.table_id, keys,
-                                          embedding_dim=self.embedding_dim,
-                                          default_value=self.default_value,
-                                          _max_key_num=self.max_key_num,
-                                          _table_id=self._table_id,
-                                          _use_counter_filter=use_counter_filter)
-        # input 20480 2 ->41960
-        # output 41960 embedding_dim -> 20480 2 embedding_dim
-        if origin_shape is not None:
-            output = self.reshape(output, origin_shape + (-1,))
-        return output
-
-
-class ESEmbeddingCKPTExport(nn.Cell):
-    """
-    ESEmbeddingCKPTExport.
-    """
-    def __init__(self, embedding_dim_list, value_total_len_list, table_name_list, table_id_list,
-                 file_path, steps_to_live_list):
-        super(ESEmbeddingCKPTExport, self).__init__()
-        self.embedding_table_export = EmbeddingTableExport(
-            embedding_dim_list,
-            value_total_len_list,
-            table_name=table_name_list,
-            steps_to_live_list=steps_to_live_list)
-        self.embedding_compute_var_export = EmbeddingComputeVarExport(table_name_list)
-        self.file_path = Tensor(np.array(file_path))
-        self.ps_id_tensor = Tensor(0, ms.int32)
-        self.table_id_tensor = Tensor(table_id_list, ms.int32)
-        self.depend = ops.Depend()
-
-    def construct(self):
-        export_op1 = self.embedding_table_export(self.file_path, self.ps_id_tensor, self.table_id_tensor)
-        z = self.depend(self.file_path, export_op1)
-        export_op2 = self.embedding_compute_var_export(z, self.ps_id_tensor, self.table_id_tensor)
-        return export_op2
-
-
-class ESEmbeddingTableExport(nn.Cell):
-    """
-    ESEmbeddingTableExport.
-    """
-    def __init__(self, embedding_dim_list, value_total_len_list, table_name_list, table_id_list,
-                 file_path, steps_to_live_list):
-        super(ESEmbeddingTableExport, self).__init__()
-        self.op = EmbeddingTableExport(
-            embedding_dim_list,
-            value_total_len_list,
-            table_name=table_name_list,
-            steps_to_live_list=steps_to_live_list,
-            only_var_flag=True)
-        self.file_path = Tensor(np.array(file_path))
-        self.ps_id_tensor = Tensor(0, ms.int32)
-        self.table_id_tensor = Tensor(table_id_list, ms.int32)
-
-    def construct(self):
-        y = self.op(self.file_path, self.ps_id_tensor, self.table_id_tensor)
-        return y
-
-
-class ESEmbeddingCKPTImport(nn.Cell):
-    """
-    ESEmbeddingCKPTImport.
-    """
-    def __init__(self, embedding_dim_list, value_total_len_list, table_name_list, table_id_list, file_path):
-        super(ESEmbeddingCKPTImport, self).__init__()
-        self.embedding_table_import = EmbeddingTableImport(
-            embedding_dim_list,
-            value_total_len_list,
-            table_name=table_name_list)
-        self.embedding_compute_var_import = EmbeddingComputeVarImport(table_name_list)
-        self.file_path = Tensor(np.array(file_path))
-        self.ps_id_tensor = Tensor(0, ms.int32)
-        self.table_id_tensor = Tensor(table_id_list, ms.int32)
-        self.depend = ops.Depend()
-
-    def construct(self):
-        export_op1 = self.embedding_table_import(self.file_path, self.ps_id_tensor, self.table_id_tensor)
-        z = self.depend(self.file_path, export_op1)
-        export_op2 = self.embedding_compute_var_import(z, self.ps_id_tensor, self.table_id_tensor)
-        return export_op2
-
-
-class ESEmbeddingTableImport(nn.Cell):
-    """
-    ESEmbeddingTableImport.
-    """
-    def __init__(self, embedding_dim_list, value_total_len_list, table_name_list, table_id_list, file_path):
-        super(ESEmbeddingTableImport, self).__init__()
-        self.op = EmbeddingTableImport(
-            embedding_dim_list,
-            value_total_len_list,
-            table_name=table_name_list,
-            only_var_flag=True)
-        self.file_path = Tensor(np.array(file_path))
-        self.ps_id_tensor = Tensor(0, ms.int32)
-        self.table_id_tensor = Tensor(table_id_list, ms.int32)
-
-    def construct(self):
-        y = self.op(self.file_path, self.ps_id_tensor, self.table_id_tensor)
-        return y
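The per-key storage size in the deleted file is plain arithmetic: one embedding vector plus one vector per optimizer slot, plus two extra values per key in training mode. A minimal standalone sketch of that arithmetic, with names mirroring the deleted helpers (no MindSpore required; what the two extra training values hold is not documented in the source):

# Sketch of the slot/value-length arithmetic from the deleted
# embedding_service_layer.py (_get_slot_var_num and the value_total_len
# computation in ESInitLayer). Pure Python, no MindSpore.

def get_slot_var_num(optimizer_mode: str) -> int:
    # adam/adamw/rmsprop keep m and v (2 slots); adagrad keeps one
    # accumulator; sgd and the empty mode keep no optimizer state.
    if optimizer_mode == "adagrad":
        return 1
    if optimizer_mode in ("sgd", ""):
        return 0
    return 2

def value_total_len(embedding_dim: int, optimizer_mode: str, mode: str = "train") -> int:
    slots = get_slot_var_num(optimizer_mode)
    # Training rows carry 2 extra values of per-key bookkeeping,
    # exactly as in the deleted code.
    if mode == "train":
        return embedding_dim * (slots + 1) + 2
    return embedding_dim * (slots + 1)

# adam with embedding_dim=8: 8 * (2 + 1) + 2 == 26 values per key.
assert value_total_len(8, "adam") == 26
assert value_total_len(8, "sgd", mode="eval") == 8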
mindspore/ops/function/reshard_func.py
@@ -1,102 +0,0 @@
-# Copyright 2023 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Defines parameter operators with functional form."""
-from mindspore.ops import operations as P
-from mindspore.ops._primitive_cache import _get_cache_prim
-from mindspore.parallel.shard import Layout
-from mindspore.common.tensor import Tensor
-
-
-def reshard(tensor, layout):
-    r"""
-    Specify the tensor by the given layout. The given layout must be type mindspore.Layout,
-    can check :class:`mindspore.Layout` for reference.
-
-    - In the Graph mode, this function can set the sharding propagation strategy of a tensor.
-      For those tensor do not manually be set, their strategies are decided by the sharding
-      strategy propagation algorithm automatically.
-    - In the PyNative mode, this function can set a tensor sharding strategy in a Cell that
-      runs in the Graph mode (i.e. inside the Cell processed by Cell.shard/F.shard).
-
-    Note:
-        - In the auto parallel mode, an exception will throw if the search mode is not
-          "sharding_propagation".
-        - In the semi-auto parallel mode, the parallel mode will automatically switch to auto
-          parallel mode with the search mode be set to "sharding_propagation".
-
-    Args:
-        tensor (Tensor): The tensor to be set the sharding strategy.
-        layout (Layout): The layout to shard the tensor precisely, including the device
-                         arrangement (device_matrix) and the alias for the device matrix
-                         (alias_name).
-
-    Returns:
-        Tensor. The mathematically equivalent of the input tensor.
-
-    Raises:
-        TypeError: Reshard takes in Tensor type as the first input param, but got: `type(tensor)`.
-        TypeError: Reshard only support type mindspore.Layout but got: `type(layout)`.
-
-    Examples:
-        >>> import numpy as np
-        >>> import mindspore as ms
-        >>> from mindspore import ops, nn, Tensor, context, Layout
-        >>> context.set_context(mode=ms.GRAPH_MODE)
-        >>> context.set_auto_parallel_context(parallel_mode=ms.ParallelMode.AUTO_PARALLEL,
-        >>>                                   search_mode="sharding_propagation")
-        >>> class Network(nn.Cell):
-        >>>     def __init__(self):
-        >>>         super().__init__()
-        >>>         self.matmul = ops.MatMul()
-        >>>         self.relu = ops.ReLU()
-        >>>     def construct(self, x, layout):
-        >>>         x = self.relu(x)
-        >>>         x_reshard = ops.reshard(x, self.layout)
-        >>>         y = Tensor(np.ones(shape=(128, 128)), dtype=ms.float32)
-        >>>         x = self.matmul(x_reshard, y)
-        >>>         return x
-        >>>
-        >>> layout = Layout((4, 2), ("dp", "mp"))
-        >>> input_layout = layout("dp", "mp")
-        >>> net = Network()
-        >>> tensor = Tensor(np.ones(shape=(128, 128)), dtype=ms.float32)
-        >>> out = net(tensor, input_layout)
-    """
-    if not isinstance(tensor, Tensor):
-        raise TypeError(f"Reshard takes in Tensor type as the first input param, but got: {type(tensor)}.")
-    if not isinstance(layout, Layout):
-        raise TypeError(f"Reshard only support type mindspore.Layout, but got: {type(layout)}.")
-
-    def layout_to_tuple(layout):
-        layout_dict = layout.to_dict()
-        tensor_map = layout_dict["tensor_map"]
-        device_matrix_rev = layout_dict["device_matrix"][::-1]
-        axis_stgy = ()
-        for ind in tensor_map:
-            if ind == -1:
-                axis_stgy += (1,)
-            else:
-                axis_stgy += (device_matrix_rev[ind],)
-        return axis_stgy
-
-    in_strategy = layout_to_tuple(layout)
-    _reshard = _get_cache_prim(P.Reshard)(in_layout=(layout,), out_layout=(layout,), in_strategy=(in_strategy,))
-    return _reshard(tensor)
-
-__all__ = [
-    'reshard'
-]
-
-__all__.sort()
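The core of the deleted reshard() is the conversion from a Layout to a per-axis sharding strategy tuple. Below is a self-contained sketch of that conversion, operating on the plain dict that Layout.to_dict() would return; the example tensor_map value assumes the usual MindSpore convention that map indices count device axes from the last one:

# Standalone sketch of layout_to_tuple from the deleted reshard_func.py;
# the Layout object is replaced by the dict its to_dict() would return,
# so this runs without MindSpore.

def layout_to_tuple(layout_dict):
    tensor_map = layout_dict["tensor_map"]
    device_matrix_rev = layout_dict["device_matrix"][::-1]
    axis_stgy = ()
    for ind in tensor_map:
        # -1 marks an unsharded tensor axis; any other value indexes the
        # reversed device matrix, giving the shard count for that axis.
        if ind == -1:
            axis_stgy += (1,)
        else:
            axis_stgy += (device_matrix_rev[ind],)
    return axis_stgy

# Layout((4, 2), ("dp", "mp")) applied as layout("dp", "mp") gives
# device_matrix (4, 2) and (assumed convention) tensor_map (1, 0):
example = {"device_matrix": (4, 2), "tensor_map": (1, 0)}
assert layout_to_tuple(example) == (4, 2)  # dim 0 split by 4, dim 1 by 2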
mindspore/ops/operations/_infer_ops.py
@@ -1,19 +0,0 @@
-# Copyright 2024 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-"""Operator of infer net"""
-# pylint: disable=unused-import
-from ..auto_generate import (QuantV2, DynamicQuantExt, QuantBatchMatmul, WeightQuantBatchMatmul, KVCacheScatterUpdate,
-                             FusedInferAttentionScore, GroupedMatmul, MoeFinalizeRouting)
mindspore/ops/operations/reshard_ops.py
@@ -1,53 +0,0 @@
-# Copyright 2020-2022 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Operators for reshard."""
-from mindspore.ops.primitive import Primitive, prim_attr_register
-
-
-class Reshard(Primitive):
-    r"""
-    Reshard the tensor by the given in_layout and out_layout, which can precisely
-    define how the dimension of the tensor and the device clusters be sharded in
-    parallel procedure.
-
-    Note:
-        - The in and out layout should be the type mindspore.Layout.
-        - The in and out layout should be the same value of layout when invoke
-          ops.Reshard(layout, layout, in_strategy).
-        - The in_strategy should be the strategy derived from the layout.
-        - This primitive is not recommended to use directly. We recommend to use mindspore.reshard.
-
-    Inputs:
-        - **tensor** (Tensor) - The tensor to be resharded.
-
-    Outputs:
-        Tensor. The mathematically equivalent of the input tensor.
-
-    Examples:
-        >>> from mindspore.parallel.shard import Layout
-        >>> _layout = Layout((4, 2), ("dp", "mp"))
-        >>> layout = (_layout("dp", "mp"),)
-        >>> reshard = ops.Reshard(layout, layout, in_strategy)
-        >>> reshard(tensor)
-    """
-
-    @prim_attr_register
-    def __init__(self, in_layout, out_layout, in_strategy):
-        super().__init__(name="Reshard")
-        self.shard(in_layout, out_layout)
-        self.in_strategy = in_strategy
-
-    def __call__(self, tensor):
-        return tensor
mindspore/profiler/common/process_pool.py
@@ -1,41 +0,0 @@
-# Copyright 2022-2024 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Analyze profiling data asynchronously by asynchronous process"""
-import atexit
-from typing import List
-from multiprocessing import Process
-
-from mindspore.profiler.common.singleton import Singleton
-
-
-@Singleton
-class MultiProcessPool:
-    """A ProcessPool to run task asynchronously"""
-
-    def __init__(self) -> None:
-        self.porcess_list: List[Process] = []
-        atexit.register(self.wait_all_job_finished)
-
-    def add_async_job(self, func):
-        """Add job and run in subprocess"""
-        process = Process(target=func)
-        process.start()
-        self.porcess_list.append(process)
-
-    def wait_all_job_finished(self):
-        """Wait all subprocess finished"""
-        for process in self.porcess_list:
-            process.join()
-        self.porcess_list = []
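The removed pool is a thin wrapper over multiprocessing with an atexit-registered join. A runnable sketch of the same semantics, minus the Singleton decorator (MiniPool and job are illustrative names, not MindSpore API):

# Fire-and-forget subprocesses, joined at interpreter exit or on demand.
import atexit
from multiprocessing import Process
from typing import List

class MiniPool:  # hypothetical stand-in for the removed MultiProcessPool
    def __init__(self) -> None:
        self.process_list: List[Process] = []
        # Ensure stragglers are joined even if the caller never waits.
        atexit.register(self.wait_all_job_finished)

    def add_async_job(self, func) -> None:
        """Start func in a subprocess and track it."""
        process = Process(target=func)
        process.start()
        self.process_list.append(process)

    def wait_all_job_finished(self) -> None:
        """Join every tracked subprocess, then clear the list."""
        for process in self.process_list:
            process.join()
        self.process_list = []

def job() -> None:
    print("profiling chunk parsed in a subprocess")

if __name__ == "__main__":  # required on spawn-based platforms
    pool = MiniPool()
    pool.add_async_job(job)
    pool.wait_all_job_finished()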
mindspore/profiler/common/singleton.py
@@ -1,28 +0,0 @@
-# Copyright 2022-2024 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""A singleton implement in python"""
-
-
-class Singleton:
-    """A singleton implement"""
-
-    def __init__(self, cls):
-        self._cls = cls
-        self._instance = {}
-
-    def __call__(self):
-        if self._cls not in self._instance:
-            self._instance[self._cls] = self._cls()
-        return self._instance[self._cls]
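The removed Singleton is a class decorator that caches one instance per decorated class. A self-contained sketch of its behavior (Demo is a hypothetical stand-in for a decorated class such as MultiProcessPool):

# The decorator replaces the class with a callable that constructs the
# instance on first use and returns the cached one thereafter.

class Singleton:
    """Class decorator that caches one instance per decorated class."""

    def __init__(self, cls):
        self._cls = cls
        self._instance = {}

    def __call__(self):
        if self._cls not in self._instance:
            self._instance[self._cls] = self._cls()
        return self._instance[self._cls]

@Singleton
class Demo:  # hypothetical decorated class
    pass

assert Demo() is Demo()  # every "construction" yields the same object

Note that the decorator's __call__ takes no arguments, so it only suits classes with zero-argument constructors, which is why the pool above is constructed without parameters.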