mindspore 2.3.0-cp39-none-any.whl → 2.3.0rc2-cp39-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +0 -1512
- mindspore/__init__.py +1 -2
- mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +25 -5
- mindspore/_extends/graph_kernel/model/graph_parallel.py +1 -1
- mindspore/_extends/parse/__init__.py +2 -2
- mindspore/_extends/parse/compile_config.py +0 -29
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +5 -21
- mindspore/_extends/parse/resources.py +7 -5
- mindspore/_extends/parse/standard_method.py +59 -40
- mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +5 -26
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/less_batch_normalization.py +6 -9
- mindspore/common/__init__.py +1 -8
- mindspore/common/_register_for_tensor.py +9 -8
- mindspore/common/api.py +65 -275
- mindspore/common/dtype.py +4 -8
- mindspore/common/dump.py +5 -2
- mindspore/common/jit_config.py +1 -1
- mindspore/common/lazy_inline.py +2 -14
- mindspore/common/parameter.py +15 -14
- mindspore/common/recompute.py +5 -20
- mindspore/common/sparse_tensor.py +6 -21
- mindspore/common/tensor.py +52 -100
- mindspore/communication/__init__.py +11 -6
- mindspore/communication/management.py +94 -92
- mindspore/context.py +18 -180
- mindspore/dataset/engine/datasets.py +46 -69
- mindspore/dataset/engine/datasets_user_defined.py +53 -72
- mindspore/dataset/engine/datasets_vision.py +2 -2
- mindspore/dataset/engine/queue.py +38 -56
- mindspore/dataset/engine/validators.py +5 -11
- mindspore/dataset/vision/__init__.py +5 -5
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +1 -1
- mindspore/dataset/vision/transforms.py +46 -591
- mindspore/dataset/vision/utils.py +1 -121
- mindspore/dataset/vision/validators.py +3 -9
- mindspore/hal/__init__.py +1 -7
- mindspore/hal/device.py +1 -1
- mindspore/include/api/model.h +0 -3
- mindspore/include/dataset/vision.h +2 -54
- mindspore/include/mindapi/base/types.h +0 -1
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -35
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +0 -72
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/{aclnn_all_finite.h → aclnn_add_custom.h} +11 -9
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +12 -184
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +5 -4
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +286 -275
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_tiling.h +45 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +4 -8
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_tiling.h +4 -11
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/kernel/flash_attention_score_mix_hwsync.h +0 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -6
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +75 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/kernel/matmul.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +3 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_common_tiling.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_info.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_data.h +3 -36
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/kernel/matmul_stridedslice_fusion.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +4 -22
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +2 -16
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +3 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +4 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +4 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +2 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +4 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +12 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +1 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +2 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +1 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -17
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +7 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/mindrecord/filewriter.py +2 -2
- mindspore/mint/__init__.py +40 -720
- mindspore/mint/nn/__init__.py +7 -89
- mindspore/mint/nn/functional.py +16 -165
- mindspore/mint/optim/adamw.py +16 -15
- mindspore/nn/__init__.py +2 -0
- mindspore/nn/cell.py +98 -97
- mindspore/nn/extend/basic.py +2 -2
- mindspore/nn/extend/embedding.py +1 -1
- mindspore/nn/extend/layer/normalization.py +5 -7
- mindspore/nn/generator.py +297 -0
- mindspore/nn/layer/activation.py +3 -4
- mindspore/nn/layer/basic.py +16 -79
- mindspore/nn/layer/conv.py +8 -17
- mindspore/nn/layer/embedding.py +4 -1
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +1 -1
- mindspore/nn/layer/pooling.py +0 -5
- mindspore/nn/layer/rnn_cells.py +2 -2
- mindspore/nn/loss/loss.py +19 -19
- mindspore/nn/optim/adasum.py +1 -1
- mindspore/nn/optim/sgd.py +2 -3
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/wrap/cell_wrapper.py +1 -25
- mindspore/nn/wrap/loss_scale.py +1 -24
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/math_ops.py +8 -8
- mindspore/ops/__init__.py +1 -1
- mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -75
- mindspore/ops/_vmap/vmap_array_ops.py +0 -27
- mindspore/ops/_vmap/vmap_math_ops.py +1 -29
- mindspore/ops/_vmap/vmap_nn_ops.py +18 -19
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +8 -34
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +9 -2
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -26
- mindspore/ops/auto_generate/gen_extend_func.py +27 -603
- mindspore/ops/auto_generate/gen_ops_def.py +203 -993
- mindspore/ops/auto_generate/gen_ops_prim.py +402 -1946
- mindspore/ops/auto_generate/pyboost_inner_prim.py +20 -90
- mindspore/ops/composite/base.py +6 -3
- mindspore/ops/composite/math_ops.py +1 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +17 -24
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/extend/__init__.py +3 -2
- mindspore/ops/extend/array_func.py +51 -10
- mindspore/ops/extend/nn_func.py +78 -2
- mindspore/ops/function/__init__.py +13 -8
- mindspore/ops/function/array_func.py +179 -455
- mindspore/ops/function/clip_func.py +1 -1
- mindspore/ops/function/grad/grad_func.py +3 -3
- mindspore/ops/function/math_func.py +103 -117
- mindspore/ops/function/nn_func.py +163 -275
- mindspore/ops/function/other_func.py +2 -2
- mindspore/ops/function/random_func.py +69 -202
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/functional.py +327 -332
- mindspore/ops/operations/__init__.py +3 -13
- mindspore/ops/operations/_grad_ops.py +27 -3
- mindspore/ops/operations/_inner_ops.py +356 -53
- mindspore/ops/operations/_rl_inner_ops.py +2 -2
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +65 -82
- mindspore/ops/operations/comm_ops.py +93 -784
- mindspore/ops/operations/custom_ops.py +28 -51
- mindspore/ops/operations/debug_ops.py +4 -4
- mindspore/ops/operations/inner_ops.py +2 -2
- mindspore/ops/operations/manually_defined/ops_def.py +4 -304
- mindspore/ops/operations/math_ops.py +50 -3
- mindspore/ops/operations/nn_ops.py +247 -14
- mindspore/ops/operations/other_ops.py +3 -3
- mindspore/ops/operations/random_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +1 -1
- mindspore/ops/primitive.py +8 -9
- mindspore/ops/silent_check.py +5 -5
- mindspore/ops_generate/arg_dtype_cast.py +9 -2
- mindspore/ops_generate/arg_handler.py +0 -26
- mindspore/ops_generate/gen_aclnn_implement.py +4 -1
- mindspore/ops_generate/gen_ops.py +4 -26
- mindspore/ops_generate/gen_pyboost_func.py +12 -41
- mindspore/ops_generate/gen_utils.py +0 -21
- mindspore/ops_generate/pyboost_utils.py +2 -7
- mindspore/ops_generate/template.py +0 -1
- mindspore/parallel/_auto_parallel_context.py +1 -21
- mindspore/parallel/_tensor.py +5 -0
- mindspore/parallel/_transformer/transformer.py +1 -1
- mindspore/parallel/_utils.py +1 -15
- mindspore/parallel/algo_parameter_config.py +3 -1
- mindspore/parallel/checkpoint_transform.py +9 -12
- mindspore/parallel/cluster/process_entity/_api.py +29 -28
- mindspore/parallel/cluster/process_entity/_utils.py +3 -13
- mindspore/parallel/cluster/run.py +16 -13
- mindspore/parallel/parameter_broadcast.py +2 -2
- mindspore/parallel/shard.py +17 -31
- mindspore/profiler/__init__.py +2 -3
- mindspore/profiler/common/util.py +2 -107
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +21 -8
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -82
- mindspore/profiler/parser/ascend_analysis/function_event.py +28 -43
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +27 -49
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +10 -15
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +20 -25
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +5 -5
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +1 -10
- mindspore/profiler/parser/ascend_hccl_generator.py +1 -4
- mindspore/profiler/parser/ascend_msprof_exporter.py +22 -43
- mindspore/profiler/parser/ascend_timeline_generator.py +5 -7
- mindspore/profiler/parser/minddata_parser.py +3 -72
- mindspore/profiler/profiling.py +59 -176
- mindspore/rewrite/api/node.py +1 -1
- mindspore/rewrite/common/namespace.py +5 -5
- mindspore/rewrite/parsers/assign_parser.py +0 -2
- mindspore/rewrite/parsers/class_def_parser.py +4 -8
- mindspore/run_check/_check_version.py +1 -1
- mindspore/scipy/fft.py +3 -1
- mindspore/scipy/linalg.py +3 -2
- mindspore/scipy/ops.py +3 -5
- mindspore/scipy/optimize/__init__.py +2 -2
- mindspore/train/__init__.py +4 -4
- mindspore/train/anf_ir_pb2.py +2 -8
- mindspore/train/callback/__init__.py +2 -5
- mindspore/train/callback/_backup_and_restore.py +2 -2
- mindspore/train/callback/_checkpoint.py +16 -104
- mindspore/train/callback/_landscape.py +1 -1
- mindspore/train/callback/_time_monitor.py +1 -1
- mindspore/train/data_sink.py +4 -5
- mindspore/train/dataset_helper.py +20 -45
- mindspore/train/model.py +38 -266
- mindspore/train/serialization.py +105 -256
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/version.py +1 -1
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +303 -420
- mindspore/_extends/pijit/__init__.py +0 -23
- mindspore/_extends/pijit/pijit_func_white_list.py +0 -343
- mindspore/common/file_system.py +0 -48
- mindspore/common/generator.py +0 -260
- mindspore/common/no_inline.py +0 -54
- mindspore/common/np_dtype.py +0 -25
- mindspore/communication/comm_func.py +0 -1140
- mindspore/hal/memory.py +0 -326
- mindspore/lib/libavcodec.so.59 +0 -0
- mindspore/lib/libavdevice.so.59 +0 -0
- mindspore/lib/libavfilter.so.8 +0 -0
- mindspore/lib/libavformat.so.59 +0 -0
- mindspore/lib/libavutil.so.57 +0 -0
- mindspore/lib/libmindspore_np_dtype.so +0 -0
- mindspore/lib/libswresample.so.4 +0 -0
- mindspore/lib/libswscale.so.6 +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.cpp +0 -326
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.py +0 -180
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
- mindspore/lib/plugin/ascend/custom_compiler/OWNERS +0 -12
- mindspore/lib/plugin/ascend/custom_compiler/setup.py +0 -255
- mindspore/lib/plugin/ascend/custom_compiler/start.sh +0 -26
- mindspore/lib/plugin/ascend/custom_compiler/template.json +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/base_type.h +0 -133
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -32
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/tiling_info.h +0 -60
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -37
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_op.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_backoff_base.h +0 -62
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -33
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_ops.h +0 -88
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_pa_op.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -95
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -84
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -61
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -224
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_impl.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_tiling.h +0 -187
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul.h +0 -245
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_interface.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_utils.h +0 -111
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/tiling_data.h +0 -54
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -364
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -114
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -98
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/mint/linalg/__init__.py +0 -22
- mindspore/nn/layer/embedding_service.py +0 -531
- mindspore/nn/layer/embedding_service_layer.py +0 -393
- mindspore/ops/function/reshard_func.py +0 -102
- mindspore/ops/operations/_infer_ops.py +0 -19
- mindspore/ops/operations/reshard_ops.py +0 -53
- mindspore/profiler/common/process_pool.py +0 -41
- mindspore/profiler/common/singleton.py +0 -28
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/train/callback/_cluster_monitor.py +0 -201
- mindspore/train/callback/_flops_collector.py +0 -238
- mindspore/train/callback/_mindio_ttp.py +0 -443
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0

mindspore/dataset/engine/datasets.py

@@ -71,8 +71,7 @@ from .queue import _SharedQueue, _Queue
 from .validators import check_batch, check_shuffle, check_map, check_filter, check_repeat, check_skip, check_zip, \
     check_rename, check_device_send, check_take, check_output_shape, check_project, \
     check_sync_wait, check_zip_dataset, check_add_column, check_concat, check_split, check_bucket_batch_by_length, \
-    check_save, check_tuple_iterator, check_dict_iterator, check_schema, check_to_device_send, check_padded_batch, \
-    check_total_batch
+    check_save, check_tuple_iterator, check_dict_iterator, check_schema, check_to_device_send, check_padded_batch
 from ..core.config import get_callback_timeout, _init_device_info, get_enable_shared_mem, get_num_parallel_workers, \
     get_enable_watchdog, get_seed, set_seed, get_debug_mode, get_multiprocessing_timeout_interval, _get_debug_hook_list
 from ..core.datatypes import mstype_to_detype

@@ -605,9 +604,8 @@ class Dataset:
 
             max_rowsize(Union[int, list[int]], optional): Maximum size of row in MB that is used for shared memory
                 allocation to copy data between processes, the total occupied shared memory will increase as
-                ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase.
-
-                ``python_multiprocessing`` is set to True. If it is an int value, it represents
+                ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. This is only
+                used if python_multiprocessing is set to True. If it is an int value, it represents
                 ``input_columns`` and ``output_columns`` use this value as the unit to create shared memory.
                 If it is a list, the first element represents the ``input_columns`` use this value as the unit to
                 create shared memory, and the second element represents ``output_columns`` use this value as the unit

@@ -899,9 +897,8 @@ class Dataset:
 
             max_rowsize (Union[int, list[int]], optional): Maximum size of row in MB that is used for shared
                 memory allocation to copy data between processes, the total occupied shared memory will increase as
-                ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase.
-
-                ``python_multiprocessing`` is set to True. If it is an int value, it represents
+                ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. This is only
+                used if python_multiprocessing is set to True. If it is an int value, it represents
                 ``input_columns`` and ``output_columns`` use this value as the unit to create shared memory.
                 If it is a list, the first element represents the ``input_columns`` use this value as the unit to
                 create shared memory, and the second element represents ``output_columns`` use this value as the unit

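Both hunks above restate the same `max_rowsize` contract for `Dataset.batch` and `Dataset.map`. A minimal usage sketch (the dataset values and the lambda are illustrative, not taken from the diff):

    import numpy as np
    import mindspore.dataset as ds

    data = ds.NumpySlicesDataset(np.ones((100, 32), dtype=np.float32), column_names=["col1"])
    # An int sets one per-row shared-memory budget (in MB) for both input and
    # output columns; a two-element list budgets input and output separately.
    data = data.map(operations=[lambda x: x * 2], input_columns=["col1"],
                    num_parallel_workers=2, python_multiprocessing=True,
                    max_rowsize=[16, 32])
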
@@ -1571,34 +1568,32 @@ class Dataset:
     @check_tuple_iterator
     def create_tuple_iterator(self, columns=None, num_epochs=-1, output_numpy=False, do_copy=True):
         """
-        Create an iterator over the dataset
-
+        Create an iterator over the dataset. The datatype retrieved back will be a list of `numpy.ndarray` .
+
+        To specify which columns to list and the order needed, use columns_list. If columns_list
+        is not provided, the order of the columns will remain unchanged.
 
         Args:
-            columns (list[str], optional):
-                Default: ``None``,
-            num_epochs (int, optional):
-                Default: ``-1
-            output_numpy (bool, optional): Whether
-
-            do_copy (bool, optional):
-
-                Default: ``True
+            columns (list[str], optional): List of columns to be used to specify the order of columns.
+                Default: ``None``, means all columns.
+            num_epochs (int, optional): Maximum number of epochs that iterator can be iterated.
+                Default: ``-1``, iterator can be iterated infinite number of epochs.
+            output_numpy (bool, optional): Whether or not to output NumPy datatype.
+                If `output_numpy` is ``False``, iterator will output MSTensor. Default: ``False``.
+            do_copy (bool, optional): When output data type is :class:`mindspore.Tensor`,
+                use this param to select the conversion method, only take False for better performance.
+                Default: ``True``.
 
         Returns:
-            Iterator, a dataset iterator that
+            Iterator, a dataset iterator that returns data of type Tuple.
 
         Examples:
             >>> import mindspore.dataset as ds
-            >>>
-            >>>
-            >>>
-
-
-            ... for item in iterator:
-            ...     # output is of type tuple
-            ...     print(type(item))
-            ...     break
+            >>> dataset = ds.GeneratorDataset([i for i in range(10)], "column1")
+            >>> iterator = dataset.create_tuple_iterator()
+            >>> for item in iterator:
+            ...     # item is a list
+            ...     print(type(item))
             ...     break
             <class 'list'>
         """

@@ -1612,32 +1607,27 @@ class Dataset:
     @check_dict_iterator
     def create_dict_iterator(self, num_epochs=-1, output_numpy=False, do_copy=True):
         """
-        Create an iterator over the dataset
-        while the key is the column name and the value is the data.
+        Create an iterator over the dataset. The data retrieved will be a dictionary datatype.
 
         Args:
-            num_epochs (int, optional):
-                Default: ``-1`` ,
-            output_numpy (bool, optional): Whether
-
-            do_copy (bool, optional):
-
+            num_epochs (int, optional): Maximum number of epochs that iterator can be iterated.
+                Default: ``-1`` , iterator can be iterated infinite number of epochs.
+            output_numpy (bool, optional): Whether or not to output NumPy datatype,
+                if `output_numpy` is ``False``, iterator will output MSTensor. Default: ``False`` .
+            do_copy (bool, optional): When output data type is :class:`mindspore.Tensor`,
+                use this param to select the conversion method, only take False for better performance.
                 Default: ``True`` .
 
         Returns:
-            Iterator, a dataset iterator that
+            Iterator, a dataset iterator that returns data of type Dict.
 
         Examples:
             >>> import mindspore.dataset as ds
-            >>>
-            >>>
-            >>>
-
-
-            ... for item in iterator:
-            ...     # output is of type dict
-            ...     print(type(item))
-            ...     break
+            >>> dataset = ds.GeneratorDataset([i for i in range(10)], "column1")
+            >>> iterator = dataset.create_dict_iterator()
+            >>> for item in iterator:
+            ...     # item is a dict
+            ...     print(type(item))
             ...     break
             <class 'dict'>
         """

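The two iterator docstrings above pair naturally; a short sketch combining them (mirroring the doctest in the hunk):

    import mindspore.dataset as ds

    dataset = ds.GeneratorDataset([i for i in range(10)], "column1")
    # output_numpy=True yields numpy.ndarray values; with the default
    # output_numpy=False each value is a mindspore.Tensor, and do_copy=False
    # picks the cheaper ndarray-to-Tensor conversion path.
    for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
        print(type(item["column1"]))
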
@@ -2574,8 +2564,7 @@ def _check_shm_usage(num_worker, queue_size, in_rowsize, out_rowsize):
     when training in parallel mode.
     """
     threshold_ratio = 0.8
-
-    if platform.system().lower() not in {"windows", "darwin"} and in_rowsize != -1 and out_rowsize != -1:
+    if platform.system().lower() not in {"windows", "darwin"}:
         device_num = _get_device_num()
         # In the cluster, _get_device_num indicates the number of the entire cluster. The maximum number of cards
         # on the ascend server is 8.

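`_check_shm_usage` guards against exhausting `/dev/shm` before workers start. A hypothetical re-sketch of the kind of estimate it performs (the library's exact byte accounting, device scaling, and error handling differ; `shm_budget_ok` is an invented name):

    import shutil

    def shm_budget_ok(num_worker, queue_size, in_rowsize, out_rowsize, threshold_ratio=0.8):
        # Worst case: every worker keeps queue_size rows in flight for both the
        # input and output shared-memory queues, each row capped at *_rowsize MB.
        required = num_worker * queue_size * (in_rowsize + out_rowsize) * 1024 * 1024
        free = shutil.disk_usage("/dev/shm").free
        return required <= free * threshold_ratio
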
@@ -2623,9 +2612,8 @@ class BatchDataset(UnionBaseDataset):
             name as the input columns, i.e., the columns will be replaced.
         max_rowsize(Union[int, list[int]], optional): Maximum size of row in MB that is used for shared memory
             allocation to copy data between processes, the total occupied shared memory will increase as
-            ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase.
-
-            ``python_multiprocessing`` is set to True. If it is an int value, it represents
+            ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. This is only
+            used if python_multiprocessing is set to True. If it is an int value, it represents
             ``input_columns`` and ``output_columns`` use this value as the unit to create shared memory.
             If it is a list, the first element represents the ``input_columns`` use this value as the unit to
             create shared memory, and the second element represents ``output_columns`` use this value as the unit

@@ -2656,7 +2644,7 @@ class BatchDataset(UnionBaseDataset):
         self.python_multiprocessing = python_multiprocessing
         self.process_pool = None
         if isinstance(max_rowsize, int):
-            self.max_rowsize = [max_rowsize * self.batch_size] * 2
+            self.max_rowsize = [max_rowsize * self.batch_size] * 2
         else:
             self.max_rowsize = [max_rowsize[0] * self.batch_size, max_rowsize[1] * self.batch_size]
 

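Both sides of this hunk scale the per-row budget by the batch size, because after batching an entire batch crosses shared memory as a single row:

    batch_size, max_rowsize = 32, 16
    # [input budget, output budget] in MB, each sized to hold one full batch
    scaled = [max_rowsize * batch_size] * 2   # [512, 512]
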
@@ -3188,8 +3176,7 @@ class _MPWorker(multiprocessing.Process):
         shared_memory = get_enable_shared_mem()
         self.pipe = Pipe(warning_ctl, shared_memory=shared_memory, max_rowsize=max_rowsize)
         self.check_interval = get_multiprocessing_timeout_interval()
-        super().__init__(target=worker_target(operations, worker_id),
-                         args=(self.pipe,), daemon=True)
+        super().__init__(target=worker_target(operations, worker_id), args=(self.pipe,), daemon=True)
 
     def execute(self, idx, *args):
         """Acquiring data from a worker in an infinite loop"""

@@ -3216,14 +3203,6 @@ class _MPWorker(multiprocessing.Process):
                 logger.warning("Please `pip install py-spy` to get the stacks of the stuck process.")
             try:
                 res = self.pipe.master_receive()
-                # Because there is no need to copy when creating Tensors in the C++layer, it reduces the time
-                # from np.ndarray to C++Tensor creation. However, when using shared memory in multiple processes,
-                # the address of the shared memory will always be passed to subsequent nodes in the dataset pipeline,
-                # and the shared memory will also be written by the current node, causing dirty data to be accessed
-                # by subsequent nodes in the pipeline. So make a memory copy here to solve the problem of
-                # shared memory being contaminated.
-                if get_enable_shared_mem():
-                    res = copy.deepcopy(res)
             except queue.Empty:
                 continue
             if res is None:

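The comment block removed here documents why 2.3.0 deep-copies results arriving through shared memory. In isolation the guard looks like this (`master_receive` comes from the hunk above; the wrapper function is illustrative):

    import copy

    def receive_row(pipe, shared_memory_enabled):
        res = pipe.master_receive()
        if shared_memory_enabled:
            # Detach the row from the shared buffer so the producer overwriting
            # it cannot leak dirty data into later pipeline stages.
            res = copy.deepcopy(res)
        return res
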
@@ -3584,16 +3563,16 @@ class _PythonMultiprocessing(cde.PythonMultiprocessingRuntime):
         if platform.system().lower() != 'windows':
             self.eof = multiprocessing.Event()
             self.cleaning_process = multiprocessing.Process(target=self._clean_process,
-                                                            name="MapCleanProcess",
                                                             args=(self.ppid, self.workers, self.eof),
+                                                            name="OrphanCleaner",
                                                             daemon=True)
             self.cleaning_process.start()
 
             if get_enable_watchdog():
                 self.eot = threading.Event()
                 self.watch_dog = threading.Thread(target=self._watch_dog,
-                                                  name="MapWatchDog",
                                                   args=(self.eot, self.workers + [self.cleaning_process]),
+                                                  name="WatchDog",
                                                   daemon=True)
                 self.watch_dog.start()
 

@@ -3672,9 +3651,8 @@ class MapDataset(UnionBaseDataset):
         callbacks (DSCallback, list[DSCallback], optional): List of Dataset callbacks to be called. Default: ``None``.
         max_rowsize(Union[int, list[int]], optional): Maximum size of row in MB that is used for shared memory
             allocation to copy data between processes, the total occupied shared memory will increase as
-            ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase.
-
-            ``python_multiprocessing`` is set to True. If it is an int value, it represents ``input_columns`` and
+            ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. This is only
+            used if python_multiprocessing is set to True. If it is an int value, it represents ``input_columns`` and
             ``output_columns`` use this value as the unit to create shared memory. If it is a list, the first element
             represents the ``input_columns`` use this value as the unit to create shared memory, and the second element
             represents ``output_columns`` use this value as the unit to create shared memory. Default: 16.

@@ -4325,7 +4303,6 @@ class TransferDataset(Dataset):
         total_batch = 0
         if hasattr(self.children[0], "__total_batch__"):
             total_batch = self.children[0].__total_batch__
-            check_total_batch(total_batch)
         return cde.DataQueueNode(children[0], self.queue_name, self.device_type, self.device_id, self._send_epoch_end,
                                  total_batch, self._create_data_info_queue)
 

mindspore/dataset/engine/datasets_user_defined.py

@@ -19,7 +19,6 @@ After declaring the dataset object, you can further apply dataset operations
 (e.g. filter, skip, concat, map, batch) on it.
 """
 import builtins
-import copy
 import errno
 import math
 import os

@@ -116,28 +115,33 @@ def _cpp_sampler_fn_mp(sample_ids, sample_fn):
     return sample_fn.process(sample_ids)
 
 
-def _fill_worker_indices(workers, indices,
+def _fill_worker_indices(workers, indices, idx):
     """
-    Worker index queue filler, fill worker index queue in round robin order
+    Worker index queue filler, fill worker index queue in round robin order.
     """
     num_worker = len(workers)
-
-
+    while idx < len(indices):
+        try:
+            workers[idx % num_worker].put(indices[idx])
+            idx += 1
+        except queue.Full:
+            break
+    return idx
+
+
+def _fill_worker_quit_flag(workers, worker_to_quit):
+    """
+    Worker index queue filler, fill worker index queue with QUIT flag.
+    """
+    num_worker = len(workers)
+    for i in range(num_worker):
+        # just put only one QUIT flag to the sub-thread / sub-process
+        if str(i) not in worker_to_quit:
             try:
-                workers[
-
+                workers[i].put("QUIT")
+                worker_to_quit[str(i)] = "QUIT"
             except queue.Full:
-
-            else:
-                for i in range(num_worker):
-                    # just put only one QUIT flag to the sub-thread / sub-process
-                    if str(i) not in worker_to_quit:
-                        try:
-                            workers[i].put("QUIT")
-                            worker_to_quit[str(i)] = "QUIT"
-                        except queue.Full:
-                            continue
-    return idx_cursor, worker_to_quit
+                continue
 
 
 def _convert_row(row):

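The rc2 side splits index dispatch into a plain round-robin filler that stops at the first full queue and a separate QUIT-flag sender. A standalone demonstration of the round-robin behaviour (using `queue.Queue` and `put_nowait` in place of the worker objects, whose `put` is assumed to raise `queue.Full` rather than block):

    import queue

    def fill_round_robin(workers, indices, idx):
        num_worker = len(workers)
        while idx < len(indices):
            try:
                workers[idx % num_worker].put_nowait(indices[idx])
                idx += 1
            except queue.Full:
                break  # stop at the first full queue; the caller retries later
        return idx

    workers = [queue.Queue(maxsize=2) for _ in range(3)]
    cursor = fill_round_robin(workers, list(range(10)), 0)
    print(cursor)  # 6: each of the 3 queues accepted 2 indices before filling up
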
@@ -217,11 +221,10 @@ class SamplerFn:
             # res_queue is used shared memory, so it' size is max_rowsize which is defined by user.
             _check_shm_usage(num_worker, queue_size, 0, max_rowsize)
         self.count = multiprocessing.Value('i', 0)
-        for
+        for _ in range(num_worker):
             if multi_process is True:
                 try:
-                    worker = _GeneratorWorkerMp(dataset, self.eof, max_rowsize, queue_size, self.ppid, self.count,
-                                                worker_id)
+                    worker = _GeneratorWorkerMp(dataset, self.eof, max_rowsize, queue_size, self.ppid, self.count)
                     worker.daemon = True
                     # When multi processes fork a subprocess, the lock of the main process is copied to the subprocess,
                     # which may cause deadlock. Therefore, the subprocess startup is performed in the initialization

@@ -240,18 +243,11 @@ class SamplerFn:
                     self.pids.append(worker.pid)
                     self.need_join = True
             else:
-                worker = _GeneratorWorkerMt(dataset, self.eof
+                worker = _GeneratorWorkerMt(dataset, self.eof)
             worker.daemon = True
             self.workers.append(worker)
         self._launch_cleanup_worker(multi_process=multi_process)
 
-    def _interval_log(self, i, start_time, wait_count):
-        cost_time = int(time.time()) - start_time
-        if cost_time / self.check_interval >= wait_count:
-            wait_count += 1
-            self._log_stuck_warning(self.workers[i % self.num_worker], cost_time)
-        return wait_count
-
     def process(self, indices):
         """
         The main process, start the child process or child thread, and fill the index queue.

@@ -262,21 +258,19 @@ class SamplerFn:
             if not w.queue_empty():
                 # in failover reset scenario the QUIT flag should be pop first
                 while w.idx_queue.qsize() > 0:
-
-
-
-                            raise Exception("The queue of the subprocess is not empty.")
-                    except queue.Empty:
-                        continue
+                    result = w.idx_queue.get()
+                    if result != "QUIT":
+                        raise Exception("The queue of the subprocess is not empty.")
             # Start all workers
             if not w.is_alive():
                 w.start()
 
         # Fill initial index queues
         idx_cursor = 0
+        idx_cursor = _fill_worker_indices(self.workers, indices, idx_cursor)
+
         # worker to quit
         worker_to_quit = {}
-        idx_cursor, worker_to_quit = _fill_worker_indices(self.workers, indices, idx_cursor, worker_to_quit)
 
         # Fetch results
         for i in range(len(indices)):

@@ -298,16 +292,11 @@ class SamplerFn:
                     self._stop_subprocess()
                     return
                 time.sleep(0.1)
-
+                cost_time = int(time.time()) - start_time
+                if cost_time / self.check_interval >= wait_count:
+                    wait_count += 1
+                    self._log_stuck_warning(self.workers[i % self.num_worker], cost_time)
             result = self.workers[i % self.num_worker].get()
-            # Because there is no need to copy when creating Tensors in the C++layer, it reduces the time
-            # from np.ndarray to C++Tensor creation. However, when using shared memory in multiple processes,
-            # the address of the shared memory will always be passed to subsequent nodes in the dataset pipeline,
-            # and the shared memory will also be written by the current node, causing dirty data to be accessed
-            # by subsequent nodes in the pipeline. So make a memory copy here to solve the problem of
-            # shared memory being contaminated.
-            if self.multi_process is True and get_enable_shared_mem():
-                result = copy.deepcopy(result)
             if isinstance(result, ExceptionHandler):
                 result.reraise()
         except queue.Empty:

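The rc2 side inlines the stuck-worker warning that 2.3.0 factors into `_interval_log`: while waiting it warns once after `check_interval` seconds, then once per further interval. The cadence in isolation (a sketch; `get_result` and `warn` stand in for the queue read and `_log_stuck_warning`, and the initial `wait_count` of 1 is an assumption):

    import queue
    import time

    def wait_with_warnings(get_result, check_interval, warn):
        start_time, wait_count = int(time.time()), 1
        while True:
            try:
                return get_result(timeout=0.1)
            except queue.Empty:
                cost_time = int(time.time()) - start_time
                if cost_time / check_interval >= wait_count:
                    wait_count += 1
                    warn(cost_time)
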
@@ -319,9 +308,11 @@ class SamplerFn:
                 if self.eof.is_set():
                     self._stop_subprocess()
                     return
-
-
-
+                if idx_cursor < len(indices):
+                    idx_cursor = _fill_worker_indices(self.workers, indices, idx_cursor)
+                else:
+                    # send QUIT flag to workers
+                    _fill_worker_quit_flag(self.workers, worker_to_quit)
             yield _convert_row(result)
 
     def _log_stuck_warning(self, worker, waiting_time):

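Once every index has been dispatched, each worker receives exactly one "QUIT" sentinel; `worker_to_quit` records deliveries so a retry after `queue.Full` cannot enqueue duplicates. A compact illustration (plain `queue.Queue` objects stand in for the workers):

    import queue

    def send_quit_once(workers, worker_to_quit):
        for i, w in enumerate(workers):
            if str(i) not in worker_to_quit:
                try:
                    w.put_nowait("QUIT")
                    worker_to_quit[str(i)] = "QUIT"   # remember delivery
                except queue.Full:
                    continue

    workers = [queue.Queue(maxsize=1) for _ in range(2)]
    sent = {}
    send_quit_once(workers, sent)  # both queues get one sentinel
    send_quit_once(workers, sent)  # second call is a no-op, no duplicates
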
@@ -370,7 +361,6 @@ class SamplerFn:
         if multi_process is True and platform.system().lower() != 'windows':
             _clean_worker_func = _PythonMultiprocessing._clean_process  # pylint: disable=W0212
             self.cleaning_process = multiprocessing.Process(target=_clean_worker_func,
-                                                            name="GeneratorCleanProcess",
                                                             args=(self.ppid, self.workers, self.eof))
             self.cleaning_process.daemon = True
             self.cleaning_process.start()

@@ -378,7 +368,6 @@ class SamplerFn:
         if get_enable_watchdog():
             self.eot = threading.Event()
             self.watch_dog = threading.Thread(target=_PythonMultiprocessing._watch_dog,  # pylint: disable=W0212
-                                              name="GeneratorWatchDog",
                                               args=(self.eot, self.workers + [self.cleaning_process]))
             self.watch_dog.daemon = True
             self.watch_dog.start()

@@ -393,7 +382,7 @@ class SamplerFn:
     def _stop_subprocess(self):
         """Only the main process can call join."""
         if self.need_join is True and self.ppid == os.getpid():
-            if hasattr(self, 'eof') and self.eof is not None:
+            if hasattr(self, 'eof') and self.eof is not None and not self.eof.is_set():
                 self.eof.set()
                 # close the watch dog first
                 self._abort_watchdog()

@@ -405,12 +394,10 @@ class SamplerFn:
                 del w.res_queue
                 del w.idx_queue
 
-                #
-                w.
-
-
-                w.terminate()
-                w.close()
+                # close all the subprocess workers
+                w.terminate()
+                w.join()
+                w.close()
             except Exception:  # pylint: disable=W0703
                 # Block all errors when join
                 continue

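The rc2 shutdown sequence is terminate, then join, then close, so the `Process` object's handles are only released after the OS process has been reaped. The same pattern stand-alone:

    import multiprocessing
    import time

    def stop_worker(worker):
        worker.terminate()  # ask the OS to kill the subprocess (SIGTERM on POSIX)
        worker.join()       # reap it so no zombie is left behind
        worker.close()      # release the Process object's resources

    if __name__ == "__main__":
        p = multiprocessing.Process(target=time.sleep, args=(60,), daemon=True)
        p.start()
        stop_worker(p)
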
@@ -442,15 +429,10 @@ class SamplerFn:
         self.workers = None
 
     def _abort_watchdog(self):
-        """Let watchdog quit."""
         if hasattr(self, 'eot') and self.eot is not None and not self.eot.is_set():
             self.eot.set()
         if hasattr(self, 'cleaning_process') and self.cleaning_process is not None:
-            #
-            self.cleaning_process.join(timeout=5)
-            if self.cleaning_process.is_alive():
-                # if the cleaning process did not exit, it may hang, try to terminate it
-                _PythonMultiprocessing._terminate_processes([self.cleaning_process])  # pylint: disable=W0212
+            _PythonMultiprocessing._terminate_processes([self.cleaning_process])  # pylint: disable=W0212
             del self.cleaning_process
         if hasattr(self, 'count'):
             del self.count

@@ -505,7 +487,7 @@ def _generator_worker_loop(dataset, idx_queue, result_queue, eof, is_multiproces
     if is_multiprocessing:
         result_queue.cancel_join_thread()  # Ensure that the process does not hung when exiting
         signal.signal(signal.SIGTERM, partial(_subprocess_handle, eof))
-    while
+    while True:
         _ignore_sigint(is_multiprocessing=is_multiprocessing)
 
         # Fetch index, block

@@ -540,7 +522,7 @@ def _generator_worker_loop(dataset, idx_queue, result_queue, eof, is_multiproces
         except Exception:  # pylint: disable=broad-except
             result = ExceptionHandler(where="in GeneratorDataset worker process")
         # Send data, block
-        while
+        while True:
             try:
                 result_queue.put(result, timeout=5)
             except queue.Full:

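The send loop blocks in 5-second slices so a worker with a full result queue can periodically re-check for shutdown instead of hanging. A sketch of that retry shape (the `eof` check is assumed from the surrounding loop, which the hunk elides):

    import queue

    def send_result(result_queue, result, eof):
        while True:
            try:
                result_queue.put(result, timeout=5)
                return
            except queue.Full:
                if eof.is_set():
                    return  # shutting down; drop the row instead of blocking
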
@@ -559,11 +541,11 @@ class _GeneratorWorkerMt(threading.Thread):
     Worker process for multi-thread Generator.
     """
 
-    def __init__(self, dataset, eof
+    def __init__(self, dataset, eof):
         self.idx_queue = queue.Queue(16)
         self.res_queue = queue.Queue(16)
         super().__init__(target=_generator_worker_loop, args=(dataset, self.idx_queue, self.res_queue, eof, False),
-                         name="GeneratorWorkerThread"
+                         name="GeneratorWorkerThread")
 
     def put(self, item):
         """

@@ -592,7 +574,7 @@ class _GeneratorWorkerMp(multiprocessing.Process):
     Worker process for multiprocess Generator.
     """
 
-    def __init__(self, dataset, eof, max_rowsize, queue_size, ppid, count
+    def __init__(self, dataset, eof, max_rowsize, queue_size, ppid, count):
         self.idx_queue = multiprocessing.Queue(queue_size)
         if get_enable_shared_mem():
             self.res_queue = _SharedQueue(queue_size, count, max_rowsize=max_rowsize)

@@ -600,7 +582,7 @@ class _GeneratorWorkerMp(multiprocessing.Process):
             self.res_queue = multiprocessing.Queue(queue_size)
         self.idx_queue.cancel_join_thread()  # Ensure that the process does not hung when exiting
         super().__init__(target=_generator_worker_loop, args=(dataset, self.idx_queue, self.res_queue, eof, True, ppid),
-                         name="GeneratorWorkerProcess"
+                         name="GeneratorWorkerProcess")
 
     def put(self, item):
         """

@@ -673,9 +655,8 @@ class GeneratorDataset(MappableDataset, UnionBaseDataset):
             option could be beneficial if the Python operation is computational heavy. Default: ``True``.
         max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory
             allocation to copy data between processes, the total occupied shared memory will increase as
-            ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase.
-
-            ``python_multiprocessing`` is set to True. Default: 16.
+            ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. This is only
+            used if python_multiprocessing is set to True. Default: 16.
 
     Raises:
         RuntimeError: If source raises an exception during execution.

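For reference, the `GeneratorDataset` parameters discussed above fit together like this (a minimal sketch):

    import numpy as np
    import mindspore.dataset as ds

    def gen():
        for i in range(10):
            yield (np.array([i], dtype=np.int32),)

    # max_rowsize only matters when python_multiprocessing=True; 16 MB per row
    # is the default budget described in the docstring above.
    dataset = ds.GeneratorDataset(gen, column_names=["data"],
                                  python_multiprocessing=True, max_rowsize=16)
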
mindspore/dataset/engine/datasets_vision.py

@@ -2799,8 +2799,8 @@ class ManifestDataset(MappableDataset, VisionBaseDataset):
 
     Manifest file contains a list of files included in a dataset, including basic file info such as File name and File
     ID, along with extended file metadata. Manifest is a data format file supported by Huawei Modelarts. For details,
-    see `Specifications for Importing the Manifest File <https://support.huaweicloud.com/
-
+    see `Specifications for Importing the Manifest File <https://support.huaweicloud.com/engineers-modelarts/
+    modelarts_23_0009.html>`_ .
 
     .. code-block::
 