mindspore 2.5.0-cp310-cp310-win_amd64.whl → 2.6.0rc1-cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +6 -4
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -33
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +19 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +24 -193
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +97 -74
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +4 -4
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +4 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -1
- mindspore/common/_stub_tensor.py +5 -10
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +1915 -3287
- mindspore/common/api.py +341 -354
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +5 -2
- mindspore/common/dump.py +7 -5
- mindspore/common/file_system.py +3 -0
- mindspore/common/hook_handle.py +5 -3
- mindspore/common/initializer.py +10 -6
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +2 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +106 -39
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +297 -714
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +47 -2
- mindspore/communication/comm_func.py +70 -53
- mindspore/communication/management.py +83 -17
- mindspore/context.py +214 -560
- mindspore/dataset/__init__.py +44 -20
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +102 -120
- mindspore/dataset/engine/datasets_audio.py +22 -22
- mindspore/dataset/engine/datasets_standard_format.py +43 -24
- mindspore/dataset/engine/datasets_text.py +78 -85
- mindspore/dataset/engine/datasets_user_defined.py +108 -76
- mindspore/dataset/engine/datasets_vision.py +111 -108
- mindspore/dataset/engine/iterators.py +5 -3
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/samplers.py +279 -57
- mindspore/dataset/engine/serializer_deserializer.py +2 -1
- mindspore/dataset/engine/validators.py +10 -0
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/device_context/ascend/op_debug.py +60 -1
- mindspore/device_context/ascend/op_tuning.py +0 -4
- mindspore/device_manager.py +39 -3
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +22 -26
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +4 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +40 -22
- mindspore/experimental/optim/radam.py +5 -5
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -81
- mindspore/hal/event.py +38 -55
- mindspore/hal/memory.py +93 -144
- mindspore/hal/stream.py +81 -125
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +40 -2
- mindspore/mindrecord/__init__.py +20 -7
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +131 -700
- mindspore/mint/distributed/__init__.py +5 -1
- mindspore/mint/distributed/distributed.py +194 -109
- mindspore/mint/linalg/__init__.py +2 -0
- mindspore/mint/nn/__init__.py +280 -18
- mindspore/mint/nn/functional.py +282 -64
- mindspore/mint/nn/layer/__init__.py +4 -0
- mindspore/mint/nn/layer/_functions.py +7 -3
- mindspore/mint/nn/layer/activation.py +120 -13
- mindspore/mint/nn/layer/conv.py +218 -24
- mindspore/mint/nn/layer/normalization.py +15 -16
- mindspore/mint/nn/layer/padding.py +1 -1
- mindspore/mint/nn/layer/pooling.py +66 -1
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1250 -176
- mindspore/nn/layer/activation.py +23 -21
- mindspore/nn/layer/basic.py +22 -16
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +22 -17
- mindspore/nn/layer/embedding.py +9 -8
- mindspore/nn/layer/normalization.py +48 -42
- mindspore/nn/layer/pooling.py +75 -31
- mindspore/nn/layer/transformer.py +11 -10
- mindspore/nn/learning_rate_schedule.py +4 -2
- mindspore/nn/loss/loss.py +27 -19
- mindspore/nn/optim/ada_grad.py +6 -5
- mindspore/nn/optim/adadelta.py +9 -7
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +16 -12
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +1 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +9 -7
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +178 -117
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +3 -3
- mindspore/numpy/array_creations.py +3 -3
- mindspore/numpy/array_ops.py +1 -1
- mindspore/numpy/math_ops.py +4 -4
- mindspore/numpy/utils.py +1 -2
- mindspore/numpy/utils_const.py +1 -2
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +3 -2
- mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
- mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
- mindspore/ops/_vmap/vmap_array_ops.py +7 -6
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
- mindspore/ops/_vmap/vmap_math_ops.py +4 -7
- mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +102 -49
- mindspore/ops/auto_generate/gen_extend_func.py +281 -135
- mindspore/ops/auto_generate/gen_ops_def.py +2574 -2326
- mindspore/ops/auto_generate/gen_ops_prim.py +8566 -2755
- mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +19 -24
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -3
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +28 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +1629 -2345
- mindspore/ops/function/clip_func.py +38 -45
- mindspore/ops/function/debug_func.py +36 -44
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +46 -78
- mindspore/ops/function/math_func.py +3035 -3705
- mindspore/ops/function/nn_func.py +676 -241
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +17 -30
- mindspore/ops/function/random_func.py +204 -361
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +5 -5
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +24 -17
- mindspore/ops/functional.py +6 -4
- mindspore/ops/functional_overload.py +547 -4
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +10 -5
- mindspore/ops/operations/_custom_ops_utils.py +247 -0
- mindspore/ops/operations/_grad_ops.py +1 -10
- mindspore/ops/operations/_inner_ops.py +5 -76
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +37 -22
- mindspore/ops/operations/comm_ops.py +150 -107
- mindspore/ops/operations/custom_ops.py +221 -23
- mindspore/ops/operations/debug_ops.py +115 -16
- mindspore/ops/operations/inner_ops.py +1 -1
- mindspore/ops/operations/linalg_ops.py +1 -58
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +746 -79
- mindspore/ops/operations/math_ops.py +21 -18
- mindspore/ops/operations/nn_ops.py +65 -191
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +43 -32
- mindspore/ops/tensor_method.py +232 -13
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
- mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
- mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
- mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
- mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
- mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
- mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
- mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
- mindspore/ops_generate/{template.py → common/template.py} +96 -84
- mindspore/ops_generate/gen_ops.py +23 -325
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
- mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -7
- mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
- mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
- mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
- mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
- mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
- mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
- mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
- mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
- mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
- mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
- mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
- mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
- mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
- mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +6 -2
- mindspore/parallel/_auto_parallel_context.py +133 -6
- mindspore/parallel/_cell_wrapper.py +130 -15
- mindspore/parallel/_parallel_serialization.py +95 -4
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +198 -25
- mindspore/parallel/algo_parameter_config.py +3 -3
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +656 -37
- mindspore/parallel/cluster/process_entity/_api.py +151 -19
- mindspore/parallel/cluster/run.py +1 -1
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +259 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +24 -13
- mindspore/parallel/shard.py +137 -61
- mindspore/parallel/transform_safetensors.py +287 -95
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +9 -5
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
- mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +22 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
- mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/common/constant.py +12 -0
- mindspore/profiler/common/msprof_cmd_tool.py +42 -23
- mindspore/profiler/common/path_manager.py +24 -0
- mindspore/profiler/common/profiler_context.py +26 -2
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_parameters.py +59 -18
- mindspore/profiler/common/profiler_path_manager.py +66 -7
- mindspore/profiler/dynamic_profiler.py +112 -79
- mindspore/profiler/envprofiler.py +26 -1
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +57 -14
- mindspore/profiler/platform/npu_profiler.py +33 -7
- mindspore/profiler/profiler.py +541 -45
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +4 -0
- mindspore/profiler/schedule.py +57 -22
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +25 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +2 -2
- mindspore/runtime/executor.py +40 -11
- mindspore/runtime/memory.py +25 -8
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +35 -7
- mindspore/train/amp.py +1 -1
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +24 -40
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_flops_collector.py +2 -3
- mindspore/train/callback/_history.py +7 -4
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +8 -13
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +179 -103
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +4 -5
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +8 -6
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +19 -12
- mindspore/train/model.py +176 -103
- mindspore/train/serialization.py +246 -988
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +4 -2
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +2 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +483 -438
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_constants.py +0 -190
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
- /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
mindspore/dataset/engine/datasets_user_defined.py

@@ -1,4 +1,4 @@
-# Copyright 2019-
+# Copyright 2019-2025 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,35 +18,37 @@ You can define your own dataset loading class, and use GeneratorDataset to help
 After declaring the dataset object, you can further apply dataset operations
 (e.g. filter, skip, concat, map, batch) on it.
 """
+import atexit
 import builtins
 import copy
 import errno
 import itertools
 import math
-import os
-import signal
-import time
-from types import GeneratorType
 import multiprocessing
+import os
+import platform
 import queue
-
+import signal
 import subprocess
 import threading
-import
-import
+import time
+import weakref
+from functools import partial
+from types import GeneratorType
+
+import dill
 import numpy as np
+import psutil
 
 import mindspore._c_dataengine as cde
-
-from mindspore.common import Tensor
 from mindspore import log as logger
-
-from .datasets import UnionBaseDataset, MappableDataset, Schema, to_list, _PythonMultiprocessing, _check_shm_usage
+from mindspore.common import Tensor
 from . import samplers
+from .datasets import UnionBaseDataset, MappableDataset, Schema, to_list, _PythonMultiprocessing, _check_shm_usage
 from .queue import _SharedQueue
 from .validators import check_generator_dataset, check_numpy_slices_dataset, check_padded_dataset
 from ..core.config import get_enable_shared_mem, get_prefetch_size, get_multiprocessing_timeout_interval, \
-    get_enable_watchdog, get_debug_mode, get_seed, set_seed
+    get_enable_watchdog, get_debug_mode, get_seed, set_seed, get_multiprocessing_start_method
 from ..core.datatypes import mstypelist_to_detypelist
 from ..core.py_util_helpers import ExceptionHandler
 from ..core.validator_helpers import type_check
@@ -220,31 +222,30 @@ class SamplerFn(cde.PythonMultiprocessingRuntime):
         self.pids = []
         self.check_interval = get_multiprocessing_timeout_interval()  # the interval of check queue's size
 
-        # Event for end of epoch
         if self.multi_process is True:
+            multiprocessing.set_start_method(get_multiprocessing_start_method(), True)
+            # Event for end of epoch
             try:
                 self.eof = multiprocessing.Event()
             except Exception:
                 raise RuntimeError("Init multiprocessing.Event() failed, This might be caused by insufficient shm,"
                                    + " and the recommended shm size is at least 5 GB.")
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        self.count = multiprocessing.Value('i', 0)
-        for worker_id in range(self.num_worker):
-            if self.multi_process is True:
+
+            # Create workers
+            # get default queue size and adjust queue size per worker if there are large # workers
+            queue_size = get_prefetch_size()
+            queue_size = min(queue_size, queue_size * 4 // self.num_worker)
+            queue_size = max(2, queue_size)
+
+            if get_enable_shared_mem():
+                # generator dataset use idx_queue and res_queue to transfer data between main and subprocess
+                # idx_queue is used multiprocess.Queue which is not shared memory, so it's size is 0.
+                # res_queue is used shared memory, so its size is max_rowsize which is defined by user.
+                _check_shm_usage(self.num_worker, queue_size, 0, self.max_rowsize)
+            self.count = multiprocessing.Value('i', 0)
+            for worker_id in range(self.num_worker):
                 try:
+                    logger.info("Multiprocessing start method: {}".format(multiprocessing.get_start_method()))
                     worker = _GeneratorWorkerMp(self.dataset, self.eof, self.max_rowsize, queue_size, self.ppid,
                                                 self.count, worker_id)
                     worker.daemon = True
@@ -264,16 +265,23 @@ class SamplerFn(cde.PythonMultiprocessingRuntime):
                    raise RuntimeError("Failed to launch multiprocessing of GeneratorDataset: {0}".format(e))
                 self.pids.append(worker.pid)
                 self.need_join = True
-
-
-                worker.daemon = True
-                self.need_join = True
-                self.workers.append(worker)
+                self.workers.append(worker)
+            multiprocessing.set_start_method("fork", True)
 
-        if self.multi_process:
             logger.info("Launch generator worker process(es): {}".format([worker.pid for worker in self.workers]))
             if platform.system().lower() != 'windows':
                 self._launch_monitor()
+        else:
+            self.eof = threading.Event()
+            for worker_id in range(self.num_worker):
+                worker = _GeneratorWorkerMt(self.dataset, self.eof, worker_id)
+                worker.daemon = True
+                self.need_join = True
+                self.workers.append(worker)
+
+        # Register a termination function using weakref to avoid the object from unable to properly destruct.
+        atexit.register(lambda cleanup: cleanup()() if cleanup() is not None else None,
+                        weakref.WeakMethod(self.terminate))
 
     def terminate(self):
         self._stop_subprocess()
@@ -451,9 +459,9 @@ class SamplerFn(cde.PythonMultiprocessingRuntime):
     def _stop_subprocess(self):
         """Only the main process can call join. All the sub-process / sub-thread will be stopped."""
         if self.need_join is True and self.ppid == os.getpid():
+            self.need_join = False
             # abort the monitor first
             self._abort_monitor()
-            self.need_join = False
 
             # waiting for the sub-process stop
             for w in self.workers:
@@ -478,6 +486,10 @@ class SamplerFn(cde.PythonMultiprocessingRuntime):
 
         self.workers.clear()
         self.workers = None
+        # Under independent processes, the GeneratorDataset pulls up multiple processes in a spawn manner, and
+        # after the use case exits normally, there will be a warning: UserWarning: resource_tracker: There appear
+        # to be %d leaked semaphore objects to clean up at shutdown.
+        self.eof = None
 
     def _abort_monitor(self):
         """Deregister workers monitored by the watch dog and join clean process."""
@@ -505,10 +517,6 @@ class SamplerFn(cde.PythonMultiprocessingRuntime):
         self.__init__(self.dataset, self.num_worker, self.multi_process, self.max_rowsize)
 
 
-def _subprocess_handle(eof, signum, frame):
-    threading.Thread(target=eof.set()).start()
-
-
 def _ignore_sigint(is_multiprocessing):
     """
     We need to ignore sigint signal here so subprocesses can exit normally and clear.
@@ -539,7 +547,6 @@ def _generator_worker_loop(dataset, idx_queue, result_queue, eof, is_multiproces
 
     if is_multiprocessing:
         result_queue.cancel_join_thread()  # Ensure that the process does not hang when exiting
-        signal.signal(signal.SIGTERM, partial(_subprocess_handle, eof))
 
     # init the random seed and np.random seed for the subprocess
     if get_seed() != 5489:
@@ -691,6 +698,25 @@ class _GeneratorWrapper:
         return next(self.generator_new)
 
 
+class _PickleGeneratorSource:
+    """Starting multiple processes in spawn mode requires pickling source object in GeneratorDataset."""
+    def __init__(self, dataset):
+        self.dataset = dataset
+
+    def __getitem__(self, index):
+        return self.dataset[index]
+
+    def __len__(self):
+        return len(self.dataset)
+
+    def __getstate__(self):
+        state = dill.dumps(self.dataset)
+        return state
+
+    def __setstate__(self, state):
+        self.dataset = dill.loads(state)
+
+
 class GeneratorDataset(MappableDataset, UnionBaseDataset):
     """
     A source dataset that generates data from Python by invoking Python data source each epoch.
@@ -717,10 +743,10 @@ class GeneratorDataset(MappableDataset, UnionBaseDataset):
         column_names (Union[str, list[str]], optional): List of column names of the dataset. Default: ``None`` .
             Users are required to provide either column_names or schema.
         column_types (list[mindspore.dtype], optional): List of column data types of the dataset. Default: ``None`` .
-            If provided, sanity check will be performed on generator output.
+            If provided, sanity check will be performed on generator output (deprecated in future version).
         schema (Union[str, Schema], optional): Data format policy, which specifies the data types and shapes of the data
             column to be read. Both JSON file path and objects constructed by :class:`mindspore.dataset.Schema` are
-            acceptable. Default: ``None`` .
+            acceptable (deprecated in future version). Default: ``None`` .
         num_samples (int, optional): The number of samples to be included in the dataset.
             Default: ``None`` , all images.
         num_parallel_workers (int, optional): Number of worker threads/subprocesses used to
@@ -731,8 +757,8 @@ class GeneratorDataset(MappableDataset, UnionBaseDataset):
            input is required. Default: ``None`` , expected order behavior shown in the table below.
         num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
            Random accessible input is required. When this argument is specified, `num_samples` reflects the maximum
-            sample number of per shard. Used in `data parallel training <https://www.mindspore.cn/
-
+            sample number of per shard. Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+            parallel/data_parallel.html#loading-datasets>`_ .
         shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` .
            This argument must be specified only when `num_shards` is also specified.
            Random accessible input is required.
@@ -740,9 +766,10 @@ class GeneratorDataset(MappableDataset, UnionBaseDataset):
            option could be beneficial if the Python operation is computational heavy. Default: ``True``.
         max_rowsize(int, optional): Maximum size of data (in MB) that is used for shared memory
            allocation to copy data between processes, the total occupied shared memory will increase as
-            ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. If set to
+            ``num_parallel_workers`` and :func:`mindspore.dataset.config.set_prefetch_size` increase. If set to ``-1``,
            shared memory will be dynamically allocated with the actual size of data. This is only used if
-            ``python_multiprocessing`` is set to True
+            ``python_multiprocessing`` is set to ``True``. Default: ``None`` , allocate shared memory dynamically
+            (deprecated in future version).
         batch_sampler (Iterable, optional): Similar to `sampler` , but returns a batch of indices at a time, the
            corresponding data will be combined into a batch. Mutually exclusive with `num_samples` , `shuffle` ,
            `num_shards` , `shard_id` and `sampler` . Default: ``None`` , do not use batch sampler.
@@ -782,15 +809,35 @@ class GeneratorDataset(MappableDataset, UnionBaseDataset):
           (such as Pandas, Numpy or PyArrow objects) for member variables, or load less metadata in member variables,
          or configure `python_multiprocessing=False` to use multi-threading mode.
 
-
-          to use them:
+          You can use the following classes/functions to reduce the size of member variables:
 
-
+          :class:`mindspore.dataset.utils.LineReader`: Use this class to initialize your text file object in the
          `__init__` function. Then read the file content based on the line number of the object with the `__getitem__`
          function.
 
-        - Input `source` accepts user-defined Python functions (PyFuncs),
-
+        - Input `source` accepts user-defined Python functions (PyFuncs), and sets the multiprocessing start method
+          to `spawn` mode by ds.config.set_multiprocessing_start_method("spawn") with `python_multiprocessing=True`
+          and `num_parallel_workers>1` supports adding network computing operators from mindspore.nn and mindspore.ops
+          or others into this `source`, otherwise adding to the `source` is not supported.
+        - When the user defined dataset by `source` calls the DVPP operator during dataset loading and processing,
+          the supported scenarios are as follows:
+
+          +---------------+----------------------------+----------------------------+----------------------------+
+          |               |                            | Multiprocessing                                         |
+          |               | Multithreading             +----------------------------+----------------------------+
+          |               |                            | spawn                      | fork                       |
+          +===============+============================+============================+============================+
+          |Independent    |Data Processing: support    |Data Processing: support    |Data Processing: support    |
+          |               |                            |                            |                            |
+          |process mode   |Data Processing + Network   |Data Processing + Network   |Data Processing + Network   |
+          |               |training: not support       |training: support           |training: not support       |
+          +---------------+----------------------------+----------------------------+----------------------------+
+          |Non-independent|Data Processing: support    |Data Processing: support    |Data Processing: support    |
+          |               |                            |                            |                            |
+          |process mode   |Data Processing + Network   |Data Processing + Network   |Data Processing + Network   |
+          |               |training: support           |training: support           |training: not support       |
+          +---------------+----------------------------+----------------------------+----------------------------+
+
        - The parameters `num_samples` , `shuffle` , `num_shards` , `shard_id` can be used to control the sampler
          used in the dataset, and their effects when combined with parameter `sampler` are as follows.
 
@@ -873,7 +920,8 @@ class GeneratorDataset(MappableDataset, UnionBaseDataset):
            self.source = _GeneratorWrapper(self.source)
 
        self.prepared_source = None  # source to be sent to C++
-        if hasattr(self, 'operator_mixed') and getattr(self, 'operator_mixed') is True:
+        if hasattr(self, 'operator_mixed') and getattr(self, 'operator_mixed') is True and \
+                get_multiprocessing_start_method() == "fork":
            self.num_parallel_workers = 1
            logger.warning(
                "Input 'source' of 'GeneratorDataset' includes network computing operators like in mindspore.nn, "
@@ -911,31 +959,13 @@ class GeneratorDataset(MappableDataset, UnionBaseDataset):
 
        # Move get dataset_size by len from parse to here, because self.source will
        # lose attribution of '__len__' after deepcopy.
-        self.
+        self.source_len = len(self.source) if hasattr(self.source, "__len__") else -1
 
        self.max_rowsize = max_rowsize if max_rowsize is not None else -1
        self.sample_fn = None
        # Ignore batch_info in the input parameter.
        self.collate_fn = (lambda *args: collate_fn(*args[:-1])) if collate_fn is not None else None
 
-    def _calculate_source_length(self):
-        """Calculate the source length according to the source and sampler."""
-        self.source_len = -1  # unknown
-        if hasattr(self.source, "__len__"):
-            self.source_len = len(self.source)
-
-        # if user defined sampler, update the self.source_len
-        if isinstance(self.sampler, samplers.Sampler) or hasattr(self.sampler, "__iter__"):
-            if self.sampler.child_sampler is not None:
-                raise RuntimeError("GeneratorDataset does not support user defined sampler with child sampler yet.")
-            if self.sampler.num_samples is not None:
-                self.source_len = self.sampler.num_samples
-            elif hasattr(self.sampler, "__len__"):
-                self.source_len = len(self.sampler)
-            else:
-                # counting on a copied sampler to prevent changing the random state of the original one
-                self.source_len = len(list(copy.deepcopy(self.sampler)))
-
    def __deepcopy__(self, memodict):
        if id(self) in memodict:
            return memodict[id(self)]
@@ -993,6 +1023,8 @@ class GeneratorDataset(MappableDataset, UnionBaseDataset):
 
        if self.num_parallel_workers > 1 and not get_debug_mode():
            self.__validate_memory_usage()
+            # Starting multiple processes in spawn mode requires pickling source object
+            self.source = _PickleGeneratorSource(self.source)
 
            sample_fn = SamplerFn(self.source, self.num_parallel_workers, self.python_multiprocessing,
                                  self.max_rowsize)
@@ -1139,8 +1171,8 @@ class NumpySlicesDataset(GeneratorDataset):
            Default: ``None`` , expected order behavior shown in the table below.
        num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
            When this argument is specified, `num_samples` reflects the max sample number of per shard.
-            Used in `data parallel training <https://www.mindspore.cn/
-            parallel/data_parallel.html#
+            Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+            parallel/data_parallel.html#loading-datasets>`_ .
        shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This argument must be
            specified only when `num_shards` is also specified.
 
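For orientation, the sketch below shows how the spawn start method described in the updated GeneratorDataset notes above might be exercised. It is not part of the diff: MyRandomAccessDataset, the column names, and the worker count are illustrative assumptions; ds.config.set_multiprocessing_start_method("spawn") and the python_multiprocessing / num_parallel_workers parameters are the pieces referenced by the docstring changes in this release.

    # Hypothetical usage sketch for the spawn start method (not taken from the diff).
    import numpy as np
    import mindspore.dataset as ds


    class MyRandomAccessDataset:
        """Illustrative user-defined random-accessible source."""

        def __init__(self, size=100):
            self._data = np.random.rand(size, 32).astype(np.float32)
            self._label = np.random.randint(0, 10, (size,)).astype(np.int32)

        def __getitem__(self, index):
            return self._data[index], self._label[index]

        def __len__(self):
            return len(self._data)


    if __name__ == "__main__":
        # Select the multiprocessing start method before the dataset is constructed,
        # as the updated GeneratorDataset notes describe.
        ds.config.set_multiprocessing_start_method("spawn")

        dataset = ds.GeneratorDataset(
            source=MyRandomAccessDataset(),
            column_names=["data", "label"],
            num_parallel_workers=4,       # > 1 so worker processes are actually created
            python_multiprocessing=True)  # subprocesses rather than threads

        for data, label in dataset.create_tuple_iterator(output_numpy=True):
            pass  # each row is produced by one of the spawned worker processes

Under spawn, worker processes re-import the launching module, which is why the dataset is built under the `if __name__ == "__main__":` guard, and why the diff wraps the user source in the new _PickleGeneratorSource (dill-based pickling) before handing it to SamplerFn.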