mindspore 2.5.0__cp311-cp311-win_amd64.whl → 2.6.0__cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic. Click here for more details.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +6 -4
- mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -33
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +19 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +25 -194
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +109 -75
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +4 -4
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +4 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -1
- mindspore/common/_stub_tensor.py +5 -10
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +2014 -3386
- mindspore/common/api.py +386 -355
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +5 -2
- mindspore/common/dump.py +7 -5
- mindspore/common/file_system.py +3 -0
- mindspore/common/generator.py +3 -0
- mindspore/common/hook_handle.py +5 -3
- mindspore/common/initializer.py +10 -6
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +2 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +106 -39
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +332 -714
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +47 -2
- mindspore/communication/comm_func.py +70 -53
- mindspore/communication/management.py +83 -17
- mindspore/context.py +228 -571
- mindspore/dataset/__init__.py +44 -20
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +102 -120
- mindspore/dataset/engine/datasets_audio.py +22 -22
- mindspore/dataset/engine/datasets_standard_format.py +43 -24
- mindspore/dataset/engine/datasets_text.py +78 -85
- mindspore/dataset/engine/datasets_user_defined.py +109 -77
- mindspore/dataset/engine/datasets_vision.py +111 -108
- mindspore/dataset/engine/iterators.py +5 -3
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/samplers.py +279 -57
- mindspore/dataset/engine/serializer_deserializer.py +2 -1
- mindspore/dataset/engine/validators.py +10 -0
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/device_context/ascend/op_debug.py +60 -1
- mindspore/device_context/ascend/op_tuning.py +0 -4
- mindspore/device_manager.py +39 -3
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -2
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +22 -26
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +4 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +40 -22
- mindspore/experimental/optim/radam.py +5 -5
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -81
- mindspore/hal/event.py +38 -55
- mindspore/hal/memory.py +115 -147
- mindspore/hal/stream.py +81 -125
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +40 -2
- mindspore/mindrecord/__init__.py +20 -7
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +133 -702
- mindspore/mint/distributed/__init__.py +5 -1
- mindspore/mint/distributed/distributed.py +198 -113
- mindspore/mint/linalg/__init__.py +2 -0
- mindspore/mint/nn/__init__.py +280 -18
- mindspore/mint/nn/functional.py +282 -64
- mindspore/mint/nn/layer/__init__.py +4 -0
- mindspore/mint/nn/layer/_functions.py +7 -3
- mindspore/mint/nn/layer/activation.py +120 -13
- mindspore/mint/nn/layer/conv.py +234 -28
- mindspore/mint/nn/layer/normalization.py +15 -16
- mindspore/mint/nn/layer/padding.py +1 -1
- mindspore/mint/nn/layer/pooling.py +66 -1
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1253 -179
- mindspore/nn/layer/activation.py +23 -21
- mindspore/nn/layer/basic.py +22 -16
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +53 -42
- mindspore/nn/layer/embedding.py +9 -8
- mindspore/nn/layer/normalization.py +48 -42
- mindspore/nn/layer/pooling.py +75 -31
- mindspore/nn/layer/transformer.py +11 -10
- mindspore/nn/learning_rate_schedule.py +4 -2
- mindspore/nn/loss/loss.py +27 -19
- mindspore/nn/optim/ada_grad.py +6 -5
- mindspore/nn/optim/adadelta.py +9 -7
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +18 -14
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +3 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +9 -7
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +178 -117
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +3 -3
- mindspore/numpy/array_creations.py +3 -3
- mindspore/numpy/array_ops.py +1 -1
- mindspore/numpy/utils.py +1 -2
- mindspore/numpy/utils_const.py +1 -2
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +3 -2
- mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
- mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
- mindspore/ops/_vmap/vmap_array_ops.py +32 -6
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
- mindspore/ops/_vmap/vmap_math_ops.py +4 -7
- mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +127 -52
- mindspore/ops/auto_generate/gen_extend_func.py +286 -208
- mindspore/ops/auto_generate/gen_ops_def.py +2783 -2335
- mindspore/ops/auto_generate/gen_ops_prim.py +8992 -2686
- mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +19 -24
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +4 -5
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +28 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +1631 -2347
- mindspore/ops/function/clip_func.py +38 -45
- mindspore/ops/function/debug_func.py +36 -44
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +46 -78
- mindspore/ops/function/math_func.py +3024 -3855
- mindspore/ops/function/nn_func.py +678 -274
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +17 -30
- mindspore/ops/function/random_func.py +216 -361
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +5 -5
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +26 -18
- mindspore/ops/functional.py +8 -5
- mindspore/ops/functional_overload.py +655 -4
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +21 -14
- mindspore/ops/operations/_custom_ops_utils.py +235 -0
- mindspore/ops/operations/_grad_ops.py +1 -10
- mindspore/ops/operations/_inner_ops.py +5 -76
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +39 -24
- mindspore/ops/operations/comm_ops.py +150 -107
- mindspore/ops/operations/custom_ops.py +287 -32
- mindspore/ops/operations/debug_ops.py +119 -16
- mindspore/ops/operations/inner_ops.py +1 -1
- mindspore/ops/operations/linalg_ops.py +1 -58
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +746 -79
- mindspore/ops/operations/math_ops.py +21 -18
- mindspore/ops/operations/nn_ops.py +67 -224
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +43 -32
- mindspore/ops/tensor_method.py +243 -17
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
- mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
- mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
- mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
- mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
- mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
- mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
- mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
- mindspore/ops_generate/{template.py → common/template.py} +96 -84
- mindspore/ops_generate/gen_ops.py +23 -325
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
- mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -10
- mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
- mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
- mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
- mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
- mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
- mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
- mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
- mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
- mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
- mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
- mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
- mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
- mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
- mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +6 -2
- mindspore/parallel/_auto_parallel_context.py +140 -12
- mindspore/parallel/_cell_wrapper.py +132 -15
- mindspore/parallel/_parallel_serialization.py +95 -4
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +198 -25
- mindspore/parallel/algo_parameter_config.py +3 -3
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +658 -37
- mindspore/parallel/cluster/process_entity/_api.py +151 -19
- mindspore/parallel/cluster/run.py +1 -1
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +258 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +24 -13
- mindspore/parallel/shard.py +137 -62
- mindspore/parallel/transform_safetensors.py +288 -95
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +9 -5
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
- mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +25 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
- mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/common/constant.py +12 -0
- mindspore/profiler/common/msprof_cmd_tool.py +42 -23
- mindspore/profiler/common/path_manager.py +24 -0
- mindspore/profiler/common/profiler_context.py +26 -2
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_parameters.py +59 -18
- mindspore/profiler/common/profiler_path_manager.py +66 -7
- mindspore/profiler/dynamic_profiler.py +112 -79
- mindspore/profiler/envprofiler.py +26 -1
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +57 -14
- mindspore/profiler/platform/npu_profiler.py +33 -7
- mindspore/profiler/profiler.py +541 -45
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +4 -0
- mindspore/profiler/schedule.py +57 -22
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +25 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +2 -2
- mindspore/runtime/executor.py +40 -11
- mindspore/runtime/memory.py +37 -13
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +43 -9
- mindspore/train/amp.py +1 -1
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +24 -40
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_flops_collector.py +2 -3
- mindspore/train/callback/_history.py +7 -4
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +8 -13
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -105
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +4 -5
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +8 -6
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +19 -12
- mindspore/train/model.py +262 -127
- mindspore/train/serialization.py +246 -988
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +4 -2
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/runtime_execution_order_check.py +2 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/METADATA +2 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/RECORD +485 -440
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_constants.py +0 -190
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
- /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
|
@@ -60,12 +60,12 @@ class CMUArcticDataset(MappableDataset, AudioBaseDataset):
|
|
|
60
60
|
num_shards (int, optional): Number of shards that the dataset will be divided into.
|
|
61
61
|
Default: ``None``, no dividing. When this argument is specified, `num_samples`
|
|
62
62
|
reflects the maximum number of samples per shard.
|
|
63
|
-
Used in `data parallel training <https://www.mindspore.cn/
|
|
64
|
-
parallel/data_parallel.html#
|
|
63
|
+
Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
|
|
64
|
+
parallel/data_parallel.html#loading-datasets>`_ .
|
|
65
65
|
shard_id (int, optional): The shard ID within `num_shards` . Default: ``None``, will use ``0``. This
|
|
66
66
|
argument can only be specified when `num_shards` is also specified.
|
|
67
67
|
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
|
|
68
|
-
`Single-Node Data Cache <https://www.mindspore.cn/
|
|
68
|
+
`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
|
|
69
69
|
Default: ``None``, which means no cache is used.
|
|
70
70
|
|
|
71
71
|
Raises:
|
|
@@ -179,12 +179,12 @@ class GTZANDataset(MappableDataset, AudioBaseDataset):
|
|
|
179
179
|
dataset. Default: ``None`` , expected order behavior shown in the table below.
|
|
180
180
|
num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
|
|
181
181
|
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
|
|
182
|
-
Used in `data parallel training <https://www.mindspore.cn/
|
|
183
|
-
parallel/data_parallel.html#
|
|
182
|
+
Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
|
|
183
|
+
parallel/data_parallel.html#loading-datasets>`_ .
|
|
184
184
|
shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
|
|
185
185
|
argument can only be specified when `num_shards` is also specified.
|
|
186
186
|
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
|
|
187
|
-
`Single-Node Data Cache <https://www.mindspore.cn/
|
|
187
|
+
`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
|
|
188
188
|
Default: ``None`` , which means no cache is used.
|
|
189
189
|
|
|
190
190
|
Raises:
|
|
@@ -299,12 +299,12 @@ class LibriTTSDataset(MappableDataset, AudioBaseDataset):
|
|
|
299
299
|
dataset. Default: ``None`` , expected order behavior shown in the table below.
|
|
300
300
|
num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
|
|
301
301
|
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
|
|
302
|
-
Used in `data parallel training <https://www.mindspore.cn/
|
|
303
|
-
parallel/data_parallel.html#
|
|
302
|
+
Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
|
|
303
|
+
parallel/data_parallel.html#loading-datasets>`_ .
|
|
304
304
|
shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
|
|
305
305
|
argument can only be specified when `num_shards` is also specified.
|
|
306
306
|
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
|
|
307
|
-
`Single-Node Data Cache <https://www.mindspore.cn/
|
|
307
|
+
`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
|
|
308
308
|
Default: ``None`` , which means no cache is used.
|
|
309
309
|
|
|
310
310
|
Raises:
|
|
@@ -428,12 +428,12 @@ class LJSpeechDataset(MappableDataset, AudioBaseDataset):
|
|
|
428
428
|
num_shards (int, optional): Number of shards that the dataset will be divided into.
|
|
429
429
|
Default: ``None`` . When this argument is specified, `num_samples` reflects
|
|
430
430
|
the maximum sample number of per shard.
|
|
431
|
-
Used in `data parallel training <https://www.mindspore.cn/
|
|
432
|
-
parallel/data_parallel.html#
|
|
431
|
+
Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
|
|
432
|
+
parallel/data_parallel.html#loading-datasets>`_ .
|
|
433
433
|
shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
|
|
434
434
|
argument can only be specified when `num_shards` is also specified.
|
|
435
435
|
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
|
|
436
|
-
`Single-Node Data Cache <https://www.mindspore.cn/
|
|
436
|
+
`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
|
|
437
437
|
Default: ``None`` , which means no cache is used.
|
|
438
438
|
|
|
439
439
|
Raises:
|
|
@@ -553,12 +553,12 @@ class SpeechCommandsDataset(MappableDataset, AudioBaseDataset):
|
|
|
553
553
|
Default: ``None`` , expected order behavior shown in the table below.
|
|
554
554
|
num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
|
|
555
555
|
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
|
|
556
|
-
Used in `data parallel training <https://www.mindspore.cn/
|
|
557
|
-
parallel/data_parallel.html#
|
|
556
|
+
Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
|
|
557
|
+
parallel/data_parallel.html#loading-datasets>`_ .
|
|
558
558
|
shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` .
|
|
559
559
|
This argument can only be specified when `num_shards` is also specified.
|
|
560
560
|
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
|
|
561
|
-
`Single-Node Data Cache <https://www.mindspore.cn/
|
|
561
|
+
`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
|
|
562
562
|
Default: ``None`` , which means no cache is used.
|
|
563
563
|
|
|
564
564
|
Raises:
|
|
@@ -668,12 +668,12 @@ class TedliumDataset(MappableDataset, AudioBaseDataset):
|
|
|
668
668
|
num_shards (int, optional): Number of shards that the dataset will be divided
|
|
669
669
|
into. Default: ``None`` . When this argument is specified, `num_samples` reflects
|
|
670
670
|
the maximum sample number of per shard.
|
|
671
|
-
Used in `data parallel training <https://www.mindspore.cn/
|
|
672
|
-
parallel/data_parallel.html#
|
|
671
|
+
Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
|
|
672
|
+
parallel/data_parallel.html#loading-datasets>`_ .
|
|
673
673
|
shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
|
|
674
674
|
argument can only be specified when `num_shards` is also specified.
|
|
675
675
|
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
|
|
676
|
-
`Single-Node Data Cache <https://www.mindspore.cn/
|
|
676
|
+
`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
|
|
677
677
|
Default: ``None`` , which means no cache is used.
|
|
678
678
|
|
|
679
679
|
Raises:
|
|
@@ -721,7 +721,7 @@ class TedliumDataset(MappableDataset, AudioBaseDataset):
|
|
|
721
721
|
|
|
722
722
|
This is the TED-LIUM corpus release 2, licensed under Creative Commons BY-NC-ND 3.0. All talks and text are
|
|
723
723
|
property of TED Conferences LLC. The TED-LIUM corpus was made from audio talks and their transcriptions available
|
|
724
|
-
on the TED website.
|
|
724
|
+
on the TED website. These data were prepared and filtered in order to train acoustic models to participate to
|
|
725
725
|
the International Workshop on Spoken Language Translation 2011 (the LIUM English/French SLT system reached the
|
|
726
726
|
first rank in the SLT task).
|
|
727
727
|
|
|
@@ -850,12 +850,12 @@ class YesNoDataset(MappableDataset, AudioBaseDataset):
|
|
|
850
850
|
dataset. Default: ``None`` , expected order behavior shown in the table below.
|
|
851
851
|
num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
|
|
852
852
|
When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
|
|
853
|
-
Used in `data parallel training <https://www.mindspore.cn/
|
|
854
|
-
parallel/data_parallel.html#
|
|
853
|
+
Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
|
|
854
|
+
parallel/data_parallel.html#loading-datasets>`_ .
|
|
855
855
|
shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This argument can only
|
|
856
856
|
be specified when `num_shards` is also specified.
|
|
857
857
|
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
|
|
858
|
-
`Single-Node Data Cache <https://www.mindspore.cn/
|
|
858
|
+
`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
|
|
859
859
|
Default: ``None`` , which means no cache is used.
|
|
860
860
|
|
|
861
861
|
Raises:
|
|
@@ -28,8 +28,8 @@ import numpy as np
|
|
|
28
28
|
import mindspore._c_dataengine as cde
|
|
29
29
|
from mindspore import log as logger
|
|
30
30
|
|
|
31
|
-
from .datasets import UnionBaseDataset, SourceDataset, MappableDataset,
|
|
32
|
-
|
|
31
|
+
from .datasets import UnionBaseDataset, SourceDataset, MappableDataset, Schema
|
|
32
|
+
from .samplers import Shuffle, shuffle_to_shuffle_mode
|
|
33
33
|
from .datasets_user_defined import GeneratorDataset
|
|
34
34
|
from .obs.obs_mindrecord_dataset import MindRecordFromOBS
|
|
35
35
|
from .validators import check_csvdataset, check_minddataset, check_tfrecorddataset, check_obsminddataset
|
|
@@ -75,12 +75,12 @@ class CSVDataset(SourceDataset, UnionBaseDataset):
|
|
|
75
75
|
|
|
76
76
|
num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
|
|
77
77
|
When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
|
|
78
|
-
Used in `data parallel training <https://www.mindspore.cn/
|
|
79
|
-
parallel/data_parallel.html#
|
|
78
|
+
Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
|
|
79
|
+
parallel/data_parallel.html#loading-datasets>`_ .
|
|
80
80
|
shard_id (int, optional): The shard ID within `num_shards` . Default: ``None``. This
|
|
81
81
|
argument can only be specified when `num_shards` is also specified.
|
|
82
82
|
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
|
|
83
|
-
`Single-Node Data Cache <https://www.mindspore.cn/
|
|
83
|
+
`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
|
|
84
84
|
Default: ``None``, which means no cache is used.
|
|
85
85
|
|
|
86
86
|
Raises:
|
|
@@ -128,25 +128,30 @@ class MindDataset(MappableDataset, UnionBaseDataset):
|
|
|
128
128
|
num_parallel_workers (int, optional): Number of worker threads to read the data.
|
|
129
129
|
Default: ``None`` , will use global default workers(8), it can be set
|
|
130
130
|
by :func:`mindspore.dataset.config.set_num_parallel_workers` .
|
|
131
|
-
shuffle (Union[bool, Shuffle], optional): Perform reshuffling of the data every epoch
|
|
132
|
-
Default: ``None``, performs
|
|
133
|
-
|
|
134
|
-
If `shuffle` is ``
|
|
135
|
-
|
|
136
|
-
There are three levels of shuffling, desired shuffle enum defined by :class:`mindspore.dataset.Shuffle` .
|
|
131
|
+
shuffle (Union[bool, Shuffle], optional): Perform reshuffling of the data every epoch, bool type and ``Shuffle``
|
|
132
|
+
enum are both supported to pass in. Default: ``None``, performs ``mindspore.dataset.Shuffle.ADAPTIVE`` .
|
|
133
|
+
If `shuffle` is set to ``False`` , no shuffling will be performed.
|
|
134
|
+
If `shuffle` is set to ``True`` , `shuffle` is set to ``mindspore.dataset.Shuffle.ADAPTIVE`` .
|
|
135
|
+
There are several levels of shuffling, desired shuffle enum defined by :class:`mindspore.dataset.Shuffle` .
|
|
137
136
|
|
|
138
|
-
- ``Shuffle.
|
|
137
|
+
- ``Shuffle.ADAPTIVE`` : When the number of dataset samples is less than or equal to 100 million,
|
|
138
|
+
``Shuffle.GLOBAL`` is used. When the number of dataset samples is greater than 100
|
|
139
|
+
million, ``Shuffle.PARTIAL`` is used. The shuffle is performed once
|
|
140
|
+
every 1 million samples.
|
|
141
|
+
|
|
142
|
+
- ``Shuffle.GLOBAL`` : Global shuffle of all rows of data in dataset. The memory usage is large.
|
|
143
|
+
|
|
144
|
+
- ``Shuffle.PARTIAL`` : Partial shuffle of data in dataset for every 1 million samples.
|
|
145
|
+
The memory usage is less than ``Shuffle.GLOBAL`` .
|
|
139
146
|
|
|
140
147
|
- ``Shuffle.FILES`` : Shuffle the file sequence but keep the order of data within each file.
|
|
141
|
-
Not supported when the number of samples in the dataset is greater than 100 million.
|
|
142
148
|
|
|
143
149
|
- ``Shuffle.INFILE`` : Keep the file sequence the same but shuffle the data within each file.
|
|
144
|
-
Not supported when the number of samples in the dataset is greater than 100 million.
|
|
145
150
|
|
|
146
151
|
num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
|
|
147
152
|
When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
|
|
148
|
-
Used in `data parallel training <https://www.mindspore.cn/
|
|
149
|
-
parallel/data_parallel.html#
|
|
153
|
+
Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
|
|
154
|
+
parallel/data_parallel.html#loading-datasets>`_ .
|
|
150
155
|
shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
|
|
151
156
|
argument can only be specified when `num_shards` is also specified.
|
|
152
157
|
sampler (Sampler, optional): Object used to choose samples from the
|
|
@@ -161,7 +166,7 @@ class MindDataset(MappableDataset, UnionBaseDataset):
|
|
|
161
166
|
num_samples (int, optional): The number of samples to be included in the dataset.
|
|
162
167
|
Default: ``None`` , all samples.
|
|
163
168
|
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
|
|
164
|
-
`Single-Node Data Cache <https://www.mindspore.cn/
|
|
169
|
+
`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
|
|
165
170
|
Default: ``None`` , which means no cache is used.
|
|
166
171
|
|
|
167
172
|
Raises:
|
|
@@ -170,10 +175,13 @@ class MindDataset(MappableDataset, UnionBaseDataset):
|
|
|
170
175
|
RuntimeError: If `num_shards` is specified but `shard_id` is None.
|
|
171
176
|
RuntimeError: If `shard_id` is specified but `num_shards` is None.
|
|
172
177
|
ValueError: If `shard_id` is not in range of [0, `num_shards` ).
|
|
178
|
+
TypeError: If `shuffle` is not of type None, bool or Shuffle.
|
|
173
179
|
|
|
174
180
|
Note:
|
|
175
181
|
- When sharding MindRecord (by configuring `num_shards` and `shard_id`), there are two strategies to implement
|
|
176
|
-
the data sharding logic. This API uses the strategy 2
|
|
182
|
+
the data sharding logic. This API uses the strategy 1 by default, which can be switched to strategy 2 by
|
|
183
|
+
setting the environment variable `MS_DEV_MINDRECORD_SHARD_BY_BLOCK=True` . This environment variable only
|
|
184
|
+
applies to the `DistributedSampler` sampler.
|
|
177
185
|
|
|
178
186
|
.. list-table:: Data sharding strategy 1
|
|
179
187
|
:widths: 50 50 50 50
|
|
@@ -230,14 +238,25 @@ class MindDataset(MappableDataset, UnionBaseDataset):
|
|
|
230
238
|
"""
|
|
231
239
|
|
|
232
240
|
def parse(self, children=None):
|
|
241
|
+
child_sampler = self.sampler.get_child()
|
|
242
|
+
if (child_sampler is not None and not isinstance(child_sampler, samplers.DistributedSampler)
|
|
243
|
+
and self.num_padded > 0):
|
|
244
|
+
raise RuntimeError("When the padded sample logic is enabled and use sampler chain,"
|
|
245
|
+
"the first sampler which is specified by parameter "
|
|
246
|
+
"sampler or (num_shards, shard_id) is not distributed sampling.")
|
|
233
247
|
return cde.MindDataNode(self.dataset_files, self.columns_list, self.sampler, self.new_padded_sample,
|
|
234
248
|
self.num_padded, shuffle_to_shuffle_mode(self.shuffle_option))
|
|
235
249
|
|
|
236
250
|
@check_minddataset
|
|
237
251
|
def __init__(self, dataset_files, columns_list=None, num_parallel_workers=None, shuffle=None, num_shards=None,
|
|
238
252
|
shard_id=None, sampler=None, padded_sample=None, num_padded=None, num_samples=None, cache=None):
|
|
253
|
+
if sampler is None:
|
|
254
|
+
if shuffle is None or shuffle is True:
|
|
255
|
+
shuffle = Shuffle.ADAPTIVE
|
|
256
|
+
elif shuffle is False:
|
|
257
|
+
shuffle = Shuffle.FALSE
|
|
239
258
|
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
|
|
240
|
-
shuffle=
|
|
259
|
+
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
|
|
241
260
|
if num_samples and shuffle in (Shuffle.FILES, Shuffle.INFILE):
|
|
242
261
|
raise ValueError("'Shuffle.FILES' or 'Shuffle.INFILE' and 'num_samples' "
|
|
243
262
|
"cannot be specified at the same time.")
|
|
@@ -357,8 +376,8 @@ class TFRecordDataset(SourceDataset, UnionBaseDataset):
|
|
|
357
376
|
num_shards (int, optional): Number of shards that the dataset will be divided
|
|
358
377
|
into. Default: ``None`` . When this argument is specified, `num_samples` reflects
|
|
359
378
|
the maximum sample number per shard.
|
|
360
|
-
Used in `data parallel training <https://www.mindspore.cn/
|
|
361
|
-
parallel/data_parallel.html#
|
|
379
|
+
Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
|
|
380
|
+
parallel/data_parallel.html#loading-datasets>`_ .
|
|
362
381
|
shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
|
|
363
382
|
argument can only be specified when `num_shards` is also specified.
|
|
364
383
|
shard_equal_rows (bool, optional): Get equal rows for all shards. Default: ``False``. If `shard_equal_rows`
|
|
@@ -368,7 +387,7 @@ class TFRecordDataset(SourceDataset, UnionBaseDataset):
|
|
|
368
387
|
When `compression_type` is not ``None``, and `num_samples` or numRows (parsed from `schema` ) is provided,
|
|
369
388
|
`shard_equal_rows` will be implied as ``True``.
|
|
370
389
|
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
|
|
371
|
-
`Single-Node Data Cache <https://www.mindspore.cn/
|
|
390
|
+
`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
|
|
372
391
|
Default: ``None`` , which means no cache is used.
|
|
373
392
|
compression_type (str, optional): The type of compression used for all files, must be either ``''``,
|
|
374
393
|
``'GZIP'``, or ``'ZLIB'``. Default: ``None`` , as in empty string. It is highly recommended to
|
|
@@ -474,8 +493,8 @@ class OBSMindDataset(GeneratorDataset):
|
|
|
474
493
|
|
|
475
494
|
num_shards (int, optional): Number of shards that the dataset will be divided
|
|
476
495
|
into. Default: ``None`` .
|
|
477
|
-
Used in `data parallel training <https://www.mindspore.cn/
|
|
478
|
-
parallel/data_parallel.html#
|
|
496
|
+
Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
|
|
497
|
+
parallel/data_parallel.html#loading-datasets>`_ .
|
|
479
498
|
shard_id (int, optional): The shard ID within num_shards. Default: ``None`` . This
|
|
480
499
|
argument can only be specified when `num_shards` is also specified.
|
|
481
500
|
shard_equal_rows (bool, optional): Get equal rows for all shards. Default: ``True``. If shard_equal_rows
|