mindspore 2.5.0__cp39-cp39-win_amd64.whl → 2.6.0rc1__cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +6 -4
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -33
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +19 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +24 -193
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +97 -74
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +4 -4
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +4 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -1
- mindspore/common/_stub_tensor.py +5 -10
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +1915 -3287
- mindspore/common/api.py +341 -354
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +5 -2
- mindspore/common/dump.py +7 -5
- mindspore/common/file_system.py +3 -0
- mindspore/common/hook_handle.py +5 -3
- mindspore/common/initializer.py +10 -6
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +2 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +106 -39
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +297 -714
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +47 -2
- mindspore/communication/comm_func.py +70 -53
- mindspore/communication/management.py +83 -17
- mindspore/context.py +214 -560
- mindspore/dataset/__init__.py +44 -20
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +102 -120
- mindspore/dataset/engine/datasets_audio.py +22 -22
- mindspore/dataset/engine/datasets_standard_format.py +43 -24
- mindspore/dataset/engine/datasets_text.py +78 -85
- mindspore/dataset/engine/datasets_user_defined.py +108 -76
- mindspore/dataset/engine/datasets_vision.py +111 -108
- mindspore/dataset/engine/iterators.py +5 -3
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/samplers.py +279 -57
- mindspore/dataset/engine/serializer_deserializer.py +2 -1
- mindspore/dataset/engine/validators.py +10 -0
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/device_context/ascend/op_debug.py +60 -1
- mindspore/device_context/ascend/op_tuning.py +0 -4
- mindspore/device_manager.py +39 -3
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +22 -26
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +4 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +40 -22
- mindspore/experimental/optim/radam.py +5 -5
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -81
- mindspore/hal/event.py +38 -55
- mindspore/hal/memory.py +93 -144
- mindspore/hal/stream.py +81 -125
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +40 -2
- mindspore/mindrecord/__init__.py +20 -7
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +131 -700
- mindspore/mint/distributed/__init__.py +5 -1
- mindspore/mint/distributed/distributed.py +194 -109
- mindspore/mint/linalg/__init__.py +2 -0
- mindspore/mint/nn/__init__.py +280 -18
- mindspore/mint/nn/functional.py +282 -64
- mindspore/mint/nn/layer/__init__.py +4 -0
- mindspore/mint/nn/layer/_functions.py +7 -3
- mindspore/mint/nn/layer/activation.py +120 -13
- mindspore/mint/nn/layer/conv.py +218 -24
- mindspore/mint/nn/layer/normalization.py +15 -16
- mindspore/mint/nn/layer/padding.py +1 -1
- mindspore/mint/nn/layer/pooling.py +66 -1
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1250 -176
- mindspore/nn/layer/activation.py +23 -21
- mindspore/nn/layer/basic.py +22 -16
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +22 -17
- mindspore/nn/layer/embedding.py +9 -8
- mindspore/nn/layer/normalization.py +48 -42
- mindspore/nn/layer/pooling.py +75 -31
- mindspore/nn/layer/transformer.py +11 -10
- mindspore/nn/learning_rate_schedule.py +4 -2
- mindspore/nn/loss/loss.py +27 -19
- mindspore/nn/optim/ada_grad.py +6 -5
- mindspore/nn/optim/adadelta.py +9 -7
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +16 -12
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +1 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +9 -7
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +178 -117
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +3 -3
- mindspore/numpy/array_creations.py +3 -3
- mindspore/numpy/array_ops.py +1 -1
- mindspore/numpy/math_ops.py +4 -4
- mindspore/numpy/utils.py +1 -2
- mindspore/numpy/utils_const.py +1 -2
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +3 -2
- mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
- mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
- mindspore/ops/_vmap/vmap_array_ops.py +7 -6
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
- mindspore/ops/_vmap/vmap_math_ops.py +4 -7
- mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +102 -49
- mindspore/ops/auto_generate/gen_extend_func.py +281 -135
- mindspore/ops/auto_generate/gen_ops_def.py +2574 -2326
- mindspore/ops/auto_generate/gen_ops_prim.py +8566 -2755
- mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +19 -24
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -3
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +28 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +1629 -2345
- mindspore/ops/function/clip_func.py +38 -45
- mindspore/ops/function/debug_func.py +36 -44
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +46 -78
- mindspore/ops/function/math_func.py +3035 -3705
- mindspore/ops/function/nn_func.py +676 -241
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +17 -30
- mindspore/ops/function/random_func.py +204 -361
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +5 -5
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +24 -17
- mindspore/ops/functional.py +6 -4
- mindspore/ops/functional_overload.py +547 -4
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +10 -5
- mindspore/ops/operations/_custom_ops_utils.py +247 -0
- mindspore/ops/operations/_grad_ops.py +1 -10
- mindspore/ops/operations/_inner_ops.py +5 -76
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +37 -22
- mindspore/ops/operations/comm_ops.py +150 -107
- mindspore/ops/operations/custom_ops.py +221 -23
- mindspore/ops/operations/debug_ops.py +115 -16
- mindspore/ops/operations/inner_ops.py +1 -1
- mindspore/ops/operations/linalg_ops.py +1 -58
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +746 -79
- mindspore/ops/operations/math_ops.py +21 -18
- mindspore/ops/operations/nn_ops.py +65 -191
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +43 -32
- mindspore/ops/tensor_method.py +232 -13
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
- mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
- mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
- mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
- mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
- mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
- mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
- mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
- mindspore/ops_generate/{template.py → common/template.py} +96 -84
- mindspore/ops_generate/gen_ops.py +23 -325
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
- mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -7
- mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
- mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
- mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
- mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
- mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
- mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
- mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
- mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
- mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
- mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
- mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
- mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
- mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
- mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +6 -2
- mindspore/parallel/_auto_parallel_context.py +133 -6
- mindspore/parallel/_cell_wrapper.py +130 -15
- mindspore/parallel/_parallel_serialization.py +95 -4
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +198 -25
- mindspore/parallel/algo_parameter_config.py +3 -3
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +656 -37
- mindspore/parallel/cluster/process_entity/_api.py +151 -19
- mindspore/parallel/cluster/run.py +1 -1
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +259 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +24 -13
- mindspore/parallel/shard.py +137 -61
- mindspore/parallel/transform_safetensors.py +287 -95
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +9 -5
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
- mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +22 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
- mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/common/constant.py +12 -0
- mindspore/profiler/common/msprof_cmd_tool.py +42 -23
- mindspore/profiler/common/path_manager.py +24 -0
- mindspore/profiler/common/profiler_context.py +26 -2
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_parameters.py +59 -18
- mindspore/profiler/common/profiler_path_manager.py +66 -7
- mindspore/profiler/dynamic_profiler.py +112 -79
- mindspore/profiler/envprofiler.py +26 -1
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +57 -14
- mindspore/profiler/platform/npu_profiler.py +33 -7
- mindspore/profiler/profiler.py +541 -45
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +4 -0
- mindspore/profiler/schedule.py +57 -22
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +25 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +2 -2
- mindspore/runtime/executor.py +40 -11
- mindspore/runtime/memory.py +25 -8
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +35 -7
- mindspore/train/amp.py +1 -1
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +24 -40
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_flops_collector.py +2 -3
- mindspore/train/callback/_history.py +7 -4
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +8 -13
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +179 -103
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +4 -5
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +8 -6
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +19 -12
- mindspore/train/model.py +176 -103
- mindspore/train/serialization.py +246 -988
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +4 -2
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +2 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +483 -438
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_constants.py +0 -190
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
- /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
mindspore/dataset/engine/datasets_vision.py

@@ -30,7 +30,8 @@ from PIL import Image
 
 import mindspore._c_dataengine as cde
 
-from .
+from .samplers import Shuffle
+from .datasets import VisionBaseDataset, SourceDataset, MappableDataset, Schema
 from .datasets_user_defined import GeneratorDataset
 from .validators import check_caltech101_dataset, check_caltech256_dataset, check_celebadataset, \
     check_cityscapes_dataset, check_cocodataset, check_div2k_dataset, check_emnist_dataset, check_fake_image_dataset, \
@@ -141,8 +142,8 @@ class Caltech101Dataset(GeneratorDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
     into. Default: ``None`` . When this argument is specified, `num_samples` reflects
     the maximum sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 
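The num_shards / shard_id pair documented in the hunk above is how a dataset is split across data-parallel workers: each process builds the dataset with the total shard count and its own shard index. A minimal sketch of that usage (not part of the diff), assuming a hypothetical local ./caltech101 directory and an already-launched distributed job:

import mindspore.dataset as ds
from mindspore.communication import init, get_rank, get_group_size

init()                        # join the communication group set up by the launcher
rank_id = get_rank()          # index of this process
rank_size = get_group_size()  # total number of processes

# Each process reads only its own 1/rank_size slice of the data.
dataset = ds.Caltech101Dataset(
    dataset_dir="./caltech101",   # hypothetical path
    num_shards=rank_size,
    shard_id=rank_id,
)
for sample in dataset.create_dict_iterator(output_numpy=True):
    pass                          # feed the training step here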
@@ -286,12 +287,12 @@ class Caltech256Dataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
     into. Default: ``None`` . When this argument is specified, `num_samples` reflects
     the maximum sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:
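The cache argument documented above plugs the dataset into the single-node cache service. A hedged sketch (not part of the diff) of wiring a DatasetCache into one of these vision datasets; the session id is a placeholder for the value produced by the separately started cache server:

import mindspore.dataset as ds

# Assumes a cache server is already running and a session was created
# beforehand (e.g. with the cache_admin command-line tool); the id is a placeholder.
some_cache = ds.DatasetCache(session_id=1456416665, size=0, spilling=False)

dataset = ds.Caltech256Dataset(
    dataset_dir="./caltech256",   # hypothetical path
    cache=some_cache,             # decoded samples are served from the cache on later epochs
)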
@@ -400,14 +401,14 @@ class CelebADataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
     into. Default: ``None`` . When this argument is specified, `num_samples` reflects
     the maximum sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
-decrypt (callable, optional): Image decryption function, which
+decrypt (callable, optional): Image decryption function, which receives the path of the encrypted image file
     and returns the decrypted bytes data. Default: ``None`` , no decryption.
 
 Raises:
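The reworded decrypt entry above spells out the callable's contract: it takes the path of an encrypted image file and returns the decrypted bytes. A toy sketch (not from the diff) with a purely illustrative XOR scheme:

import mindspore.dataset as ds

KEY = 0x5A  # placeholder single-byte key

def xor_decrypt(path):
    # Read an encrypted image file and return decrypted image bytes.
    with open(path, "rb") as f:
        data = f.read()
    return bytes(b ^ KEY for b in data)

dataset = ds.CelebADataset(
    dataset_dir="./celeba_encrypted",   # hypothetical path
    decrypt=xor_decrypt,                # called per file before image decoding
)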
@@ -555,12 +556,12 @@ class Cifar10Dataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
     into. Default: ``None`` . When this argument is specified, `num_samples` reflects
     the maximum sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:

@@ -671,12 +672,12 @@ class Cifar100Dataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
     into. Default: ``None`` . When this argument is specified, `num_samples` reflects
     the maximum sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:
@@ -781,18 +782,19 @@ class CityscapesDataset(MappableDataset, VisionBaseDataset):
     by :func:`mindspore.dataset.config.set_num_parallel_workers` .
 shuffle (bool, optional): Whether to perform shuffle on the dataset. Default: ``None`` , expected
     order behavior shown in the table below.
-decode (bool, optional): Decode the images after reading. Default: ``None``,
+decode (bool, optional): Decode the images after reading. Default: ``None``,
+    which means ``False``, the images are not decoded after reading.
 sampler (Sampler, optional): Object used to choose samples from the
     dataset. Default: ``None`` , expected order behavior shown in the table below.
 num_shards (int, optional): Number of shards that the dataset will be divided
     into. Default: ``None`` . When this argument is specified, `num_samples` reflects
     the max sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:
@@ -940,12 +942,12 @@ class CocoDataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
     into. Default: ``None`` . When this argument is specified, `num_samples` reflects
     the maximum sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 extra_metadata(bool, optional): Flag to add extra meta-data to row. If True, an additional column will be
     output at the end :py:obj:`[_meta-filename, dtype=string]` . Default: ``False``.

@@ -1184,12 +1186,12 @@ class DIV2KDataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
     into. Default: ``None`` . When this argument is specified, `num_samples` reflects
     the max sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:
@@ -1354,12 +1356,12 @@ class EMnistDataset(MappableDataset, VisionBaseDataset):
     dataset. Default: ``None`` , expected order behavior shown in the table below.
 num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
     When this argument is specified, `num_samples` reflects the max sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:

@@ -1471,12 +1473,12 @@ class FakeImageDataset(MappableDataset, VisionBaseDataset):
     dataset. Default: ``None`` , expected order behavior shown in the table below.
 num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
     When this argument is specified, `num_samples` reflects the max sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:
@@ -1544,12 +1546,12 @@ class FashionMnistDataset(MappableDataset, VisionBaseDataset):
     Default: ``None`` , expected order behavior shown in the table below.
 num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
     When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:

@@ -1651,12 +1653,12 @@ class FlickrDataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
     into. Default: ``None`` . When this argument is specified, `num_samples` reflects
     the max sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:
@@ -1881,8 +1883,8 @@ class Flowers102Dataset(GeneratorDataset):
     Default: ``None`` , expected order behavior shown in the table below.
 num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
     When this argument is specified, `num_samples` reflects the max sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` .
     This argument must be specified only when `num_shards` is also specified.
 

@@ -2029,12 +2031,12 @@ class Food101Dataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided into.
     Default: ``None`` . When this argument
     is specified, `num_samples` reflects the maximum sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` .
     This argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:
@@ -2152,12 +2154,12 @@ class ImageFolderDataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
     into. Default: ``None`` . When this argument is specified, `num_samples` reflects
     the maximum sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 decrypt (callable, optional): Image decryption function, which accepts the path of the encrypted image file
     and returns the decrypted bytes data. Default: ``None`` , no decryption.
@@ -2298,12 +2300,12 @@ class KITTIDataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
     into. Default: ``None`` . When this argument is specified, `num_samples` reflects
     the max sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards`. Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:

@@ -2420,12 +2422,12 @@ class KMnistDataset(MappableDataset, VisionBaseDataset):
     Default: ``None`` , expected order behavior shown in the table below.
 num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
     When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:
@@ -2532,12 +2534,12 @@ class LFWDataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
     into. Default: ``None`` . When this argument is specified, `num_samples` reflects
     the max sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards`. Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:

@@ -2673,12 +2675,12 @@ class LSUNDataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
     into. Default: ``None`` . When this argument is specified, `num_samples` reflects
     the max sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards`. Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:
@@ -2796,12 +2798,12 @@ class ManifestDataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
     into. Default: ``None`` . When this argument is specified, `num_samples` reflects
     the max number of samples per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:

@@ -2919,12 +2921,12 @@ class MnistDataset(MappableDataset, VisionBaseDataset):
     dataset. Default: ``None`` , expected order behavior shown in the table below.
 num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
     When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:
@@ -3026,12 +3028,12 @@ class OmniglotDataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
     into. Default: ``None`` . When this argument is specified, `num_samples` reflects
     the max sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards`. Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:

@@ -3148,12 +3150,12 @@ class PhotoTourDataset(MappableDataset, VisionBaseDataset):
     Default: ``None`` , expected order behavior shown in the table below.
 num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
     When this argument is specified, `num_samples` reflects the max sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:
@@ -3278,12 +3280,12 @@ class Places365Dataset(MappableDataset, VisionBaseDataset):
     dataset. Default: ``None`` , expected order behavior shown in the table below.
 num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
     When this argument is specified, `num_samples` reflects the max sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:
@@ -3389,7 +3391,8 @@ class QMnistDataset(MappableDataset, VisionBaseDataset):
 dataset_dir (str): Path to the root directory that contains the dataset.
 usage (str, optional): Usage of this dataset, can be ``'train'``, ``'test'``, ``'test10k'``,
     ``'test50k'``, ``'nist'`` or ``'all'``. Default: ``None`` , will read all samples.
-compat (bool, optional):
+compat (bool, optional): Specifies the labeling information for each sample.
+    Whether the label for each example is class number (compat= ``True`` )
     or the full QMNIST information (compat= ``False`` ). Default: ``True``.
 num_samples (int, optional): The number of images to be included in the dataset.
     Default: ``None`` , will read all images.
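The expanded compat description above distinguishes the two label layouts QMnistDataset can return. A short sketch (not part of the diff), assuming a hypothetical ./qmnist directory and the usual image/label column order:

import mindspore.dataset as ds

# compat=True: the label column holds just the digit class.
classes = ds.QMnistDataset("./qmnist", usage="test10k", compat=True)
# compat=False: the label column holds the full QMNIST information record.
full = ds.QMnistDataset("./qmnist", usage="test10k", compat=False)

_, label_class = next(classes.create_tuple_iterator(output_numpy=True))
_, label_full = next(full.create_tuple_iterator(output_numpy=True))
print(label_class.shape, label_full.shape)  # scalar class vs. multi-field record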
@@ -3402,12 +3405,12 @@ class QMnistDataset(MappableDataset, VisionBaseDataset):
     dataset. Default: ``None`` , expected order behavior shown in the table below.
 num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
     When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
-    Used in `data parallel training <https://www.mindspore.cn/
-    parallel/data_parallel.html#
+    Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+    parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
     argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-    `Single-Node Data Cache <https://www.mindspore.cn/
+    `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
     Default: ``None`` , which means no cache is used.
 
 Raises:
@@ -3505,15 +3508,15 @@ class RandomDataset(SourceDataset, VisionBaseDataset):
 Default: ``None`` , will use global default workers(8), it can be set
 by :func:`mindspore.dataset.config.set_num_parallel_workers` .
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-`Single-Node Data Cache <https://www.mindspore.cn/
+`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
 Default: ``None`` , which means no cache is used.
 shuffle (bool, optional): Whether or not to perform shuffle on the dataset.
 Default: ``None`` , expected order behavior shown in the table below.
 num_shards (int, optional): Number of shards that the dataset will be divided
 into. Default: ``None`` . When this argument is specified, `num_samples` reflects
 the maximum sample number of per shard.
-Used in `data parallel training <https://www.mindspore.cn/
-parallel/data_parallel.html#
+Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
 argument can only be specified when `num_shards` is also specified.

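RandomDataset only receives the same link fixes, but since it is the synthetic-data generator in this group, here is a short hedged sketch of a typical call (column names and shapes are illustrative assumptions, not taken from this diff):

# Minimal sketch: RandomDataset producing synthetic rows from a schema.
import mindspore.dataset as ds
import mindspore.common.dtype as mstype

schema = ds.Schema()
schema.add_column("image", de_type=mstype.uint8, shape=[28, 28, 1])
schema.add_column("label", de_type=mstype.int32, shape=[1])

# cache / num_shards / shard_id from the docstring above could be passed
# here exactly as for the real vision datasets.
dataset = ds.RandomDataset(schema=schema, total_rows=64, num_parallel_workers=4)

for item in dataset.create_dict_iterator(output_numpy=True, num_epochs=1):
    print(item["image"].shape, item["label"].shape)
    break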
@@ -3589,12 +3592,12 @@ class RenderedSST2Dataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
 into. Default: ``None`` . When this argument is specified, `num_samples` reflects
 the maximum sample number of per shard.
-Used in `data parallel training <https://www.mindspore.cn/
-parallel/data_parallel.html#
+Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . This
 argument can only be specified when `num_shards` is also specified. Default: ``None`` .
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-`Single-Node Data Cache <https://www.mindspore.cn/
+`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
 Default: ``None`` , which means no cache is used.

 Raises:
@@ -3788,8 +3791,8 @@ class SBDataset(GeneratorDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
 into. Default: ``None`` . When this argument is specified, `num_samples` reflects
 the max sample number of per shard.
-Used in `data parallel training <https://www.mindspore.cn/
-parallel/data_parallel.html#
+Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
 argument can only be specified when `num_shards` is also specified.

@@ -3901,12 +3904,12 @@ class SBUDataset(MappableDataset, VisionBaseDataset):
 dataset. Default: ``None`` , expected order behavior shown in the table below.
 num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
 When this argument is specified, `num_samples` reflects the max sample number of per shard.
-Used in `data parallel training <https://www.mindspore.cn/
-parallel/data_parallel.html#
+Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
 argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-`Single-Node Data Cache <https://www.mindspore.cn/
+`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
 Default: ``None`` , which means no cache is used.

 Raises:
@@ -4000,12 +4003,12 @@ class SemeionDataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
 into. Default: ``None`` . When this argument is specified, `num_samples` reflects
 the maximum sample number of per shard.
-Used in `data parallel training <https://www.mindspore.cn/
-parallel/data_parallel.html#
+Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
 argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-`Single-Node Data Cache <https://www.mindspore.cn/
+`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
 Default: ``None`` , which means no cache is used.

 Raises:
@@ -4112,12 +4115,12 @@ class STL10Dataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
 into. Default: ``None`` . When this argument is specified, `num_samples` reflects
 the max sample number of per shard.
-Used in `data parallel training <https://www.mindspore.cn/
-parallel/data_parallel.html#
+Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
 argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-`Single-Node Data Cache <https://www.mindspore.cn/
+`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
 Default: ``None`` , which means no cache is used.

 Raises:
@@ -4227,12 +4230,12 @@ class SUN397Dataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
 into. Default: ``None`` . When this argument is specified, `num_samples` reflects
 the maximum sample number of per shard.
-Used in `data parallel training <https://www.mindspore.cn/
-parallel/data_parallel.html#
+Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . This
 argument can only be specified when `num_shards` is also specified. Default: ``None`` .
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-`Single-Node Data Cache <https://www.mindspore.cn/
+`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
 Default: ``None`` , which means no cache is used.

 Raises:
@@ -4390,8 +4393,8 @@ class SVHNDataset(GeneratorDataset):
 input is required. Default: ``None`` , expected order behavior shown in the table below.
 num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
 When this argument is specified, `num_samples` reflects the max sample number of per shard.
-Used in `data parallel training <https://www.mindspore.cn/
-parallel/data_parallel.html#
+Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` .
 This argument must be specified only when `num_shards` is also specified.

@@ -4492,12 +4495,12 @@ class USPSDataset(SourceDataset, VisionBaseDataset):

 num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
 When this argument is specified, `num_samples` reflects the max sample number of per shard.
-Used in `data parallel training <https://www.mindspore.cn/
-parallel/data_parallel.html#
+Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
 argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-`Single-Node Data Cache <https://www.mindspore.cn/
+`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
 Default: ``None`` , which means no cache is used.

 Raises:
@@ -4602,12 +4605,12 @@ class VOCDataset(MappableDataset, VisionBaseDataset):
 num_shards (int, optional): Number of shards that the dataset will be divided
 into. Default: ``None`` . When this argument is specified, `num_samples` reflects
 the maximum sample number of per shard.
-Used in `data parallel training <https://www.mindspore.cn/
-parallel/data_parallel.html#
+Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
 argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-`Single-Node Data Cache <https://www.mindspore.cn/
+`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
 Default: ``None`` , which means no cache is used.
 extra_metadata(bool, optional): Flag to add extra meta-data to row. If True, an additional column named
 :py:obj:`[_meta-filename, dtype=string]` will be output at the end. Default: ``False``.
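Beyond the link fixes, this hunk also shows VOCDataset's `extra_metadata` flag in context. A hedged sketch of how the extra `_meta-filename` column is typically surfaced (based on the VOCDataset notes rather than this diff; the rename step and the paths are assumptions):

# Minimal sketch: VOCDataset with extra_metadata=True.
import mindspore.dataset as ds

dataset = ds.VOCDataset("/path/to/VOCdevkit/VOC2012",
                        task="Segmentation",
                        usage="train",
                        extra_metadata=True)

# The extra column is hidden under the name "_meta-filename"; renaming it
# is the usual way to expose it in iterator output (assumption from docs).
dataset = dataset.rename(input_columns=["_meta-filename"],
                         output_columns=["filename"])

for item in dataset.create_dict_iterator(output_numpy=True, num_epochs=1):
    print(item["filename"])
    break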
@@ -4786,12 +4789,12 @@ class WIDERFaceDataset(MappableDataset, VisionBaseDataset):
 Default: ``None`` , expected order behavior shown in the table below.
 num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
 When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
-Used in `data parallel training <https://www.mindspore.cn/
-parallel/data_parallel.html#
+Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+parallel/data_parallel.html#loading-datasets>`_ .
 shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` .
 This argument can only be specified when `num_shards` is also specified.
 cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
-`Single-Node Data Cache <https://www.mindspore.cn/
+`Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
 Default: ``None`` , which means no cache is used.

 Raises:
@@ -355,9 +355,11 @@ class Iterator:
 "It might because Iterator stop() had been called, or C++ pipeline crashed silently.")
 raise RuntimeError("Iterator does not have a running C++ pipeline.")

-
-
-
+from mindspore.profiler import mstx
+range_id = mstx.range_start('dataloader', None)
+out = self._parallel_transformation_iteration() if self.parallel_convert else self.serial_conversion_iteration()
+mstx.range_end(range_id)
+return out

 def __deepcopy__(self, memo):
 return self
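The Iterator change wraps each `__next__` call in an mstx range, so the dataloader stage shows up as a named span in profiling timelines. The same API can be used from user code; a hedged sketch (markers are only collected while a profiling session, e.g. on Ascend, is active):

# Minimal sketch of the mstx range API used by the new Iterator code.
from mindspore.profiler import mstx

range_id = mstx.range_start("my_preprocessing", None)  # None: default stream
# ... code to be shown as one "my_preprocessing" span in the timeline ...
mstx.range_end(range_id)

# mstx also exposes a point-in-time marker (assumption: available in 2.6):
mstx.mark("epoch_boundary")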