mindspore 2.1.0__cp39-cp39-win_amd64.whl → 2.2.11__cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -1
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -1
- mindspore/_checkparam.py +23 -29
- mindspore/_extends/graph_kernel/__init__.py +0 -1
- mindspore/_extends/graph_kernel/model/graph_split.py +84 -76
- mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
- mindspore/_extends/graph_kernel/splitter.py +4 -11
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +122 -15
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +84 -67
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
- mindspore/_extends/parallel_compile/akg_compiler/util.py +10 -7
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +2 -2
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +6 -5
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
- mindspore/_extends/parse/__init__.py +13 -15
- mindspore/_extends/parse/namespace.py +7 -33
- mindspore/_extends/parse/parser.py +67 -72
- mindspore/_extends/parse/resources.py +1 -1
- mindspore/_extends/parse/standard_method.py +86 -106
- mindspore/_extends/parse/trope.py +1 -1
- mindspore/_extends/remote/kernel_build_server.py +25 -7
- mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
- mindspore/_install_custom.py +43 -0
- mindspore/amp.py +47 -11
- mindspore/boost/boost.py +1 -8
- mindspore/boost/boost_cell_wrapper.py +3 -2
- mindspore/boost/grad_accumulation.py +1 -1
- mindspore/boost/group_loss_scale_manager.py +8 -7
- mindspore/common/__init__.py +5 -3
- mindspore/common/_jit_fallback_utils.py +6 -0
- mindspore/common/_register_for_adapter.py +2 -0
- mindspore/common/_register_for_tensor.py +2 -2
- mindspore/common/_stub_tensor.py +13 -0
- mindspore/common/_utils.py +29 -0
- mindspore/common/api.py +174 -259
- mindspore/common/auto_dynamic_shape.py +494 -0
- mindspore/common/dtype.py +18 -11
- mindspore/common/dump.py +6 -4
- mindspore/common/initializer.py +14 -14
- mindspore/common/jit_config.py +33 -15
- mindspore/common/lazy_inline.py +126 -7
- mindspore/common/mindir_util.py +101 -0
- mindspore/common/parameter.py +51 -41
- mindspore/common/seed.py +4 -4
- mindspore/common/sparse_tensor.py +13 -14
- mindspore/common/tensor.py +243 -165
- mindspore/communication/__init__.py +7 -4
- mindspore/communication/_comm_helper.py +83 -4
- mindspore/communication/management.py +152 -84
- mindspore/config/op_info.config +14 -3
- mindspore/context.py +152 -61
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +2 -2
- mindspore/dataset/audio/transforms.py +52 -52
- mindspore/dataset/callback/ds_callback.py +16 -2
- mindspore/dataset/core/config.py +68 -51
- mindspore/dataset/engine/cache_client.py +33 -7
- mindspore/dataset/engine/datasets.py +250 -112
- mindspore/dataset/engine/datasets_audio.py +43 -211
- mindspore/dataset/engine/datasets_standard_format.py +16 -35
- mindspore/dataset/engine/datasets_text.py +43 -67
- mindspore/dataset/engine/datasets_user_defined.py +86 -100
- mindspore/dataset/engine/datasets_vision.py +219 -1029
- mindspore/dataset/engine/iterators.py +11 -4
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +4 -0
- mindspore/dataset/engine/obs/util.py +3 -0
- mindspore/dataset/engine/samplers.py +1 -1
- mindspore/dataset/engine/validators.py +19 -5
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +101 -127
- mindspore/dataset/text/utils.py +205 -138
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/py_transforms_util.py +40 -12
- mindspore/dataset/transforms/transforms.py +95 -40
- mindspore/dataset/utils/browse_dataset.py +8 -2
- mindspore/dataset/utils/line_reader.py +17 -19
- mindspore/dataset/vision/__init__.py +3 -3
- mindspore/dataset/vision/c_transforms.py +6 -3
- mindspore/dataset/vision/transforms.py +409 -287
- mindspore/dataset/vision/utils.py +13 -14
- mindspore/dataset/vision/validators.py +11 -1
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/map_parameter.py +14 -0
- mindspore/{nn/optim_ex → experimental/optim}/__init__.py +30 -29
- mindspore/{nn/optim_ex → experimental/optim}/adam.py +60 -67
- mindspore/{nn/optim_ex → experimental/optim}/adamw.py +181 -203
- mindspore/experimental/optim/lr_scheduler.py +1427 -0
- mindspore/{nn/optim_ex → experimental/optim}/optimizer.py +252 -259
- mindspore/{nn/optim_ex → experimental/optim}/sgd.py +147 -152
- mindspore/gen_ops.py +273 -0
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/data_type.h +2 -1
- mindspore/include/api/graph.h +0 -15
- mindspore/include/api/kernel.h +2 -0
- mindspore/include/api/kernel_api.h +37 -12
- mindspore/include/api/model.h +17 -14
- mindspore/include/api/status.h +8 -3
- mindspore/include/api/types.h +37 -4
- mindspore/include/c_api/ms/abstract.h +67 -0
- mindspore/include/c_api/ms/attribute.h +197 -0
- mindspore/include/c_api/ms/base/handle_types.h +43 -0
- mindspore/include/c_api/ms/base/macros.h +32 -0
- mindspore/include/c_api/ms/base/status.h +33 -0
- mindspore/include/c_api/ms/base/types.h +282 -0
- mindspore/include/c_api/ms/context.h +102 -0
- mindspore/include/c_api/ms/graph.h +160 -0
- mindspore/include/c_api/ms/node.h +606 -0
- mindspore/include/c_api/ms/tensor.h +161 -0
- mindspore/include/c_api/ms/value.h +84 -0
- mindspore/include/dataset/constants.h +6 -5
- mindspore/include/dataset/execute.h +23 -13
- mindspore/include/dataset/text.h +26 -26
- mindspore/include/dataset/transforms.h +13 -13
- mindspore/include/dataset/vision.h +60 -60
- mindspore/include/dataset/vision_ascend.h +5 -6
- mindspore/include/dataset/vision_lite.h +17 -17
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
- mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/__init__.py +0 -2
- mindspore/nn/cell.py +313 -74
- mindspore/nn/dynamic_lr.py +21 -21
- mindspore/nn/layer/activation.py +22 -30
- mindspore/nn/layer/basic.py +15 -13
- mindspore/nn/layer/channel_shuffle.py +1 -1
- mindspore/nn/layer/container.py +271 -9
- mindspore/nn/layer/conv.py +323 -204
- mindspore/nn/layer/dense.py +8 -5
- mindspore/nn/layer/embedding.py +33 -27
- mindspore/nn/layer/flash_attention.py +61 -95
- mindspore/nn/layer/image.py +8 -6
- mindspore/nn/layer/math.py +16 -25
- mindspore/nn/layer/normalization.py +107 -66
- mindspore/nn/layer/padding.py +1 -1
- mindspore/nn/layer/pooling.py +131 -109
- mindspore/nn/layer/rnn_cells.py +27 -22
- mindspore/nn/layer/rnns.py +13 -16
- mindspore/nn/layer/thor_layer.py +1 -1
- mindspore/nn/layer/transformer.py +221 -154
- mindspore/nn/learning_rate_schedule.py +9 -1
- mindspore/nn/loss/loss.py +235 -174
- mindspore/nn/optim/ada_grad.py +2 -1
- mindspore/nn/optim/adadelta.py +1 -0
- mindspore/nn/optim/adafactor.py +2 -1
- mindspore/nn/optim/adam.py +7 -4
- mindspore/nn/optim/adamax.py +3 -2
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -3
- mindspore/nn/optim/ftrl.py +6 -5
- mindspore/nn/optim/lamb.py +7 -4
- mindspore/nn/optim/lars.py +1 -1
- mindspore/nn/optim/lazyadam.py +5 -3
- mindspore/nn/optim/momentum.py +2 -1
- mindspore/nn/optim/optimizer.py +53 -4
- mindspore/nn/optim/proximal_ada_grad.py +3 -4
- mindspore/nn/optim/rmsprop.py +4 -3
- mindspore/nn/optim/rprop.py +23 -12
- mindspore/nn/optim/sgd.py +26 -11
- mindspore/nn/optim/thor.py +9 -7
- mindspore/nn/probability/bijector/bijector.py +5 -5
- mindspore/nn/probability/bijector/power_transform.py +27 -27
- mindspore/nn/probability/bijector/softplus.py +3 -3
- mindspore/nn/probability/distribution/_utils/custom_ops.py +3 -3
- mindspore/nn/probability/distribution/bernoulli.py +5 -5
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +7 -7
- mindspore/nn/probability/distribution/cauchy.py +0 -1
- mindspore/nn/probability/distribution/distribution.py +3 -3
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +4 -4
- mindspore/nn/probability/distribution/gumbel.py +4 -4
- mindspore/nn/probability/distribution/log_normal.py +2 -2
- mindspore/nn/probability/distribution/logistic.py +2 -2
- mindspore/nn/probability/distribution/poisson.py +4 -4
- mindspore/nn/probability/distribution/transformed_distribution.py +3 -3
- mindspore/nn/probability/distribution/uniform.py +6 -6
- mindspore/nn/wrap/__init__.py +4 -2
- mindspore/nn/wrap/cell_wrapper.py +87 -34
- mindspore/nn/wrap/grad_reducer.py +8 -5
- mindspore/nn/wrap/loss_scale.py +105 -42
- mindspore/numpy/array_creations.py +1 -2
- mindspore/numpy/array_ops.py +3 -2
- mindspore/numpy/utils_const.py +5 -5
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/__init__.py +0 -5
- mindspore/ops/_grad_experimental/grad_array_ops.py +2 -3
- mindspore/ops/_grad_experimental/grad_comm_ops.py +15 -2
- mindspore/ops/_grad_experimental/grad_debug_ops.py +0 -37
- mindspore/ops/_grad_experimental/grad_implementations.py +11 -1
- mindspore/ops/_grad_experimental/grad_inner_ops.py +2 -216
- mindspore/ops/_grad_experimental/grad_math_ops.py +19 -199
- mindspore/ops/_grad_experimental/grad_sparse.py +15 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +14 -2
- mindspore/ops/_op_impl/aicpu/add.py +3 -3
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
- mindspore/ops/_op_impl/{_custom_op/flash_attention/constants.py → aicpu/eps.py} +18 -27
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
- mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +21 -2
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
- mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +3 -3
- mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
- mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
- mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
- mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
- mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
- mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
- mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -5
- mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -5
- mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
- mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
- mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
- mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
- mindspore/ops/_op_impl/tbe/__init__.py +4 -4
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
- mindspore/ops/_primitive_cache.py +1 -1
- mindspore/ops/_tracefunc.py +45 -13
- mindspore/ops/_utils/utils.py +6 -1
- mindspore/ops/_vmap/vmap_array_ops.py +3 -3
- mindspore/ops/_vmap/vmap_base.py +3 -3
- mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
- mindspore/ops/_vmap/vmap_math_ops.py +5 -2
- mindspore/ops/_vmap/vmap_nn_ops.py +61 -7
- mindspore/ops/arg_dtype_cast.py +54 -0
- mindspore/ops/composite/base.py +37 -10
- mindspore/ops/composite/math_ops.py +5 -4
- mindspore/ops/composite/multitype_ops/_compile_utils.py +275 -73
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +16 -9
- mindspore/ops/composite/multitype_ops/add_impl.py +43 -4
- mindspore/ops/composite/multitype_ops/getitem_impl.py +42 -4
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
- mindspore/ops/deprecated.py +304 -0
- mindspore/ops/function/__init__.py +4 -1
- mindspore/ops/function/array_func.py +174 -193
- mindspore/ops/function/clip_func.py +81 -13
- mindspore/ops/function/debug_func.py +1 -1
- mindspore/ops/function/grad/grad_func.py +18 -9
- mindspore/ops/function/image_func.py +10 -4
- mindspore/ops/function/linalg_func.py +5 -5
- mindspore/ops/function/math_func.py +575 -386
- mindspore/ops/function/nn_func.py +568 -260
- mindspore/ops/function/random_func.py +88 -57
- mindspore/ops/function/sparse_func.py +1 -1
- mindspore/ops/function/sparse_unary_func.py +14 -12
- mindspore/ops/function/vmap_func.py +6 -5
- mindspore/ops/functional.py +15 -10
- mindspore/ops/op_info_register.py +244 -25
- mindspore/ops/operations/__init__.py +31 -19
- mindspore/ops/operations/_grad_ops.py +71 -7
- mindspore/ops/operations/_inner_ops.py +350 -17
- mindspore/ops/operations/_quant_ops.py +4 -8
- mindspore/ops/operations/_sequence_ops.py +42 -0
- mindspore/ops/operations/array_ops.py +68 -282
- mindspore/ops/operations/comm_ops.py +107 -59
- mindspore/ops/operations/custom_ops.py +94 -70
- mindspore/ops/operations/debug_ops.py +8 -4
- mindspore/ops/operations/image_ops.py +18 -12
- mindspore/ops/operations/inner_ops.py +26 -3
- mindspore/ops/operations/math_ops.py +192 -144
- mindspore/ops/operations/nn_ops.py +857 -489
- mindspore/ops/operations/other_ops.py +0 -22
- mindspore/ops/operations/random_ops.py +53 -111
- mindspore/ops/operations/sparse_ops.py +3 -1
- mindspore/ops/primitive.py +24 -18
- mindspore/parallel/_auto_parallel_context.py +68 -8
- mindspore/parallel/_cost_model_context.py +2 -2
- mindspore/parallel/_offload_context.py +17 -3
- mindspore/parallel/_parallel_serialization.py +12 -5
- mindspore/parallel/_ps_context.py +12 -0
- mindspore/parallel/_tensor.py +18 -13
- mindspore/parallel/_transformer/layers.py +5 -3
- mindspore/parallel/_transformer/loss.py +1 -0
- mindspore/parallel/_transformer/moe.py +2 -2
- mindspore/parallel/_transformer/op_parallel_config.py +12 -1
- mindspore/parallel/_transformer/transformer.py +23 -3
- mindspore/parallel/_utils.py +11 -7
- mindspore/parallel/algo_parameter_config.py +85 -5
- mindspore/parallel/checkpoint_transform.py +19 -12
- mindspore/parallel/shard.py +21 -14
- mindspore/profiler/common/struct_type.py +3 -3
- mindspore/profiler/common/util.py +4 -2
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/profiler/parser/aicpu_data_parser.py +5 -3
- mindspore/profiler/parser/ascend_flops_generator.py +2 -2
- mindspore/profiler/parser/ascend_fpbp_generator.py +1 -1
- mindspore/profiler/parser/ascend_hccl_generator.py +249 -12
- mindspore/profiler/parser/ascend_msprof_exporter.py +150 -255
- mindspore/profiler/parser/ascend_msprof_generator.py +204 -17
- mindspore/profiler/parser/ascend_op_generator.py +6 -6
- mindspore/profiler/parser/ascend_steptrace_generator.py +6 -4
- mindspore/profiler/parser/ascend_timeline_generator.py +14 -187
- mindspore/profiler/parser/base_timeline_generator.py +10 -8
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +16 -12
- mindspore/profiler/parser/flops_parser.py +15 -11
- mindspore/profiler/parser/framework_parser.py +38 -22
- mindspore/profiler/parser/hccl_parser.py +16 -12
- mindspore/profiler/parser/integrator.py +22 -11
- mindspore/profiler/parser/memory_usage_parser.py +2 -2
- mindspore/profiler/parser/minddata_analyzer.py +12 -14
- mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_parser.py +8 -4
- mindspore/profiler/parser/op_intermediate_parser.py +5 -2
- mindspore/profiler/parser/optime_parser.py +1 -1
- mindspore/profiler/parser/profiler_info.py +21 -2
- mindspore/profiler/parser/step_trace_parser.py +11 -14
- mindspore/profiler/profiling.py +179 -89
- mindspore/rewrite/api/node.py +102 -19
- mindspore/rewrite/api/node_type.py +5 -1
- mindspore/rewrite/api/pattern_engine.py +1 -1
- mindspore/rewrite/api/scoped_value.py +9 -17
- mindspore/rewrite/api/symbol_tree.py +131 -47
- mindspore/rewrite/ast_helpers/__init__.py +2 -1
- mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +93 -46
- mindspore/rewrite/common/rewrite_elog.py +5 -1
- mindspore/rewrite/namer.py +33 -24
- mindspore/rewrite/namespace.py +14 -5
- mindspore/{_extends/graph_kernel/expanders/complex → rewrite/node}/__init__.py +9 -9
- mindspore/rewrite/node/call_function.py +79 -0
- mindspore/rewrite/node/cell_container.py +135 -0
- mindspore/rewrite/node/control_flow.py +88 -0
- mindspore/rewrite/{node.py → node/node.py} +273 -234
- mindspore/rewrite/node/node_manager.py +254 -0
- mindspore/rewrite/{topological_manager.py → node/node_topological_manager.py} +13 -46
- mindspore/rewrite/parsers/arguments_parser.py +22 -21
- mindspore/rewrite/parsers/assign_parser.py +216 -221
- mindspore/rewrite/parsers/attribute_parser.py +9 -7
- mindspore/rewrite/parsers/class_def_parser.py +174 -113
- mindspore/rewrite/parsers/constant_parser.py +9 -6
- mindspore/rewrite/parsers/container_parser.py +9 -7
- mindspore/rewrite/parsers/for_parser.py +42 -21
- mindspore/rewrite/parsers/function_def_parser.py +24 -16
- mindspore/rewrite/parsers/if_parser.py +28 -24
- mindspore/rewrite/parsers/module_parser.py +196 -25
- mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
- mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
- mindspore/rewrite/parsers/return_parser.py +6 -6
- mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree.py +523 -578
- mindspore/rewrite/symbol_tree_builder.py +9 -193
- mindspore/rewrite/symbol_tree_dumper.py +2 -2
- mindspore/run_check/_check_version.py +6 -4
- mindspore/{ops/bprop_mindir → safeguard}/__init__.py +4 -3
- mindspore/safeguard/rewrite_obfuscation.py +541 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +7 -3
- mindspore/train/amp.py +323 -123
- mindspore/train/anf_ir_pb2.py +14 -2
- mindspore/train/callback/_backup_and_restore.py +2 -12
- mindspore/train/callback/_callback.py +29 -4
- mindspore/train/callback/_checkpoint.py +23 -8
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_landscape.py +4 -4
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +3 -4
- mindspore/train/callback/_summary_collector.py +15 -8
- mindspore/train/callback/_time_monitor.py +58 -5
- mindspore/train/data_sink.py +5 -11
- mindspore/train/dataset_helper.py +84 -57
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/__init__.py +3 -3
- mindspore/train/metrics/cosine_similarity.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +3 -2
- mindspore/train/metrics/mean_surface_distance.py +3 -2
- mindspore/train/metrics/metric.py +39 -19
- mindspore/train/metrics/roc.py +2 -2
- mindspore/train/metrics/root_mean_square_surface_distance.py +4 -3
- mindspore/train/mind_ir_pb2.py +85 -36
- mindspore/train/model.py +187 -47
- mindspore/train/serialization.py +487 -161
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/_writer_pool.py +3 -2
- mindspore/train/summary/summary_record.py +37 -17
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/train/train_thor/dataset_helper.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/METADATA +7 -4
- {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/RECORD +406 -463
- mindspore/_extends/graph_kernel/expander.py +0 -80
- mindspore/_extends/graph_kernel/expanders/__init__.py +0 -54
- mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
- mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
- mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
- mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
- mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
- mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
- mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
- mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
- mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
- mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
- mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
- mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
- mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
- mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
- mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
- mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
- mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
- mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
- mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
- mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
- mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
- mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
- mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
- mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
- mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
- mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
- mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
- mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
- mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
- mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
- mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
- mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
- mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
- mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
- mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
- mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
- mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
- mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
- mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
- mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
- mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
- mindspore/dataset/datapreprocess/__init__.py +0 -20
- mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
- mindspore/include/api/net.h +0 -142
- mindspore/nn/lr_scheduler.py +0 -262
- mindspore/ops/_grad_experimental/grad_image_ops.py +0 -248
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -181
- mindspore/ops/_grad_experimental/grad_other_ops.py +0 -72
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
- mindspore/ops/_grad_experimental/grad_sequence_ops.py +0 -351
- mindspore/ops/_op_impl/_custom_op/flash_attention/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +0 -350
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +0 -409
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +0 -578
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +0 -199
- mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +0 -446
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +0 -45
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +0 -67
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +0 -62
- mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
- mindspore/rewrite/node_visitor.py +0 -44
- {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/WHEEL +0 -0
- {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/entry_points.txt +0 -0
- {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/top_level.txt +0 -0
mindspore/context.py
CHANGED
@@ -268,6 +268,8 @@ class _Context:
     "allow_mix_precision_fp16" and "allow_mix_precision_bf16".
     - jit_compile (bool): ``False`` and ``True``.
     - atomic_clean_policy (int): ``0`` and ``1``. Default: ``1`` .
+    - exception_dump (str): Enable exception dump for Ascend operators. ``"0"`` , ``"1"`` and ``"2"``.
+      Default: ``"2"`` .
     - op_precision_mode (str): config file path.
     - parallel_speed_up_json_path(Union[str, None]): The path to the parallel speed up json file.
       If its value is None or '', it does not take effect. Default None.
@@ -280,6 +282,7 @@ class _Context:
     'atomic_clean_policy': [0, 1],
     'matmul_allow_hf32': [True, False],
     'conv_allow_hf32': [True, False],
+    'exception_dump': ["0", "1", "2"],
     'op_precision_mode': (str,),
     'parallel_speed_up_json_path': (str, None)
     }
@@ -289,6 +292,7 @@ class _Context:
     'atomic_clean_policy': self._get_ascend_config_setter('atomic_clean_policy', str),
     'matmul_allow_hf32': self._get_ascend_config_setter('matmul_allow_hf32', lambda v: "1" if v else "0"),
     'conv_allow_hf32': self._get_ascend_config_setter('conv_allow_hf32', lambda v: "1" if v else "0"),
+    'exception_dump': self._get_ascend_config_setter('exception_dump'),
     'op_precision_mode': self._set_op_precision_mode,
     'parallel_speed_up_json_path': self._set_speedup_config_path
     }
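The three hunks above register `exception_dump` as a valid `ascend_config` key and route it through the generic Ascend config setter. It is consumed through the existing `ascend_config` argument of `mindspore.set_context`; a minimal usage sketch, assuming an Ascend build of MindSpore 2.2:

    import mindspore as ms

    # "0" turns exception dump off, "1" dumps all inputs/outputs of failing AICore/AICPU
    # operators, "2" (the default) dumps inputs of failing AICore operators.
    ms.set_context(device_target="Ascend", ascend_config={"exception_dump": "1"})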
@@ -302,8 +306,8 @@ class _Context:
     raise ValueError(f"For 'ascend_config', the value of argument {ascend_key} must be one of "
                      f"{supported_modes}, but got {ascend_value}.")
     if isinstance(supported_modes, tuple) and not isinstance(ascend_value, supported_modes):
-        raise
-
+        raise TypeError(f"For 'ascend_config', the type of argument {ascend_key} must be one of "
+                        f"{supported_modes}, but got {type(ascend_value)}.")
     cfg_setter = ascend_cfg_setters.get(ascend_key)
     cfg_setter(ascend_value)

@@ -317,6 +321,8 @@ class _Context:
     - conv_fprop_algo (str): "normal", "performance" or user specifies conv forward algorithm directly.
     - conv_dgrad_algo (str): "normal", "performance" or user specifies conv data grad algorithm directly.
     - conv_wgrad_algo (str): "normal", "performance" or user specifies conv weight grad algorithm directly.
+    - conv_allow_tf32 (bool): ``False`` and ``True``.
+    - matmul_allow_tf32 (bool): ``False`` and ``True``.
     """

     gpu_cfgs = {'conv_fprop_algo': ["normal", "performance", "implicit_gemm", "precomp_gemm", "gemm", "direct",
@@ -324,7 +330,9 @@ class _Context:
                 'conv_dgrad_algo': ["normal", "performance", "algo_0", "algo_1", "fft", "fft_tiling", "winograd",
                                     "winograd_nonfused"],
                 'conv_wgrad_algo': ["normal", "performance", "algo_0", "algo_1", "fft", "algo_3", "fft_tiling",
-                                    "winograd_nonfused"]
+                                    "winograd_nonfused"],
+                'conv_allow_tf32': [True, False],
+                'matmul_allow_tf32': [True, False]}
     for gpu_key in gpu_config:
         if gpu_key not in gpu_cfgs:
             raise ValueError(f"For 'context.set_context', the key of argument 'gpu_config' must be one of "
@@ -339,6 +347,10 @@ class _Context:
             self.set_param(ms_ctx_param.conv_dgrad_algo, gpu_config[gpu_key])
         if gpu_key == 'conv_wgrad_algo':
             self.set_param(ms_ctx_param.conv_wgrad_algo, gpu_config[gpu_key])
+        if gpu_key == 'conv_allow_tf32':
+            self.set_param(ms_ctx_param.conv_allow_tf32, gpu_config[gpu_key])
+        if gpu_key == 'matmul_allow_tf32':
+            self.set_param(ms_ctx_param.matmul_allow_tf32, gpu_config[gpu_key])

     def set_backend_policy(self, policy):
         success = self._context_handle.set_backend_policy(policy)
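Together these hunks validate and apply two new TF32 switches passed through the `gpu_config` argument of `mindspore.set_context`; the matching docstring and doctest appear further down in this diff. A minimal sketch, assuming a GPU build with cuDNN/cuBLAS:

    import mindspore as ms

    # Allow Tensor Core TF32 math in cuDNN convolutions and cuBLAS matmuls.
    ms.set_context(device_target="GPU",
                   gpu_config={"conv_allow_tf32": True, "matmul_allow_tf32": True})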
@@ -388,6 +400,29 @@ class _Context:
         raise ValueError(f"For 'context.set_context', the argument 'aoe_tune_mode' must be in "
                          f"['online', 'offline'], but got {tune_mode}.")

+    def set_aoe_config(self, aoe_config):
+        """
+        Enable aoe config.
+
+        Args:
+            aoe_config (dict):
+                - job_type (str): ``"1"``, ``"2"``. Default: ``"2"`` .
+                  - ``"1"``: subgraph tuning.
+                  - ``"2"``: operator tuning.
+        """
+
+        aoe_cfgs = {'job_type': ["1", "2"]}
+        for aoe_config_key in aoe_config:
+            if aoe_config_key not in aoe_cfgs:
+                raise ValueError(f"For 'context.set_context', the key of argument 'aoe_config' must be one of "
+                                 f"{aoe_cfgs}, but got {aoe_config_key}.")
+            supported_value = aoe_cfgs.get(aoe_config_key)
+            if aoe_config[aoe_config_key] not in supported_value:
+                raise ValueError(f"For 'aoe_config', the value of argument {aoe_config_key} must be one of "
+                                 f"{supported_value}, but got {aoe_config[aoe_config_key]}.")
+            if aoe_config_key == 'job_type':
+                self.set_param(ms_ctx_param.aoe_job_type, aoe_config[aoe_config_key])
+
     def set_device_id(self, device_id):
         if device_id < 0 or device_id > 4095:
             raise ValueError(f"For 'context.set_context', the argument 'device_id' must be in range [0, 4095], "
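The new `set_aoe_config` setter is reached through the `aoe_config` keyword of `mindspore.set_context` (added to `@args_type_check` and to the doctests later in this diff). A minimal sketch, assuming an Ascend build with the AOE tool available:

    import mindspore as ms

    # "2" requests operator tuning; "1" would request subgraph tuning.
    ms.set_context(device_target="Ascend", aoe_tune_mode="online",
                   aoe_config={"job_type": "2"})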
@@ -484,7 +519,7 @@ class _Context:
         except (TypeError, ValueError) as exo:
             raise ValueError(str(exo) + "\nFor 'context.set_context', open or load the 'env_config_path' file {} "
                              "failed, please check whether 'env_config_path' is json file and correct, "
-                             "or may not have permission to read it.".format(env_config_path))
+                             "or may not have permission to read it.".format(env_config_path)) from exo
         self.set_param(ms_ctx_param.env_config_path, env_config_path)

     def set_runtime_num_threads(self, runtime_num_threads):
@@ -527,6 +562,7 @@ class _Context:
         'ascend_config': set_ascend_config,
         'jit_syntax_level': set_jit_syntax_level,
         'gpu_config': set_gpu_config,
+        'aoe_config': set_aoe_config,
     }

     @property
@@ -595,7 +631,9 @@ class _Context:
         valid_option = {"recompute_comm_overlap": ms_ctx_param.recompute_comm_overlap,
                         "matmul_grad_comm_overlap": ms_ctx_param.matmul_grad_comm_overlap,
                         "enable_task_opt": ms_ctx_param.enable_task_opt,
+                        "enable_grad_comm_opt": ms_ctx_param.enable_grad_comm_opt,
                         "interleaved_matmul_comm": ms_ctx_param.interleaved_matmul_comm,
+                        "enable_opt_shard_comm_opt": ms_ctx_param.enable_opt_shard_comm_opt,
                         "interleaved_layernorm_comm": ms_ctx_param.interleaved_layernorm_comm}
         with open(speedup_config_real_path, 'r') as f:
             speedup_config = json.load(f)
@@ -611,7 +649,8 @@ class _Context:
             raise ValueError(str(exo) + "\nFor 'context.set_context', "
                              "open or load the 'speedup_config_path' file {} "
                              "failed, please check whether 'speedup_config_path' is json file and correct, "
-                             "or may not have permission to read it.".format(speedup_config_real_path))
+                             "or may not have permission to read it.".format(speedup_config_real_path)) \
+                from exo


 def _context():
@@ -641,8 +680,9 @@ def _context():
 @args_type_check(device_num=int, global_rank=int, gradients_mean=bool, gradient_fp32_sync=bool, parallel_mode=str,
                  auto_parallel_search_mode=str, search_mode=str, parameter_broadcast=bool, strategy_ckpt_load_file=str,
                  strategy_ckpt_save_file=str, full_batch=bool, enable_parallel_optimizer=bool, enable_alltoall=bool,
-                 all_reduce_fusion_config=list, pipeline_stages=int,
-                 parallel_optimizer_config=dict,
+                 all_reduce_fusion_config=list, pipeline_stages=int, pipeline_segments=int,
+                 parallel_optimizer_config=dict,
+                 comm_fusion=dict, strategy_ckpt_config=dict)
 def set_auto_parallel_context(**kwargs):
     r"""
     Set auto parallel context, only data parallel supported on CPU.
@@ -663,11 +703,11 @@ def set_auto_parallel_context(**kwargs):
     device_num                     gradient_fp32_sync
     global_rank                    loss_repeated_mean
     gradients_mean                 search_mode
-    parallel_mode
-    all_reduce_fusion_config
-    enable_parallel_optimizer
-    parallel_optimizer_config
-    enable_alltoall
+    parallel_mode                  parameter_broadcast
+    all_reduce_fusion_config       strategy_ckpt_load_file
+    enable_parallel_optimizer      strategy_ckpt_save_file
+    parallel_optimizer_config      dataset_strategy
+    enable_alltoall                pipeline_stages
     \                              auto_parallel_search_mode
     \                              comm_fusion
     \                              strategy_ckpt_config
@@ -694,7 +734,7 @@ def set_auto_parallel_context(**kwargs):

         - auto_parallel: Achieving parallelism automatically.
     search_mode (str): There are three kinds of shard strategy search modes: ``"recursive_programming"`` ,
-        ``"dynamic_programming"`` and ``"sharding_propagation"`` . Default: ``"
+        ``"dynamic_programming"`` and ``"sharding_propagation"`` . Default: ``"recursive_programming"`` .

         - recursive_programming: Recursive programming search mode. In order to obtain optimal performance,
           it is recommended that users set the batch size to be greater than or equal to the product of
@@ -737,12 +777,9 @@ def set_auto_parallel_context(**kwargs):
         distributed alone in the pipeline. The total devices will be divided into 'pipeline_stags'
         stages.
         Default: ``1`` .
-    grad_accumulation_step (int): Set the accumulation steps of gradients in auto and semi auto parallel mode.
-        This should be a positive int. Default: ``1`` .
     parallel_optimizer_config (dict): A dict contains the keys and values for setting the parallel optimizer
         configure. The configure provides more detailed behavior control about parallel training
-        when parallel optimizer is enabled.
-        The configure will be effective when we use
+        when parallel optimizer is enabled. The configure will be effective when we use
         mindspore.set_auto_parallel_context(enable_parallel_optimizer=True).
         It supports the following keys.

@@ -760,6 +797,14 @@ def set_auto_parallel_context(**kwargs):
           across the devices. Parameter size = shape[0] \* ... \* shape[n] \* size(dtype). Non-negative.
           Unit: KB. Default: ``64`` .

+        - optimizer_weight_shard_size(int): Set the optimizer weight shard group size, if you want to
+          specific the maximum group size across devices when the parallel optimizer is enabled.
+          The numerical range can be (0, device_num]. If pipeline parallel is enabled, the numerical
+          range is (0, device_num/stage]. If the size of data parallel communication domain
+          of the parameter cannot be divided by `optimizer_weight_shard_size`, then the specified
+          communication group size will not take effect. Default value is ``-1`` , which means the
+          optimizer weight shard group size will be the size of data parallel group of each parameter.
+
     comm_fusion (dict): A dict contains the types and configurations for setting the communication fusion. each
         communication fusion config has two keys: "mode" and "config".
         It supports following communication fusion types and configurations:
@@ -820,7 +865,8 @@ def set_auto_parallel_context(**kwargs):
         >>> ms.set_auto_parallel_context(enable_alltoall=False)
         >>> ms.set_auto_parallel_context(all_reduce_fusion_config=[8, 160])
         >>> ms.set_auto_parallel_context(pipeline_stages=2)
-        >>> parallel_config = {"gradient_accumulation_shard": True, "parallel_optimizer_threshold": 24
+        >>> parallel_config = {"gradient_accumulation_shard": True, "parallel_optimizer_threshold": 24,
+        ...                    "optimizer_weight_shard_size": 2}
         >>> ms.set_auto_parallel_context(parallel_optimizer_config=parallel_config, enable_parallel_optimizer=True)
         >>> config = {"allreduce": {"mode": "size", "config": 32}, "allgather": {"mode": "size", "config": 32}}
         >>> ms.set_auto_parallel_context(comm_fusion=config)
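The updated doctest shows where the new `optimizer_weight_shard_size` key lives; consolidated here into a runnable sketch (assuming a multi-device job with the parallel optimizer enabled):

    import mindspore as ms

    # Shard optimizer states across groups of 2 devices instead of the whole
    # data-parallel group (the -1 default).
    parallel_config = {"gradient_accumulation_shard": True,
                       "parallel_optimizer_threshold": 24,
                       "optimizer_weight_shard_size": 2}
    ms.set_auto_parallel_context(parallel_optimizer_config=parallel_config,
                                 enable_parallel_optimizer=True)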
@@ -860,8 +906,8 @@ def reset_auto_parallel_context():
     - gradients_mean: False.
     - gradient_fp32_sync: True.
     - parallel_mode: 'stand_alone'.
-    - search_mode: '
-    - auto_parallel_search_mode: '
+    - search_mode: 'recursive_programming'.
+    - auto_parallel_search_mode: 'recursive_programming'.
     - parameter_broadcast: False.
     - strategy_ckpt_load_file: ''.
     - strategy_ckpt_save_file: ''.
@@ -881,24 +927,31 @@ def reset_auto_parallel_context():
 @args_type_check(offload_config=dict)
 def set_offload_context(offload_config):
     r"""
-
-
+    Configure heterogeneous training detailed parameters to adjust the offload strategy.
+
+    Note:
+        The offload configuration is only used if the memory offload feature is enabled
+        via mindspore.set_context(memory_offload="ON").

     Args:
         offload_config (dict): A dict contains the keys and values for setting the offload context
             configure.It supports the following keys.

-            -
-            - offload_path (str): The path of offload.
+            - offload_path (str): The path of offload, relative paths are supported. Default: ``"./offload"``.
             - offload_cpu_size (str): The cpu memory size for offload. The format is "xxGB".
             - offload_disk_size (str): The disk size for offload. The format is "xxGB"
-            - hbm_ratio (float): The ratio that can be used based on the maximum device memory.
-
+            - hbm_ratio (float): The ratio that can be used based on the maximum device memory.
+              The range is (0,1], Default: ``1.0``.
+            - cpu_ratio (float): The ratio that can be used based on the maximum host memory.
+              The range is (0,1], Default: ``1.0``.
+            - enable_pinned_mem (bool): The flag of whether enabling Pinned Memory. Default: ``True``.
             - enable_aio (bool): The flag of whether enabling aio. Default: ``True``.
-            - aio_block_size (str): The size of aio block. The format is "xxGB"
+            - aio_block_size (str): The size of aio block. The format is "xxGB".
             - aio_queue_depth (int): The depth of aio queue.
-            -
-            -
+            - offload_param (str): The param for offload destination, cpu or disk, Default: ``""``.
+            - offload_checkpoint (str): The checkpoint for offload destination, only valid if recompute is turned on,
+              cpu or disk, Default: ``""``.
+            - auto_offload (bool): The flag of whether auto offload. Default: ``True``.
             - host_mem_block_size (str): The memory block size of host memory pool. The format is "xxGB"

     Raises:
@@ -906,14 +959,19 @@ def set_offload_context(offload_config):

     Examples:
         >>> from mindspore import context
-        >>> context.set_offload_context(offload_config={"offload_param"
+        >>> context.set_offload_context(offload_config={"offload_param":"cpu"})
     """
     _set_offload_context(offload_config)


 def get_offload_context():
     """
-
+    Gets the offload configuration parameters. Configure through interface mindspore.set_offload_context().
+    If the user is not set, the default configuration is obtained.
+
+    Returns:
+        Dict, heterogeneous training offload detailed configuration parameters.
+
     Examples:
         >>> from mindspore import context
         >>> offload_config = context.get_offload_context()
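As the restored docstring notes, `set_offload_context` only takes effect when memory offload is switched on through `set_context`. A minimal sketch combining the two calls; the keys come from the docstring above, the sizes are illustrative:

    import mindspore as ms
    from mindspore import context

    ms.set_context(memory_offload="ON", max_device_memory="3.5GB")
    # Offload parameters to host memory; "disk" is the other documented destination.
    context.set_offload_context(offload_config={"offload_param": "cpu",
                                                "offload_cpu_size": "16GB",
                                                "enable_pinned_mem": True})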
@@ -948,7 +1006,7 @@ def _check_target_specific_cfgs(device, arg_key):


 @args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=(bool, int),
-                 save_graphs_path=str, enable_dump=bool, aoe_tune_mode=str,
+                 save_graphs_path=str, enable_dump=bool, aoe_tune_mode=str, aoe_config=dict,
                  save_dump_path=str, enable_reduce_precision=bool, variable_memory_max_size=str,
                  enable_auto_mixed_precision=bool, inter_op_parallel_num=int,
                  enable_graph_kernel=bool, reserve_class_name_in_scope=bool, check_bprop=bool,
@@ -1004,7 +1062,7 @@ def set_context(**kwargs):
     | +------------------------------+----------------------------+
     | | reserve_class_name_in_scope | CPU/GPU/Ascend |
     | +------------------------------+----------------------------+
-    | | pynative_synchronize | GPU/Ascend
+    | | pynative_synchronize | CPU/GPU/Ascend |
     +-------------------------+------------------------------+----------------------------+
     | Executive Control | mode | CPU/GPU/Ascend |
     | +------------------------------+----------------------------+
@@ -1014,6 +1072,10 @@ def set_context(**kwargs):
     | +------------------------------+----------------------------+
     | | enable_reduce_precision | Ascend |
     | +------------------------------+----------------------------+
+    | | aoe_tune_mode | Ascend |
+    | +------------------------------+----------------------------+
+    | | aoe_config | Ascend |
+    | +------------------------------+----------------------------+
     | | check_bprop | CPU/GPU/Ascend |
     | +------------------------------+----------------------------+
     | | max_call_depth | CPU/GPU/Ascend |
@@ -1050,7 +1112,7 @@ def set_context(**kwargs):
     If device target is not set, the version of MindSpore package is used.
     max_device_memory (str): Set the maximum memory available for devices. The format is "xxGB".
         Default: ``" 1024GB"`` . The actual used memory size is the minimum of the available memory of the device
-        and max_device_memory.
+        and max_device_memory. 'max_device_memory' should be set before the program runs.
     variable_memory_max_size (str): This parameter is deprecated, and will be removed in a future version.
         Please use parameter 'max_device_memory' instead.
     mempool_block_size (str): Set the size of the memory pool block in PyNative mode for devices.
@@ -1063,7 +1125,7 @@ def set_context(**kwargs):
     Available values are:

     - False or 0: disable saving of intermediate compilation graphs.
-    - 1: some intermediate files will be generated during graph
+    - 1: some intermediate files will be generated during graph compilation.
     - True or 2: Generate more ir files related to backend process.
     - 3: Generate visualization computing graphs and detailed frontend ir graphs.

@@ -1134,11 +1196,17 @@ def set_context(**kwargs):
     If enable_graph_kernel is set to ``True`` , acceleration can be enabled.
     For details of graph kernel fusion, please check
     `Enabling Graph Kernel Fusion
-    <https://www.mindspore.cn/tutorials/experts/en/r2.
+    <https://www.mindspore.cn/tutorials/experts/en/r2.2/optimize/graph_fusion_engine.html>`_.
     graph_kernel_flags (str):
         Optimization options of graph kernel fusion, and the priority is higher when it conflicts
         with enable_graph_kernel. Only for experienced users.
-        For example,
+        For example,
+
+        .. code-block::
+
+            mindspore.set_context(graph_kernel_flags="--opt_level=2 --dump_as_text")
+
+        Some general options:

     - opt_level: Set the optimization level.
       Default: ``2`` . Graph kernel fusion can be enabled equivalently by setting opt_level greater than 0.
@@ -1154,10 +1222,19 @@ def set_context(**kwargs):

     - dump_as_text: dumps detail info as text files. Default: ``False`` .

-    More options can refer to the implementation code.
     enable_reduce_precision (bool): Whether to enable precision reduction.
         If the operator does not support the user-specified precision, the precision will
         be changed automatically. Default: ``True`` .
+    aoe_tune_mode (str): AOE tuning mode setting, which is not set by default.
+        When set to ``"online"`` , the tuning in online function is turned on.
+        When set to ``"offline"`` , ge graph will be save for offline tuning.
+    aoe_config (dict): Set the parameters specific to Ascend Optimization Engine. It is not set by default.
+
+        - job_type (str): Mode type setting, default value is ``"2"``.
+
+          - ``"1"``: subgraph tuning;
+          - ``"2"``: operator tuning.
+
     check_bprop (bool): Whether to check back propagation nodes. The checking ensures that the shape and dtype
         of back propagation node outputs is the same as input parameters. Default: ``False`` .
     max_call_depth (int): Specify the maximum depth of function call. Must be positive integer. Default: ``1000`` .
@@ -1205,12 +1282,10 @@ def set_context(**kwargs):
         memory_optimize_level is set 'O1'.
         - OFF: Turn off the memory Offload function.
     ascend_config (dict): Set the parameters specific to Ascend hardware platform. It is not set by default.
-
-        hardware platform. The default value of `precision_mode`, `jit_compile` and
+        The default value of `precision_mode`, `jit_compile` and
         `atomic_clean_policy` are experimental parameters, may change in the future.

-        - precision_mode (str): Mixed precision mode setting,
-          value of training network is based on the value of CANN, and the default value of inference network
+        - precision_mode (str): Mixed precision mode setting, and the default value of inference network
           is ``force_fp16`` . The value range is as follows:

          - force_fp16: When the operator supports both float16 and float32, select float16 directly.
@@ -1238,35 +1313,44 @@ def set_context(**kwargs):
          When the memory of the network exceeds the limit, you may try this cleaning policy, but it may cause
          performance loss.
        - matmul_allow_hf32 (bool): Whether to convert FP32 to HF32 for Matmul operators. Default value: ``False``.
+          This is an experimental prototype that is subject to change and/or deletion.
          For detailed information, please refer to `Ascend community <https://www.hiascend.com/>`_ .
        - conv_allow_hf32 (bool): Whether to convert FP32 to HF32 for Conv operators. Default value: ``True``.
+          This is an experimental prototype that is subject to change and/or deletion.
          For detailed information, please refer to `Ascend community <https://www.hiascend.com/>`_ .
+        - exception_dump (str): Enable exception dump for Ascend operators, providing the input and output data for
+          failing Ascend operators. The value can be ``"0"`` , ``"1"`` and ``"2"``. For ``"0"`` , exception dump is
+          turned off; for ``"1"``, all inputs and outputs will be dumped for AICore and AICPU exception operators;
+          for ``"2"``, inputs will be dumped for AICore exception operators. Default: ``"2"`` .
        - op_precision_mode (str): Path to config file of op precision mode. For detailed information, please refer
          to `Ascend community <https://www.hiascend.com/>`_ .
        - parallel_speed_up_json_path(Union[str, None]): The path to the parallel speed up json file, configuration
          can refer to `parallel_speed_up.json
-         <https://gitee.com/mindspore/mindspore/blob/r2.
+         <https://gitee.com/mindspore/mindspore/blob/r2.2/config/parallel_speed_up.json>`_ .
          If its value is None or '', it does not take effect. Default None.

          - recompute_comm_overlap (bool): Enable overlap between recompute ops and communication ops if True.
            Default: False.
          - matmul_grad_comm_overlap (bool): Enable overlap between grad ops and communication ops if True.
            Default: False.
-         - enable_task_opt (bool): Enable the
+         - enable_task_opt (bool): Enable the optimization of the number of tasks for each communication if True.
            Default: False.
          - interleaved_matmul_comm (bool): Enable interleaved optimization of Matmul-Comm if True. Default: False.
          - interleaved_layernorm_comm (bool): Enable interleaved optimization of LayerNorm-Comm if True.
            Default: False.
+
     jit_syntax_level (int): Set JIT syntax level for graph compiling, triggered by GRAPH_MODE and @jit decorator.
-        The value must be
-
+        The value must be ``STRICT`` or ``LAX`` . Default: ``LAX`` . All levels support all backends.
+
+        - ``STRICT`` : Only basic syntax is supported, and execution performance is optimal. Can be used for MindIR
+          load and export.
+        - ``LAX`` : Compatible with all Python syntax as much as possible. However, execution performance may be
+          affected and not optimal. Cannot be used for MindIR load and export due to some syntax that may not be
+          able to be exported.

-        - STRICT: Only basic syntax is supported, and execution performance is optimal.
-        - LAX: Compatible with all Python syntax as much as possible. However, execution performance may be
-          affected and not optimal.
     gpu_config (dict): Set the parameters specific to gpu hardware platform. It is not set by default.
-        Currently, only setting `conv_fprop_algo` and `conv_dgrad_algo` and `conv_wgrad_algo`
-        hardware platform.
+        Currently, only setting `conv_fprop_algo` and `conv_dgrad_algo` and `conv_wgrad_algo` and `conv_allow_tf32`
+        and `matmul_allow_tf32` are supported on GPU hardware platform.

        - conv_fprop_algo (str): Specifies convolution forward algorithm and the default value is 'normal',
          The value range is as follows:
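The rewritten `jit_syntax_level` entry now spells out the STRICT/LAX trade-off, including the MindIR export restriction. A minimal sketch using the constants exactly as in the doctest later in this diff:

    import mindspore as ms

    # STRICT limits graph mode to basic syntax but keeps MindIR export possible;
    # the LAX default accepts more Python syntax at some performance cost.
    ms.set_context(mode=ms.GRAPH_MODE, jit_syntax_level=ms.STRICT)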
@@ -1330,6 +1414,10 @@ def set_context(**kwargs):
        - fft_tiling: This algorithm uses the Fast-Fourier Transform approach but splits the inputs into tiles.
          A significant memory workspace is needed to store intermediate results but less than fft for large size
          images. The results are deterministic.
+       - conv_allow_tf32 (bool): The flag below controls to allow Tensor core TF32 computation on CUDNN and the
+         default value is ``True``.
+       - matmul_allow_tf32 (bool): The flag below controls to allow Tensor core TF32 computation on CUBLAS and the
+         default value is ``False``.

     Raises:
         ValueError: If input key is not an attribute in context.
@@ -1346,6 +1434,8 @@ def set_context(**kwargs):
         >>> ms.set_context(graph_kernel_flags="--opt_level=2 --dump_as_text")
         >>> ms.set_context(reserve_class_name_in_scope=True)
         >>> ms.set_context(variable_memory_max_size="6GB")
+        >>> ms.set_context(aoe_tune_mode="online")
+        >>> ms.set_context(aoe_config={"job_type": "2"})
         >>> ms.set_context(check_bprop=True)
         >>> ms.set_context(max_device_memory="3.5GB")
         >>> ms.set_context(mempool_block_size="1GB")
@@ -1364,7 +1454,8 @@ def set_context(**kwargs):
         >>> ms.set_context(ascend_config={"precision_mode": "force_fp16", "jit_compile": True,
         ...                               "atomic_clean_policy": 1, "op_precision_mode": "./op_precision_config_file"})
         >>> ms.set_context(jit_syntax_level=ms.STRICT)
-        >>> ms.set_context(gpu_config={"conv_fprop_algo": "performance"
+        >>> ms.set_context(gpu_config={"conv_fprop_algo": "performance", "conv_allow_tf32": True,
+        ...                            "matmul_allow_tf32": True})
     """
     ctx = _context()
     # set device target first
@@ -1390,7 +1481,7 @@ def set_context(**kwargs):
             value = 0
         if value > 3:
             raise ValueError(f"value for save_graphs should be 0-3 but got '{value}'")
-        if key == 'jit_syntax_level' and value
+        if key == 'jit_syntax_level' and value not in (STRICT, COMPATIBLE, LAX):
             raise ValueError(f"For 'jit_syntax_level', the value should be context.STRICT"
                              f" or context.LAX, but got {value}.")
         if not _check_target_specific_cfgs(device, key):
@@ -1454,16 +1545,16 @@ class ParallelMode:
     """
     Parallel mode options.

-    There are five kinds of parallel modes,
-
+    There are five kinds of parallel modes, ``STAND_ALONE``, ``DATA_PARALLEL``,
+    ``HYBRID_PARALLEL``, ``SEMI_AUTO_PARALLEL`` and ``AUTO_PARALLEL``. Default: ``STAND_ALONE``.

-    - STAND_ALONE
-    - DATA_PARALLEL
-    - HYBRID_PARALLEL
-    - SEMI_AUTO_PARALLEL
-    - AUTO_PARALLEL
+    - ``STAND_ALONE``: Only one processor is working.
+    - ``DATA_PARALLEL``: Distributes the data across different processors.
+    - ``HYBRID_PARALLEL``: Achieves data parallelism and model parallelism manually.
+    - ``SEMI_AUTO_PARALLEL``: Achieves data parallelism and model parallelism by setting parallel strategies.
+    - ``AUTO_PARALLEL``: Achieves parallelism automatically.

-    MODE_LIST
+    ``MODE_LIST``: The list of all supported parallel modes.
     """

     STAND_ALONE = "stand_alone"
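The expanded `ParallelMode` docstring now describes each mode. A minimal sketch of selecting one of them through the auto-parallel context, assuming a multi-device launch:

    import mindspore as ms
    from mindspore.context import ParallelMode

    # Plain data parallelism across all visible devices.
    ms.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
                                 gradients_mean=True)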
mindspore/dataset/__init__.py
CHANGED
@@ -21,7 +21,7 @@ Besides, this module provides APIs to sample data while loading.

 We can enable cache in most of the dataset with its key arguments 'cache'. Please notice that cache is not supported
 on Windows platform yet. Do not use it while loading and processing data on Windows. More introductions and limitations
-can refer `Single-Node Tensor Cache <https://www.mindspore.cn/tutorials/experts/en/r2.
+can refer `Single-Node Tensor Cache <https://www.mindspore.cn/tutorials/experts/en/r2.2/dataset/cache.html>`_ .

 Common imported modules in corresponding API examples are as follows:

@@ -55,11 +55,11 @@ The specific steps are as follows:
 - Dataset operation: The user uses the dataset object method `.shuffle` / `.filter` / `.skip` / `.split` /
   `.take` / ... to further shuffle, filter, skip, and obtain the maximum number of samples of datasets;
 - Dataset sample transform operation: The user can add data transform operations
-  ( `vision transform <https://mindspore.cn/docs/en/r2.
+  ( `vision transform <https://mindspore.cn/docs/en/r2.2/api_python/mindspore.\
   dataset.transforms.html#module-mindspore.dataset.vision>`_ ,
-  `NLP transform <https://mindspore.cn/docs/en/r2.
+  `NLP transform <https://mindspore.cn/docs/en/r2.2/api_python/mindspore.\
   dataset.transforms.html#module-mindspore.dataset.text>`_ ,
-  `audio transform <https://mindspore.cn/docs/en/r2.
+  `audio transform <https://mindspore.cn/docs/en/r2.2/api_python/mindspore.\
   dataset.transforms.html#module-mindspore.dataset.audio>`_ ) to the map
   operation to perform transformations. During data preprocessing, multiple map operations can be defined to
   perform different transform operations to different fields. The data transform operation can also be a
@@ -73,7 +73,7 @@ Quick start of Dataset Pipeline
 -------------------------------

 For a quick start of using Dataset Pipeline, download `Load & Process Data With Dataset Pipeline
-<https://www.mindspore.cn/docs/en/r2.
+<https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/dataset_gallery.html>`_
 to local and run in sequence.

 """
@@ -40,10 +40,10 @@ Descriptions of common data processing terms are as follows:
 The data transform operation can be executed in the data processing pipeline or in the eager mode:

 - Pipeline mode is generally used to process big datasets. Examples refer to
-  `introduction to data processing pipeline <https://www.mindspore.cn/docs/en/r2.
+  `introduction to data processing pipeline <https://www.mindspore.cn/docs/en/r2.2/api_python/
   mindspore.dataset.html#introduction-to-data-processing-pipeline>`_ .
 - Eager mode is more like a function call to process data. Examples refer to
-  `Lightweight Data Processing <https://www.mindspore.cn/tutorials/en/r2.
+  `Lightweight Data Processing <https://www.mindspore.cn/tutorials/en/r2.2/advanced/dataset/eager.html>`_ .
 """
 from __future__ import absolute_import

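The restored links separate pipeline mode from eager mode for data transforms. A minimal sketch of the two styles on a local NumPy array, using the standard `mindspore.dataset` APIs (dataset and transform names are assumptions in the sense that they are not part of this diff):

    import numpy as np
    import mindspore.dataset as ds
    import mindspore.dataset.vision as vision

    images = np.random.randint(0, 255, (4, 32, 32, 3), dtype=np.uint8)

    # Pipeline mode: build a dataset object and attach transforms with .map().
    pipeline = ds.NumpySlicesDataset({"image": images}, shuffle=False)
    pipeline = pipeline.map(operations=vision.Resize((16, 16)), input_columns=["image"])

    # Eager mode: call the same transform directly on one sample.
    resized = vision.Resize((16, 16))(images[0])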