mindspore 2.7.0rc1__cp310-cp310-win_amd64.whl → 2.7.1__cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore has been flagged as potentially problematic; see the package registry's advisory page for details.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +5 -2
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +2 -2
- mindspore/_extends/builtin_operations.py +3 -3
- mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +3 -3
- mindspore/_extends/parse/compile_config.py +24 -1
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +6 -3
- mindspore/_extends/parse/parser.py +28 -22
- mindspore/_extends/parse/resources.py +1 -1
- mindspore/_extends/parse/standard_method.py +23 -2
- mindspore/_extends/parse/trope.py +2 -1
- mindspore/_extends/pijit/pijit_func_white_list.py +9 -27
- mindspore/amp.py +0 -18
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/base.py +29 -2
- mindspore/common/__init__.py +18 -12
- mindspore/common/_decorator.py +3 -2
- mindspore/common/_grad_function.py +3 -1
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +371 -96
- mindspore/common/_utils.py +7 -43
- mindspore/common/api.py +434 -135
- mindspore/common/dtype.py +98 -57
- mindspore/common/dump.py +7 -108
- mindspore/common/dynamic_shape/__init__.py +0 -0
- mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +15 -23
- mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
- mindspore/common/file_system.py +59 -9
- mindspore/common/hook_handle.py +82 -3
- mindspore/common/jit_config.py +5 -1
- mindspore/common/jit_trace.py +27 -12
- mindspore/common/lazy_inline.py +5 -3
- mindspore/common/np_dtype.py +3 -3
- mindspore/common/parameter.py +17 -127
- mindspore/common/recompute.py +4 -13
- mindspore/common/tensor.py +50 -217
- mindspore/communication/_comm_helper.py +11 -1
- mindspore/communication/comm_func.py +138 -4
- mindspore/communication/management.py +85 -1
- mindspore/config/op_info.config +0 -15
- mindspore/context.py +20 -106
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/transforms.py +1 -1
- mindspore/dataset/core/config.py +35 -1
- mindspore/dataset/engine/datasets.py +338 -319
- mindspore/dataset/engine/datasets_user_defined.py +38 -22
- mindspore/dataset/engine/datasets_vision.py +1 -1
- mindspore/dataset/engine/validators.py +1 -15
- mindspore/dataset/transforms/c_transforms.py +2 -2
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/vision/__init__.py +1 -1
- mindspore/dataset/vision/py_transforms.py +8 -8
- mindspore/dataset/vision/transforms.py +17 -5
- mindspore/dataset/vision/utils.py +632 -21
- mindspore/device_context/ascend/op_tuning.py +35 -1
- mindspore/dnnl.dll +0 -0
- mindspore/{profiler/common/validator → graph}/__init__.py +9 -1
- mindspore/graph/custom_pass.py +55 -0
- mindspore/include/api/cell.h +28 -4
- mindspore/include/api/cfg.h +24 -7
- mindspore/include/api/context.h +1 -0
- mindspore/include/api/delegate.h +0 -2
- mindspore/include/api/dual_abi_helper.h +100 -19
- mindspore/include/api/graph.h +14 -1
- mindspore/include/api/kernel.h +16 -3
- mindspore/include/api/kernel_api.h +9 -1
- mindspore/include/api/metrics/accuracy.h +9 -0
- mindspore/include/api/model.h +5 -1
- mindspore/include/api/model_group.h +4 -0
- mindspore/include/api/model_parallel_runner.h +2 -0
- mindspore/include/api/status.h +48 -10
- mindspore/include/api/types.h +6 -1
- mindspore/include/dataset/constants.h +9 -0
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/__init__.py +3 -3
- mindspore/mindrecord/common/exceptions.py +1 -0
- mindspore/mindrecord/config.py +1 -1
- mindspore/{parallel/mpi → mindrecord/core}/__init__.py +4 -1
- mindspore/mindrecord/{shardheader.py → core/shardheader.py} +2 -1
- mindspore/mindrecord/{shardindexgenerator.py → core/shardindexgenerator.py} +1 -1
- mindspore/mindrecord/{shardreader.py → core/shardreader.py} +2 -1
- mindspore/mindrecord/{shardsegment.py → core/shardsegment.py} +2 -2
- mindspore/mindrecord/{shardutils.py → core/shardutils.py} +1 -1
- mindspore/mindrecord/{shardwriter.py → core/shardwriter.py} +1 -1
- mindspore/mindrecord/filereader.py +4 -4
- mindspore/mindrecord/filewriter.py +5 -5
- mindspore/mindrecord/mindpage.py +2 -2
- mindspore/mindrecord/tools/cifar10.py +4 -3
- mindspore/mindrecord/tools/cifar100.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +1 -1
- mindspore/mindrecord/tools/cifar10_to_mr.py +6 -6
- mindspore/mindrecord/tools/csv_to_mr.py +1 -1
- mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
- mindspore/mindrecord/tools/mnist_to_mr.py +1 -1
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -1
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_cluster.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_hardware_abstract.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_ops_host.dll → mindspore_ops_cpu.dll} +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mindspore_runtime_utils.dll +0 -0
- mindspore/mindspore_tools.dll +0 -0
- mindspore/mint/__init__.py +15 -10
- mindspore/mint/distributed/__init__.py +4 -0
- mindspore/mint/distributed/distributed.py +392 -69
- mindspore/mint/nn/__init__.py +2 -16
- mindspore/mint/nn/functional.py +4 -110
- mindspore/mint/nn/layer/__init__.py +0 -2
- mindspore/mint/nn/layer/_functions.py +1 -2
- mindspore/mint/nn/layer/activation.py +0 -6
- mindspore/mint/nn/layer/basic.py +0 -47
- mindspore/mint/nn/layer/conv.py +10 -10
- mindspore/mint/nn/layer/normalization.py +11 -16
- mindspore/mint/nn/layer/pooling.py +0 -4
- mindspore/nn/__init__.py +1 -3
- mindspore/nn/cell.py +231 -239
- mindspore/nn/layer/activation.py +4 -2
- mindspore/nn/layer/basic.py +56 -14
- mindspore/nn/layer/container.py +16 -0
- mindspore/nn/layer/embedding.py +4 -169
- mindspore/nn/layer/image.py +1 -1
- mindspore/nn/layer/normalization.py +2 -1
- mindspore/nn/layer/thor_layer.py +4 -85
- mindspore/nn/optim/ada_grad.py +0 -1
- mindspore/nn/optim/adafactor.py +0 -1
- mindspore/nn/optim/adam.py +32 -127
- mindspore/nn/optim/adamax.py +0 -1
- mindspore/nn/optim/asgd.py +0 -1
- mindspore/nn/optim/ftrl.py +8 -102
- mindspore/nn/optim/lamb.py +1 -4
- mindspore/nn/optim/lars.py +0 -3
- mindspore/nn/optim/lazyadam.py +25 -218
- mindspore/nn/optim/momentum.py +5 -43
- mindspore/nn/optim/optimizer.py +6 -55
- mindspore/nn/optim/proximal_ada_grad.py +0 -1
- mindspore/nn/optim/rmsprop.py +0 -1
- mindspore/nn/optim/rprop.py +0 -1
- mindspore/nn/optim/sgd.py +0 -1
- mindspore/nn/optim/tft_wrapper.py +2 -4
- mindspore/nn/optim/thor.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +7 -8
- mindspore/nn/probability/bijector/gumbel_cdf.py +2 -2
- mindspore/nn/probability/bijector/power_transform.py +20 -21
- mindspore/nn/probability/bijector/scalar_affine.py +5 -5
- mindspore/nn/probability/bijector/softplus.py +13 -14
- mindspore/nn/probability/distribution/_utils/utils.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +39 -5
- mindspore/nn/wrap/grad_reducer.py +4 -89
- mindspore/numpy/array_creations.py +4 -4
- mindspore/numpy/fft.py +9 -9
- mindspore/numpy/utils_const.py +1 -1
- mindspore/{nn/reinforcement → onnx}/__init__.py +5 -8
- mindspore/onnx/onnx_export.py +137 -0
- mindspore/opencv_core4110.dll +0 -0
- mindspore/opencv_imgcodecs4110.dll +0 -0
- mindspore/{opencv_imgproc452.dll → opencv_imgproc4110.dll} +0 -0
- mindspore/ops/__init__.py +2 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +38 -2
- mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
- mindspore/ops/_op_impl/aicpu/__init__.py +0 -10
- mindspore/ops/_op_impl/cpu/__init__.py +1 -5
- mindspore/ops/_op_impl/cpu/{buffer_append.py → joinedstr_op.py} +8 -8
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +28 -24
- mindspore/ops/auto_generate/gen_extend_func.py +6 -11
- mindspore/ops/auto_generate/gen_ops_def.py +385 -154
- mindspore/ops/auto_generate/gen_ops_prim.py +5676 -5167
- mindspore/ops/communication.py +97 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +16 -2
- mindspore/ops/composite/multitype_ops/__init__.py +3 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +150 -8
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/composite/multitype_ops/add_impl.py +7 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +27 -0
- mindspore/ops/function/__init__.py +2 -0
- mindspore/ops/function/array_func.py +24 -18
- mindspore/ops/function/comm_func.py +3883 -0
- mindspore/ops/function/debug_func.py +7 -6
- mindspore/ops/function/grad/grad_func.py +4 -12
- mindspore/ops/function/math_func.py +89 -86
- mindspore/ops/function/nn_func.py +92 -313
- mindspore/ops/function/random_func.py +9 -18
- mindspore/ops/functional.py +4 -1
- mindspore/ops/functional_overload.py +377 -30
- mindspore/ops/operations/__init__.py +2 -5
- mindspore/ops/operations/_custom_ops_utils.py +7 -9
- mindspore/ops/operations/_inner_ops.py +12 -50
- mindspore/ops/operations/_rl_inner_ops.py +0 -933
- mindspore/ops/operations/array_ops.py +5 -50
- mindspore/ops/operations/comm_ops.py +95 -17
- mindspore/ops/operations/custom_ops.py +237 -22
- mindspore/ops/operations/debug_ops.py +33 -35
- mindspore/ops/operations/manually_defined/ops_def.py +39 -318
- mindspore/ops/operations/math_ops.py +5 -5
- mindspore/ops/operations/nn_ops.py +3 -3
- mindspore/ops/operations/sparse_ops.py +0 -83
- mindspore/ops/primitive.py +4 -27
- mindspore/ops/tensor_method.py +88 -10
- mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +5 -5
- mindspore/ops_generate/aclnn/gen_aclnn_implement.py +8 -8
- mindspore/ops_generate/api/functions_cc_generator.py +53 -4
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +25 -11
- mindspore/ops_generate/common/gen_constants.py +11 -10
- mindspore/ops_generate/common/op_proto.py +18 -1
- mindspore/ops_generate/common/template.py +102 -245
- mindspore/ops_generate/common/template_utils.py +212 -0
- mindspore/ops_generate/gen_custom_ops.py +69 -0
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +78 -7
- mindspore/ops_generate/op_def_py/base_op_prim_py_generator.py +360 -0
- mindspore/ops_generate/op_def_py/custom_op_prim_py_generator.py +140 -0
- mindspore/ops_generate/op_def_py/op_def_py_generator.py +54 -7
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -312
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +74 -17
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +22 -5
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +0 -16
- mindspore/ops_generate/pyboost/op_template_parser.py +3 -2
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +21 -5
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +2 -2
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +30 -10
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +10 -3
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +19 -9
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +71 -28
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +10 -9
- mindspore/ops_generate/pyboost/pyboost_utils.py +27 -16
- mindspore/ops_generate/resources/yaml_loader.py +13 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +2 -2
- mindspore/parallel/_auto_parallel_context.py +5 -15
- mindspore/parallel/_cell_wrapper.py +1 -1
- mindspore/parallel/_parallel_serialization.py +4 -6
- mindspore/parallel/_ps_context.py +2 -2
- mindspore/parallel/_utils.py +34 -17
- mindspore/parallel/auto_parallel.py +23 -9
- mindspore/parallel/checkpoint_transform.py +20 -2
- mindspore/parallel/cluster/process_entity/_api.py +28 -33
- mindspore/parallel/cluster/process_entity/_utils.py +9 -5
- mindspore/parallel/cluster/run.py +5 -3
- mindspore/{experimental/llm_boost/ascend_native → parallel/distributed}/__init__.py +21 -22
- mindspore/parallel/distributed/distributed_data_parallel.py +393 -0
- mindspore/parallel/distributed/flatten_grad_buffer.py +295 -0
- mindspore/parallel/function/reshard_func.py +6 -5
- mindspore/parallel/nn/parallel_cell_wrapper.py +40 -3
- mindspore/parallel/nn/parallel_grad_reducer.py +0 -8
- mindspore/parallel/shard.py +7 -21
- mindspore/parallel/strategy.py +336 -0
- mindspore/parallel/transform_safetensors.py +127 -20
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +13 -9
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +1 -1
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +1 -1
- mindspore/profiler/common/constant.py +5 -0
- mindspore/profiler/common/file_manager.py +9 -0
- mindspore/profiler/common/msprof_cmd_tool.py +40 -4
- mindspore/profiler/common/path_manager.py +65 -24
- mindspore/profiler/common/profiler_context.py +27 -14
- mindspore/profiler/common/profiler_info.py +3 -3
- mindspore/profiler/common/profiler_meta_data.py +1 -0
- mindspore/profiler/common/profiler_op_analyse.py +10 -6
- mindspore/profiler/common/profiler_path_manager.py +13 -0
- mindspore/profiler/common/util.py +30 -3
- mindspore/profiler/dynamic_profiler.py +91 -46
- mindspore/profiler/envprofiler.py +30 -5
- mindspore/profiler/experimental_config.py +18 -2
- mindspore/profiler/platform/cpu_profiler.py +10 -4
- mindspore/profiler/platform/npu_profiler.py +34 -7
- mindspore/profiler/profiler.py +193 -145
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +2 -2
- mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +108 -24
- mindspore/runtime/__init__.py +9 -6
- mindspore/runtime/executor.py +35 -0
- mindspore/runtime/memory.py +113 -0
- mindspore/runtime/thread_bind_core.py +1 -1
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/{experimental/llm_boost → tools}/__init__.py +5 -5
- mindspore/tools/data_dump.py +130 -0
- mindspore/tools/sdc_detect.py +91 -0
- mindspore/tools/stress_detect.py +63 -0
- mindspore/train/__init__.py +6 -6
- mindspore/train/_utils.py +8 -21
- mindspore/train/amp.py +6 -7
- mindspore/train/callback/_callback.py +2 -1
- mindspore/train/callback/_checkpoint.py +1 -17
- mindspore/train/callback/_flops_collector.py +10 -6
- mindspore/train/callback/_train_fault_tolerance.py +72 -25
- mindspore/train/data_sink.py +5 -9
- mindspore/train/dataset_helper.py +5 -5
- mindspore/train/model.py +41 -230
- mindspore/train/serialization.py +160 -401
- mindspore/train/train_thor/model_thor.py +2 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +6 -3
- mindspore/utils/dlpack.py +92 -0
- mindspore/utils/dryrun.py +1 -1
- mindspore/utils/runtime_execution_order_check.py +10 -0
- mindspore/utils/sdc_detect.py +14 -12
- mindspore/utils/stress_detect.py +43 -0
- mindspore/utils/utils.py +152 -16
- mindspore/version.py +1 -1
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/METADATA +3 -2
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/RECORD +330 -344
- mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
- mindspore/communication/_hccl_management.py +0 -297
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -207
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +0 -52
- mindspore/experimental/llm_boost/atb/__init__.py +0 -23
- mindspore/experimental/llm_boost/atb/boost_base.py +0 -385
- mindspore/experimental/llm_boost/atb/llama_boost.py +0 -137
- mindspore/experimental/llm_boost/atb/qwen_boost.py +0 -124
- mindspore/experimental/llm_boost/register.py +0 -130
- mindspore/experimental/llm_boost/utils.py +0 -31
- mindspore/include/OWNERS +0 -7
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/nn/optim/_dist_optimizer_registry.py +0 -111
- mindspore/nn/reinforcement/_batch_read_write.py +0 -142
- mindspore/nn/reinforcement/_tensors_queue.py +0 -152
- mindspore/nn/reinforcement/tensor_array.py +0 -145
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +0 -113
- mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +0 -96
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +0 -42
- mindspore/ops/_op_impl/cpu/buffer_get.py +0 -28
- mindspore/ops/_op_impl/cpu/buffer_sample.py +0 -28
- mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +0 -42
- mindspore/ops/operations/_tensor_array.py +0 -359
- mindspore/ops/operations/rl_ops.py +0 -288
- mindspore/parallel/_offload_context.py +0 -275
- mindspore/parallel/_recovery_context.py +0 -115
- mindspore/parallel/_transformer/__init__.py +0 -35
- mindspore/parallel/_transformer/layers.py +0 -765
- mindspore/parallel/_transformer/loss.py +0 -251
- mindspore/parallel/_transformer/moe.py +0 -693
- mindspore/parallel/_transformer/op_parallel_config.py +0 -222
- mindspore/parallel/_transformer/transformer.py +0 -3124
- mindspore/parallel/mpi/_mpi_config.py +0 -116
- mindspore/profiler/common/validator/validate_path.py +0 -84
- mindspore/train/memory_profiling_pb2.py +0 -298
- mindspore/utils/hooks.py +0 -81
- /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/WHEEL +0 -0
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/top_level.txt +0 -0
mindspore/context.py
CHANGED
|
@@ -33,13 +33,12 @@ from mindspore.parallel._auto_parallel_context import _set_auto_parallel_context
|
|
|
33
33
|
_reset_auto_parallel_context
|
|
34
34
|
from mindspore.parallel._ps_context import _set_ps_context, _get_ps_context, _reset_ps_context, \
|
|
35
35
|
_need_reset_device_target_for_ps
|
|
36
|
-
from mindspore.parallel._offload_context import _set_offload_context, _get_offload_context
|
|
37
36
|
from mindspore.hal.device import is_initialized
|
|
38
37
|
from mindspore.common import api
|
|
39
38
|
|
|
40
39
|
__all__ = ['GRAPH_MODE', 'PYNATIVE_MODE', 'STRICT', 'COMPATIBLE', 'LAX', 'set_context', 'get_context',
|
|
41
40
|
'set_auto_parallel_context', 'get_auto_parallel_context', 'reset_auto_parallel_context', 'ParallelMode',
|
|
42
|
-
'set_ps_context', 'get_ps_context'
|
|
41
|
+
'set_ps_context', 'get_ps_context']
|
|
43
42
|
|
|
44
43
|
GRAPH_MODE = 0
|
|
45
44
|
PYNATIVE_MODE = 1
|
|
@@ -204,13 +203,6 @@ class _Context:
|
|
|
204
203
|
if mode == PYNATIVE_MODE:
|
|
205
204
|
if self.enable_debug_runtime:
|
|
206
205
|
self.set_backend_policy("vm")
|
|
207
|
-
parallel_mode = _get_auto_parallel_context("parallel_mode")
|
|
208
|
-
if parallel_mode not in (ParallelMode.DATA_PARALLEL, ParallelMode.STAND_ALONE, ParallelMode.AUTO_PARALLEL):
|
|
209
|
-
raise ValueError(f"Got {parallel_mode}, when the user enabled SEMI_AUTO_PARALELL, "
|
|
210
|
-
f"pynative mode does not support, you should set either "
|
|
211
|
-
f"context.set_auto_parallel_context(parallel_mode='data_parallel'), "
|
|
212
|
-
f"context.set_auto_parallel_context(parallel_mode='stand_alone') "
|
|
213
|
-
f"or context.set_auto_parallel_context(parallel_mode='auto_parallel').")
|
|
214
206
|
self._context_switches.push(True, None)
|
|
215
207
|
elif mode == GRAPH_MODE:
|
|
216
208
|
if self.enable_debug_runtime:
|
|
@@ -262,22 +254,6 @@ class _Context:
|
|
|
262
254
|
f"{exec_order_modes}, but got {exec_order}.")
|
|
263
255
|
self.set_param(ms_ctx_param.exec_order, exec_order)
|
|
264
256
|
|
|
265
|
-
def set_memory_offload(self, memory_offload):
|
|
266
|
-
"""
|
|
267
|
-
Enable memory offload or not, support "ON", "OFF".
|
|
268
|
-
|
|
269
|
-
Args:
|
|
270
|
-
memory_offload (str): "ON", "OFF"
|
|
271
|
-
"""
|
|
272
|
-
memory_offload_options = ["ON", "OFF"]
|
|
273
|
-
if memory_offload not in memory_offload_options:
|
|
274
|
-
raise ValueError(f"For 'context.set_context', the argument 'memory_offload' must be one of "
|
|
275
|
-
f"{memory_offload_options}, but got {memory_offload}.")
|
|
276
|
-
if memory_offload == "ON":
|
|
277
|
-
self.set_param(ms_ctx_param.memory_offload, True)
|
|
278
|
-
else:
|
|
279
|
-
self.set_param(ms_ctx_param.memory_offload, False)
|
|
280
|
-
|
|
281
257
|
def set_deterministic(self, deterministic):
|
|
282
258
|
"""
|
|
283
259
|
Enable model run in deterministic, and support the values "ON" and "OFF".
|
|
@@ -605,12 +581,12 @@ class _Context:
|
|
|
605
581
|
def set_mempool_block_size(self, mempool_block_size):
|
|
606
582
|
"""Set the block size of memory pool."""
|
|
607
583
|
global_jit_config = get_jit_config()
|
|
608
|
-
|
|
584
|
+
is_ge = False
|
|
609
585
|
if global_jit_config:
|
|
610
|
-
|
|
611
|
-
if
|
|
612
|
-
logger.warning("
|
|
613
|
-
"you can use
|
|
586
|
+
is_ge = global_jit_config.get('backend') == "GE" or global_jit_config.get('jit_level') == "O2"
|
|
587
|
+
if is_ge:
|
|
588
|
+
logger.warning("GE doesn't support to set parameter 'mempool_block_size' of context currently, "
|
|
589
|
+
"you can use pynative mode or set jit_level=O0/O1.")
|
|
614
590
|
return
|
|
615
591
|
if not Validator.check_str_by_regular(mempool_block_size, _RE_PATTERN):
|
|
616
592
|
raise ValueError("For 'context.set_context', the argument 'mempool_block_size' should be in "
|
|
@@ -693,7 +669,6 @@ class _Context:
|
|
|
693
669
|
'memory_optimize_level': set_memory_optimize_level,
|
|
694
670
|
'exec_order': set_exec_order,
|
|
695
671
|
'op_timeout': set_op_timeout,
|
|
696
|
-
'memory_offload': set_memory_offload,
|
|
697
672
|
'deterministic': set_deterministic,
|
|
698
673
|
'ascend_config': set_ascend_config,
|
|
699
674
|
'jit_syntax_level': set_jit_syntax_level,
|
|
@@ -843,7 +818,8 @@ class _Context:
|
|
|
843
818
|
@staticmethod
|
|
844
819
|
def _check_speedup_config_str_value(key, value):
|
|
845
820
|
"""check speedup config str value"""
|
|
846
|
-
if key in ["pp_1f1b_overlap", "recompute_comm_overlap", "recomputation_communication_overlap"
|
|
821
|
+
if key in ["pp_1f1b_overlap", "recompute_comm_overlap", "recomputation_communication_overlap",
|
|
822
|
+
"matmul_grad_comm_overlap", "grad_matmul_communication_overlap"]:
|
|
847
823
|
if isinstance(value, str):
|
|
848
824
|
values = value.split(",")
|
|
849
825
|
for v in values:
|
|
@@ -871,8 +847,8 @@ class _Context:
|
|
|
871
847
|
try:
|
|
872
848
|
valid_option = {"recompute_comm_overlap": (ms_ctx_param.recompute_comm_overlap, str),
|
|
873
849
|
"recomputation_communication_overlap": (ms_ctx_param.recompute_comm_overlap, str),
|
|
874
|
-
"matmul_grad_comm_overlap": (ms_ctx_param.matmul_grad_comm_overlap, bool),
|
|
875
|
-
"grad_matmul_communication_overlap": (ms_ctx_param.matmul_grad_comm_overlap, bool),
|
|
850
|
+
"matmul_grad_comm_overlap": (ms_ctx_param.matmul_grad_comm_overlap, (bool, str)),
|
|
851
|
+
"grad_matmul_communication_overlap": (ms_ctx_param.matmul_grad_comm_overlap, (bool, str)),
|
|
876
852
|
"enable_task_opt": (ms_ctx_param.enable_task_opt, bool),
|
|
877
853
|
"enable_communication_fusion": (ms_ctx_param.enable_task_opt, bool),
|
|
878
854
|
"enable_grad_comm_opt": (ms_ctx_param.enable_grad_comm_opt, bool),
|
|
@@ -1087,8 +1063,8 @@ def set_auto_parallel_context(**kwargs):
|
|
|
1087
1063
|
|
|
1088
1064
|
- pipeline_interleave(bool): Indicates whether to enable the interleaved execution mode.
|
|
1089
1065
|
- pipeline_scheduler(str): Indicates the scheduling mode for pipeline parallelism. Only support
|
|
1090
|
-
``gpipe/1f1b/seqpipe/seqvpp/seqsmartvpp``. When applying seqsmartvpp,
|
|
1091
|
-
must be an even number.
|
|
1066
|
+
``gpipe/1f1b/seqpipe/seqvpp/seqsmartvpp/zero_bubble_v``. When applying seqsmartvpp,
|
|
1067
|
+
the pipeline parallel must be an even number.
|
|
1092
1068
|
parallel_optimizer_config (dict): A dict contains the keys and values for setting the parallel optimizer
|
|
1093
1069
|
configure. The configure provides more detailed behavior control about parallel training
|
|
1094
1070
|
when parallel optimizer is enabled. The configure will be effective when we use
|
|
@@ -1272,65 +1248,6 @@ def reset_auto_parallel_context():
|
|
|
1272
1248
|
api.ms_compile_cache.clear()
|
|
1273
1249
|
|
|
1274
1250
|
|
|
1275
|
-
@args_type_check(offload_config=dict)
|
|
1276
|
-
def set_offload_context(offload_config):
|
|
1277
|
-
r"""
|
|
1278
|
-
Configure heterogeneous training detailed parameters to adjust the offload strategy, this api will be deprecated
|
|
1279
|
-
and removed in future versions.
|
|
1280
|
-
|
|
1281
|
-
Note:
|
|
1282
|
-
The offload configuration is only used if the memory offload feature is enabled
|
|
1283
|
-
via mindspore.set_context(memory_offload="ON"), and the memory_optimize_level must be set to O0. On the Ascend
|
|
1284
|
-
hardware platform, the graph compilation level must be O0.
|
|
1285
|
-
|
|
1286
|
-
Args:
|
|
1287
|
-
offload_config (dict): A dict contains the keys and values for setting the offload context
|
|
1288
|
-
configure.It supports the following keys.
|
|
1289
|
-
|
|
1290
|
-
- offload_path (str): The path of offload, relative paths are supported. Default: ``"./offload"``.
|
|
1291
|
-
- offload_cpu_size (str): The cpu memory size for offload. The format is "xxGB".
|
|
1292
|
-
- offload_disk_size (str): The disk size for offload. The format is "xxGB"
|
|
1293
|
-
- hbm_ratio (float): The ratio that can be used based on the maximum device memory.
|
|
1294
|
-
The range is (0,1], Default: ``1.0``.
|
|
1295
|
-
- cpu_ratio (float): The ratio that can be used based on the maximum host memory.
|
|
1296
|
-
The range is (0,1], Default: ``1.0``.
|
|
1297
|
-
- enable_pinned_mem (bool): The flag of whether enabling Pinned Memory. Default: ``True``.
|
|
1298
|
-
- enable_aio (bool): The flag of whether enabling aio. Default: ``True``.
|
|
1299
|
-
- aio_block_size (str): The size of aio block. The format is "xxGB".
|
|
1300
|
-
- aio_queue_depth (int): The depth of aio queue.
|
|
1301
|
-
- offload_param (str): The param for offload destination, cpu or disk, Default: ``""``.
|
|
1302
|
-
- offload_checkpoint (str): The checkpoint for offload destination, only valid if recompute is turned on,
|
|
1303
|
-
cpu or disk, Default: ``""``.
|
|
1304
|
-
- auto_offload (bool): The flag of whether auto offload. Default: ``True``.
|
|
1305
|
-
- host_mem_block_size (str): The memory block size of host memory pool. The format is "xxGB"
|
|
1306
|
-
|
|
1307
|
-
Raises:
|
|
1308
|
-
ValueError: If input key is not attribute in auto parallel context.
|
|
1309
|
-
|
|
1310
|
-
Examples:
|
|
1311
|
-
>>> from mindspore import context
|
|
1312
|
-
>>> context.set_offload_context(offload_config={"offload_param":"cpu"})
|
|
1313
|
-
"""
|
|
1314
|
-
_set_offload_context(offload_config)
|
|
1315
|
-
|
|
1316
|
-
|
|
1317
|
-
def get_offload_context():
|
|
1318
|
-
"""
|
|
1319
|
-
Gets the offload configuration parameters, this api will be deprecated and removed in future versions.
|
|
1320
|
-
|
|
1321
|
-
Configure through interface mindspore.set_offload_context(). If the user is not set, the default configuration is
|
|
1322
|
-
obtained.
|
|
1323
|
-
|
|
1324
|
-
Returns:
|
|
1325
|
-
Dict, heterogeneous training offload detailed configuration parameters.
|
|
1326
|
-
|
|
1327
|
-
Examples:
|
|
1328
|
-
>>> from mindspore import context
|
|
1329
|
-
>>> offload_config = context.get_offload_context()
|
|
1330
|
-
"""
|
|
1331
|
-
return _get_offload_context()
|
|
1332
|
-
|
|
1333
|
-
|
|
1334
1251
|
def _check_target_specific_cfgs(device, arg_key):
|
|
1335
1252
|
"""Checking whether a config is suitable for a specified device"""
|
|
1336
1253
|
device_cfgs = {
|
|
@@ -1406,8 +1323,7 @@ def _check_context_deprecated(key):
|
|
|
1406
1323
|
mindspore.device_context.gpu.op_precision.conv_fprop_algo(),
|
|
1407
1324
|
mindspore.device_context.gpu.op_precision.conv_wgrad_algo(),
|
|
1408
1325
|
mindspore.device_context.gpu.op_precision.conv_dgrad_algo()''',
|
|
1409
|
-
'runtime_num_threads': 'api mindspore.device_context.cpu.op_tuning.threads_num()'
|
|
1410
|
-
'memory_offload': "`device` parameter of `mindspore.Parameter`"}
|
|
1326
|
+
'runtime_num_threads': 'api mindspore.device_context.cpu.op_tuning.threads_num()'}
|
|
1411
1327
|
invalid_context_dict = {
|
|
1412
1328
|
'exception_dump': {'version': '2.6', 'interface': 'device_context.ascend.op_debug.aclinit_config()'}
|
|
1413
1329
|
}
|
|
@@ -1474,9 +1390,6 @@ def set_context(**kwargs):
|
|
|
1474
1390
|
inter_op_parallel_num(int): The thread number of op parallel at the same time.
|
|
1475
1391
|
Default ``0`` . This parameter will be deprecated and removed in future versions.
|
|
1476
1392
|
Please use the api :func:`mindspore.runtime.dispatch_threads_num` instead.
|
|
1477
|
-
memory_offload (str): Whether to enable the memory offload function. Default ``"OFF"`` .
|
|
1478
|
-
This parameter will be deprecated and removed in future versions. Please use the api
|
|
1479
|
-
:func:`mindspore.nn.Cell.offload` instead.
|
|
1480
1393
|
disable_format_transform (bool): Whether to disable the automatic format transform function from NCHW
|
|
1481
1394
|
to NHWC. Default ``False`` . This parameter will be deprecated and removed in future versions. Please
|
|
1482
1395
|
use the related parameter of :func:`mindspore.jit` instead.
|
|
@@ -1546,7 +1459,9 @@ def set_context(**kwargs):
|
|
|
1546
1459
|
This parameter will be deprecated and removed in future versions. Please use the
|
|
1547
1460
|
api :func:`mindspore.parallel.auto_parallel.AutoParallel.transformer_opt` instead.
|
|
1548
1461
|
- hccl_watchdog (bool): Enable a thread to monitor the failure of collective communication.
|
|
1549
|
-
Default ``True`` .
|
|
1462
|
+
Default ``True`` . This parameter will be deprecated and removed in future versions. Please use the
|
|
1463
|
+
environment variable `MS_ENABLE_THM="{HCCL_WATCHDOG:1}"` instead.
|
|
1464
|
+
|
|
1550
1465
|
gpu_config (dict): Set the parameters specific to gpu hardware platform. It is not set by default.
|
|
1551
1466
|
|
|
1552
1467
|
- conv_fprop_algo (str): Specifies convolution forward algorithm. Default ``"normal"`` .
|
|
@@ -1601,7 +1516,6 @@ def set_context(**kwargs):
|
|
|
1601
1516
|
>>> ms.set_context(inter_op_parallel_num=4)
|
|
1602
1517
|
>>> ms.set_context(disable_format_transform=True)
|
|
1603
1518
|
>>> ms.set_context(memory_optimize_level='O0')
|
|
1604
|
-
>>> ms.set_context(memory_offload='ON')
|
|
1605
1519
|
>>> ms.set_context(deterministic='ON')
|
|
1606
1520
|
>>> ms.set_context(ascend_config={"precision_mode": "force_fp16", "jit_compile": True,
|
|
1607
1521
|
... "atomic_clean_policy": 1, "op_precision_mode": "./op_precision_config_file",
|
|
@@ -1787,8 +1701,8 @@ def set_ps_context(**kwargs):
|
|
|
1787
1701
|
config_file_path (str): Configuration file path used by recovery, parameter server training mode only
|
|
1788
1702
|
supports Server disaster recovery currently. Default: ``''`` .
|
|
1789
1703
|
enable_ssl (bool): Set PS SSL mode enabled or disabled. Default: ``False``.
|
|
1790
|
-
|
|
1791
|
-
|
|
1704
|
+
When set to False, users need to review and confirm the security of network environment
|
|
1705
|
+
where the distributed job is located.
|
|
1792
1706
|
client_password (str): Password to decrypt the secret key stored in the client certificate. Default: ``''`` .
|
|
1793
1707
|
server_password (str): Password to decrypt the secret key stored in the server certificate. Default: ``''`` .
|
|
1794
1708
|
|
|
@@ -1815,8 +1729,8 @@ def get_ps_context(attr_key):
|
|
|
1815
1729
|
parameter server training mode only
|
|
1816
1730
|
supports Server disaster recovery currently. Default: ``''`` .
|
|
1817
1731
|
- enable_ssl (bool, optional): Set PS SSL mode enabled or disabled. Default: ``False`` .
|
|
1818
|
-
|
|
1819
|
-
|
|
1732
|
+
When set to False, users need to review and confirm the security of network environment
|
|
1733
|
+
where the distributed job is located.
|
|
1820
1734
|
|
|
1821
1735
|
Returns:
|
|
1822
1736
|
Returns attribute value according to the key.
|
mindspore/dataset/__init__.py
CHANGED
|
@@ -13,7 +13,7 @@
|
|
|
13
13
|
# limitations under the License.
|
|
14
14
|
"""
|
|
15
15
|
At the heart of MindSpore data loading utility is the `mindspore.dataset` module.
|
|
16
|
-
It is a `dataset engine <https://www.mindspore.cn/docs/en/master/
|
|
16
|
+
It is a `dataset engine <https://www.mindspore.cn/docs/en/master/features/data_engine.html>`_ based on pipline design.
|
|
17
17
|
|
|
18
18
|
This module provides the following data loading methods to help users load datasets into MindSpore.
|
|
19
19
|
|
|
@@ -2793,7 +2793,7 @@ class PhaseVocoder(AudioTensorOperation):
|
|
|
2793
2793
|
Raises:
|
|
2794
2794
|
TypeError: If `rate` is not of type float.
|
|
2795
2795
|
ValueError: If `rate` is not a positive number.
|
|
2796
|
-
TypeError: If `phase_advance` is not of type
|
|
2796
|
+
TypeError: If `phase_advance` is not of type `numpy.ndarray` .
|
|
2797
2797
|
RuntimeError: If input tensor is not in shape of <..., freq, num_frame, complex=2>.
|
|
2798
2798
|
|
|
2799
2799
|
Supported Platforms:
|
mindspore/dataset/core/config.py
CHANGED
|
@@ -34,7 +34,6 @@ from mindspore.dataset.core.validator_helpers import replace_none, type_check, c
|
|
|
34
34
|
from mindspore.dataset.debug import DebugHook, PrintMetaDataHook
|
|
35
35
|
from mindspore.dataset.core.validator_helpers import check_independent_mode
|
|
36
36
|
|
|
37
|
-
|
|
38
37
|
__all__ = ['set_sending_batches', 'load', '_init_device_info',
|
|
39
38
|
'set_seed', 'get_seed',
|
|
40
39
|
'set_prefetch_size', 'get_prefetch_size',
|
|
@@ -1174,3 +1173,38 @@ def get_multiprocessing_start_method():
|
|
|
1174
1173
|
>>> multiprocessing_start_method = ds.config.get_multiprocessing_start_method()
|
|
1175
1174
|
"""
|
|
1176
1175
|
return _config.get_multiprocessing_start_method()
|
|
1176
|
+
|
|
1177
|
+
|
|
1178
|
+
def set_video_backend(backend):
|
|
1179
|
+
"""
|
|
1180
|
+
Set the backend used to decode videos.
|
|
1181
|
+
|
|
1182
|
+
Args:
|
|
1183
|
+
backend (str): Type of the video backend. It can be "CPU" or "Ascend".
|
|
1184
|
+
|
|
1185
|
+
Raises:
|
|
1186
|
+
TypeError: If `backend` is not of type str.
|
|
1187
|
+
ValueError: If `backend` is not "CPU" or "Ascend".
|
|
1188
|
+
|
|
1189
|
+
Examples:
|
|
1190
|
+
>>> import mindspore.dataset as ds
|
|
1191
|
+
>>> ds.config.set_video_backend("CPU")
|
|
1192
|
+
"""
|
|
1193
|
+
|
|
1194
|
+
type_check(backend, (str,), "backend")
|
|
1195
|
+
check_valid_str(backend, ["CPU", "Ascend"], "backend")
|
|
1196
|
+
_config.set_video_backend(backend)
|
|
1197
|
+
|
|
1198
|
+
|
|
1199
|
+
def get_video_backend():
|
|
1200
|
+
"""
|
|
1201
|
+
Returns the currently active backend used to decode videos.
|
|
1202
|
+
|
|
1203
|
+
Returns:
|
|
1204
|
+
str, backend used to decode videos.
|
|
1205
|
+
|
|
1206
|
+
Examples:
|
|
1207
|
+
>>> import mindspore.dataset as ds
|
|
1208
|
+
>>> backend = ds.config.get_video_backend()
|
|
1209
|
+
"""
|
|
1210
|
+
return _config.get_video_backend()
|