mindspore 2.4.10__cp311-cp311-win_amd64.whl → 2.5.0__cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic. Click here for more details.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +8 -3
- mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +0 -5
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/compile_config.py +64 -0
- mindspore/_extends/parse/deprecated/__init__.py +0 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +375 -0
- mindspore/_extends/parse/parser.py +23 -5
- mindspore/_extends/parse/standard_method.py +123 -27
- mindspore/_extends/pijit/pijit_func_white_list.py +1 -1
- mindspore/amp.py +7 -1
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/boost_cell_wrapper.py +136 -41
- mindspore/common/__init__.py +3 -1
- mindspore/common/_register_for_tensor.py +0 -1
- mindspore/common/_stub_tensor.py +25 -4
- mindspore/common/_tensor_cpp_method.py +17 -0
- mindspore/common/_tensor_docs.py +6132 -0
- mindspore/common/api.py +98 -21
- mindspore/common/dtype.py +34 -34
- mindspore/common/dump.py +2 -1
- mindspore/common/file_system.py +8 -3
- mindspore/common/generator.py +2 -0
- mindspore/common/hook_handle.py +3 -1
- mindspore/common/initializer.py +3 -4
- mindspore/common/lazy_inline.py +8 -2
- mindspore/common/mindir_util.py +10 -2
- mindspore/common/parameter.py +31 -15
- mindspore/common/tensor.py +713 -1337
- mindspore/communication/__init__.py +1 -1
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/comm_func.py +215 -173
- mindspore/communication/management.py +23 -20
- mindspore/context.py +285 -191
- mindspore/dataset/__init__.py +23 -19
- mindspore/dataset/callback/ds_callback.py +2 -1
- mindspore/dataset/core/config.py +84 -3
- mindspore/dataset/engine/cache_admin.py +3 -3
- mindspore/dataset/engine/cache_client.py +5 -4
- mindspore/dataset/engine/datasets.py +192 -149
- mindspore/dataset/engine/datasets_audio.py +14 -0
- mindspore/dataset/engine/datasets_standard_format.py +11 -11
- mindspore/dataset/engine/datasets_text.py +38 -1
- mindspore/dataset/engine/datasets_user_defined.py +100 -66
- mindspore/dataset/engine/datasets_vision.py +81 -8
- mindspore/dataset/engine/iterators.py +281 -63
- mindspore/dataset/engine/obs/util.py +8 -0
- mindspore/dataset/engine/queue.py +40 -0
- mindspore/dataset/engine/samplers.py +26 -2
- mindspore/dataset/engine/serializer_deserializer.py +1 -1
- mindspore/dataset/engine/validators.py +43 -11
- mindspore/dataset/transforms/py_transforms_util.py +17 -0
- mindspore/dataset/transforms/transforms.py +29 -12
- mindspore/dataset/vision/validators.py +1 -2
- mindspore/device_context/__init__.py +21 -0
- mindspore/device_context/ascend/__init__.py +25 -0
- mindspore/device_context/ascend/device.py +72 -0
- mindspore/device_context/ascend/op_debug.py +94 -0
- mindspore/device_context/ascend/op_precision.py +193 -0
- mindspore/device_context/ascend/op_tuning.py +127 -0
- mindspore/device_context/cpu/__init__.py +25 -0
- mindspore/device_context/cpu/device.py +62 -0
- mindspore/device_context/cpu/op_tuning.py +43 -0
- mindspore/device_context/gpu/__init__.py +21 -0
- mindspore/device_context/gpu/device.py +70 -0
- mindspore/device_context/gpu/op_precision.py +67 -0
- mindspore/device_context/gpu/op_tuning.py +175 -0
- mindspore/device_manager.py +134 -0
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/llm_boost/__init__.py +1 -0
- mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
- mindspore/experimental/llm_boost/register.py +1 -0
- mindspore/experimental/optim/adadelta.py +26 -22
- mindspore/experimental/optim/adam.py +3 -0
- mindspore/experimental/optim/lr_scheduler.py +33 -24
- mindspore/experimental/optim/radam.py +33 -30
- mindspore/hal/device.py +28 -0
- mindspore/hal/event.py +17 -0
- mindspore/hal/memory.py +94 -3
- mindspore/hal/stream.py +91 -6
- mindspore/include/api/context.h +0 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +12 -0
- mindspore/mindrecord/__init__.py +1 -1
- mindspore/mindrecord/config.py +17 -316
- mindspore/mindrecord/filereader.py +1 -9
- mindspore/mindrecord/filewriter.py +5 -15
- mindspore/mindrecord/mindpage.py +1 -9
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mint/__init__.py +824 -218
- mindspore/mint/distributed/__init__.py +66 -4
- mindspore/mint/distributed/distributed.py +2594 -44
- mindspore/mint/linalg/__init__.py +6 -0
- mindspore/mint/nn/__init__.py +473 -14
- mindspore/mint/nn/functional.py +486 -11
- mindspore/mint/nn/layer/__init__.py +17 -4
- mindspore/mint/nn/layer/_functions.py +330 -0
- mindspore/mint/nn/layer/activation.py +169 -1
- mindspore/mint/nn/layer/basic.py +123 -0
- mindspore/mint/nn/layer/conv.py +727 -0
- mindspore/mint/nn/layer/normalization.py +215 -19
- mindspore/mint/nn/layer/padding.py +797 -0
- mindspore/mint/nn/layer/pooling.py +170 -0
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/adam.py +223 -0
- mindspore/mint/optim/adamw.py +26 -19
- mindspore/mint/special/__init__.py +2 -1
- mindspore/multiprocessing/__init__.py +5 -0
- mindspore/nn/cell.py +126 -19
- mindspore/nn/dynamic_lr.py +2 -1
- mindspore/nn/layer/activation.py +6 -6
- mindspore/nn/layer/basic.py +35 -25
- mindspore/nn/layer/channel_shuffle.py +3 -3
- mindspore/nn/layer/embedding.py +3 -3
- mindspore/nn/layer/normalization.py +8 -7
- mindspore/nn/layer/padding.py +4 -3
- mindspore/nn/layer/pooling.py +47 -13
- mindspore/nn/layer/rnn_cells.py +1 -1
- mindspore/nn/layer/rnns.py +2 -1
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +48 -26
- mindspore/nn/learning_rate_schedule.py +5 -3
- mindspore/nn/loss/loss.py +31 -36
- mindspore/nn/optim/ada_grad.py +1 -0
- mindspore/nn/optim/adadelta.py +2 -2
- mindspore/nn/optim/adam.py +1 -1
- mindspore/nn/optim/lars.py +1 -4
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/rprop.py +2 -2
- mindspore/nn/optim/thor.py +2 -1
- mindspore/nn/utils/init.py +13 -11
- mindspore/nn/wrap/cell_wrapper.py +4 -6
- mindspore/nn/wrap/loss_scale.py +3 -4
- mindspore/numpy/array_creations.py +60 -62
- mindspore/numpy/array_ops.py +148 -143
- mindspore/numpy/logic_ops.py +41 -42
- mindspore/numpy/math_ops.py +361 -359
- mindspore/numpy/utils.py +16 -16
- mindspore/numpy/utils_const.py +4 -4
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +2 -1
- mindspore/ops/_grad_experimental/grad_comm_ops.py +94 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +6 -1
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
- mindspore/ops/_vmap/vmap_array_ops.py +20 -19
- mindspore/ops/_vmap/vmap_base.py +0 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +19 -13
- mindspore/ops/_vmap/vmap_math_ops.py +11 -9
- mindspore/ops/_vmap/vmap_nn_ops.py +20 -34
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +149 -12
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -61
- mindspore/ops/auto_generate/gen_extend_func.py +554 -60
- mindspore/ops/auto_generate/gen_ops_def.py +1621 -115
- mindspore/ops/auto_generate/gen_ops_prim.py +8024 -3409
- mindspore/ops/auto_generate/pyboost_inner_prim.py +183 -79
- mindspore/ops/composite/base.py +1 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +229 -30
- mindspore/ops/composite/multitype_ops/pow_impl.py +0 -29
- mindspore/ops/function/__init__.py +12 -0
- mindspore/ops/function/array_func.py +561 -159
- mindspore/ops/function/clip_func.py +64 -0
- mindspore/ops/function/debug_func.py +28 -20
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +5 -4
- mindspore/ops/function/math_func.py +1659 -290
- mindspore/ops/function/nn_func.py +988 -317
- mindspore/ops/function/parameter_func.py +3 -56
- mindspore/ops/function/random_func.py +243 -33
- mindspore/ops/function/sparse_unary_func.py +1 -1
- mindspore/ops/functional.py +18 -5
- mindspore/ops/functional_overload.py +897 -0
- mindspore/ops/operations/__init__.py +3 -2
- mindspore/ops/operations/_embedding_cache_ops.py +4 -4
- mindspore/ops/operations/_grad_ops.py +2 -34
- mindspore/ops/operations/_infer_ops.py +2 -1
- mindspore/ops/operations/_inner_ops.py +38 -8
- mindspore/ops/operations/array_ops.py +45 -303
- mindspore/ops/operations/comm_ops.py +19 -16
- mindspore/ops/operations/custom_ops.py +11 -55
- mindspore/ops/operations/debug_ops.py +42 -47
- mindspore/ops/operations/inner_ops.py +6 -4
- mindspore/ops/operations/linalg_ops.py +3 -2
- mindspore/ops/operations/manually_defined/ops_def.py +185 -104
- mindspore/ops/operations/math_ops.py +11 -216
- mindspore/ops/operations/nn_ops.py +146 -308
- mindspore/ops/primitive.py +23 -21
- mindspore/ops/tensor_method.py +1669 -0
- mindspore/ops_generate/aclnn_kernel_register_auto_cc_generator.py +110 -0
- mindspore/ops_generate/add_tensor_docs_generator.py +54 -0
- mindspore/ops_generate/arg_handler.py +0 -61
- mindspore/ops_generate/auto_grad_impl_cc_generator.py +135 -0
- mindspore/ops_generate/auto_grad_reg_cc_generator.py +93 -0
- mindspore/ops_generate/base_generator.py +11 -0
- mindspore/ops_generate/cpp_create_prim_instance_helper_generator.py +108 -0
- mindspore/ops_generate/functional_map_cpp_generator.py +491 -0
- mindspore/ops_generate/functional_overload_py_generator.py +110 -0
- mindspore/ops_generate/functions_cc_generator.py +233 -0
- mindspore/ops_generate/gen_aclnn_implement.py +110 -114
- mindspore/ops_generate/gen_constants.py +157 -3
- mindspore/ops_generate/gen_ops.py +245 -990
- mindspore/ops_generate/gen_pyboost_func.py +97 -998
- mindspore/ops_generate/gen_utils.py +119 -33
- mindspore/ops_generate/lite_ops_cpp_generator.py +155 -0
- mindspore/ops_generate/op_api_proto.py +206 -0
- mindspore/ops_generate/op_def_py_generator.py +131 -0
- mindspore/ops_generate/op_prim_py_generator.py +480 -0
- mindspore/ops_generate/op_proto.py +373 -108
- mindspore/ops_generate/op_template_parser.py +436 -0
- mindspore/ops_generate/ops_def_cc_generator.py +288 -0
- mindspore/ops_generate/ops_def_h_generator.py +74 -0
- mindspore/ops_generate/ops_name_h_generator.py +68 -0
- mindspore/ops_generate/ops_primitive_h_generator.py +81 -0
- mindspore/ops_generate/pyboost_functions_cpp_generator.py +370 -0
- mindspore/ops_generate/pyboost_functions_h_generator.py +68 -0
- mindspore/ops_generate/pyboost_functions_py_generator.py +148 -0
- mindspore/ops_generate/pyboost_grad_function_cpp_generator.py +154 -0
- mindspore/ops_generate/pyboost_inner_prim_generator.py +131 -0
- mindspore/ops_generate/pyboost_native_grad_functions_generator.py +268 -0
- mindspore/ops_generate/pyboost_op_cpp_code_generator.py +851 -0
- mindspore/ops_generate/pyboost_overload_functions_cpp_generator.py +344 -0
- mindspore/ops_generate/pyboost_utils.py +92 -33
- mindspore/ops_generate/template.py +294 -44
- mindspore/ops_generate/tensor_func_reg_cpp_generator.py +422 -0
- mindspore/parallel/__init__.py +3 -3
- mindspore/parallel/_auto_parallel_context.py +24 -33
- mindspore/parallel/_parallel_serialization.py +13 -2
- mindspore/parallel/_utils.py +4 -1
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +44 -0
- mindspore/parallel/cluster/process_entity/_api.py +131 -37
- mindspore/parallel/cluster/process_entity/_utils.py +41 -6
- mindspore/parallel/cluster/run.py +20 -3
- mindspore/parallel/parameter_broadcast.py +1 -1
- mindspore/parallel/shard.py +3 -0
- mindspore/parallel/transform_safetensors.py +119 -253
- mindspore/profiler/__init__.py +17 -4
- mindspore/profiler/analysis/__init__.py +0 -0
- mindspore/profiler/analysis/parser/__init__.py +0 -0
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +166 -0
- mindspore/profiler/analysis/parser/base_parser.py +158 -0
- mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
- mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
- mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +261 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +84 -0
- mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
- mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
- mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
- mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +260 -0
- mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
- mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
- mindspore/profiler/analysis/task_manager.py +131 -0
- mindspore/profiler/analysis/time_converter.py +84 -0
- mindspore/profiler/analysis/viewer/__init__.py +0 -0
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +333 -0
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +252 -0
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +313 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +322 -0
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +265 -0
- mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
- mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +97 -0
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
- mindspore/profiler/analysis/work_flow.py +73 -0
- mindspore/profiler/common/ascend_msprof_exporter.py +138 -0
- mindspore/profiler/common/command_executor.py +90 -0
- mindspore/profiler/common/constant.py +174 -3
- mindspore/profiler/common/file_manager.py +208 -0
- mindspore/profiler/common/log.py +130 -0
- mindspore/profiler/common/msprof_cmd_tool.py +202 -0
- mindspore/profiler/common/path_manager.py +371 -0
- mindspore/profiler/common/process_bar.py +168 -0
- mindspore/profiler/common/process_pool.py +9 -3
- mindspore/profiler/common/profiler_context.py +476 -0
- mindspore/profiler/common/profiler_info.py +304 -0
- mindspore/profiler/common/profiler_output_path.py +284 -0
- mindspore/profiler/common/profiler_parameters.py +210 -0
- mindspore/profiler/common/profiler_path_manager.py +120 -0
- mindspore/profiler/common/record_function.py +76 -0
- mindspore/profiler/common/tlv_decoder.py +76 -0
- mindspore/profiler/common/util.py +75 -2
- mindspore/profiler/dynamic_profiler.py +270 -37
- mindspore/profiler/envprofiler.py +138 -0
- mindspore/profiler/mstx.py +199 -0
- mindspore/profiler/platform/__init__.py +21 -0
- mindspore/profiler/platform/base_profiler.py +40 -0
- mindspore/profiler/platform/cpu_profiler.py +124 -0
- mindspore/profiler/platform/gpu_profiler.py +74 -0
- mindspore/profiler/platform/npu_profiler.py +309 -0
- mindspore/profiler/profiler.py +580 -93
- mindspore/profiler/profiler_action_controller.py +187 -0
- mindspore/profiler/profiler_interface.py +114 -0
- mindspore/profiler/schedule.py +208 -0
- mindspore/rewrite/api/symbol_tree.py +1 -2
- mindspore/run_check/_check_version.py +2 -6
- mindspore/runtime/__init__.py +37 -0
- mindspore/runtime/device.py +27 -0
- mindspore/runtime/event.py +209 -0
- mindspore/runtime/executor.py +148 -0
- mindspore/runtime/memory.py +392 -0
- mindspore/runtime/stream.py +460 -0
- mindspore/runtime/thread_bind_core.py +401 -0
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +2 -2
- mindspore/train/_utils.py +53 -18
- mindspore/train/amp.py +8 -4
- mindspore/train/callback/_checkpoint.py +32 -18
- mindspore/train/callback/_early_stop.py +1 -1
- mindspore/train/callback/_flops_collector.py +105 -69
- mindspore/train/callback/_history.py +1 -1
- mindspore/train/callback/_summary_collector.py +44 -6
- mindspore/train/callback/_tft_register.py +31 -10
- mindspore/train/dataset_helper.py +11 -11
- mindspore/train/metrics/precision.py +4 -5
- mindspore/train/mind_ir_pb2.py +167 -46
- mindspore/train/model.py +13 -15
- mindspore/train/serialization.py +462 -76
- mindspore/train/summary/summary_record.py +1 -2
- mindspore/train/train_thor/model_thor.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +4 -2
- mindspore/utils/dryrun.py +138 -0
- mindspore/utils/runtime_execution_order_check.py +550 -0
- mindspore/version.py +1 -1
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/METADATA +2 -3
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/RECORD +362 -238
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/entry_points.txt +1 -1
- mindspore/common/_tensor_overload.py +0 -139
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/profiler/envprofiling.py +0 -254
- mindspore/profiler/profiling.py +0 -1926
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/WHEEL +0 -0
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/top_level.txt +0 -0
mindspore/context.py
CHANGED
|
@@ -26,7 +26,7 @@ from collections import namedtuple
|
|
|
26
26
|
from types import FunctionType
|
|
27
27
|
|
|
28
28
|
from mindspore import log as logger
|
|
29
|
-
from mindspore._c_expression import MSContext, ms_ctx_param
|
|
29
|
+
from mindspore._c_expression import MSContext, ms_ctx_param, CollectiveManager
|
|
30
30
|
from mindspore import _checkparam as Validator
|
|
31
31
|
from mindspore._checkparam import args_type_check
|
|
32
32
|
from mindspore.parallel._auto_parallel_context import _set_auto_parallel_context, _get_auto_parallel_context, \
|
|
@@ -254,9 +254,9 @@ class _Context:
|
|
|
254
254
|
|
|
255
255
|
def set_exec_order(self, exec_order):
|
|
256
256
|
"""
|
|
257
|
-
The execution order mode, support "bfs", "dfs"
|
|
257
|
+
The execution order mode, support "bfs", "dfs".
|
|
258
258
|
"""
|
|
259
|
-
exec_order_modes = ["bfs", "dfs"
|
|
259
|
+
exec_order_modes = ["bfs", "dfs"]
|
|
260
260
|
if exec_order not in exec_order_modes:
|
|
261
261
|
raise ValueError(f"For 'context.set_context', the argument 'exec_order' must be one of "
|
|
262
262
|
f"{exec_order_modes}, but got {exec_order}.")
|
|
@@ -289,6 +289,11 @@ class _Context:
|
|
|
289
289
|
if deterministic not in deterministic_options:
|
|
290
290
|
raise ValueError(f"For 'context.set_context', the argument 'deterministic' must be one of "
|
|
291
291
|
f"{deterministic_options}, but got {deterministic}.")
|
|
292
|
+
|
|
293
|
+
# Must wait for all async created groups to be initialized so that
|
|
294
|
+
# deterministic feature could be consistent between all processes.
|
|
295
|
+
CollectiveManager.get_instance().wait_all_comm_init()
|
|
296
|
+
|
|
292
297
|
self.set_param(ms_ctx_param.deterministic, deterministic)
|
|
293
298
|
|
|
294
299
|
hccl_deterministic = os.getenv("HCCL_DETERMINISTIC")
|
|
@@ -846,6 +851,8 @@ class _Context:
|
|
|
846
851
|
(ms_ctx_param.enable_allreduce_slice_to_reducescatter, bool),
|
|
847
852
|
"enable_interleave_split_concat_branch":
|
|
848
853
|
(ms_ctx_param.enable_interleave_split_concat_branch, bool),
|
|
854
|
+
"enable_interleave_parallel_branch":
|
|
855
|
+
(ms_ctx_param.enable_interleave_parallel_branch, bool),
|
|
849
856
|
"enable_offloading_packed_experts": (ms_ctx_param.enable_offloading_packed_experts, bool),
|
|
850
857
|
"compute_communicate_fusion_level":
|
|
851
858
|
(ms_ctx_param.compute_communicate_fusion_level, int),
|
|
@@ -936,6 +943,7 @@ def set_auto_parallel_context(**kwargs):
|
|
|
936
943
|
\ group_ckpt_save_file
|
|
937
944
|
\ auto_pipeline
|
|
938
945
|
\ dump_local_norm
|
|
946
|
+
\ dump_local_norm_path
|
|
939
947
|
\ dump_device_local_norm
|
|
940
948
|
=========================== ===========================
|
|
941
949
|
|
|
@@ -992,7 +1000,8 @@ def set_auto_parallel_context(**kwargs):
|
|
|
992
1000
|
dataset_strategy="data_parallel" is equal to full_batch=False, dataset_strategy="full_batch" is
|
|
993
1001
|
equal to full_batch=True. For execution mode is 'GRAPH_MODE' and dataset load into net by model
|
|
994
1002
|
parallel strategy likes ds_stra ((1, 8), (1, 8)), it requires using
|
|
995
|
-
set_auto_parallel_context(dataset_strategy=ds_stra).
|
|
1003
|
+
set_auto_parallel_context(dataset_strategy=ds_stra). The dataset sharding strategy is not
|
|
1004
|
+
affected by the currently configured parallel mode.
|
|
996
1005
|
enable_parallel_optimizer (bool): This is a developing feature, which shards the weight update computation for
|
|
997
1006
|
data parallel training in the benefit of time and memory saving. Currently, auto and semi auto
|
|
998
1007
|
parallel mode support all optimizers in both Ascend and GPU. Data parallel mode only supports
|
|
@@ -1016,7 +1025,7 @@ def set_auto_parallel_context(**kwargs):
|
|
|
1016
1025
|
|
|
1017
1026
|
- pipeline_interleave(bool): Indicates whether to enable the interleaved execution mode.
|
|
1018
1027
|
- pipeline_scheduler(str): Indicates the scheduling mode for pipeline parallelism. Only support
|
|
1019
|
-
``gpipe/1f1b``.
|
|
1028
|
+
``gpipe/1f1b/seqpipe``.
|
|
1020
1029
|
parallel_optimizer_config (dict): A dict contains the keys and values for setting the parallel optimizer
|
|
1021
1030
|
configure. The configure provides more detailed behavior control about parallel training
|
|
1022
1031
|
when parallel optimizer is enabled. The configure will be effective when we use
|
|
@@ -1091,6 +1100,8 @@ def set_auto_parallel_context(**kwargs):
|
|
|
1091
1100
|
dump_local_norm (bool): Whether to dump local_norm value, when the `parallel_mode` is set to
|
|
1092
1101
|
``semi_auto_parallel`` or ``auto_parallel``.
|
|
1093
1102
|
Default: ``False`` .
|
|
1103
|
+
dump_local_norm_path (str): The path to save dump files of local_norm value.
|
|
1104
|
+
Default: ``''`` .
|
|
1094
1105
|
dump_device_local_norm (bool): Whether to dump device_local_norm value, when the `parallel_mode` is set to
|
|
1095
1106
|
``semi_auto_parallel`` or ``auto_parallel``.
|
|
1096
1107
|
Default: ``False`` .
|
|
@@ -1171,6 +1182,7 @@ def reset_auto_parallel_context():
|
|
|
1171
1182
|
- fusion_threshold: 64.
|
|
1172
1183
|
- auto_pipeline: False.
|
|
1173
1184
|
- dump_local_norm: False.
|
|
1185
|
+
- dump_local_norm_path: ''.
|
|
1174
1186
|
- dump_device_local_norm: False.
|
|
1175
1187
|
|
|
1176
1188
|
Examples:
|
|
@@ -1184,7 +1196,8 @@ def reset_auto_parallel_context():
|
|
|
1184
1196
|
@args_type_check(offload_config=dict)
|
|
1185
1197
|
def set_offload_context(offload_config):
|
|
1186
1198
|
r"""
|
|
1187
|
-
Configure heterogeneous training detailed parameters to adjust the offload strategy.
|
|
1199
|
+
Configure heterogeneous training detailed parameters to adjust the offload strategy. This function is deprecated and
|
|
1200
|
+
will be removed in future versions.
|
|
1188
1201
|
|
|
1189
1202
|
Note:
|
|
1190
1203
|
The offload configuration is only used if the memory offload feature is enabled
|
|
@@ -1225,7 +1238,8 @@ def set_offload_context(offload_config):
|
|
|
1225
1238
|
def get_offload_context():
|
|
1226
1239
|
"""
|
|
1227
1240
|
Gets the offload configuration parameters. Configure through interface mindspore.set_offload_context().
|
|
1228
|
-
If the user is not set, the default configuration is obtained.
|
|
1241
|
+
If the user is not set, the default configuration is obtained. This function is deprecated and will be removed in
|
|
1242
|
+
future versions.
|
|
1229
1243
|
|
|
1230
1244
|
Returns:
|
|
1231
1245
|
Dict, heterogeneous training offload detailed configuration parameters.
|
|
@@ -1240,8 +1254,6 @@ def get_offload_context():
|
|
|
1240
1254
|
def _check_target_specific_cfgs(device, arg_key):
|
|
1241
1255
|
"""Checking whether a config is suitable for a specified device"""
|
|
1242
1256
|
device_cfgs = {
|
|
1243
|
-
'enable_graph_kernel': ['Ascend', 'GPU', 'CPU'],
|
|
1244
|
-
'graph_kernel_flags': ['Ascend', 'GPU', 'CPU'],
|
|
1245
1257
|
'enable_reduce_precision': ['Ascend'],
|
|
1246
1258
|
'print_file_path': ['Ascend'],
|
|
1247
1259
|
'variable_memory_max_size': ['Ascend'],
|
|
@@ -1280,9 +1292,52 @@ def _check_key(key):
|
|
|
1280
1292
|
raise ValueError(f"Please set '{key}' through parameter ascend_config")
|
|
1281
1293
|
|
|
1282
1294
|
|
|
1295
|
+
def _check_context_deprecated(key):
|
|
1296
|
+
"""Checking whether a context key will be deprecated."""
|
|
1297
|
+
deprecated_context_dict = {'save_graphs': 'env MS_DEV_SAVE_GRAPHS',
|
|
1298
|
+
'save_graphs_path': 'env MS_DEV_SAVE_GRAPHS_PATH',
|
|
1299
|
+
'precompile_only': 'env MS_DEV_PRECOMPILE_ONLY',
|
|
1300
|
+
'check_bprop': '',
|
|
1301
|
+
'max_call_depth': 'api mindspore.set_recursion_limit()',
|
|
1302
|
+
'grad_for_scalar': 'tensor derivative',
|
|
1303
|
+
'enable_compile_cache': 'env MS_COMPILER_CACHE_ENABLE',
|
|
1304
|
+
'enable_cache_path': 'env MS_COMPILER_CACHE_PATH',
|
|
1305
|
+
'debug_level': '',
|
|
1306
|
+
'device_target': 'api mindspore.set_device()',
|
|
1307
|
+
'device_id': 'api mindspore.set_device()',
|
|
1308
|
+
'deterministic': 'api mindspore.set_deterministic()',
|
|
1309
|
+
'inter_op_parallel_num': 'api mindspore.runtime.dispatch_threads_num()',
|
|
1310
|
+
'pynative_synchronize': 'api mindspore.runtime.launch_blocking()',
|
|
1311
|
+
'max_device_memory': 'api mindspore.runtime.set_memory()',
|
|
1312
|
+
'variable_memory_max_size': 'api mindspore.runtime.set_memory()',
|
|
1313
|
+
'mempool_block_size': 'api mindspore.runtime.set_memory()',
|
|
1314
|
+
'memory_optimize_level': 'api mindspore.runtime.set_memory()',
|
|
1315
|
+
'ascend_config': '''api mindspore.device_context.ascend.op_precision.precision_mode(),
|
|
1316
|
+
mindspore.device_context.ascend.op_precision.op_precision_mode(),
|
|
1317
|
+
mindspore.device_context.ascend.op_precision.matmul_allow_hf32(),
|
|
1318
|
+
mindspore.device_context.ascend.op_precision.conv_allow_hf32(),
|
|
1319
|
+
mindspore.device_context.ascend.op_tuning.op_compile()''',
|
|
1320
|
+
'aoe_tune_mode': 'api mindspore.device_context.ascend.op_tuning.aoe_tune_mode()',
|
|
1321
|
+
'aoe_config': 'api mindspore.device_context.ascend.op_tuning.aoe_job_type()',
|
|
1322
|
+
'op_timeout': 'api mindspore.device_context.ascend.op_debug.execute_timeout()',
|
|
1323
|
+
'op_debug_option': 'api mindspore.device_context.ascend.op_debug.debug_option()',
|
|
1324
|
+
'gpu_config': '''api mindspore.device_context.gpu.op_precision.conv_allow_tf32(),
|
|
1325
|
+
mindspore.device_context.gpu.op_precision.matmul_allow_tf32(),
|
|
1326
|
+
mindspore.device_context.gpu.op_precision.conv_fprop_algo(),
|
|
1327
|
+
mindspore.device_context.gpu.op_precision.conv_wgrad_algo(),
|
|
1328
|
+
mindspore.device_context.gpu.op_precision.conv_dgrad_algo()''',
|
|
1329
|
+
'runtime_num_threads': 'api mindspore.device_context.cpu.op_tuning.threads_num()',
|
|
1330
|
+
'memory_offload': "`device` parameter of `mindspore.Parameter`"}
|
|
1331
|
+
if key in deprecated_context_dict:
|
|
1332
|
+
log = f"For 'context.set_context', the parameter '{key}' will be deprecated and removed in a future version."
|
|
1333
|
+
if deprecated_context_dict.get(key) != '':
|
|
1334
|
+
log += f" Please use the {deprecated_context_dict.get(key)} instead."
|
|
1335
|
+
logger.warning(log)
|
|
1336
|
+
|
|
1337
|
+
|
|
1283
1338
|
@args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=(bool, int),
|
|
1284
|
-
save_graphs_path=str,
|
|
1285
|
-
|
|
1339
|
+
save_graphs_path=str, aoe_tune_mode=str, aoe_config=dict,
|
|
1340
|
+
enable_reduce_precision=bool, variable_memory_max_size=str,
|
|
1286
1341
|
enable_auto_mixed_precision=bool, inter_op_parallel_num=int,
|
|
1287
1342
|
enable_graph_kernel=bool, reserve_class_name_in_scope=bool, check_bprop=bool,
|
|
1288
1343
|
max_device_memory=str, print_file_path=str, max_call_depth=int, env_config_path=str,
|
|
@@ -1291,7 +1346,7 @@ def _check_key(key):
|
|
|
1291
1346
|
op_timeout=int, deterministic=str, ascend_config=dict, jit_syntax_level=int, debug_level=int,
|
|
1292
1347
|
jit_enable_inplace_ops=bool, gpu_config=dict, jit_config=dict, enable_compile_cache=bool)
|
|
1293
1348
|
def set_context(**kwargs):
|
|
1294
|
-
"""
|
|
1349
|
+
r"""
|
|
1295
1350
|
Set context for running environment.
|
|
1296
1351
|
|
|
1297
1352
|
Context should be configured before running your program. If there is no configuration,
|
|
@@ -1302,105 +1357,127 @@ def set_context(**kwargs):
|
|
|
1302
1357
|
The mode is not recommended to be changed after net was initialized because the implementations of some
|
|
1303
1358
|
operations are different in graph mode and pynative mode. Default: ``PYNATIVE_MODE`` .
|
|
1304
1359
|
|
|
1305
|
-
Some configurations are device specific,
|
|
1306
|
-
|
|
1307
|
-
|
|
1308
|
-
|
|
1309
|
-
|
|
1310
|
-
|
|
|
1311
|
-
|
|
1312
|
-
|
|
|
1313
|
-
|
|
|
1314
|
-
| |
|
|
1315
|
-
|
|
|
1316
|
-
| |
|
|
1317
|
-
|
|
|
1318
|
-
| |
|
|
1319
|
-
|
|
|
1320
|
-
| |
|
|
1321
|
-
|
|
1322
|
-
|
|
|
1323
|
-
|
|
1324
|
-
|
|
|
1325
|
-
|
|
|
1326
|
-
| |
|
|
1327
|
-
|
|
|
1328
|
-
| |
|
|
1329
|
-
|
|
|
1330
|
-
| |
|
|
1331
|
-
|
|
|
1332
|
-
| |
|
|
1333
|
-
|
|
|
1334
|
-
| |
|
|
1335
|
-
|
|
|
1336
|
-
| |
|
|
1337
|
-
|
|
|
1338
|
-
| |
|
|
1339
|
-
|
|
|
1340
|
-
| |
|
|
1341
|
-
|
|
1342
|
-
|
|
|
1343
|
-
|
|
1344
|
-
|
|
|
1345
|
-
|
|
|
1346
|
-
| |
|
|
1347
|
-
|
|
|
1348
|
-
| |
|
|
1349
|
-
|
|
|
1350
|
-
| |
|
|
1351
|
-
|
|
|
1352
|
-
| |
|
|
1353
|
-
|
|
|
1354
|
-
|
|
|
1355
|
-
|
|
|
1356
|
-
|
|
|
1357
|
-
|
|
|
1358
|
-
|
|
|
1359
|
-
|
|
|
1360
|
-
| |
|
|
1361
|
-
|
|
|
1362
|
-
| |
|
|
1363
|
-
|
|
|
1364
|
-
| |
|
|
1365
|
-
|
|
|
1366
|
-
| |
|
|
1367
|
-
|
|
|
1368
|
-
| |
|
|
1369
|
-
|
|
|
1370
|
-
| |
|
|
1371
|
-
|
|
|
1372
|
-
| |
|
|
1373
|
-
|
|
|
1374
|
-
| |
|
|
1375
|
-
|
|
|
1376
|
-
| |
|
|
1377
|
-
|
|
|
1378
|
-
| |
|
|
1379
|
-
|
|
|
1380
|
-
| |
|
|
1381
|
-
|
|
|
1382
|
-
| |
|
|
1383
|
-
|
|
|
1384
|
-
| |
|
|
1385
|
-
|
|
|
1386
|
-
| |
|
|
1387
|
-
|
|
1360
|
+
Some configurations are device specific, and some parameters will be deprecated and removed in the future version
|
|
1361
|
+
(marked ``D`` in the second column), please use the replacement in the fourth column.
|
|
1362
|
+
see the below table for details:
|
|
1363
|
+
|
|
1364
|
+
+-------------------------+------------------------------+---------------------------+----------------------------+
|
|
1365
|
+
| Function Classification | Configuration Parameters | Hardware Platform Support| Replacement |
|
|
1366
|
+
+=========================+==============================+===========================+============================+
|
|
1367
|
+
| System Configuration | device_id (D) | CPU/GPU/Ascend | :func:`~.set_device` |
|
|
1368
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1369
|
+
| | device_target (D) | CPU/GPU/Ascend | :func:`~.set_device` |
|
|
1370
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1371
|
+
| | max_device_memory(D) | GPU/Ascend | :func:`~.set_memory` |
|
|
1372
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1373
|
+
| | variable_memory_max_size (D) | Ascend | :func:`~.set_memory` |
|
|
1374
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1375
|
+
| | mempool_block_size (D) | GPU/Ascend | :func:`~.set_memory` |
|
|
1376
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1377
|
+
| | op_timeout (D) | Ascend | :func:`~.execute_timeout` |
|
|
1378
|
+
+-------------------------+------------------------------+---------------------------+----------------------------+
|
|
1379
|
+
| Debug Configuration | save_graphs (D) | CPU/GPU/Ascend | MS_DEV_SAVE_GRAPHS |
|
|
1380
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1381
|
+
| | save_graphs_path (D) | CPU/GPU/Ascend | MS_DEV_SAVE_GRAPHS_PATH |
|
|
1382
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1383
|
+
| | deterministic (D) | Ascend |:func:`~.set_deterministic` |
|
|
1384
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1385
|
+
| | print_file_path | Ascend | NA |
|
|
1386
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1387
|
+
| | env_config_path | CPU/GPU/Ascend | NA |
|
|
1388
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1389
|
+
| | precompile_only (D) | CPU/GPU/Ascend | MS_DEV_PRECOMPILE_ONLY |
|
|
1390
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1391
|
+
| | reserve_class_name_in_scope | CPU/GPU/Ascend | NA |
|
|
1392
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1393
|
+
| | pynative_synchronize (D) | CPU/GPU/Ascend | :func:`~.launch_blocking` |
|
|
1394
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1395
|
+
| | debug_level (D) | CPU/GPU/Ascend | NA |
|
|
1396
|
+
+-------------------------+------------------------------+---------------------------+----------------------------+
|
|
1397
|
+
| Executive Control | mode | CPU/GPU/Ascend | NA |
|
|
1398
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1399
|
+
| | enable_reduce_precision | Ascend | NA |
|
|
1400
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1401
|
+
| | aoe_tune_mode (D) | Ascend | :func:`~.aoe_tune_mode` |
|
|
1402
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1403
|
+
| | aoe_config (D) | Ascend | :func:`~.aoe_job_type` |
|
|
1404
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1405
|
+
| | check_bprop (D) | CPU/GPU/Ascend | NA |
|
|
1406
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1407
|
+
| | max_call_depth (D) | CPU/GPU/Ascend | :func:`~.set_recur\ |
|
|
1408
|
+
| | | | sion_limit` |
|
|
1409
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1410
|
+
| | grad_for_scalar (D) | CPU/GPU/Ascend | derivative |
|
|
1411
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1412
|
+
| | enable_compile_cache (D) | CPU/GPU/Ascend | MS_COMPILER_CACHE_ENABLE |
|
|
1413
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1414
|
+
| | inter_op_parallel_num (D) | CPU/GPU/Ascend | :func:`~.dispatch\ |
|
|
1415
|
+
| | | | _threads_num` |
|
|
1416
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1417
|
+
| |runtime_num_threads (D) | CPU/GPU/Ascend | :func:`~.threads_num` |
|
|
1418
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1419
|
+
| | compile_cache_path | CPU/GPU/Ascend | NA |
|
|
1420
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1421
|
+
| | disable_format_transform | GPU | NA |
|
|
1422
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1423
|
+
| | support_binary | CPU/GPU/Ascend | NA |
|
|
1424
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1425
|
+
| | memory_optimize_level (D) | CPU/GPU/Ascend | :func:`~.set_memory` |
|
|
1426
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1427
|
+
| | memory_offload | GPU/Ascend | NA |
|
|
1428
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1429
|
+
| | ascend_config (D) | Ascend | :func:`~.precision_mode` |
|
|
1430
|
+
| | | | |
|
|
1431
|
+
| | | | :func:`~.op_precision_mode`|
|
|
1432
|
+
| | | | |
|
|
1433
|
+
| | | | :func:`~.matmul_allow_hf32`|
|
|
1434
|
+
| | | | |
|
|
1435
|
+
| | | | :func:`~.conv_allow_hf32` |
|
|
1436
|
+
| | | | |
|
|
1437
|
+
| | | | :func:`~.op_compile` |
|
|
1438
|
+
| | | | |
|
|
1439
|
+
| | | | :func:`~.debug_option` |
|
|
1440
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1441
|
+
| | jit_syntax_level | CPU/GPU/Ascend | NA |
|
|
1442
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1443
|
+
| | gpu_config (D) | GPU | :func:`~.conv_allow_tf32` |
|
|
1444
|
+
| | | | |
|
|
1445
|
+
| | | | :func:`~.matmul_allow_tf32`|
|
|
1446
|
+
| | | | |
|
|
1447
|
+
| | | | :func:`~.conv_fprop_algo` |
|
|
1448
|
+
| | | | |
|
|
1449
|
+
| | | | :func:`~.conv_wgrad_algo` |
|
|
1450
|
+
| | | | |
|
|
1451
|
+
| | | | :func:`~.conv_dgrad_algo` |
|
|
1452
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1453
|
+
| | jit_config | CPU/GPU/Ascend | NA |
|
|
1454
|
+
| +------------------------------+---------------------------+----------------------------+
|
|
1455
|
+
| | exec_order | Ascend | NA |
|
|
1456
|
+
+-------------------------+------------------------------+---------------------------+----------------------------+
|
|
1388
1457
|
|
|
1389
1458
|
Args:
|
|
1390
1459
|
device_id (int): ID of the target device, the value must be in [0, device_num_per_host-1],
|
|
1391
|
-
while device_num_per_host should be no more than 4096. Default: ``0`` .
|
|
1460
|
+
while device_num_per_host should be no more than 4096. Default: ``0`` . This parameter will be deprecated
|
|
1461
|
+
and will be removed in future versions.Please use api :func:`mindspore.set_device`
|
|
1462
|
+
with 'device_target' instead.
|
|
1392
1463
|
device_target (str): The target device to run, support "Ascend", "GPU", and "CPU".
|
|
1393
|
-
If device target is not set, the version of MindSpore package is used.
|
|
1464
|
+
If device target is not set, the version of MindSpore package is used. This parameter will be deprecated
|
|
1465
|
+
and will be removed in future versions.Please use api :func:`mindspore.set_device`
|
|
1466
|
+
with 'device_id' instead.
|
|
1394
1467
|
max_device_memory (str): Set the maximum memory available for devices. The format is "xxGB".
|
|
1395
1468
|
Default: ``" 1024GB"`` . The actual used memory size is the minimum of the available memory of the device
|
|
1396
1469
|
and max_device_memory. 'max_device_memory' should be set before the program runs. When virtual memory is
|
|
1397
1470
|
enabled, a too small 'max_device_memory' will cause frequent defragmentation, affecting performance.
|
|
1398
|
-
|
|
1399
|
-
|
|
1471
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1472
|
+
api :func:`mindspore.runtime.set_memory` instead.
|
|
1473
|
+
variable_memory_max_size (str): This parameter will be deprecated and will be removed in future versions. Please
|
|
1474
|
+
use the api :func:`mindspore.runtime.set_memory` instead.
|
|
1400
1475
|
mempool_block_size (str): It takes effect when virtual memory is turned off, set the size of the memory pool
|
|
1401
1476
|
block for devices. The format is "xxGB". Default: ``"1GB"`` . Minimum size is "1G". The actual used memory
|
|
1402
1477
|
block size is the minimum of the available memory of the device and mempool_block_size. When there is
|
|
1403
1478
|
enough memory, the memory will be expanded by this value.
|
|
1479
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1480
|
+
api :func:`mindspore.runtime.set_memory` instead.
|
|
1404
1481
|
op_timeout (int): Set the maximum duration of executing an operator in seconds.
|
|
1405
1482
|
If the execution time exceeds this value, system will terminate the task.
|
|
1406
1483
|
0 means endless wait. The defaults for AI Core and AICPU operators vary on different hardware.
|
|
@@ -1408,6 +1485,8 @@ def set_context(**kwargs):
|
|
|
1408
1485
|
please refer to `Ascend Community document about aclrtSetOpExecuteTimeOut
|
|
1409
1486
|
<https://www.hiascend.com/document/detail/en/CANNCommunityEdition/600alphaX/infacldevg/aclcppdevg/aclcppdevg_03_0069.html>`_.
|
|
1410
1487
|
Default: ``900`` .
|
|
1488
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1489
|
+
api :func:`mindspore.device_context.ascend.op_debug.execute_timeout` instead.
|
|
1411
1490
|
save_graphs (bool or int): Whether to save intermediate compilation graphs. Default: ``0`` .
|
|
1412
1491
|
Available values are:
|
|
1413
1492
|
|
|
@@ -1422,10 +1501,14 @@ def set_context(**kwargs):
|
|
|
1422
1501
|
When the `save_graphs` attribute is set as ``True`` , ``1`` , ``2`` or ``3`` , attribute of
|
|
1423
1502
|
`save_graphs_path` is used to set the intermediate compilation graph storage path. By default, the graphs
|
|
1424
1503
|
are saved in the current directory.
|
|
1504
|
+
This parameter will be deprecated and removed in a future version. Please use the environment variable
|
|
1505
|
+
`MS_DEV_SAVE_GRAPHS` instead.
|
|
1425
1506
|
save_graphs_path (str): Path to save graphs. Default: ``"."``.
|
|
1426
1507
|
If the specified directory does not exist, the system will automatically create the directory.
|
|
1427
1508
|
During distributed training, graphs will be saved to the directory of
|
|
1428
1509
|
`save_graphs_path/rank_${rank_id}/`. `rank_id` is the ID of the current device in the cluster.
|
|
1510
|
+
This parameter will be deprecated and removed in a future version. Please use the environment variable
|
|
1511
|
+
`MS_DEV_SAVE_GRAPHS_PATH` instead.
|
|
1429
1512
|
deterministic (str): Whether to enable op run in deterministic mode. The value must be in the
|
|
1430
1513
|
range of ['ON', 'OFF'], and the default value is ``'OFF'`` .
|
|
1431
1514
|
|
|
@@ -1435,96 +1518,37 @@ def set_context(**kwargs):
|
|
|
1435
1518
|
When deterministic mode is on, model ops will be deterministic in Ascend. This means that if op run
|
|
1436
1519
|
multiple times with the same inputs on the same hardware, it will have the exact same outputs each time.
|
|
1437
1520
|
This is useful for debugging models.
|
|
1438
|
-
|
|
1439
|
-
|
|
1440
|
-
|
|
1441
|
-
|
|
1442
|
-
|
|
1443
|
-
|
|
1444
|
-
|
|
1445
|
-
When print data to file, the total output bytes of single print must be less then 2GB(limited by
|
|
1446
|
-
protobuf).
|
|
1447
|
-
env_config_path (str): Config path for DFX.
|
|
1448
|
-
Through mindspore.set_context(env_config_path="./mindspore_config.json")
|
|
1449
|
-
|
|
1450
|
-
configure RDR:
|
|
1451
|
-
|
|
1452
|
-
- enable: controls whether the RDR is enabled to collect the key data during training and
|
|
1453
|
-
save key data in the fault scenario. When set to ``true`` , the RDR will be turned on.
|
|
1454
|
-
When set to ``false`` , the RDR will be turned off.
|
|
1455
|
-
- mode: sets the mode of RDR on exporting data. When set to ``1`` , the RDR only exports data
|
|
1456
|
-
in the fault scenario. When set to ``2`` , the RDR exports data in the fault scenario and the
|
|
1457
|
-
normal end scenario. Default: ``1`` .
|
|
1458
|
-
- path: sets the path where RDR saves data. The current path must be absolute.
|
|
1459
|
-
|
|
1460
|
-
Memory reuse:
|
|
1461
|
-
|
|
1462
|
-
- mem_Reuse: controls whether the memory reuse function is turned on. When set to ``True`` ,
|
|
1463
|
-
the memory reuse function is turned on. When set to ``False`` , the memory reuse function is turned off.
|
|
1521
|
+
In distributed scenario, we suggest user to set deterministic mode before
|
|
1522
|
+
calling :func:`mindspore.communication.init` to enable deterministic operation for
|
|
1523
|
+
communication operators in the global communication group.
|
|
1524
|
+
This parameter will be deprecated and will be removed in
|
|
1525
|
+
future versions. Please use the api :func:`mindspore.set_deterministic` instead.
|
|
1526
|
+
print_file_path (str): This parameter will be deprecated and will be removed in future versions.
|
|
1527
|
+
env_config_path (str): This parameter will be deprecated and will be removed in future versions.
|
|
1464
1528
|
|
|
1465
1529
|
precompile_only (bool): Whether to only precompile the network. Default: ``False`` .
|
|
1466
1530
|
If set to ``True`` , the network will only be compiled, not executed.
|
|
1467
|
-
|
|
1468
|
-
|
|
1469
|
-
|
|
1470
|
-
For example:
|
|
1471
|
-
|
|
1472
|
-
Default/net-Net1/net-Net2 (reserve_class_name_in_scope=True)
|
|
1473
|
-
|
|
1474
|
-
Default/net/net (reserve_class_name_in_scope=False)
|
|
1475
|
-
|
|
1531
|
+
This parameter will be deprecated and removed in a future version. Please use the environment variable
|
|
1532
|
+
`MS_DEV_PRECOMPILE_ONLY` instead.
|
|
1533
|
+
reserve_class_name_in_scope (bool): This parameter will be deprecated and will be removed in future versions.
|
|
1476
1534
|
pynative_synchronize (bool): Whether to enable synchronous execution of the device in PyNative mode.
|
|
1477
1535
|
Default: ``False`` . When the value is set to ``False`` , the operator is executed asynchronously on the
|
|
1478
1536
|
device. When an error occurs in the execution of the operator, the specific error script code location
|
|
1479
1537
|
cannot be located, when the value is set to ``True`` , the operator is executed synchronously on the
|
|
1480
1538
|
device. It will reduce the execution performance of the program. At this time, when an error occurs in the
|
|
1481
1539
|
execution of the operator, the location of the error script code can be located according to the call stack
|
|
1482
|
-
of the error.
|
|
1540
|
+
of the error. This parameter will be deprecated and will be removed in future versions.Please use
|
|
1541
|
+
the api :func:`mindspore.runtime.launch_blocking` instead.
|
|
1483
1542
|
mode (int): Running in GRAPH_MODE(0) or PYNATIVE_MODE(1).
|
|
1484
1543
|
Both modes support all backends. Default: ``PYNATIVE_MODE`` .
|
|
1485
|
-
enable_graph_kernel (bool): Whether to enable graph kernel fusion to optimize network execution performance.
|
|
1486
|
-
Default: ``False`` .
|
|
1487
|
-
Indicates whether to enable image-computing convergence to optimize network execution performance.
|
|
1488
|
-
If enable_graph_kernel is set to ``True`` , acceleration can be enabled.
|
|
1489
|
-
For details of graph kernel fusion, please check
|
|
1490
|
-
`Enabling Graph Kernel Fusion
|
|
1491
|
-
<https://www.mindspore.cn/docs/en/master/model_train/optimize/graph_fusion_engine.html>`_.
|
|
1492
|
-
graph_kernel_flags (str):
|
|
1493
|
-
Optimization options of graph kernel fusion, and the priority is higher when it conflicts
|
|
1494
|
-
with enable_graph_kernel. Only for experienced users.
|
|
1495
|
-
For example,
|
|
1496
|
-
|
|
1497
|
-
.. code-block::
|
|
1498
|
-
|
|
1499
|
-
mindspore.set_context(graph_kernel_flags="--opt_level=2 --dump_as_text")
|
|
1500
|
-
|
|
1501
|
-
Some general options:
|
|
1502
|
-
|
|
1503
|
-
- opt_level: Set the optimization level.
|
|
1504
|
-
Default: ``2`` . Graph kernel fusion can be enabled equivalently by setting opt_level greater than 0.
|
|
1505
|
-
Available values are:
|
|
1506
|
-
|
|
1507
|
-
- 0: disables graph kernel fusion;
|
|
1508
|
-
- 1: enables the basic fusion of operators;
|
|
1509
|
-
- 2: includes all optimizations of level 1,
|
|
1510
|
-
and turns on more optimizations such as CSE, arithmetic simplification and so on;
|
|
1511
|
-
- 3: includes all optimizations of level 2, and turns on more optimizations such as SitchingFusion,
|
|
1512
|
-
ParallelFusion and so on. Optimizations of this level are radical and unstable in some scenarios.
|
|
1513
|
-
Be caution when using this level.
|
|
1514
|
-
|
|
1515
|
-
- dump_as_text: dumps detail info as text files. Default: ``False`` .
|
|
1516
|
-
- enable_cluster_ops: Add user-specified operator to the set of operators involved in fusion. For example,
|
|
1517
|
-
by setting ``--enable_cluster_ops=MatMul``, MatMul operator can be included in the fusion process.
|
|
1518
|
-
- enable_pass/disable_pass: Enable/disable user-specified custom fusion passes. See details in
|
|
1519
|
-
`Custom Fusion Pass
|
|
1520
|
-
<https://www.mindspore.cn/docs/en/master/model_train/custom_program/fusion_pass.html>`_.
|
|
1521
|
-
|
|
1522
1544
|
enable_reduce_precision (bool): Whether to enable precision reduction.
|
|
1523
1545
|
If the operator does not support the user-specified precision, the precision will
|
|
1524
1546
|
be changed automatically. Default: ``True`` .
|
|
1525
1547
|
aoe_tune_mode (str): AOE tuning mode setting, which is not set by default.
|
|
1526
1548
|
When set to ``"online"`` , the tuning in online function is turned on.
|
|
1527
1549
|
When set to ``"offline"`` , ge graph will be save for offline tuning.
|
|
1550
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1551
|
+
api :func:`mindspore.device_context.ascend.op_tuning.aoe_tune_mode` instead.
|
|
1528
1552
|
aoe_config (dict): Set the parameters specific to Ascend Optimization Engine. It is not set by default.
|
|
1529
1553
|
|
|
1530
1554
|
- job_type (str): Mode type setting, default value is ``"2"``.
|
|
@@ -1532,34 +1556,48 @@ def set_context(**kwargs):
|
|
|
1532
1556
|
- ``"1"``: subgraph tuning;
|
|
1533
1557
|
- ``"2"``: operator tuning.
|
|
1534
1558
|
|
|
1559
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1560
|
+
api :func:`mindspore.device_context.ascend.op_tuning.aoe_job_type` instead.
|
|
1561
|
+
|
|
1535
1562
|
check_bprop (bool): Whether to check back propagation nodes. The checking ensures that the shape and dtype
|
|
1536
1563
|
of back propagation node outputs is the same as input parameters. Default: ``False`` .
|
|
1564
|
+
This parameter will be deprecated and removed in a future version.
|
|
1537
1565
|
max_call_depth (int): Specify the maximum depth of function call. Must be positive integer. Default: ``1000`` .
|
|
1538
1566
|
The max_call_depth parameter needs to be set when the nested call is too deep or the number
|
|
1539
1567
|
of subgraphs is too large. If max_call_depth is set larger than before, the system max stack depth should be
|
|
1540
1568
|
set larger too, otherwise a `core dumped` exception may be raised because of system stack overflow.
|
|
1569
|
+
This parameter will be deprecated and removed in a future version. Please use the api
|
|
1570
|
+
:func:`mindspore.set_recursion_limit` instead.
|
|
1541
1571
|
grad_for_scalar (bool): Whether to get gradient for scalar. Default: ``False`` .
|
|
1542
1572
|
When grad_for_scalar is set to ``True`` , the function's scalar input can be derived.
|
|
1543
1573
|
The default value is ``False`` . Because the back-end does not support scaling operations currently,
|
|
1544
1574
|
this interface only supports simple operations that can be deduced by the front-end.
|
|
1545
|
-
|
|
1546
|
-
|
|
1547
|
-
|
|
1548
|
-
|
|
1549
|
-
|
|
1550
|
-
|
|
1575
|
+
This parameter will be deprecated and removed in a future version. Please take the tensor derivative.
|
|
1576
|
+
enable_compile_cache (bool): Whether to save or load the compiled cache of the graph.
|
|
1577
|
+
After enable_compile_cache is set to ``True`` , during the first execution, a compilation cache is
|
|
1578
|
+
generated and exported to a MINDIR file. When the network is executed again, if enable_compile_cache is
|
|
1579
|
+
still set to ``True`` and the network scripts are not changed, the compile cache is loaded.
|
|
1580
|
+
Note that only limited automatic detection for the changes of python scripts is supported by now,
|
|
1581
|
+
which means that there is a correctness risk. Default: ``False`` .
|
|
1551
1582
|
Currently, do not support the graph which is larger than 2G after compiled.
|
|
1552
1583
|
This is an experimental prototype that is subject to change and/or deletion.
|
|
1584
|
+
This parameter will be deprecated and removed in a future version. Please use the environment variable
|
|
1585
|
+
`MS_COMPILER_CACHE_ENABLE` instead.
|
|
1553
1586
|
compile_cache_path (str): Path to save the compile cache. Default: ``"."``.
|
|
1554
1587
|
If the specified directory does not exist, the system will automatically create the directory.
|
|
1555
1588
|
The cache will be saved to the directory of `compile_cache_path/rank_${rank_id}/`. The `rank_id` is
|
|
1556
1589
|
the ID of the current device in the cluster.
|
|
1590
|
+
This parameter will be deprecated and removed in a future version. Please use the environment variable
|
|
1591
|
+
`MS_COMPILER_CACHE_PATH` instead.
|
|
1557
1592
|
inter_op_parallel_num(int): The thread number of op parallel at the same time. Default value is ``0`` ,
|
|
1558
|
-
which means use the default num.
|
|
1593
|
+
which means use the default num. This parameter will be deprecated and will be removed in future versions.
|
|
1594
|
+
Please use the api :func:`mindspore.runtime.dispatch_threads_num` instead.
|
|
1559
1595
|
runtime_num_threads(int): The thread pool number of cpu kernel used in runtime,
|
|
1560
1596
|
which must bigger than or equal to 0. Default value is ``30`` , if you run many processes at
|
|
1561
1597
|
the same time, you should set the value smaller to avoid thread contention. If set runtime_num_threads to 1,
|
|
1562
1598
|
the runtime asynchronous pipeline capability cannot be enabled, which may affect performance.
|
|
1599
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1600
|
+
api :func:`mindspore.device_context.cpu.op_tuning.threads_num` instead.
|
|
1563
1601
|
disable_format_transform (bool): Whether to disable the automatic format transform function from NCHW to NHWC.
|
|
1564
1602
|
When the network training performance of fp16 is worse than fp32, `disable_format_transform` can be set to
|
|
1565
1603
|
``True`` to try to improve training performance. Default: ``False`` .
|
|
@@ -1567,6 +1605,7 @@ def set_context(**kwargs):
|
|
|
1567
1605
|
in graph mode, coulde set 'support_binary' to be ``True`` , and run once .py file. It would save the source
|
|
1568
1606
|
of the interfaces would be compiled by MindSpore to the interfaces definition .py file that should be
|
|
1569
1607
|
guaranteed to be writable. Then compile the .py file to the .pyc or .so file, and could run in Graph mode.
|
|
1608
|
+
Currently, this config option only support stand_alone.
|
|
1570
1609
|
memory_optimize_level (str): The memory optimize level.
|
|
1571
1610
|
On the Ascend hardware platform, default: ``O1``; on other hardware platforms, default: ``O0``.
|
|
1572
1611
|
The value must be in ['O0', 'O1'].
|
|
@@ -1574,6 +1613,10 @@ def set_context(**kwargs):
|
|
|
1574
1613
|
- O0: prioritize performance; disable SOMAS (Safe Optimized Memory Allocation Solver)
|
|
1575
1614
|
and some other memory optimizations.
|
|
1576
1615
|
- O1: prioritize memory; enable SOMAS and some other memory optimizations.
|
|
1616
|
+
|
|
1617
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1618
|
+
api :func:`mindspore.runtime.set_memory` instead.
|
|
1619
|
+
|
|
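A sketch of migrating `memory_optimize_level` to :func:`mindspore.runtime.set_memory`; the keyword name `optimize_level` is assumed here, not confirmed by this diff:

from mindspore import runtime

# Deprecated style:
# ms.set_context(memory_optimize_level="O1")

# Recommended style (keyword name assumed):
runtime.set_memory(optimize_level="O1")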
1577
1620
|
memory_offload (str): Whether to enable the memory offload function. When it is enabled, the idle data will be
|
|
1578
1621
|
temporarily copied to the host side in the case of insufficient device memory. The value must be in the
|
|
1579
1622
|
range of ['ON', 'OFF'], and the default value is ``'OFF'`` .
|
|
@@ -1582,6 +1625,10 @@ def set_context(**kwargs):
|
|
|
1582
1625
|
when the graph compilation level is not 'O0'; this parameter does not take effect when
|
|
1583
1626
|
memory_optimize_level is set to 'O1'.
|
|
1584
1627
|
- OFF: Turn off the memory offload function.
|
|
1628
|
+
|
|
1629
|
+
This parameter is deprecated and will be removed in future versions. Please use the `device` parameter
|
|
1630
|
+
of `mindspore.Parameter` instead.
|
|
1631
|
+
|
|
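A hypothetical sketch of the `memory_offload` migration; the `device` argument of `mindspore.Parameter` is named in the note above, but the value ``"CPU"`` used here is an assumption:

import numpy as np
import mindspore as ms

# Deprecated style:
# ms.set_context(memory_offload="ON")

# Recommended style: keep the idle parameter on the host side (value assumed).
weight = ms.Parameter(ms.Tensor(np.zeros((1024, 1024), np.float32)), name="w", device="CPU")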
1585
1632
|
ascend_config (dict): Set the parameters specific to the Ascend hardware platform. It is not set by default.
|
|
1586
1633
|
The default values of `precision_mode`, `jit_compile` and
|
|
1587
1634
|
`atomic_clean_policy` are experimental and may change in the future.
|
|
@@ -1607,10 +1654,15 @@ def set_context(**kwargs):
|
|
|
1607
1654
|
- allow_mix_precision_bf16: Automatic mixed precision for operators across the whole network; according to
|
|
1608
1655
|
the built-in optimization strategy, the precision of some operators is automatically reduced to bfloat16.
|
|
1609
1656
|
|
|
1657
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1658
|
+
api :func:`mindspore.device_context.ascend.op_precision.precision_mode` instead.
|
|
1659
|
+
|
|
1610
1660
|
- jit_compile (bool): Whether to select online compilation. When set to 'True', online compilation is
|
|
1611
1661
|
prioritized. When set to 'False', compiled operator binary files are prioritized to improve compilation
|
|
1612
1662
|
performance. The default settings are online compilation for static shape, and compiled operator binary
|
|
1613
1663
|
files for dynamic shape.
|
|
1664
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1665
|
+
api :func:`mindspore.device_context.ascend.op_tuning.op_compile` instead.
|
|
1614
1666
|
- atomic_clean_policy (int): The policy for cleaning memory occupied by atomic operators in the network.
|
|
1615
1667
|
Default: ``1`` .
|
|
1616
1668
|
|
|
@@ -1621,9 +1673,13 @@ def set_context(**kwargs):
|
|
|
1621
1673
|
- matmul_allow_hf32 (bool): Whether to convert FP32 to HF32 for Matmul operators. Default value: ``False``.
|
|
1622
1674
|
This is an experimental prototype that is subject to change and/or deletion.
|
|
1623
1675
|
For detailed information, please refer to `Ascend community <https://www.hiascend.com/>`_ .
|
|
1676
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1677
|
+
api :func:`mindspore.device_context.ascend.op_precision.matmul_allow_hf32` instead.
|
|
1624
1678
|
- conv_allow_hf32 (bool): Whether to convert FP32 to HF32 for Conv operators. Default value: ``True``.
|
|
1625
1679
|
This is an experimental prototype that is subject to change and/or deletion.
|
|
1626
1680
|
For detailed information, please refer to `Ascend community <https://www.hiascend.com/>`_ .
|
|
1681
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1682
|
+
api :func:`mindspore.device_context.ascend.op_precision.conv_allow_hf32` instead.
|
|
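The four `ascend_config` entries above (`precision_mode`, `jit_compile`, `matmul_allow_hf32`, `conv_allow_hf32`) map onto the new `mindspore.device_context.ascend` interfaces. A sketch, assuming each function takes its value as a single argument:

from mindspore.device_context.ascend import op_precision, op_tuning

# Deprecated style:
# ms.set_context(ascend_config={"precision_mode": "allow_mix_precision_bf16",
#                               "jit_compile": True,
#                               "matmul_allow_hf32": False,
#                               "conv_allow_hf32": True})

# Recommended style (argument forms assumed):
op_precision.precision_mode("allow_mix_precision_bf16")
op_tuning.op_compile(True)              # replaces jit_compile
op_precision.matmul_allow_hf32(False)
op_precision.conv_allow_hf32(True)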
1627
1683
|
- exception_dump (str): Enable exception dump for Ascend operators, providing the input and output data for
|
|
1628
1684
|
failing Ascend operators. The value can be ``"0"`` , ``"1"`` or ``"2"``. For ``"0"`` , exception dump is
|
|
1629
1685
|
turned off; for ``"1"``, all inputs and outputs will be dumped for AICore exception operators;
|
|
@@ -1631,15 +1687,20 @@ def set_context(**kwargs):
|
|
|
1631
1687
|
but improving performance. Default: ``"2"`` .
|
|
1632
1688
|
- op_precision_mode (str): Path to the config file of op precision mode. For detailed information, please refer
|
|
1633
1689
|
to `Ascend community <https://www.hiascend.com/>`_ .
|
|
1690
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1691
|
+
api :func:`mindspore.device_context.ascend.op_precision.op_precision_mode` instead.
|
|
1634
1692
|
- op_debug_option (str): Enable debugging options for Ascend operators; not enabled by default.
|
|
1635
1693
|
The value currently only supports being set to ``"oom"``.
|
|
1636
1694
|
|
|
1637
1695
|
- ``"oom"``: When an out-of-bounds memory access occurs during the execution of an operator,
|
|
1638
1696
|
AscendCL will return an error code of ``EZ9999``.
|
|
1639
1697
|
|
|
1698
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1699
|
+
api :func:`mindspore.device_context.ascend.op_debug.debug_option` instead.
|
|
1700
|
+
|
|
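Similarly for `op_precision_mode` and `op_debug_option`; a sketch assuming each new function takes the documented value as its only argument, with a placeholder config-file path:

from mindspore.device_context.ascend import op_debug, op_precision

# Deprecated style:
# ms.set_context(ascend_config={"op_precision_mode": "./op_precision.ini",
#                               "op_debug_option": "oom"})

# Recommended style (argument forms assumed; the .ini path is a placeholder):
op_precision.op_precision_mode("./op_precision.ini")
op_debug.debug_option("oom")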
1640
1701
|
- ge_options (dict): Set options for CANN. The options are divided into two categories: global and session.
|
|
1641
1702
|
This is an experimental prototype that is subject to change and/or deletion.
|
|
1642
|
-
For detailed information, please refer to `Ascend community <https://www.hiascend.com/document/detail/zh/canncommercial/
|
|
1703
|
+
For detailed information, please refer to `Ascend community <https://www.hiascend.com/document/detail/zh/canncommercial/80RC3/apiref/ascendgraphapi/atlasgeapi_07_0146.html>`_ .
|
|
1643
1704
|
The configuration options in `ge_options` may duplicate options in `ascend_config`. If the
|
|
1644
1705
|
same configuration options are set in both `ascend_config` and `ge_options`, the one set in `ge_options`
|
|
1645
1706
|
shall prevail.
|
|
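A hypothetical sketch of the `ge_options` structure described above; ``"some.cann.option"`` is a placeholder key, not a real CANN option:

import mindspore as ms

ms.set_context(ascend_config={"ge_options": {
    "global": {"some.cann.option": "value"},   # options applied globally (placeholder key)
    "session": {},                             # options applied per session
}})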
@@ -1663,11 +1724,11 @@ def set_context(**kwargs):
|
|
|
1663
1724
|
Default: False.
|
|
1664
1725
|
- enable_grad_comm_opt (bool): Enable overlap between dx ops and data parallel communication ops if True.
|
|
1665
1726
|
Currently, does not support
|
|
1666
|
-
`
|
|
1727
|
+
`O2 <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.JitConfig.html>`_
|
|
1667
1728
|
Default: False.
|
|
1668
1729
|
- enable_opt_shard_comm_opt (bool): Enable overlap between forward ops
|
|
1669
1730
|
and optimizer parallel allgather communication if True. Currently, does not support
|
|
1670
|
-
`
|
|
1731
|
+
`O2 <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.JitConfig.html>`_
|
|
1671
1732
|
Default: False.
|
|
1672
1733
|
- compute_communicate_fusion_level (int): Enable the fusion between computation and communication.
|
|
1673
1734
|
Default: ``0``. Note: This function must be used with Ascend Training Solution 24.0.RC2 or later.
|
|
@@ -1700,6 +1761,10 @@ def set_context(**kwargs):
|
|
|
1700
1761
|
used in MoE parallel scenario. After splitting the input data, each slice of data is processed by the
|
|
1701
1762
|
MoE module, and then the branch results are concatenated. When the optimization is enabled,
|
|
1702
1763
|
communication and computation will be executed in parallel between branches. Default: ``False``.
|
|
1764
|
+
- enable_interleave_parallel_branch (bool): Enable communication computation parallel optimization
|
|
1765
|
+
for parallel branches with the ``parallel_branch`` attribute in the branch merge node. It is typically
|
|
1766
|
+
used in MoE parallel scenarios with routed and shared experts. When the optimization is enabled,
|
|
1767
|
+
communication and computation will be executed in parallel between branches. Default: ``False``.
|
|
1703
1768
|
- host_scheduling_max_threshold(int): The max threshold to control whether the dynamic shape process is
|
|
1704
1769
|
used when running the static graph; the default value is 0. When the number of operations in the static graph
|
|
1705
1770
|
is less than the max threshold, this graph will be executed in the dynamic shape process. In large model
|
|
@@ -1721,6 +1786,7 @@ def set_context(**kwargs):
|
|
|
1721
1786
|
compiling performance.
|
|
1722
1787
|
- ``DEBUG``: Used for debugging when errors occur; more information will be recorded during the compiling process.
|
|
1723
1788
|
|
|
1789
|
+
This parameter will be deprecated and removed in a future version.
|
|
1724
1790
|
gpu_config (dict): Set the parameters specific to the GPU hardware platform. It is not set by default.
|
|
1725
1791
|
Currently, only setting `conv_fprop_algo`, `conv_dgrad_algo`, `conv_wgrad_algo`, `conv_allow_tf32`
|
|
1726
1792
|
and `matmul_allow_tf32` is supported on the GPU hardware platform.
|
|
@@ -1749,6 +1815,10 @@ def set_context(**kwargs):
|
|
|
1749
1815
|
sized workspace is needed to store intermediate results.
|
|
1750
1816
|
- winograd_nonfused: This algorithm uses the Winograd Transform approach to compute the convolution. A
|
|
1751
1817
|
significant workspace may be needed to store intermediate results.
|
|
1818
|
+
|
|
1819
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1820
|
+
api :func:`mindspore.device_context.gpu.op_tuning.conv_fprop_algo` instead.
|
|
1821
|
+
|
|
1752
1822
|
- conv_dgrad_algo (str): Specifies the convolution data grad algorithm. The default value is 'normal'.
|
|
1753
1823
|
The value range is as follows:
|
|
1754
1824
|
|
|
@@ -1768,6 +1838,10 @@ def set_context(**kwargs):
|
|
|
1768
1838
|
sized workspace is needed to store intermediate results. The results are deterministic.
|
|
1769
1839
|
- winograd_nonfused: This algorithm uses the Winograd Transform approach to compute the convolution.
|
|
1770
1840
|
A significant workspace may be needed to store intermediate results. The results are deterministic.
|
|
1841
|
+
|
|
1842
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1843
|
+
api :func:`mindspore.device_context.gpu.op_tuning.conv_dgrad_algo` instead.
|
|
1844
|
+
|
|
1771
1845
|
- conv_wgrad_algo (str): Specifies the convolution filter grad algorithm. The default value is 'normal'.
|
|
1772
1846
|
The value range is as follows:
|
|
1773
1847
|
|
|
@@ -1787,10 +1861,18 @@ def set_context(**kwargs):
|
|
|
1787
1861
|
- fft_tiling: This algorithm uses the Fast-Fourier Transform approach but splits the inputs into tiles.
|
|
1788
1862
|
A significant memory workspace is needed to store intermediate results but less than fft for large size
|
|
1789
1863
|
images. The results are deterministic.
|
|
1864
|
+
|
|
1865
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1866
|
+
api :func:`mindspore.device_context.gpu.op_tuning.conv_wgrad_algo` instead.
|
|
1867
|
+
|
|
1790
1868
|
- conv_allow_tf32 (bool): This flag controls whether to allow Tensor Core TF32 computation on CUDNN, and the
|
|
1791
1869
|
default value is ``True``.
|
|
1870
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1871
|
+
api :func:`mindspore.device_context.gpu.op_precision.conv_allow_tf32` instead.
|
|
1792
1872
|
- matmul_allow_tf32 (bool): This flag controls whether to allow Tensor Core TF32 computation on CUBLAS, and the
|
|
1793
1873
|
default value is ``False``.
|
|
1874
|
+
This parameter will be deprecated and will be removed in future versions. Please use the
|
|
1875
|
+
api :func:`mindspore.device_context.gpu.op_precision.matmul_allow_tf32` instead.
|
|
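The GPU entries above migrate to `mindspore.device_context.gpu` in the same way. A sketch, assuming single-argument call forms; the algorithm values reuse 'performance' and 'normal' from the descriptions above:

from mindspore.device_context.gpu import op_precision, op_tuning

# Deprecated style:
# ms.set_context(gpu_config={"conv_fprop_algo": "performance", "conv_allow_tf32": True,
#                            "matmul_allow_tf32": True})

# Recommended style (argument forms assumed):
op_tuning.conv_fprop_algo("performance")
op_tuning.conv_dgrad_algo("normal")
op_tuning.conv_wgrad_algo("normal")
op_precision.conv_allow_tf32(True)
op_precision.matmul_allow_tf32(True)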
1794
1876
|
|
|
1795
1877
|
jit_config (dict): Set the global jit config for compilation, which takes effect in networks defined in Cell or jit
|
|
1796
1878
|
decorators. It is not set by default.
|
|
@@ -1813,17 +1895,14 @@ def set_context(**kwargs):
|
|
|
1813
1895
|
- ``"on"``: Enable infer mode to get better infer performance.
|
|
1814
1896
|
- ``"off"``: Disable infer mode and use forward for inference; performance is worse.
|
|
1815
1897
|
|
|
1816
|
-
exec_order (str): Set the sorting method for operator execution in GRAPH_MODE Currently, only
|
|
1817
|
-
methods are supported: bfs and
|
|
1898
|
+
exec_order (str): Set the sorting method for operator execution in GRAPH_MODE. Currently, only two sorting
|
|
1899
|
+
methods are supported: bfs and dfs, and the default method is bfs.
|
|
1818
1900
|
|
|
1819
1901
|
- ``"bfs"``: The default sorting method, breadth-first, with good communication masking and relatively good
|
|
1820
1902
|
performance.
|
|
1821
1903
|
- ``"dfs"``: An optional sorting method, depth-first sorting. The performance is relatively worse than that
|
|
1822
1904
|
of bfs execution order, but it occupies less memory. It is recommended to try dfs in scenarios where other
|
|
1823
1905
|
execution orders run out of memory (OOM).
|
|
1824
|
-
- ``"gpto"``: An optional sorting method. This method combines multiple execution orders and selects a
|
|
1825
|
-
method with relatively good performance. There may be some performance gains in scenarios with multiple
|
|
1826
|
-
replicas running in parallel.
|
|
1827
1906
|
|
|
1828
1907
|
Raises:
|
|
1829
1908
|
ValueError: If input key is not an attribute in context.
|
|
@@ -1836,8 +1915,6 @@ def set_context(**kwargs):
|
|
|
1836
1915
|
>>> ms.set_context(device_id=0)
|
|
1837
1916
|
>>> ms.set_context(save_graphs=True, save_graphs_path="./model.ms")
|
|
1838
1917
|
>>> ms.set_context(enable_reduce_precision=True)
|
|
1839
|
-
>>> ms.set_context(enable_graph_kernel=True)
|
|
1840
|
-
>>> ms.set_context(graph_kernel_flags="--opt_level=2 --dump_as_text")
|
|
1841
1918
|
>>> ms.set_context(reserve_class_name_in_scope=True)
|
|
1842
1919
|
>>> ms.set_context(variable_memory_max_size="6GB")
|
|
1843
1920
|
>>> ms.set_context(aoe_tune_mode="online")
|
|
@@ -1867,7 +1944,7 @@ def set_context(**kwargs):
|
|
|
1867
1944
|
>>> ms.set_context(gpu_config={"conv_fprop_algo": "performance", "conv_allow_tf32": True,
|
|
1868
1945
|
... "matmul_allow_tf32": True})
|
|
1869
1946
|
>>> ms.set_context(jit_config={"jit_level": "O0"})
|
|
1870
|
-
>>> ms.set_context(exec_order="
|
|
1947
|
+
>>> ms.set_context(exec_order="bfs")
|
|
1871
1948
|
"""
|
|
1872
1949
|
ctx = _context()
|
|
1873
1950
|
# set device target first
|
|
@@ -1877,14 +1954,22 @@ def set_context(**kwargs):
|
|
|
1877
1954
|
_check_ascend_device_context_initialized(device, kwargs)
|
|
1878
1955
|
|
|
1879
1956
|
for key, value in kwargs.items():
|
|
1957
|
+
_check_context_deprecated(key)
|
|
1880
1958
|
if key in ('enable_sparse', 'auto_tune_mode'):
|
|
1881
1959
|
logger.warning(f"For 'context.set_context', '{key}' parameter is deprecated, "
|
|
1882
1960
|
"and will be removed in the next version.")
|
|
1883
1961
|
continue
|
|
1884
|
-
if key in ('enable_auto_mixed_precision',
|
|
1962
|
+
if key in ('enable_auto_mixed_precision',):
|
|
1885
1963
|
logger.warning(f"For 'context.set_context', '{key}' parameter is deprecated. "
|
|
1886
1964
|
"For details, please see the interface parameter API comments")
|
|
1887
1965
|
continue
|
|
1966
|
+
if key == "print_file_path":
|
|
1967
|
+
logger.warning(f"For 'context.set_context', '{key}' parameter is deprecated due to changes in the behavior"
|
|
1968
|
+
f" of the print operator. Recommend not using this parameter and"
|
|
1969
|
+
f" directly viewing the screen output.")
|
|
1970
|
+
if key in ('reserve_class_name_in_scope', 'env_config_path'):
|
|
1971
|
+
logger.warning(f"For 'context.set_context', '{key}' parameter is deprecated, "
|
|
1972
|
+
"and will be removed in the next version.")
|
|
1888
1973
|
_check_key(key)
|
|
1889
1974
|
if key == 'save_graphs':
|
|
1890
1975
|
if value is True:
|
|
@@ -1903,6 +1988,14 @@ def set_context(**kwargs):
|
|
|
1903
1988
|
setattr(ctx, key, value)
|
|
1904
1989
|
ctx.set_param(ms_ctx_param.__members__[key], int(value))
|
|
1905
1990
|
continue
|
|
1991
|
+
if key == 'enable_graph_kernel':
|
|
1992
|
+
logger.warning(f"For 'context.set_context', '{key}' parameter is deprecated, "
|
|
1993
|
+
"and will be removed in the next version. "
|
|
1994
|
+
"Please use jit_config={'jit_level': 'O1'} instead.")
|
|
1995
|
+
if key == 'graph_kernel_flags':
|
|
1996
|
+
logger.warning(f"For 'context.set_context', '{key}' parameter is deprecated, "
|
|
1997
|
+
"and will be removed in the next version. "
|
|
1998
|
+
"Please use environ variable 'MS_DEV_GRAPH_KERNEL_FLAGS' instead.")
|
|
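A sketch of the replacements named in the two warnings above; the flag string passed through `MS_DEV_GRAPH_KERNEL_FLAGS` reuses the value from the removed doctest example and may need adjusting:

import os
import mindspore as ms

# Deprecated style:
# ms.set_context(enable_graph_kernel=True)
# ms.set_context(graph_kernel_flags="--opt_level=2")

# Recommended style:
ms.set_context(jit_config={"jit_level": "O1"})              # replaces enable_graph_kernel=True
os.environ["MS_DEV_GRAPH_KERNEL_FLAGS"] = "--opt_level=2"   # replaces graph_kernel_flags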
1906
1999
|
if not _check_target_specific_cfgs(device, key):
|
|
1907
2000
|
continue
|
|
1908
2001
|
if key in ctx.setters:
|
|
@@ -1920,6 +2013,7 @@ def set_context(**kwargs):
|
|
|
1920
2013
|
|
|
1921
2014
|
|
|
1922
2015
|
def get_context(attr_key):
|
|
2016
|
+
|
|
1923
2017
|
"""
|
|
1924
2018
|
Get context attribute value according to the input key.
|
|
1925
2019
|
If some attributes are not set, they will be automatically obtained.
|