mindspore 2.5.0-cp39-cp39-win_amd64.whl → 2.6.0rc1-cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +6 -4
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -33
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +19 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +24 -193
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +97 -74
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +4 -4
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +4 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -1
- mindspore/common/_stub_tensor.py +5 -10
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +1915 -3287
- mindspore/common/api.py +341 -354
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +5 -2
- mindspore/common/dump.py +7 -5
- mindspore/common/file_system.py +3 -0
- mindspore/common/hook_handle.py +5 -3
- mindspore/common/initializer.py +10 -6
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +2 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +106 -39
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +297 -714
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +47 -2
- mindspore/communication/comm_func.py +70 -53
- mindspore/communication/management.py +83 -17
- mindspore/context.py +214 -560
- mindspore/dataset/__init__.py +44 -20
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +102 -120
- mindspore/dataset/engine/datasets_audio.py +22 -22
- mindspore/dataset/engine/datasets_standard_format.py +43 -24
- mindspore/dataset/engine/datasets_text.py +78 -85
- mindspore/dataset/engine/datasets_user_defined.py +108 -76
- mindspore/dataset/engine/datasets_vision.py +111 -108
- mindspore/dataset/engine/iterators.py +5 -3
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/samplers.py +279 -57
- mindspore/dataset/engine/serializer_deserializer.py +2 -1
- mindspore/dataset/engine/validators.py +10 -0
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/device_context/ascend/op_debug.py +60 -1
- mindspore/device_context/ascend/op_tuning.py +0 -4
- mindspore/device_manager.py +39 -3
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +22 -26
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +4 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +40 -22
- mindspore/experimental/optim/radam.py +5 -5
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -81
- mindspore/hal/event.py +38 -55
- mindspore/hal/memory.py +93 -144
- mindspore/hal/stream.py +81 -125
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +40 -2
- mindspore/mindrecord/__init__.py +20 -7
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +131 -700
- mindspore/mint/distributed/__init__.py +5 -1
- mindspore/mint/distributed/distributed.py +194 -109
- mindspore/mint/linalg/__init__.py +2 -0
- mindspore/mint/nn/__init__.py +280 -18
- mindspore/mint/nn/functional.py +282 -64
- mindspore/mint/nn/layer/__init__.py +4 -0
- mindspore/mint/nn/layer/_functions.py +7 -3
- mindspore/mint/nn/layer/activation.py +120 -13
- mindspore/mint/nn/layer/conv.py +218 -24
- mindspore/mint/nn/layer/normalization.py +15 -16
- mindspore/mint/nn/layer/padding.py +1 -1
- mindspore/mint/nn/layer/pooling.py +66 -1
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1250 -176
- mindspore/nn/layer/activation.py +23 -21
- mindspore/nn/layer/basic.py +22 -16
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +22 -17
- mindspore/nn/layer/embedding.py +9 -8
- mindspore/nn/layer/normalization.py +48 -42
- mindspore/nn/layer/pooling.py +75 -31
- mindspore/nn/layer/transformer.py +11 -10
- mindspore/nn/learning_rate_schedule.py +4 -2
- mindspore/nn/loss/loss.py +27 -19
- mindspore/nn/optim/ada_grad.py +6 -5
- mindspore/nn/optim/adadelta.py +9 -7
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +16 -12
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +1 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +9 -7
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +178 -117
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +3 -3
- mindspore/numpy/array_creations.py +3 -3
- mindspore/numpy/array_ops.py +1 -1
- mindspore/numpy/math_ops.py +4 -4
- mindspore/numpy/utils.py +1 -2
- mindspore/numpy/utils_const.py +1 -2
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +3 -2
- mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
- mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
- mindspore/ops/_vmap/vmap_array_ops.py +7 -6
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
- mindspore/ops/_vmap/vmap_math_ops.py +4 -7
- mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +102 -49
- mindspore/ops/auto_generate/gen_extend_func.py +281 -135
- mindspore/ops/auto_generate/gen_ops_def.py +2574 -2326
- mindspore/ops/auto_generate/gen_ops_prim.py +8566 -2755
- mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +19 -24
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -3
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +28 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +1629 -2345
- mindspore/ops/function/clip_func.py +38 -45
- mindspore/ops/function/debug_func.py +36 -44
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +46 -78
- mindspore/ops/function/math_func.py +3035 -3705
- mindspore/ops/function/nn_func.py +676 -241
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +17 -30
- mindspore/ops/function/random_func.py +204 -361
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +5 -5
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +24 -17
- mindspore/ops/functional.py +6 -4
- mindspore/ops/functional_overload.py +547 -4
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +10 -5
- mindspore/ops/operations/_custom_ops_utils.py +247 -0
- mindspore/ops/operations/_grad_ops.py +1 -10
- mindspore/ops/operations/_inner_ops.py +5 -76
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +37 -22
- mindspore/ops/operations/comm_ops.py +150 -107
- mindspore/ops/operations/custom_ops.py +221 -23
- mindspore/ops/operations/debug_ops.py +115 -16
- mindspore/ops/operations/inner_ops.py +1 -1
- mindspore/ops/operations/linalg_ops.py +1 -58
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +746 -79
- mindspore/ops/operations/math_ops.py +21 -18
- mindspore/ops/operations/nn_ops.py +65 -191
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +43 -32
- mindspore/ops/tensor_method.py +232 -13
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
- mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
- mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
- mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
- mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
- mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
- mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
- mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
- mindspore/ops_generate/{template.py → common/template.py} +96 -84
- mindspore/ops_generate/gen_ops.py +23 -325
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
- mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -7
- mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
- mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
- mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
- mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
- mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
- mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
- mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
- mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
- mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
- mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
- mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
- mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
- mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
- mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +6 -2
- mindspore/parallel/_auto_parallel_context.py +133 -6
- mindspore/parallel/_cell_wrapper.py +130 -15
- mindspore/parallel/_parallel_serialization.py +95 -4
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +198 -25
- mindspore/parallel/algo_parameter_config.py +3 -3
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +656 -37
- mindspore/parallel/cluster/process_entity/_api.py +151 -19
- mindspore/parallel/cluster/run.py +1 -1
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +259 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +24 -13
- mindspore/parallel/shard.py +137 -61
- mindspore/parallel/transform_safetensors.py +287 -95
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +9 -5
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
- mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +22 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
- mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/common/constant.py +12 -0
- mindspore/profiler/common/msprof_cmd_tool.py +42 -23
- mindspore/profiler/common/path_manager.py +24 -0
- mindspore/profiler/common/profiler_context.py +26 -2
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_parameters.py +59 -18
- mindspore/profiler/common/profiler_path_manager.py +66 -7
- mindspore/profiler/dynamic_profiler.py +112 -79
- mindspore/profiler/envprofiler.py +26 -1
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +57 -14
- mindspore/profiler/platform/npu_profiler.py +33 -7
- mindspore/profiler/profiler.py +541 -45
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +4 -0
- mindspore/profiler/schedule.py +57 -22
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +25 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +2 -2
- mindspore/runtime/executor.py +40 -11
- mindspore/runtime/memory.py +25 -8
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +35 -7
- mindspore/train/amp.py +1 -1
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +24 -40
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_flops_collector.py +2 -3
- mindspore/train/callback/_history.py +7 -4
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +8 -13
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +179 -103
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +4 -5
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +8 -6
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +19 -12
- mindspore/train/model.py +176 -103
- mindspore/train/serialization.py +246 -988
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +4 -2
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +2 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +483 -438
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_constants.py +0 -190
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
- /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
mindspore/context.py
CHANGED
@@ -335,8 +335,8 @@ class _Context:
             default not enabled, only supports ``"oom"`` currently.
             ``"oom"``: Detect memory out of bounds.
         - ge_options (dict): Global or session CANN options.
-        - exception_dump (str):
-
+        - exception_dump (str): Has been deprecated since MindSpore 2.6. Please use
+          api :func:`mindspore.device_context.ascend.op_debug.aclinit_config` instead.
         - parallel_speed_up_json_path(Union[str, None]): The path to the parallel speed up json file.
           If its value is None or '', it does not take effect. Default None.
         - host_scheduling_max_threshold(int): The host scheduling max threshold.
@@ -370,7 +370,7 @@ class _Context:
             'atomic_clean_policy': self._get_ascend_config_setter('atomic_clean_policy', str),
             'matmul_allow_hf32': self._get_ascend_config_setter('matmul_allow_hf32', lambda v: "1" if v else "0"),
             'conv_allow_hf32': self._get_ascend_config_setter('conv_allow_hf32', lambda v: "1" if v else "0"),
-            'exception_dump':
+            'exception_dump': lambda x: x,
             'op_debug_option': self._set_op_debug_option,
             'op_precision_mode': self._set_op_precision_mode,
             'ge_options': self._set_ge_options,
@@ -383,11 +383,24 @@ class _Context:
             'hccl_watchdog': self._set_hccl_watchdog,
             'topo_order': self._set_topo_order
         }
+        invalid_context_dict = {
+            'exception_dump': {'version': '2.6', 'interface': 'device_context.ascend.op_debug.aclinit_config()'}
+        }
         ascend_cfg_set = tuple(ascend_cfg_modes.keys())
         for ascend_key, ascend_value in ascend_config.items():
             if ascend_key not in ascend_cfg_set:
                 raise ValueError(f"For 'context.set_context', the key of argument 'ascend_config' must be one of "
                                  f"{ascend_cfg_set}, but got {ascend_key}.")
+            if ascend_key in invalid_context_dict:
+                key = invalid_context_dict.get(ascend_key)
+                deprecated_version, new_interface = key.get('version'), key.get('interface')
+                log = (
+                    f"For 'ascend_config', the parameter '{ascend_key}' has been removed"
+                    f" since MindSpore {deprecated_version} version."
+                )
+                if new_interface:
+                    log += f" Please use the {new_interface} instead."
+                raise ValueError(log)
             supported_modes = ascend_cfg_modes.get(ascend_key)
             if isinstance(supported_modes, list) and ascend_value not in supported_modes:
                 raise ValueError(f"For 'ascend_config', the value of argument {ascend_key} must be one of "
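The removed `exception_dump` key now hard-fails with the ValueError built above. A minimal migration sketch follows; the `aclinit_config` call path comes from the new error message, while the exact payload (`{"dump": {"dump_scene": "lite_exception"}}`) is an assumption about the CANN aclinit dump options, not something this diff specifies:

    import mindspore as ms

    # Old (raises ValueError from MindSpore 2.6 onwards):
    # ms.set_context(ascend_config={"exception_dump": "2"})

    # New: route exception dumping through the op_debug module instead
    # (payload keys are assumed, see the note above).
    ms.device_context.ascend.op_debug.aclinit_config({"dump": {"dump_scene": "lite_exception"}})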
@@ -836,14 +849,22 @@ class _Context:
                                  f"'parallel_speed_up_json_path' is correct.")
         try:
             valid_option = {"recompute_comm_overlap": (ms_ctx_param.recompute_comm_overlap, bool),
+                            "recomputation_communication_overlap": (ms_ctx_param.recompute_comm_overlap, bool),
                             "matmul_grad_comm_overlap": (ms_ctx_param.matmul_grad_comm_overlap, bool),
+                            "grad_matmul_communication_overlap": (ms_ctx_param.matmul_grad_comm_overlap, bool),
                             "enable_task_opt": (ms_ctx_param.enable_task_opt, bool),
+                            "enable_communication_fusion": (ms_ctx_param.enable_task_opt, bool),
                             "enable_grad_comm_opt": (ms_ctx_param.enable_grad_comm_opt, bool),
+                            "grad_computation_allreduce_overlap": (ms_ctx_param.enable_grad_comm_opt, bool),
                             "recompute_allgather_overlap_fagrad":
                                 (ms_ctx_param.recompute_allgather_overlap_fagrad, bool),
+                            "grad_fa_allgather_overlap":
+                                (ms_ctx_param.recompute_allgather_overlap_fagrad, bool),
                             "interleaved_matmul_comm": (ms_ctx_param.interleaved_matmul_comm, bool),
                             "bias_add_comm_swap": (ms_ctx_param.bias_add_comm_swap, bool),
+                            "allreduce_and_biasadd_swap": (ms_ctx_param.bias_add_comm_swap, bool),
                             "enable_opt_shard_comm_opt": (ms_ctx_param.enable_opt_shard_comm_opt, bool),
+                            "computation_allgather_overlap": (ms_ctx_param.enable_opt_shard_comm_opt, bool),
                             "enable_begin_end_inline_opt": (ms_ctx_param.enable_begin_end_inline_opt, bool),
                             "enable_concat_eliminate_opt": (ms_ctx_param.enable_concat_eliminate_opt, bool),
                             "interleaved_layernorm_comm": (ms_ctx_param.interleaved_layernorm_comm, bool),
@@ -856,10 +877,24 @@ class _Context:
                             "enable_offloading_packed_experts": (ms_ctx_param.enable_offloading_packed_experts, bool),
                             "compute_communicate_fusion_level":
                                 (ms_ctx_param.compute_communicate_fusion_level, int),
+                            "computation_communication_fusion_level":
+                                (ms_ctx_param.compute_communicate_fusion_level, int),
                             "enable_flash_attention_load_balance":
                                 (ms_ctx_param.enable_flash_attention_load_balance, bool),
+                            "pp_1f1b_overlap":
+                                (ms_ctx_param.pp_1f1b_overlap, str),
                             "dataset_broadcast_opt_level":
                                 (ms_ctx_param.dataset_broadcast_opt_level, int)}
+            name_replace = {
+                "recompute_comm_overlap": "recomputation_communication_overlap",
+                "matmul_grad_comm_overlap": "grad_matmul_communication_overlap",
+                "recompute_allgather_overlap_fagrad": "grad_fa_allgather_overlap",
+                "enable_task_opt": "enable_communication_fusion",
+                "enable_grad_comm_opt": "grad_computation_allreduce_overlap",
+                "enable_opt_shard_comm_opt": "computation_allgather_overlap",
+                "compute_communicate_fusion_level": "computation_communication_fusion_level",
+                "dataset_broadcast_opt_level": "dataset_broadcast_opt_level",
+                "bias_add_comm_swap": "allreduce_and_biasadd_swap"}
             with open(speedup_config_real_path, 'r') as f:
                 speedup_config = json.load(f)
             for key, value in speedup_config.items():
@@ -867,10 +902,22 @@ class _Context:
                     raise TypeError("key {} is not a str".format(key))
                 if key not in valid_option:
                     raise ValueError("key {} should be one of {}.".format(key, valid_option.keys()))
+                if key in name_replace:
+                    logger.warning(f"For 'context.set_context', '{key}' parameter is deprecated, "
+                                   "and will be removed in the next version, "
+                                   f"Please use '{name_replace.get(key)}' instead.")
                 set_func, valid_type = valid_option.get(key)
                 if not isinstance(value, valid_type):
                     raise TypeError(f"The value type of {key} must be {valid_type}, "
                                     f"but got value is {value} and type is {type(value)}.")
+                if key == "pp_1f1b_overlap":
+                    values = value.split(",")
+                    for v in values:
+                        if v not in ['AlltoAll', 'AlltoAllV', 'MorphAllGather',
+                                     'AllGather', 'ReduceScatter', 'MorphReduceScatter']:
+                            raise ValueError("{} 's value should be subset of ['AlltoAll', 'AlltoAllV',"
+                                             " 'MorphAllGather', 'AllGather', 'ReduceScatter',"
+                                             " 'MorphReduceScatter'].".format(key))
                 self.set_param(set_func, value)
         except (TypeError, ValueError) as exo:
             raise ValueError(str(exo) + "\nFor 'context.set_context', "
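The two hunks above keep the old speed-up option names as accepted aliases while logging a deprecation warning that points to the new names. A small illustrative sketch (the file name and option values below are made up for the example; only the key names and the ascend_config entry come from this diff):

    # parallel_speed_up.json -- prefer the new key names introduced in 2.6:
    # {
    #     "recomputation_communication_overlap": true,
    #     "grad_matmul_communication_overlap": true,
    #     "computation_communication_fusion_level": 1
    # }
    import mindspore as ms

    # Point the context at the JSON file; old key names still work but emit the warning above.
    ms.set_context(ascend_config={"parallel_speed_up_json_path": "./parallel_speed_up.json"})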
@@ -916,12 +963,8 @@ def set_auto_parallel_context(**kwargs):
     Set auto parallel context, only data parallel supported on CPU.

     Note:
-
-
-        next task, interface :func:`mindspore.reset_auto_parallel_context` should be called to reset
-        the configuration.
-        Setting or changing parallel modes must be called before creating any Initializer, otherwise,
-        it may have RuntimeError when compiling the network.
+        Global parallel configuration. This interface will be deprecated in future versions, please use
+        the api :class:`mindspore.parallel.auto_parallel.AutoParallel` instead.

     Some configurations are parallel mode specific, see the below table for details:

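For orientation, a hedged sketch of the replacement flow that the new Note points to. Only the class path mindspore.parallel.auto_parallel.AutoParallel comes from this diff (see also mindspore/parallel/auto_parallel.py in the file listing); the constructor arguments shown are assumptions:

    import mindspore as ms
    from mindspore import nn
    from mindspore.parallel.auto_parallel import AutoParallel

    class Net(nn.Cell):
        """Tiny example cell standing in for a real model."""
        def __init__(self):
            super().__init__()
            self.dense = nn.Dense(16, 8)

        def construct(self, x):
            return self.dense(x)

    # Instead of ms.set_auto_parallel_context(parallel_mode="semi_auto_parallel", ...),
    # wrap the top-level cell with the new AutoParallel cell (arguments are assumed).
    parallel_net = AutoParallel(Net(), parallel_mode="semi_auto")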
@@ -971,13 +1014,15 @@ def set_auto_parallel_context(**kwargs):
                      - auto_parallel: Achieving parallelism automatically.
         search_mode (str): There are three kinds of shard strategy search modes: ``"recursive_programming"`` ,
                      ``"sharding_propagation"`` and ``"dynamic_programming"`` (Not recommended).
+                     Only works in ``"auto_parallel"`` mode.
                      Default: ``"recursive_programming"`` .

                      - recursive_programming: Recursive programming search mode. In order to obtain optimal performance,
                        it is recommended that users set the batch size to be greater than or equal to the product of
                        the number of devices and the number of multi-copy parallelism.

-                     - sharding_propagation: Propagate shardings from configured ops to non-configured ops.
+                     - sharding_propagation: Propagate shardings from configured ops to non-configured ops. Dynamic
+                       shapes are not supported currently.

                      - dynamic_programming: Dynamic programming search mode.
         auto_parallel_search_mode (str): This is the old version of 'search_mode'. Here, remaining this attribute is
@@ -1001,7 +1046,8 @@ def set_auto_parallel_context(**kwargs):
                      equal to full_batch=True. For execution mode is 'GRAPH_MODE' and dataset load into net by model
                      parallel strategy likes ds_stra ((1, 8), (1, 8)), it requires using
                      set_auto_parallel_context(dataset_strategy=ds_stra). The dataset sharding strategy is not
-                     affected by the currently configured parallel mode.
+                     affected by the currently configured parallel mode. parallel strategy also supports tuple of
+                     Layout.
         enable_parallel_optimizer (bool): This is a developing feature, which shards the weight update computation for
                      data parallel training in the benefit of time and memory saving. Currently, auto and semi auto
                      parallel mode support all optimizers in both Ascend and GPU. Data parallel mode only supports
@@ -1025,14 +1071,17 @@ def set_auto_parallel_context(**kwargs):

             - pipeline_interleave(bool): Indicates whether to enable the interleaved execution mode.
             - pipeline_scheduler(str): Indicates the scheduling mode for pipeline parallelism. Only support
-              ``gpipe/1f1b/seqpipe``.
+              ``gpipe/1f1b/seqpipe/seqvpp/seqsmartvpp``. When applying seqsmartvpp, the pipeline parallel
+              must be an even number.
         parallel_optimizer_config (dict): A dict contains the keys and values for setting the parallel optimizer
                      configure. The configure provides more detailed behavior control about parallel training
                      when parallel optimizer is enabled. The configure will be effective when we use
                      mindspore.set_auto_parallel_context(enable_parallel_optimizer=True).
                      It supports the following keys.

-                     - gradient_accumulation_shard(bool):
+                     - gradient_accumulation_shard(bool): Please using optimizer_level: ``level2`` to replace
+                       this config.
+                       If ``true`` , the accumulation gradient parameters will be
                        sharded across the data parallel devices. This will
                        introduce additional communication(ReduceScatter) at
                        each step when accumulate the gradients, but saves a
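A short usage sketch for the extended scheduler list, assuming the options are passed through the pipeline_config dict of set_auto_parallel_context as in existing 2.x examples (the stage count and chosen scheduler are illustrative):

    import mindspore as ms

    # "seqvpp" and "seqsmartvpp" join "gpipe", "1f1b" and "seqpipe"; per the docstring change,
    # "seqsmartvpp" additionally requires an even number of pipeline stages.
    ms.set_auto_parallel_context(
        pipeline_stages=4,
        pipeline_config={"pipeline_interleave": True, "pipeline_scheduler": "1f1b"})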
@@ -1043,7 +1092,8 @@ def set_auto_parallel_context(**kwargs):

                     - parallel_optimizer_threshold(int): Set the threshold of parallel optimizer. When parallel
                       optimizer is enabled, parameters with size smaller than this threshold will not be sharded
-                      across the devices. Parameter size
+                      across the devices. Parameter size is calculated as:
+                      shape[0] \* ... \* shape[n] \* size(dtype). Non-negative.
                       Unit: KB. Default: ``64`` .

                     - optimizer_weight_shard_size(int): Set the optimizer weight shard group size, if you want to
@@ -1054,6 +1104,17 @@ def set_auto_parallel_context(**kwargs):
                       communication group size will not take effect. Default value is ``-1`` , which means the
                       optimizer weight shard group size will be the size of data parallel group of each parameter.

+                    - optimizer_level(str, optional): optimizer_level configuration is used to specify
+                      the splitting level for optimizer sharding. It is important to note that the implementation
+                      of optimizer sharding in static graph is inconsistent with dynamic graph like megatron,
+                      but the memory optimization effect is the same. When optimizer_level= ``level1`` ,
+                      splitting is performed on weights and optimizer state. When optimizer_level= ``level2`` ,
+                      splitting is performed on weights, optimizer state, and gradients.
+                      When optimizer_level= ``level3`` , splitting is performed on weights, optimizer state,
+                      gradients, additionally, before the backward pass, the weights are further applied with
+                      allgather communication to release the memory used by the forward pass allgather.
+                      It must be one of [``level1``, ``level2``, ``level3``]. Default: ``level1``.
+
         comm_fusion (dict): A dict contains the types and configurations for setting the communication fusion. each
                      communication fusion config has two keys: "mode" and "config".
                      It supports following communication fusion types and configurations:
@@ -1128,7 +1189,7 @@ def set_auto_parallel_context(**kwargs):
         >>> ms.set_auto_parallel_context(pipeline_stages=2)
         >>> ms.set_auto_parallel_context(pipeline_stages=2, pipeline_result_broadcast=True)
         >>> parallel_config = {"gradient_accumulation_shard": True, "parallel_optimizer_threshold": 24,
-        ...                    "optimizer_weight_shard_size": 2}
+        ...                    "optimizer_weight_shard_size": 2, "optimizer_level": "level3"}
         >>> ms.set_auto_parallel_context(parallel_optimizer_config=parallel_config, enable_parallel_optimizer=True)
         >>> config = {"allreduce": {"mode": "size", "config": 32}, "allgather": {"mode": "size", "config": 32}}
         >>> ms.set_auto_parallel_context(comm_fusion=config)
@@ -1142,6 +1203,9 @@ def get_auto_parallel_context(attr_key):
     """
     Get auto parallel context attribute value according to the key.

+    Note:
+        This interface will be deprecated in future versions.
+
     Args:
         attr_key (str): The key of the attribute.

@@ -1161,7 +1225,8 @@ def get_auto_parallel_context(attr_key):

 def reset_auto_parallel_context():
     """
-    Reset auto parallel context attributes to the default values.
+    Reset auto parallel context attributes to the default values. This interface will be deprecated in future
+    versions, please use the api :class:`mindspore.parallel.auto_parallel.AutoParallel` instead.

     - device_num: 1.
     - global_rank: 0.
@@ -1278,7 +1343,7 @@ def _check_target_specific_cfgs(device, arg_key):
 def _check_ascend_device_context_initialized(device_target, settings):
     if device_target == 'Ascend' and is_initialized(device_target):
         for key, _ in settings.items():
-            if key in ('ascend_config', 'deterministic', 'jit_compile', '
+            if key in ('ascend_config', 'deterministic', 'jit_compile', 'device_id'):
                 logger.warning(f"For 'context.set_context' in Ascend backend, the backend is already initialized, "
                                "please set it before the definition of any Tensor and Parameter, and the "
                                "instantiation and execution of any operation and net, otherwise the settings may not "
@@ -1328,12 +1393,24 @@ def _check_context_deprecated(key):
                                   mindspore.device_context.gpu.op_precision.conv_dgrad_algo()''',
                               'runtime_num_threads': 'api mindspore.device_context.cpu.op_tuning.threads_num()',
                               'memory_offload': "`device` parameter of `mindspore.Parameter`"}
+    invalid_context_dict = {
+        'exception_dump': {'version': '2.6', 'interface': 'device_context.ascend.op_debug.aclinit_config()'}
+    }
     if key in deprecated_context_dict:
         log = f"For 'context.set_context', the parameter '{key}' will be deprecated and removed in a future version."
         if deprecated_context_dict.get(key) != '':
             log += f" Please use the {deprecated_context_dict.get(key)} instead."
         logger.warning(log)
-
+    if key in invalid_context_dict:
+        info = invalid_context_dict.get(key)
+        deprecated_version, new_interface = info.get('version'), info.get('interface')
+        log = (
+            f"For 'context.set_context', the parameter '{key}' has been removed"
+            f" since MindSpore {deprecated_version} version."
+        )
+        if new_interface:
+            log += f" Please use the {new_interface} instead."
+        raise ValueError(log)

 @args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=(bool, int),
                  save_graphs_path=str, aoe_tune_mode=str, aoe_config=dict,
@@ -1347,565 +1424,141 @@ def _check_context_deprecated(key):
|
|
|
1347
1424
|
jit_enable_inplace_ops=bool, gpu_config=dict, jit_config=dict, enable_compile_cache=bool)
|
|
1348
1425
|
def set_context(**kwargs):
|
|
1349
1426
|
r"""
|
|
1350
|
-
Set context for running environment
|
|
1351
|
-
|
|
1352
|
-
Context should be configured before running your program. If there is no configuration,
|
|
1353
|
-
it will be automatically set according to the device target by default.
|
|
1354
|
-
|
|
1355
|
-
Note:
|
|
1356
|
-
Attribute name is required for setting attributes.
|
|
1357
|
-
The mode is not recommended to be changed after net was initialized because the implementations of some
|
|
1358
|
-
operations are different in graph mode and pynative mode. Default: ``PYNATIVE_MODE`` .
|
|
1359
|
-
|
|
1360
|
-
Some configurations are device specific, and some parameters will be deprecated and removed in the future version
|
|
1361
|
-
(marked ``D`` in the second column), please use the replacement in the fourth column.
|
|
1362
|
-
see the below table for details:
|
|
1363
|
-
|
|
1364
|
-
+-------------------------+------------------------------+---------------------------+----------------------------+
|
|
1365
|
-
| Function Classification | Configuration Parameters | Hardware Platform Support| Replacement |
|
|
1366
|
-
+=========================+==============================+===========================+============================+
|
|
1367
|
-
| System Configuration | device_id (D) | CPU/GPU/Ascend | :func:`~.set_device` |
|
|
1368
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1369
|
-
| | device_target (D) | CPU/GPU/Ascend | :func:`~.set_device` |
|
|
1370
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1371
|
-
| | max_device_memory(D) | GPU/Ascend | :func:`~.set_memory` |
|
|
1372
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1373
|
-
| | variable_memory_max_size (D) | Ascend | :func:`~.set_memory` |
|
|
1374
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1375
|
-
| | mempool_block_size (D) | GPU/Ascend | :func:`~.set_memory` |
|
|
1376
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1377
|
-
| | op_timeout (D) | Ascend | :func:`~.execute_timeout` |
|
|
1378
|
-
+-------------------------+------------------------------+---------------------------+----------------------------+
|
|
1379
|
-
| Debug Configuration | save_graphs (D) | CPU/GPU/Ascend | MS_DEV_SAVE_GRAPHS |
|
|
1380
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1381
|
-
| | save_graphs_path (D) | CPU/GPU/Ascend | MS_DEV_SAVE_GRAPHS_PATH |
|
|
1382
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1383
|
-
| | deterministic (D) | Ascend |:func:`~.set_deterministic` |
|
|
1384
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1385
|
-
| | print_file_path | Ascend | NA |
|
|
1386
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1387
|
-
| | env_config_path | CPU/GPU/Ascend | NA |
|
|
1388
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1389
|
-
| | precompile_only (D) | CPU/GPU/Ascend | MS_DEV_PRECOMPILE_ONLY |
|
|
1390
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1391
|
-
| | reserve_class_name_in_scope | CPU/GPU/Ascend | NA |
|
|
1392
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1393
|
-
| | pynative_synchronize (D) | CPU/GPU/Ascend | :func:`~.launch_blocking` |
|
|
1394
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1395
|
-
| | debug_level (D) | CPU/GPU/Ascend | NA |
|
|
1396
|
-
+-------------------------+------------------------------+---------------------------+----------------------------+
|
|
1397
|
-
| Executive Control | mode | CPU/GPU/Ascend | NA |
|
|
1398
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1399
|
-
| | enable_reduce_precision | Ascend | NA |
|
|
1400
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1401
|
-
| | aoe_tune_mode (D) | Ascend | :func:`~.aoe_tune_mode` |
|
|
1402
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1403
|
-
| | aoe_config (D) | Ascend | :func:`~.aoe_job_type` |
|
|
1404
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1405
|
-
| | check_bprop (D) | CPU/GPU/Ascend | NA |
|
|
1406
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1407
|
-
| | max_call_depth (D) | CPU/GPU/Ascend | :func:`~.set_recur\ |
|
|
1408
|
-
| | | | sion_limit` |
|
|
1409
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1410
|
-
| | grad_for_scalar (D) | CPU/GPU/Ascend | derivative |
|
|
1411
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1412
|
-
| | enable_compile_cache (D) | CPU/GPU/Ascend | MS_COMPILER_CACHE_ENABLE |
|
|
1413
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1414
|
-
| | inter_op_parallel_num (D) | CPU/GPU/Ascend | :func:`~.dispatch\ |
|
|
1415
|
-
| | | | _threads_num` |
|
|
1416
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1417
|
-
| |runtime_num_threads (D) | CPU/GPU/Ascend | :func:`~.threads_num` |
|
|
1418
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1419
|
-
| | compile_cache_path | CPU/GPU/Ascend | NA |
|
|
1420
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1421
|
-
| | disable_format_transform | GPU | NA |
|
|
1422
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1423
|
-
| | support_binary | CPU/GPU/Ascend | NA |
|
|
1424
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1425
|
-
| | memory_optimize_level (D) | CPU/GPU/Ascend | :func:`~.set_memory` |
|
|
1426
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1427
|
-
| | memory_offload | GPU/Ascend | NA |
|
|
1428
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1429
|
-
| | ascend_config (D) | Ascend | :func:`~.precision_mode` |
|
|
1430
|
-
| | | | |
|
|
1431
|
-
| | | | :func:`~.op_precision_mode`|
|
|
1432
|
-
| | | | |
|
|
1433
|
-
| | | | :func:`~.matmul_allow_hf32`|
|
|
1434
|
-
| | | | |
|
|
1435
|
-
| | | | :func:`~.conv_allow_hf32` |
|
|
1436
|
-
| | | | |
|
|
1437
|
-
| | | | :func:`~.op_compile` |
|
|
1438
|
-
| | | | |
|
|
1439
|
-
| | | | :func:`~.debug_option` |
|
|
1440
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1441
|
-
| | jit_syntax_level | CPU/GPU/Ascend | NA |
|
|
1442
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1443
|
-
| | gpu_config (D) | GPU | :func:`~.conv_allow_tf32` |
|
|
1444
|
-
| | | | |
|
|
1445
|
-
| | | | :func:`~.matmul_allow_tf32`|
|
|
1446
|
-
| | | | |
|
|
1447
|
-
| | | | :func:`~.conv_fprop_algo` |
|
|
1448
|
-
| | | | |
|
|
1449
|
-
| | | | :func:`~.conv_wgrad_algo` |
|
|
1450
|
-
| | | | |
|
|
1451
|
-
| | | | :func:`~.conv_dgrad_algo` |
|
|
1452
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1453
|
-
| | jit_config | CPU/GPU/Ascend | NA |
|
|
1454
|
-
| +------------------------------+---------------------------+----------------------------+
|
|
1455
|
-
| | exec_order | Ascend | NA |
|
|
1456
|
-
+-------------------------+------------------------------+---------------------------+----------------------------+
|
|
1427
|
+
Set context for running environment, this interface will be deprecated in future versions, and its
|
|
1428
|
+
parameter-related functionalities will be provided through new APIs.
|
|
1457
1429
|
|
|
1458
1430
|
Args:
|
|
1459
|
-
|
|
1460
|
-
|
|
1461
|
-
and
|
|
1462
|
-
|
|
1463
|
-
|
|
1464
|
-
|
|
1465
|
-
|
|
1466
|
-
|
|
1467
|
-
|
|
1468
|
-
|
|
1469
|
-
|
|
1470
|
-
|
|
1471
|
-
|
|
1472
|
-
|
|
1473
|
-
|
|
1474
|
-
|
|
1475
|
-
|
|
1476
|
-
|
|
1477
|
-
|
|
1478
|
-
|
|
1479
|
-
This parameter will be deprecated and
|
|
1480
|
-
api :func:`mindspore.runtime.set_memory` instead.
|
|
1481
|
-
|
|
1482
|
-
|
|
1483
|
-
|
|
1484
|
-
|
|
1485
|
-
|
|
1486
|
-
|
|
1487
|
-
|
|
1488
|
-
This parameter will be deprecated and
|
|
1431
|
+
mode (int): GRAPH_MODE(0) or PYNATIVE_MODE(1). Default ``PYNATIVE_MODE`` .
|
|
1432
|
+
device_id (int): ID of the target device. Default ``0`` . This parameter will be deprecated
|
|
1433
|
+
and removed in future versions. Please use the api :func:`mindspore.set_device` instead.
|
|
1434
|
+
device_target (str): The target device to run, support ``"Ascend"``, ``"GPU"``, and ``"CPU"``. This parameter
|
|
1435
|
+
will be deprecated and removed in future versions. Please use the api :func:`mindspore.set_device` instead.
|
|
1436
|
+
deterministic (str): Deterministic computation of operators. Default ``"OFF"`` .
|
|
1437
|
+
This parameter will be deprecated and removed in future versions. Please use the api
|
|
1438
|
+
:func:`mindspore.set_deterministic` instead.
|
|
1439
|
+
max_call_depth (int): The maximum depth of function call. Default ``1000`` .
|
|
1440
|
+
This parameter will be deprecated and removed in a future version. Please use the api
|
|
1441
|
+
:func:`mindspore.set_recursion_limit` instead.
|
|
1442
|
+
variable_memory_max_size (str): This parameter will be deprecated and removed in future versions.
|
|
1443
|
+
Please use the api :func:`mindspore.runtime.set_memory` instead.
|
|
1444
|
+
mempool_block_size (str): Set the size of the memory pool block for devices. Default ``"1GB"`` .
|
|
1445
|
+
This parameter will be deprecated and removed in future versions. Please use
|
|
1446
|
+
the api :func:`mindspore.runtime.set_memory` instead.
|
|
1447
|
+
memory_optimize_level (str): The memory optimize level. Default ``"O0"``.
|
|
1448
|
+
This parameter will be deprecated and removed in future versions. Please use
|
|
1449
|
+
the api :func:`mindspore.runtime.set_memory` instead.
|
|
1450
|
+
max_device_memory (str): Set the maximum memory available for devices.
|
|
1451
|
+
Default ``"1024GB"`` . This parameter will be deprecated and removed in future versions. Please use
|
|
1452
|
+
the api :func:`mindspore.runtime.set_memory` instead.
|
|
1453
|
+
pynative_synchronize (bool): Whether to enable synchronous execution of the device in PyNative mode.
|
|
1454
|
+
Default ``False`` . This parameter will be deprecated and removed in future versions.Please use
|
|
1455
|
+
the api :func:`mindspore.runtime.launch_blocking` instead.
|
|
1456
|
+
compile_cache_path (str): Path to save the compile cache. Default ``"."``.
|
|
1457
|
+
This parameter will be deprecated and removed in a future version. Please use the environment variable
|
|
1458
|
+
`MS_COMPILER_CACHE_PATH` instead.
|
|
1459
|
+
inter_op_parallel_num(int): The thread number of op parallel at the same time.
|
|
1460
|
+
Default ``0`` . This parameter will be deprecated and removed in future versions.
|
|
1461
|
+
Please use the api :func:`mindspore.runtime.dispatch_threads_num` instead.
+ memory_offload (str): Whether to enable the memory offload function. Default ``"OFF"`` .
+ This parameter will be deprecated and removed in future versions. Please use the api
+ :func:`mindspore.nn.Cell.offload` instead.
+ disable_format_transform (bool): Whether to disable the automatic format transform function from NCHW
+ to NHWC. Default ``False`` . This parameter will be deprecated and removed in future versions. Please
+ use the related parameter of :func:`mindspore.jit` instead.
+ jit_syntax_level (int): Set JIT syntax support level. Default ``LAX`` . This parameter is deprecated
+ and removed in future versions. Please use the related parameter of :func:`mindspore.jit` instead.
+ jit_config (dict): Set the global jit config for compile. This parameter is deprecated
+ and removed in future versions. Please use the related parameter of :func:`mindspore.jit` instead.
+ exec_order (str): The sorting method for operator execution. This parameter is deprecated
+ and removed in future versions. Please use the related parameter of :func:`mindspore.jit` instead.
+ op_timeout (int): Set the maximum duration of executing an operator in seconds. Default ``900`` .
+ This parameter will be deprecated and removed in future versions. Please use the
  api :func:`mindspore.device_context.ascend.op_debug.execute_timeout` instead.
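For `op_timeout`, the replacement named above would be called roughly as follows (it is assumed here that the single argument is the timeout in seconds):

>>> import mindspore as ms
>>> # replaces set_context(op_timeout=900)
>>> ms.device_context.ascend.op_debug.execute_timeout(900)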
-
-
-
-
-
-
-
-
-
- If you need quick problem locating, you can switch to ``1`` first.
-
- When the `save_graphs` attribute is set as ``True`` , ``1`` , ``2`` or ``3`` , attribute of
- `save_graphs_path` is used to set the intermediate compilation graph storage path. By default, the graphs
- are saved in the current directory.
+ aoe_tune_mode (str): AOE tuning mode.
+ This parameter will be deprecated and removed in future versions. Please use the
+ api :func:`mindspore.device_context.ascend.op_tuning.aoe_tune_mode` instead.
+ aoe_config (dict): AOE-specific parameters. This parameter will be deprecated and removed in future
+ versions. Please use the api :func:`mindspore.device_context.ascend.op_tuning.aoe_job_type` instead.
+ runtime_num_threads(int): The thread pool number of cpu kernel used in runtime. Default ``30`` .
+ This parameter will be deprecated and removed in future versions. Please use the
+ api :func:`mindspore.device_context.cpu.op_tuning.threads_num` instead.
+ save_graphs (bool or int): Whether to save intermediate compilation graphs. Default ``0`` .
  This parameter will be deprecated and removed in a future version. Please use the environment variable
  `MS_DEV_SAVE_GRAPHS` instead.
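A sketch of the `runtime_num_threads` and `save_graphs` migrations; the environment-variable names come from the text above, while the integer argument and the variable values shown are illustrative assumptions:

>>> import os
>>> import mindspore as ms
>>> ms.device_context.cpu.op_tuning.threads_num(8)     # replaces set_context(runtime_num_threads=8)
>>> os.environ["MS_DEV_SAVE_GRAPHS"] = "1"             # replaces set_context(save_graphs=1)
>>> os.environ["MS_DEV_SAVE_GRAPHS_PATH"] = "./graphs" # replaces set_context(save_graphs_path="./graphs")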
- save_graphs_path (str): Path to save graphs. Default
- If the specified directory does not exist, the system will automatically create the directory.
- During distributed training, graphs will be saved to the directory of
- `save_graphs_path/rank_${rank_id}/`. `rank_id` is the ID of the current device in the cluster.
+ save_graphs_path (str): Path to save graphs. Default ``"."``.
  This parameter will be deprecated and removed in a future version. Please use the environment variable
  `MS_DEV_SAVE_GRAPHS_PATH` instead.
-
- range of ['ON', 'OFF'], and the default value is ``'OFF'`` .
-
- - "ON": Enable operator deterministic running mode.
- - "OFF": Disable operator deterministic running mode.
-
- When deterministic mode is on, model ops will be deterministic in Ascend. This means that if op run
- multiple times with the same inputs on the same hardware, it will have the exact same outputs each time.
- This is useful for debugging models.
- In distributed scenario, we suggest user to set deterministic mode before
- calling :func:`mindspore.communication.init` to enable deterministic operation for
- communication operators in the global communication group.
- This parameter will be deprecated and will be removed in
- future versions. Please use the api :func:`mindspore.set_deterministic` instead.
- print_file_path (str): This parameter will be deprecated and will be removed in future versions.
- env_config_path (str): This parameter will be deprecated and will be removed in future versions.
-
- precompile_only (bool): Whether to only precompile the network. Default: ``False`` .
- If set to ``True`` , the network will only be compiled, not executed.
+ precompile_only (bool): Whether to only precompile the network. Default ``False`` .
  This parameter will be deprecated and removed in a future version. Please use the environment variable
  `MS_DEV_PRECOMPILE_ONLY` instead.
- reserve_class_name_in_scope (bool): This parameter will be deprecated and will be removed in future versions.
- pynative_synchronize (bool): Whether to enable synchronous execution of the device in PyNative mode.
- Default: ``False`` . When the value is set to ``False`` , the operator is executed asynchronously on the
- device. When an error occurs in the execution of the operator, the specific error script code location
- cannot be located, when the value is set to ``True`` , the operator is executed synchronously on the
- device. It will reduce the execution performance of the program. At this time, when an error occurs in the
- execution of the operator, the location of the error script code can be located according to the call stack
- of the error. This parameter will be deprecated and will be removed in future versions.Please use
- the api :func:`mindspore.runtime.launch_blocking` instead.
- mode (int): Running in GRAPH_MODE(0) or PYNATIVE_MODE(1).
- Both modes support all backends. Default: ``PYNATIVE_MODE`` .
- enable_reduce_precision (bool): Whether to enable precision reduction.
- If the operator does not support the user-specified precision, the precision will
- be changed automatically. Default: ``True`` .
- aoe_tune_mode (str): AOE tuning mode setting, which is not set by default.
- When set to ``"online"`` , the tuning in online function is turned on.
- When set to ``"offline"`` , ge graph will be save for offline tuning.
- This parameter will be deprecated and will be removed in future versions. Please use the
- api :func:`mindspore.device_context.ascend.op_tuning.aoe_tune_mode` instead.
- aoe_config (dict): Set the parameters specific to Ascend Optimization Engine. It is not set by default.
-
- - job_type (str): Mode type setting, default value is ``"2"``.
-
- - ``"1"``: subgraph tuning;
- - ``"2"``: operator tuning.
-
- This parameter will be deprecated and will be removed in future versions. Please use the
- api :func:`mindspore.device_context.ascend.op_tuning.aoe_job_type` instead.
-
- check_bprop (bool): Whether to check back propagation nodes. The checking ensures that the shape and dtype
- of back propagation node outputs is the same as input parameters. Default: ``False`` .
- This parameter will be deprecated and removed in a future version.
- max_call_depth (int): Specify the maximum depth of function call. Must be positive integer. Default: ``1000`` .
- The max_call_depth parameter needs to be set when the nested call is too deep or the number
- of subgraphs is too large. If max_call_depth is set larger than before, the system max stack depth should be
- set larger too, otherwise a `core dumped` exception may be raised because of system stack overflow.
- This parameter will be deprecated and removed in a future version. Please use the api
- :func:`mindspore.set_recursion_limit` instead.
- grad_for_scalar (bool): Whether to get gradient for scalar. Default: ``False`` .
- When grad_for_scalar is set to ``True`` , the function's scalar input can be derived.
- The default value is ``False`` . Because the back-end does not support scaling operations currently,
- this interface only supports simple operations that can be deduced by the front-end.
- This parameter will be deprecated and removed in a future version. Please take the tensor derivative.
  enable_compile_cache (bool): Whether to save or load the compiled cache of the graph.
-
- generated and exported to a MINDIR file. When the network is executed again, if enable_compile_cache is
- still set to ``True`` and the network scripts are not changed, the compile cache is loaded.
- Note that only limited automatic detection for the changes of python scripts is supported by now,
- which means that there is a correctness risk. Default: ``False`` .
- Currently, do not support the graph which is larger than 2G after compiled.
- This is an experimental prototype that is subject to change and/or deletion.
+ Default ``False`` . This is an experimental prototype that is subject to change and/or deletion.
  This parameter will be deprecated and removed in a future version. Please use the environment variable
  `MS_COMPILER_CACHE_ENABLE` instead.
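Compile-cache control moves entirely to environment variables; the variable names come from the text above, and the values below are assumed:

>>> import os
>>> os.environ["MS_COMPILER_CACHE_ENABLE"] = "1"      # replaces set_context(enable_compile_cache=True); "1" assumed to enable
>>> os.environ["MS_COMPILER_CACHE_PATH"] = "./cache"  # replaces set_context(compile_cache_path="./cache")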
-
- If the specified directory does not exist, the system will automatically create the directory.
- The cache will be saved to the directory of `compile_cache_path/rank_${rank_id}/`. The `rank_id` is
- the ID of the current device in the cluster.
- This parameter will be deprecated and removed in a future version. Please use the environment variable
- `MS_COMPILER_CACHE_PATH` instead.
- inter_op_parallel_num(int): The thread number of op parallel at the same time. Default value is ``0`` ,
- which means use the default num. This parameter will be deprecated and will be removed in future versions.
- Please use the api :func:`mindspore.runtime.dispatch_threads_num` instead.
- runtime_num_threads(int): The thread pool number of cpu kernel used in runtime,
- which must bigger than or equal to 0. Default value is ``30`` , if you run many processes at
- the same time, you should set the value smaller to avoid thread contention. If set runtime_num_threads to 1,
- the runtime asynchronous pipeline capability cannot be enabled, which may affect performance.
- This parameter will be deprecated and will be removed in future versions. Please use the
- api :func:`mindspore.device_context.cpu.op_tuning.threads_num` instead.
- disable_format_transform (bool): Whether to disable the automatic format transform function from NCHW to NHWC.
- When the network training performance of fp16 is worse than fp32, `disable_format_transform` can be set to
- ``True`` to try to improve training performance. Default: ``False`` .
- support_binary (bool): Whether to support run .pyc or .so in graph mode. If want to support run .so or .pyc
- in graph mode, coulde set 'support_binary' to be ``True`` , and run once .py file. It would save the source
- of the interfaces would be compiled by MindSpore to the interfaces definition .py file that should be
- guaranteed to be writable. Then compile the .py file to the .pyc or .so file, and could run in Graph mode.
- Currently, this config option only support stand_alone.
- memory_optimize_level (str): The memory optimize level.
- On Ascend hardware platform, default: ``O1``, on other hardware platforms, default: ``O0``.
- The value must be in ['O0', 'O1'].
-
- - O0: priority performance option, disable SOMAS (Safe Optimized Memory Allocation Solver)
- and some other memory optimizations.
- - O1: priority memory option, enable SOMAS and some other memory optimizations.
-
- This parameter will be deprecated and will be removed in future versions. Please use the
- api :func:`mindspore.runtime.set_memory` instead.
-
- memory_offload (str): Whether to enable the memory offload function. When it is enabled, the idle data will be
- temporarily copied to the host side in the case of insufficient device memory. The value must be in the
- range of ['ON', 'OFF'], and the default value is ``'OFF'`` .
-
- - ON: Enable the memory Offload function. On Ascend hardware platform, this parameter does not take effect
- when the graph compilation level is not 'O0'; This parameter does not take effect when
- memory_optimize_level is set 'O1'.
- - OFF: Turn off the memory Offload function.
-
- This parameter is deprecated and will be removed in future versions. Please use the `device` parameter
- of `mindspore.Parameter` instead.
-
- ascend_config (dict): Set the parameters specific to Ascend hardware platform. It is not set by default.
- The default value of `precision_mode`, `jit_compile` and
- `atomic_clean_policy` are experimental parameters, may change in the future.
-
- - precision_mode (str): Mixed precision mode setting, and the default value of inference network
- is ``force_fp16`` . The value range is as follows:
-
- - force_fp16: When the operator supports both float16 and float32, select float16 directly.
- - allow_fp32_to_fp16: For cube operators, use the float16. For vector operators,
- prefer to keep the origin dtype, if the operator in model can support float32,
- it will keep original dtype, otherwise it will reduce to float16.
- - allow_mix_precision: Automatic mixing precision, facing the whole network operator, according
- to the built-in optimization strategy, automatically reduces the precision of some operators
- to float16 or bfloat16.
- - must_keep_origin_dtype: Keep the accuracy of the original drawing.
- - force_fp32: When the input of the matrix calculation operator is float16 and the output supports
- float16 and float32, output is forced to float32.
- - allow_fp32_to_bf16: For cube operators, use the bfloat16. For vector operators,
- prefer to keep the origin dtype, if the operator in model can support float32,
- it will keep original dtype, otherwise it will reduce to bfloat16.
- - allow_mix_precision_fp16: Automatic mixing precision, facing the whole network operator, automatically
- reduces the precision of some operators to float16 according to the built-in optimization strategy.
- - allow_mix_precision_bf16: Automatic mixing precision, facing the whole network operator, according to
- the built-in optimization strategy, automatically reduces the precision of some operators to bfloat16.
-
- This parameter will be deprecated and will be removed in future versions. Please use the
- api :func:`mindspore.device_context.ascend.op_precision.precision_mode` instead.
+ ascend_config (dict): Set the parameters specific to Ascend hardware platform.

- -
-
-
-
-
-
- -
-
-
- - 0: The memory occupied by all atomic operators in the network is cleaned centrally.
- - 1: Memory is not cleaned centrally and each atomic operator in the network is cleaned separately.
- When the memory of the network exceeds the limit, you may try this cleaning policy, but it may cause
- performance loss.
- - matmul_allow_hf32 (bool): Whether to convert FP32 to HF32 for Matmul operators. Default value: ``False``.
- This is an experimental prototype that is subject to change and/or deletion.
- For detailed information, please refer to `Ascend community <https://www.hiascend.com/>`_ .
- This parameter will be deprecated and will be removed in future versions. Please use the
+ - precision_mode (str): Mixed precision mode setting. Default ``"force_fp16"`` .
+ This parameter will be deprecated and removed in future versions. Please use the
+ api :func:`mindspore.device_context.ascend.op_precision.precision_mode` instead.
+ - jit_compile (bool): Whether to select online compilation. This parameter will be deprecated and removed
+ in future versions. Please use the api :func:`mindspore.device_context.ascend.op_tuning.op_compile`
+ instead.
+ - matmul_allow_hf32 (bool): Whether to convert FP32 to HF32 for Matmul operators. Default ``False``.
+ This parameter will be deprecated and removed in future versions. Please use the
  api :func:`mindspore.device_context.ascend.op_precision.matmul_allow_hf32` instead.
- - conv_allow_hf32 (bool): Whether to convert FP32 to HF32 for Conv operators. Default
- This
- For detailed information, please refer to `Ascend community <https://www.hiascend.com/>`_ .
- This parameter will be deprecated and will be removed in future versions. Please use the
+ - conv_allow_hf32 (bool): Whether to convert FP32 to HF32 for Conv operators. Default ``True``.
+ This parameter will be deprecated and removed in future versions. Please use the
  api :func:`mindspore.device_context.ascend.op_precision.conv_allow_hf32` instead.
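For the `ascend_config` precision entries, a minimal sketch of the per-option replacement calls (the boolean arguments are assumed to mirror the old dictionary values):

>>> import mindspore as ms
>>> # replaces set_context(ascend_config={"matmul_allow_hf32": False, "conv_allow_hf32": True})
>>> ms.device_context.ascend.op_precision.matmul_allow_hf32(False)
>>> ms.device_context.ascend.op_precision.conv_allow_hf32(True)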
- -
-
- turned off; for ``"1"``, all inputs and outputs will be dumped for AICore exception operators;
- for ``"2"``, inputs will be dumped for AICore exception operators, reducing the saved information
- but improving performance. Default: ``"2"`` .
- - op_precision_mode (str): Path to config file of op precision mode. For detailed information, please refer
- to `Ascend community <https://www.hiascend.com/>`_ .
- This parameter will be deprecated and will be removed in future versions. Please use the
+ - op_precision_mode (str): Path to config file of op precision mode.
+ This parameter will be deprecated and removed in future versions. Please use the
  api :func:`mindspore.device_context.ascend.op_precision.op_precision_mode` instead.
- - op_debug_option (str): Enable debugging options for Ascend operators
-
-
- - ``"oom"``: When there is a memory out of bounds during the execution of an operator,
- AscendCL will return an error code of ``EZ9999``.
-
- This parameter will be deprecated and will be removed in future versions. Please use the
+ - op_debug_option (str): Enable debugging options for Ascend operators.
+ This parameter will be deprecated and removed in future versions. Please use the
  api :func:`mindspore.device_context.ascend.op_debug.debug_option` instead.
-
-
-
-
-
-
-
-
-
- - session (dict): Set session options.
-
- - parallel_speed_up_json_path(Union[str, None]): The path to the parallel speed up json file, configuration
- can refer to `parallel_speed_up.json
- <https://gitee.com/mindspore/mindspore/blob/master/config/parallel_speed_up.json>`_ .
- If its value is None or '', it does not take effect. Default None.
-
- - recompute_comm_overlap (bool): Enable overlap between recompute ops and communication ops if True.
- Default: False.
- - matmul_grad_comm_overlap (bool): Enable overlap between dw matmul and
- tensor parallel communication ops if True. Default: False.
- - recompute_allgather_overlap_fagrad (bool): Enable overlap between duplicated allgather by recomputing
- in sequence parallel and flashattentionscoregrad ops if True. Default: False.
- - enable_task_opt (bool): Enable communication fusion to optimize the number of communication operator
- tasks if True.
- Default: False.
- - enable_grad_comm_opt (bool): Enable overlap between dx ops and data parallel communication ops if True.
- Currently, do not support
- `O2 <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.JitConfig.html>`_
- Default: False.
- - enable_opt_shard_comm_opt (bool): Enable overlap between forward ops
- and optimizer parallel allgather communication if True. Currently, do not support
- `O2 <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.JitConfig.html>`_
- Default: False.
- - compute_communicate_fusion_level (int): Enable the fusion between compute and communicate.
- Default: ``0``. Note: This function must be used with Ascend Training Solution 24.0.RC2 or later.
-
- - 0: Disable fusion.
-
- - 1: Apply fusion to forward nodes.
-
- - 2: Apply fusion to backward nodes.
-
- - 3: Apply fusion to all nodes.
- - dataset_broadcast_opt_level (int): Optimize the scenario that the dataset repeated reading. Only
- support O0/O1 jit level. It doesn't work in O2 mode. Default: ``0``.
-
- - 0: Disable this optimize.
-
- - 1: Optimize dataset reader between pipeline stage.
-
- - 2: Optimize dataset reader within pipeline stage.
-
- - 3: Optimize dataset reader with all scenes.
- - bias_add_comm_swap (bool): Enable node execution order swap communication operators and add operators
- if ``True``. Only 1-dimension bias node is supported. Default: ``False``.
- - enable_allreduce_slice_to_reducescatter (bool): Enable allreduce optimization. In the scenario where
- the batchmatmul model introduces allreduce in parallel, if the subsequent nodes are stridedslice
- operator with model parallel, allreduce will be optimized as reducescatter according to the identified
- patterns. Typical used in MoE module with groupwise alltoall. Default: ``False``.
- - enable_interleave_split_concat_branch (bool): Enable communication computation parallel optimization
- for branches formed by split and concat operators with ``enable_interleave`` attribute. It is typical
- used in MoE parallel scenario. After splitting the input data, each slice of data is processed by the
- MoE module, and then the branch results are concatenated. When the optimization is enable,
- communication and computation will be executed in parallel between branches. Default: ``False``.
- - enable_interleave_parallel_branch (bool): Enable communication computation parallel optimization
- for parallel branches with ``parallel_branch`` attribute in branches merge node. It is typical
- used in MoE parallel scenario with routed and shared expert. When the optimization is enable,
- communication and computation will be executed in parallel between branches. Default: ``False``.
+ - ge_options (dict): Set options for CANN. This parameter will be deprecated and removed in future versions.
+ Please use the related parameter of :func:`mindspore.jit` instead.
+ - atomic_clean_policy (int): The policy for cleaning memory occupied by atomic operators in the network.
+ Default ``1`` represents that memory is not cleaned centrally, ``0`` represents that memory is cleaned
+ centrally. This parameter will be deprecated and removed in future versions. Please
+ use the related parameter of :func:`mindspore.jit` instead.
+ - exception_dump (str): Enable Ascend operator exception dump. Default ``"2"`` . This parameter has been
+ deprecated and removed. Please use the api
+ :func:`mindspore.device_context.ascend.op_debug.aclinit_config` instead.
  - host_scheduling_max_threshold(int): The max threshold to control whether the dynamic shape process is
- used when run the static graph
-
-
-
-
-
-
-
- - ``STRICT`` : Only basic syntax is supported, and execution performance is optimal. Can be used for MindIR
- load and export.
- - ``LAX`` : Compatible with all Python syntax as much as possible. However, execution performance may be
- affected and not optimal. Cannot be used for MindIR load and export due to some syntax that may not be
- able to be exported.
-
- debug_level (int): Set config for debugging. Default value: ``RELEASE``.
-
- - ``RELEASE``: Used for normally running, and some debug information will be discard to get a better
- compiling performance.
- - ``DEBUG``: Used for debugging when errors occur, more information will be record in compiling process.
-
- This parameter will be deprecated and removed in a future version.
+ used when run the static graph. Default ``0`` . This parameter will be deprecated and removed in future
+ versions. Please use the related parameter of :func:`mindspore.jit` instead.
+ - parallel_speed_up_json_path(Union[str, None]): The path to the parallel speed up json file.
+ This parameter will be deprecated and removed in future versions. Please use the
+ api :func:`mindspore.parallel.auto_parallel.AutoParallel.transformer_opt` instead.
+ - hccl_watchdog (bool): Enable a thread to monitor the failure of collective communication.
+ Default ``True`` .
  gpu_config (dict): Set the parameters specific to gpu hardware platform. It is not set by default.
- Currently, only setting `conv_fprop_algo` and `conv_dgrad_algo` and `conv_wgrad_algo` and `conv_allow_tf32`
- and `matmul_allow_tf32` are supported on GPU hardware platform.
-
- - conv_fprop_algo (str): Specifies convolution forward algorithm and the default value is 'normal',
- The value range is as follows:
-
- - normal: Use the heuristic search algorithm.
- - performance: Use the trial search algorithm.
- - implicit_gemm: This algorithm expresses the convolution as a matrix product without actually explicitly
- forming the matrix that holds the input tensor data.
- - implicit_precomp_gemm: This algorithm expresses convolution as a matrix product without actually
- explicitly forming the matrix that holds the input tensor data, but still needs some memory workspace to
- precompute some indices in order to facilitate the implicit construction of the matrix that holds the
- input tensor data.
- - gemm: This algorithm expresses the convolution as an explicit matrix product. A significant memory
- workspace is needed to store the matrix that holds the input tensor data.
- - direct: This algorithm expresses the convolution as a direct convolution (for example, without
- implicitly or explicitly doing a matrix multiplication).
- - fft: This algorithm uses the Fast-Fourier Transform approach to compute the convolution. A significant
- memory workspace is needed to store intermediate results.
- - fft_tiling: This algorithm uses the Fast-Fourier Transform approach but splits the inputs into tiles.
- A significant memory workspace is needed to store intermediate results but less than fft algorithm for
- large size images.
- - winograd: This algorithm uses the Winograd Transform approach to compute the convolution. A reasonably
- sized workspace is needed to store intermediate results.
- - winograd_nonfused: This algorithm uses the Winograd Transform approach to compute the convolution. A
- significant workspace may be needed to store intermediate results.
-
- This parameter will be deprecated and will be removed in future versions. Please use the
- api :func:`mindspore.device_context.gpu.op_tuning.conv_fprop_algo` instead.

- -
-
-
-
-
- - algo_0: This algorithm expresses the convolution as a sum of matrix products without actually explicitly
- forming the matrix that holds the input tensor data. The sum is done using the atomic add operation,
- thus the results are non-deterministic.
- - algo_1: This algorithm expresses the convolution as a matrix product without actually explicitly forming
- the matrix that holds the input tensor data. The results are deterministic.
- - fft: This algorithm uses a Fast-Fourier Transform approach to compute the convolution. A significant
- memory workspace is needed to store intermediate results. The results are deterministic.
- - fft_tiling: This algorithm uses the Fast-Fourier Transform approach but splits the inputs into tiles.
- A significant memory workspace is needed to store intermediate results but less than fft for large size
- images. The results are deterministic.
- - winograd: This algorithm uses the Winograd Transform approach to compute the convolution. A reasonably
- sized workspace is needed to store intermediate results. The results are deterministic.
- - winograd_nonfused: This algorithm uses the Winograd Transform approach to compute the convolution.
- A significant workspace may be needed to store intermediate results. The results are deterministic.
-
- This parameter will be deprecated and will be removed in future versions. Please use the
+ - conv_fprop_algo (str): Specifies convolution forward algorithm. Default ``"normal"`` .
+ This parameter will be deprecated and removed in future versions. Please use the
+ api :func:`mindspore.device_context.gpu.op_tuning.conv_fprop_algo` instead.
+ - conv_dgrad_algo (str): Specifies convolution data grad algorithm. Default ``"normal"`` .
+ This parameter will be deprecated and removed in future versions. Please use the
  api :func:`mindspore.device_context.gpu.op_tuning.conv_dgrad_algo` instead.
-
-
- The value range is as follows:
-
- - normal: Use the heuristic search algorithm.
- - performance: Use the trial search algorithm.
- - algo_0: This algorithm expresses the convolution as a sum of matrix products without actually explicitly
- forming the matrix that holds the input tensor data. The sum is done using the atomic add operation,
- thus the results are non-deterministic.
- - algo_1: This algorithm expresses the convolution as a matrix product without actually explicitly forming
- the matrix that holds the input tensor data. The results are deterministic.
- - fft: This algorithm uses a Fast-Fourier Transform approach to compute the convolution. A significant
- memory workspace is needed to store intermediate results. The results are deterministic.
- - algo_3: This algorithm is similar to algo_0 but uses some small workspace to precompute some indices.
- The results are also non-deterministic.
- - winograd_nonfused: This algorithm uses the Winograd Transform approach to compute the convolution.
- A significant workspace may be needed to store intermediate results. The results are deterministic.
- - fft_tiling: This algorithm uses the Fast-Fourier Transform approach but splits the inputs into tiles.
- A significant memory workspace is needed to store intermediate results but less than fft for large size
- images. The results are deterministic.
-
- This parameter will be deprecated and will be removed in future versions. Please use the
+ - conv_wgrad_algo (str): Specifies convolution filter grad algorithm. Default ``"normal"`` .
+ This parameter will be deprecated and removed in future versions. Please use the
  api :func:`mindspore.device_context.gpu.op_tuning.conv_wgrad_algo` instead.
-
-
-
- This parameter will be deprecated and will be removed in future versions. Please use the
+ - conv_allow_tf32 (bool): Controls to allow Tensor core TF32 computation on CUDNN.
+ Default ``True``.
+ This parameter will be deprecated and removed in future versions. Please use the
  api :func:`mindspore.device_context.gpu.op_precision.conv_allow_tf32` instead.
- - matmul_allow_tf32 (bool):
-
- This parameter will be deprecated and
+ - matmul_allow_tf32 (bool): Controls to allow Tensor core TF32 computation on CUBLAS.
+ Default ``False``.
+ This parameter will be deprecated and removed in future versions. Please use the
  api :func:`mindspore.device_context.gpu.op_precision.matmul_allow_tf32` instead.
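Likewise for `gpu_config`, a sketch of the per-option replacements (the string and boolean arguments are assumed to mirror the old dictionary values):

>>> import mindspore as ms
>>> # replaces set_context(gpu_config={"conv_fprop_algo": "performance", "matmul_allow_tf32": True})
>>> ms.device_context.gpu.op_tuning.conv_fprop_algo("performance")
>>> ms.device_context.gpu.op_precision.matmul_allow_tf32(True)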
-
-
-
-
-
-
-
-
- products are O0. In addition, The option of the dynamic shape must be O0 or O1, O2 is not supported.
- The value range is as follows:
-
- - ``"O0"``: Except for optimizations that may affect functionality, all other optimizations are turned
- off, adopt KernelByKernel execution mode.
- - ``"O1"``: Using commonly used optimizations and automatic operator fusion optimizations,
- adopt KernelByKernel execution mode. This optimization level is experimental and is being improved.
- - ``"O2"``: Ultimate performance optimization, adopt Sink execution mode.
-
- - infer_boost (str): Used to control the infer mode. Default: ``"off"`` . The value range is as follows:
-
- - ``"on"``: Enable infer mode, get better infer performance.
- - ``"off"``: Disable infer mode, use forward to infer, performance is not good.
-
- exec_order (str): Set the sorting method for operator execution in GRAPH_MODE Currently, only two sorting
- methods are supported: bfs and dfs, and the default method is bfs.
-
- - ``"bfs"``: The default sorting method, breadth priority, good communication masking, relatively good
- performance.
- - ``"dfs"``: An optional sorting method, depth-first sorting. The performance is relatively worse than that
- of bfs execution order, but it occupies less memory. It is recommended to try dfs in scenarios where other
- execution orders run out of memory (OOM).
-
- Raises:
- ValueError: If input key is not an attribute in context.
+ print_file_path (str): This parameter will be deprecated and removed in future versions.
+ env_config_path (str): This parameter will be deprecated and removed in future versions.
+ debug_level (int): This parameter will be deprecated and removed in future versions.
+ reserve_class_name_in_scope (bool): This parameter will be deprecated and removed in future versions.
+ check_bprop (bool): This parameter will be deprecated and removed in future versions.
+ enable_reduce_precision (bool): This parameter will be deprecated and removed in future versions.
+ grad_for_scalar (bool): This parameter will be deprecated and removed in future versions.
+ support_binary (bool): Whether to support run .pyc or .so in graph mode.

  Examples:
  >>> import mindspore as ms

@@ -2113,7 +1766,7 @@ def set_ps_context(**kwargs):
  enable_ps (bool): Whether to enable parameter server training mode.
  Only after enable_ps is set True, the environment variables will be effective.
  Default: ``False`` .
- config_file_path (
+ config_file_path (str): Configuration file path used by recovery, parameter server training mode only
  supports Server disaster recovery currently. Default: ``''`` .
  scheduler_manage_port (int): Scheduler manage port used to scale out/in. Default: ``11202`` .
  enable_ssl (bool): Set PS SSL mode enabled or disabled. Default: ``False`` .
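A short usage sketch of the parameter-server helpers documented in this hunk:

>>> import mindspore as ms
>>> ms.set_ps_context(enable_ps=True)   # enable parameter server training mode
>>> ms.get_ps_context("enable_ps")      # read the flag back
True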
@@ -2137,14 +1790,15 @@ def get_ps_context(attr_key):
  Args:
  attr_key (str): The key of the attribute:

- - enable_ps (bool): Whether to enable parameter server training mode. Default: ``False`` .
- - config_file_path (
+ - enable_ps (bool, optional): Whether to enable parameter server training mode. Default: ``False`` .
+ - config_file_path (str, optional): Configuration file path used by recovery,
+ parameter server training mode only
  supports Server disaster recovery currently. Default: ``''`` .
- - scheduler_manage_port (int): Scheduler manage port used to scale out/in. Default: ``11202`` .
- - enable_ssl (bool): Set PS SSL mode enabled or disabled. Default: ``False`` .
- - client_password (str): Password to decrypt the secret key stored in the client certificate.
+ - scheduler_manage_port (int, optional): Scheduler manage port used to scale out/in. Default: ``11202`` .
+ - enable_ssl (bool, optional): Set PS SSL mode enabled or disabled. Default: ``False`` .
+ - client_password (str, optional): Password to decrypt the secret key stored in the client certificate.
  Default: ``''`` .
- - server_password (str): Password to decrypt the secret key stored in the server certificate.
+ - server_password (str, optional): Password to decrypt the secret key stored in the server certificate.
  Default: ``''`` .

  Returns: