mindspore 2.4.10__cp311-cp311-win_amd64.whl → 2.6.0rc1__cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +13 -6
- mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -38
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +83 -0
- mindspore/_extends/parse/deprecated/__init__.py +0 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +394 -0
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +46 -197
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +217 -98
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +17 -12
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +11 -5
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +138 -43
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +6 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -2
- mindspore/common/_stub_tensor.py +30 -14
- mindspore/common/_tensor_cpp_method.py +17 -0
- mindspore/common/_tensor_docs.py +4760 -0
- mindspore/common/api.py +435 -371
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +39 -36
- mindspore/common/dump.py +9 -6
- mindspore/common/file_system.py +9 -1
- mindspore/common/generator.py +2 -0
- mindspore/common/hook_handle.py +6 -2
- mindspore/common/initializer.py +13 -10
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +9 -3
- mindspore/common/mindir_util.py +10 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +135 -52
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +951 -1992
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +52 -2
- mindspore/communication/comm_func.py +240 -181
- mindspore/communication/management.py +95 -26
- mindspore/context.py +314 -566
- mindspore/dataset/__init__.py +65 -37
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/callback/ds_callback.py +2 -1
- mindspore/dataset/core/config.py +87 -6
- mindspore/dataset/engine/cache_admin.py +3 -3
- mindspore/dataset/engine/cache_client.py +6 -5
- mindspore/dataset/engine/datasets.py +292 -267
- mindspore/dataset/engine/datasets_audio.py +22 -8
- mindspore/dataset/engine/datasets_standard_format.py +46 -27
- mindspore/dataset/engine/datasets_text.py +78 -48
- mindspore/dataset/engine/datasets_user_defined.py +182 -116
- mindspore/dataset/engine/datasets_vision.py +120 -44
- mindspore/dataset/engine/iterators.py +283 -63
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/obs/util.py +8 -0
- mindspore/dataset/engine/queue.py +40 -0
- mindspore/dataset/engine/samplers.py +289 -43
- mindspore/dataset/engine/serializer_deserializer.py +3 -2
- mindspore/dataset/engine/validators.py +53 -11
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/py_transforms_util.py +17 -0
- mindspore/dataset/transforms/transforms.py +31 -14
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/dataset/vision/validators.py +1 -2
- mindspore/device_context/__init__.py +21 -0
- mindspore/device_context/ascend/__init__.py +25 -0
- mindspore/device_context/ascend/device.py +72 -0
- mindspore/device_context/ascend/op_debug.py +153 -0
- mindspore/device_context/ascend/op_precision.py +193 -0
- mindspore/device_context/ascend/op_tuning.py +123 -0
- mindspore/{ops_generate/gen_constants.py → device_context/cpu/__init__.py} +6 -17
- mindspore/device_context/cpu/device.py +62 -0
- mindspore/device_context/cpu/op_tuning.py +43 -0
- mindspore/device_context/gpu/__init__.py +21 -0
- mindspore/device_context/gpu/device.py +70 -0
- mindspore/device_context/gpu/op_precision.py +67 -0
- mindspore/device_context/gpu/op_tuning.py +175 -0
- mindspore/device_manager.py +170 -0
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/llm_boost/__init__.py +1 -0
- mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
- mindspore/experimental/llm_boost/register.py +1 -0
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +6 -6
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +7 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +73 -46
- mindspore/experimental/optim/radam.py +34 -31
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -53
- mindspore/hal/event.py +52 -52
- mindspore/hal/memory.py +157 -117
- mindspore/hal/stream.py +150 -109
- mindspore/include/api/context.h +0 -1
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +50 -0
- mindspore/mindrecord/__init__.py +21 -8
- mindspore/mindrecord/config.py +17 -316
- mindspore/mindrecord/filereader.py +1 -9
- mindspore/mindrecord/filewriter.py +5 -15
- mindspore/mindrecord/mindpage.py +1 -9
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +796 -759
- mindspore/mint/distributed/__init__.py +70 -4
- mindspore/mint/distributed/distributed.py +2679 -44
- mindspore/mint/linalg/__init__.py +8 -0
- mindspore/mint/nn/__init__.py +743 -22
- mindspore/mint/nn/functional.py +716 -23
- mindspore/mint/nn/layer/__init__.py +21 -4
- mindspore/mint/nn/layer/_functions.py +334 -0
- mindspore/mint/nn/layer/activation.py +276 -1
- mindspore/mint/nn/layer/basic.py +123 -0
- mindspore/mint/nn/layer/conv.py +921 -0
- mindspore/mint/nn/layer/normalization.py +223 -28
- mindspore/mint/nn/layer/padding.py +797 -0
- mindspore/mint/nn/layer/pooling.py +235 -0
- mindspore/mint/optim/__init__.py +3 -1
- mindspore/mint/optim/adam.py +223 -0
- mindspore/mint/optim/adamw.py +26 -19
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/mint/special/__init__.py +2 -1
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/multiprocessing/__init__.py +5 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1370 -189
- mindspore/nn/dynamic_lr.py +2 -1
- mindspore/nn/layer/activation.py +29 -27
- mindspore/nn/layer/basic.py +51 -35
- mindspore/nn/layer/channel_shuffle.py +3 -3
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +22 -17
- mindspore/nn/layer/embedding.py +12 -11
- mindspore/nn/layer/normalization.py +56 -49
- mindspore/nn/layer/padding.py +4 -3
- mindspore/nn/layer/pooling.py +120 -42
- mindspore/nn/layer/rnn_cells.py +1 -1
- mindspore/nn/layer/rnns.py +2 -1
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +59 -36
- mindspore/nn/learning_rate_schedule.py +8 -4
- mindspore/nn/loss/loss.py +58 -55
- mindspore/nn/optim/ada_grad.py +7 -5
- mindspore/nn/optim/adadelta.py +11 -9
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +17 -13
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +1 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lars.py +1 -4
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +3 -3
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +11 -9
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/optim/thor.py +2 -1
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/utils/init.py +13 -11
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +181 -122
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +6 -7
- mindspore/numpy/array_creations.py +63 -65
- mindspore/numpy/array_ops.py +149 -144
- mindspore/numpy/logic_ops.py +41 -42
- mindspore/numpy/math_ops.py +365 -363
- mindspore/numpy/utils.py +17 -18
- mindspore/numpy/utils_const.py +5 -6
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +5 -3
- mindspore/ops/_grad_experimental/grad_comm_ops.py +112 -16
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -2
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -65
- mindspore/ops/_vmap/vmap_array_ops.py +27 -25
- mindspore/ops/_vmap/vmap_base.py +0 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +21 -14
- mindspore/ops/_vmap/vmap_math_ops.py +15 -16
- mindspore/ops/_vmap/vmap_nn_ops.py +29 -42
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +236 -46
- mindspore/ops/auto_generate/gen_extend_func.py +764 -124
- mindspore/ops/auto_generate/gen_ops_def.py +4018 -2264
- mindspore/ops/auto_generate/gen_ops_prim.py +15463 -5037
- mindspore/ops/auto_generate/pyboost_inner_prim.py +221 -87
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +20 -25
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +228 -30
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -30
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +40 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +2089 -2403
- mindspore/ops/function/clip_func.py +80 -23
- mindspore/ops/function/debug_func.py +57 -57
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +2 -2
- mindspore/ops/function/linalg_func.py +47 -78
- mindspore/ops/function/math_func.py +4501 -3802
- mindspore/ops/function/nn_func.py +1726 -620
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +18 -84
- mindspore/ops/function/random_func.py +440 -387
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +6 -6
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +24 -17
- mindspore/ops/functional.py +22 -7
- mindspore/ops/functional_overload.py +1440 -0
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +13 -7
- mindspore/ops/operations/_custom_ops_utils.py +247 -0
- mindspore/ops/operations/_embedding_cache_ops.py +4 -4
- mindspore/ops/operations/_grad_ops.py +2 -43
- mindspore/ops/operations/_infer_ops.py +2 -1
- mindspore/ops/operations/_inner_ops.py +43 -84
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +81 -324
- mindspore/ops/operations/comm_ops.py +154 -108
- mindspore/ops/operations/custom_ops.py +232 -78
- mindspore/ops/operations/debug_ops.py +153 -59
- mindspore/ops/operations/inner_ops.py +7 -5
- mindspore/ops/operations/linalg_ops.py +1 -57
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +928 -180
- mindspore/ops/operations/math_ops.py +32 -234
- mindspore/ops/operations/nn_ops.py +210 -498
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +66 -53
- mindspore/ops/tensor_method.py +1888 -0
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +135 -0
- mindspore/ops_generate/aclnn/gen_aclnn_implement.py +257 -0
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/api/add_tensor_docs_generator.py +56 -0
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +105 -0
- mindspore/ops_generate/api/functional_map_cpp_generator.py +504 -0
- mindspore/ops_generate/api/functional_overload_py_generator.py +112 -0
- mindspore/ops_generate/api/functions_cc_generator.py +237 -0
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/api/op_api_proto.py +235 -0
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +461 -0
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/base_generator.py +11 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/common/gen_utils.py +348 -0
- mindspore/ops_generate/common/op_proto.py +473 -0
- mindspore/ops_generate/common/template.py +523 -0
- mindspore/ops_generate/gen_ops.py +22 -1069
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +191 -0
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +299 -0
- mindspore/ops_generate/op_def/ops_def_h_generator.py +74 -0
- mindspore/ops_generate/op_def/ops_name_h_generator.py +83 -0
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/op_def_py/op_def_py_generator.py +132 -0
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +489 -0
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +139 -0
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +93 -0
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +175 -0
- mindspore/ops_generate/pyboost/op_template_parser.py +517 -0
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +407 -0
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +100 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +148 -0
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +155 -0
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +132 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +272 -0
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +938 -0
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +357 -0
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +179 -36
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +7 -3
- mindspore/parallel/_auto_parallel_context.py +152 -34
- mindspore/parallel/_cell_wrapper.py +130 -15
- mindspore/parallel/_parallel_serialization.py +107 -5
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +199 -23
- mindspore/parallel/algo_parameter_config.py +4 -4
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +698 -35
- mindspore/parallel/cluster/process_entity/_api.py +276 -50
- mindspore/parallel/cluster/process_entity/_utils.py +41 -6
- mindspore/parallel/cluster/run.py +21 -4
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +259 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +25 -14
- mindspore/parallel/shard.py +137 -58
- mindspore/parallel/transform_safetensors.py +363 -305
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +22 -5
- mindspore/profiler/analysis/__init__.py +0 -0
- mindspore/profiler/analysis/parser/__init__.py +0 -0
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +170 -0
- mindspore/profiler/analysis/parser/base_parser.py +158 -0
- mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
- mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
- mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +264 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +106 -0
- mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
- mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
- mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
- mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +415 -0
- mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
- mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
- mindspore/profiler/analysis/task_manager.py +131 -0
- mindspore/profiler/analysis/time_converter.py +84 -0
- mindspore/profiler/analysis/viewer/__init__.py +0 -0
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +372 -0
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +250 -0
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +320 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +327 -0
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +376 -0
- mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
- mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +96 -0
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
- mindspore/profiler/analysis/work_flow.py +73 -0
- mindspore/profiler/common/ascend_msprof_exporter.py +139 -0
- mindspore/profiler/common/command_executor.py +90 -0
- mindspore/profiler/common/constant.py +186 -3
- mindspore/profiler/common/file_manager.py +208 -0
- mindspore/profiler/common/log.py +130 -0
- mindspore/profiler/common/msprof_cmd_tool.py +221 -0
- mindspore/profiler/common/path_manager.py +395 -0
- mindspore/profiler/common/process_bar.py +168 -0
- mindspore/profiler/common/process_pool.py +9 -3
- mindspore/profiler/common/profiler_context.py +500 -0
- mindspore/profiler/common/profiler_info.py +304 -0
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_output_path.py +284 -0
- mindspore/profiler/common/profiler_parameters.py +251 -0
- mindspore/profiler/common/profiler_path_manager.py +179 -0
- mindspore/profiler/common/record_function.py +76 -0
- mindspore/profiler/common/tlv_decoder.py +76 -0
- mindspore/profiler/common/util.py +75 -2
- mindspore/profiler/dynamic_profiler.py +341 -75
- mindspore/profiler/envprofiler.py +163 -0
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +242 -0
- mindspore/profiler/platform/__init__.py +21 -0
- mindspore/profiler/platform/base_profiler.py +40 -0
- mindspore/profiler/platform/cpu_profiler.py +124 -0
- mindspore/profiler/platform/gpu_profiler.py +74 -0
- mindspore/profiler/platform/npu_profiler.py +335 -0
- mindspore/profiler/profiler.py +1073 -90
- mindspore/profiler/profiler_action_controller.py +187 -0
- mindspore/profiler/profiler_interface.py +118 -0
- mindspore/profiler/schedule.py +243 -0
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +2 -3
- mindspore/run_check/_check_version.py +27 -20
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +37 -0
- mindspore/runtime/device.py +27 -0
- mindspore/runtime/event.py +209 -0
- mindspore/runtime/executor.py +177 -0
- mindspore/runtime/memory.py +409 -0
- mindspore/runtime/stream.py +460 -0
- mindspore/runtime/thread_bind_core.py +401 -0
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +88 -25
- mindspore/train/amp.py +9 -5
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +53 -55
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_early_stop.py +1 -1
- mindspore/train/callback/_flops_collector.py +103 -68
- mindspore/train/callback/_history.py +8 -5
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +52 -19
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -107
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +15 -16
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +11 -10
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +174 -46
- mindspore/train/model.py +184 -113
- mindspore/train/serialization.py +622 -978
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +2 -3
- mindspore/train/train_thor/model_thor.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +6 -3
- mindspore/utils/dryrun.py +140 -0
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/runtime_execution_order_check.py +550 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +3 -3
- {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +587 -418
- {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +1 -1
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/common/_tensor_overload.py +0 -139
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -197
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_aclnn_implement.py +0 -263
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/gen_pyboost_func.py +0 -1052
- mindspore/ops_generate/gen_utils.py +0 -209
- mindspore/ops_generate/op_proto.py +0 -145
- mindspore/ops_generate/template.py +0 -261
- mindspore/profiler/envprofiling.py +0 -254
- mindspore/profiler/profiling.py +0 -1926
- {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
@@ -495,7 +495,7 @@ def get_image_num_channels(image):
 
     Raises:
         RuntimeError: If the dimension of `image` is less than 2.
-        TypeError: If `image` is not of type
+        TypeError: If `image` is not of type numpy.ndarray or PIL Image.
 
     Examples:
         >>> import mindspore.dataset.vision as vision
@@ -528,7 +528,7 @@ def get_image_size(image):
 
     Raises:
         RuntimeError: If the dimension of `image` is less than 2.
-        TypeError: If `image` is not of type
+        TypeError: If `image` is not of type numpy.ndarray or PIL Image.
 
     Examples:
         >>> import mindspore.dataset.vision as vision
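A minimal usage sketch for the two accessors above (not part of the diff); the array shape is arbitrary, and the printed size assumes the documented [height, width] ordering:

    import numpy as np
    import mindspore.dataset.vision as vision

    img = np.zeros((32, 48, 3), dtype=np.uint8)   # H, W, C
    print(vision.get_image_num_channels(img))     # 3
    print(vision.get_image_size(img))             # [32, 48]
    # Anything other than a numpy.ndarray or PIL Image raises the TypeError above.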
@@ -635,10 +635,12 @@ def read_video(filename, start_pts=0, end_pts=None, pts_unit="pts"):
 
     Args:
         filename(str): The path to the video file to be read.
-        start_pts(Union[float, Fraction, int], optional): The start presentation timestamp of the video.
-
+        start_pts(Union[float, Fraction, int], optional): The start presentation timestamp of the video.
+            Default: ``0``.
+        end_pts(Union[float, Fraction, int], optional): The end presentation timestamp of the video.
+            Default: ``None``.
             The None is represented by 2147483647.
-        pts_unit(str, optional): The unit of the timestamps. It can be any of ["pts", "sec"]. Default: "pts"
+        pts_unit(str, optional): The unit of the timestamps. It can be any of ["pts", "sec"]. Default: ``"pts"``.
 
     Returns:
         - numpy.ndarray, four dimensions uint8 data for video. The format is [T, H, W, C]. `T` is the number of frames,
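A hedged sketch of calling read_video with the defaults documented above (not part of the diff); "sample.avi" is a hypothetical path, and the three-value return (video, audio, metadata) follows the MindSpore read_video docs:

    import mindspore.dataset.vision as vision

    video, audio, metadata = vision.read_video("sample.avi", start_pts=0, end_pts=None, pts_unit="pts")
    print(video.shape)   # (T, H, W, C) uint8 frames, as described in Returns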
@@ -24,7 +24,7 @@ from mindspore.dataset.core.validator_helpers import check_value, check_uint8, F
     check_pos_float32, check_float32, check_2tuple, check_range, check_positive, INT32_MAX, INT32_MIN, \
     parse_user_args, type_check, type_check_list, check_c_tensor_op, UINT8_MAX, UINT8_MIN, check_value_normalize_std, \
     check_value_cutoff, check_value_ratio, check_odd, check_non_negative_float32, check_non_negative_int32, \
-    check_pos_int32, check_int32, check_tensor_op, deprecator_factory, check_valid_str, check_independent_mode
+    check_pos_int32, check_int32, check_tensor_op, deprecator_factory, check_valid_str
 from mindspore.dataset.transforms.validators import check_transform_op_type
 from .utils import Inter, Border, ImageBatchFormat, ConvertMode, SliceMode, AutoAugmentPolicy
 
@@ -358,7 +358,6 @@ def check_device_target(method):
     def new_method(self, *args, **kwargs):
         [device_target], _ = parse_user_args(method, *args, **kwargs)
         check_valid_str(device_target, ["CPU", "Ascend"], "device_target")
-        check_independent_mode("Transform in Ascend mode", (device_target == "Ascend"))
         return method(self, *args, **kwargs)
     return new_method
 
@@ -0,0 +1,21 @@
+# Copyright 2024 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""
+The device context interface.
+"""
+import mindspore.device_context.gpu
+import mindspore.device_context.cpu
+import mindspore.device_context.ascend
@@ -0,0 +1,25 @@
+# Copyright 2024 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Ascend interfaces"""
+
+from .device import device_count, is_available
+from .op_debug import *
+from .op_precision import *
+from .op_tuning import *
+
+__all__ = [
+    "device_count", "is_available"
+]
@@ -0,0 +1,72 @@
+# Copyright 2024 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Device context ascend interfaces"""
+import mindspore as ms
+from mindspore._c_expression import MSContext
+from mindspore import log as logger
+
+try:
+    from mindspore._c_expression import ascend_get_device_count
+except ImportError:
+    pass
+
+
+def device_count():
+    """
+    Return compute-capable device count of Ascend.
+
+    Returns:
+        int, the number of compute-capable Ascend devices.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> print(ms.device_context.ascend.device_count())
+        8
+    """
+    if not MSContext.get_instance().is_pkg_support_device("Ascend") or not is_available():
+        raise RuntimeError(f"Device Ascend not exist.")
+    return ascend_get_device_count()
+
+
+def is_available():
+    """
+    Return whether the Ascend backend is available.
+
+    Returns:
+        bool, whether the Ascend backend is available for this MindSpore package.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> print(ms.device_context.ascend.is_available())
+        True
+    """
+    # MindSpore will try to load plugins in "import mindspore", and availability status will be stored.
+    if not MSContext.get_instance().is_pkg_support_device("Ascend"):
+        logger.warning(f"Device Ascend is not available.")
+        load_plugin_error = MSContext.get_instance().load_plugin_error()
+        if load_plugin_error != "":
+            logger.warning(f"Here's error when loading plugin for MindSpore package."
+                           f"Error message: {load_plugin_error}")
+        return False
+    return True
+
+
+def _is_supported():
+    device_target = ms.context.get_context("device_target")
+    if device_target == 'CPU' or device_target == 'GPU':
+        logger.error(f"{device_target} device is not supported. Please use correct device")
+        return False
+    return True
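A small sketch (not part of the diff) showing the intended guard order for the new module: probe is_available() before device_count(), since device_count() raises RuntimeError when the Ascend backend is absent:

    import mindspore as ms

    if ms.device_context.ascend.is_available():
        print("Ascend devices:", ms.device_context.ascend.device_count())
    else:
        print("No Ascend backend in this MindSpore build.")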
@@ -0,0 +1,153 @@
+# Copyright 2024 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Op debug interfaces."""
+from mindspore._checkparam import args_type_check
+from .device import _is_supported
+try:
+    from mindspore._c_expression import AscendOpDebugConf
+except ImportError:
+    pass
+
+function_status = {'execute_timeout': False, 'debug_option': False}
+
+
+@args_type_check(op_timeout=int)
+def execute_timeout(op_timeout):
+    """
+    Set the maximum duration of executing an operator in seconds. The framework operator execution timeout time
+    is ``900`` by default.
+    Please refer to `Ascend Community document about aclrtSetOpExecuteTimeOut
+    <https://www.hiascend.com/document/detail/en/CANNCommunityEdition/600alphaX/infacldevg/aclcppdevg/aclcppdevg_03_0069.html>`_.
+
+    Args:
+        op_timeout (int): Set the maximum duration of executing an operator in seconds.
+            If the execution time exceeds this value, system will terminate the task.
+            0 means endless wait. The defaults for AI Core and AI CPU operators vary on different hardware.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> ms.device_context.ascend.op_debug.execute_timeout(100)
+    """
+    if not function_status['execute_timeout']:
+        function_status['execute_timeout'] = True
+        if not _is_supported():
+            return
+    if op_timeout == AscendOpDebugConf.get_instance().execute_timeout():
+        return
+    # Check the configuration environment whether valid
+    if AscendOpDebugConf.get_instance().is_execute_timeout_configured():
+        raise RuntimeError("The 'execute_timeout' can not be set repeatedly.")
+    if op_timeout < 0:
+        raise ValueError("The num of execute_timeout must bigger than or equal to 0.")
+    AscendOpDebugConf.get_instance().set_execute_timeout(op_timeout)
+
+
+def debug_option(option_value):
+    """
+    Enable debugging options for Ascend operators, default not enabled.
+
+    Args:
+        option_value(str): Ascend operators debugging configuration. Currently, only memory
+            access violation detection is supported.
+            The value currently only supports being set to ``"oom"``.
+
+            - ``"oom"``: When there is a memory out of bounds during the execution of an operator,
+              AscendCL will return an error code of ``EZ9999``.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> ms.device_context.ascend.op_debug.debug_option("oom")
+    """
+    if not function_status['debug_option']:
+        function_status['debug_option'] = True
+        if not _is_supported():
+            return
+    if option_value == AscendOpDebugConf.get_instance().debug_option():
+        return
+    # Check the configuration environment whether valid
+    if AscendOpDebugConf.get_instance().is_debug_option_configured():
+        raise RuntimeError("The 'debug_option' can not be set repeatedly.")
+    valid_order = {"oom"}
+    if not isinstance(option_value, str):
+        raise TypeError(
+            f"For 'device_context.ascend.op_debug.debug_option(option_value)', the type of 'option_value' must be str, "
+            f"but got {type(option_value)}."
+        )
+    if option_value not in valid_order:
+        raise ValueError(
+            f"For 'device_context.ascend.op_debug.debug_option(option_value)', the 'option_value' supports being set "
+            f"to 'oom' currently, but got {option_value}."
+        )
+    AscendOpDebugConf.get_instance().set_debug_option(option_value)
+
+
+def aclinit_config(config):
+    """
+    Configure the configuration items for the aclInit interface.
+    Please refer to `Ascend Community document about aclInit
+    <https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/800alpha003/apiref/appdevgapi/aclcppdevg_03_0022.html>`_.
+
+    Args:
+        config(dict): When initializing AscendCL, you can enable or configure the
+            following features through this configuration interface.
+
+            - ``"max_opqueue_num"``: When executing using the single-operator model method, to save memory and balance
+              the performance of calls, you can configure the maximum length of the single-operator model mapping
+              queue through the max_opqueue_num parameter. If the length reaches the maximum, the system will first
+              delete the mapping information that has not been used for a long time and the cached single-operator
+              model, and then load the latest mapping information and the corresponding single-operator model.
+              If the maximum length of the mapping queue is not configured, the default maximum length is 20,000.
+            - ``"err_msg_mode"``: This parameter is used to control the level at which error information is retrieved,
+              either by process or by thread. The default level is by process. "0" indicating that error information
+              is retrieved by thread.
+              "1" is the default value, indicates that error information is retrieved by process.
+            - ``"dump"``: This parameter is used to enable exception dump for Ascend operators. The value can be set to
+              {"dump_scene": "lite_exception"}, {"dump_scene": "lite_exception:disable"}.
+              {"dump_scene": "lite_exception"} indicates that the exception dump is enabled.
+              {"dump_scene": "lite_exception:disable"} indicates that the exception dump is disabled.
+              {"dump_scene": "lite_exception"} is the default value, indicates that the exception dump is enabled.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> ms.set_device("Ascend", 0)
+        >>> ms.device_context.ascend.op_debug.aclinit_config({"max_opqueue_num": "20000", "err_msg_mode": "1",
+        ...                                                   "dump": {"dump_scene": "lite_exception"}})
+    """
+    aclinit_cfg_modes = {
+        "max_opqueue_num": (str,),
+        "err_msg_mode": ['0', '1'],
+        "dump": [{"dump_scene": "lite_exception"}, {"dump_scene": "lite_exception:disable"}],
+    }
+    instance = AscendOpDebugConf.get_instance()
+    aclinit_cfg_setters = {
+        "max_opqueue_num": instance.set_max_opqueue_num,
+        "err_msg_mode": instance.set_err_msg_mode,
+        "dump": instance.set_lite_exception_dump
+    }
+    aclinit_cfg_set = tuple(aclinit_cfg_modes.keys())
+    for key, value in config.items():
+        if key not in aclinit_cfg_set:
+            raise ValueError(f"For 'ms.device_context.ascend.op_debug.aclinit_config', the key must be one of "
+                             f"{aclinit_cfg_set}, but got {key}.")
+        supported_modes = aclinit_cfg_modes.get(key)
+        if isinstance(supported_modes, list) and value not in supported_modes:
+            raise ValueError(f"For 'ms.device_context.ascend.op_debug.aclinit_config', the value of argument {key} "
+                             f"must be one of {supported_modes}, but got {value}.")
+        if isinstance(supported_modes, tuple) and not isinstance(value, supported_modes):
+            raise TypeError(f"For 'ms.device_context.ascend.op_debug.aclinit_config', the type of argument {key} "
+                            f"must be one of {supported_modes}, but got {type(value)}.")
+        cfg_setter = aclinit_cfg_setters.get(key)
+        cfg_setter(value)
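A sketch of the set-once semantics implemented above (not part of the diff; assumes an Ascend build): a repeat call with the same value returns silently, while a repeat call with a different value raises:

    import mindspore as ms

    ms.device_context.ascend.op_debug.execute_timeout(100)
    ms.device_context.ascend.op_debug.execute_timeout(100)  # same value: no-op
    try:
        ms.device_context.ascend.op_debug.execute_timeout(200)
    except RuntimeError as err:
        print(err)  # The 'execute_timeout' can not be set repeatedly.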
@@ -0,0 +1,193 @@
+# Copyright 2024 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Op precision interfaces."""
+import os
+from mindspore._checkparam import args_type_check
+from .device import _is_supported
+
+try:
+    from mindspore._c_expression import AscendOpPrecisionConf
+except ImportError:
+    pass
+
+function_status = {'precision_mode': False, 'op_precision_mode': False,
+                   'matmul_allow_hf32': False, 'conv_allow_hf32': False}
+
+
+def precision_mode(mode):
+    """
+    Configure mixed precision mode setting. The framework sets the configuration of Atlas training series
+    products to "force_fp16" by default, and sets the configuration for other products such as the Atlas A2
+    training series products to "must_keep_origin_dtype" by default.
+    For detailed information, please refer to `Ascend community
+    <https://www.hiascend.com/document/detail/zh/canncommercial/80RC3/apiref/appdevgapi/aclcppdevg_03_1371.html/>`_ .
+
+    Note:
+        - The default value of `precision_mode` is an experimental parameter and may change in the future.
+
+    Args:
+        mode (str): The operator precision mode setting.
+            The value range is as follows:
+
+            - force_fp16: When the operator supports both float16 and float32, directly choose float16.
+            - allow_fp32_to_fp16: For matrix-type operators, use float16. For vector-type operators, prioritize
+              the original precision. If the operator in the network model supports float32, retain the original
+              precision float32. If the operator in the network model does not support float32, directly reduce
+              the precision to float16.
+            - allow_mix_precision: Automatic mixed precision, for all operators in the network, according to the
+              built-in optimization strategy, automatically reduce the precision of some operators to float16 or
+              bfloat16.
+            - must_keep_origin_dtype: Maintain the original precision.
+            - force_fp32: When the input of the matrix calculation operator is float16, and the output supports both
+              float16 and float32, force the output to be converted to float32.
+            - allow_fp32_to_bf16: For matrix-type operators, use bfloat16. For vector-type operators, prioritize the
+              original precision. If the operator in the network model supports float32, retain the original precision
+              float32. If the operator in the network model does not support float32, directly reduce the precision
+              to bfloat16.
+            - allow_mix_precision_fp16: Automatic mixed precision, for all operators in the network, according to
+              the built-in optimization strategy, automatically reduce the precision of some operators to float16.
+            - allow_mix_precision_bf16: Automatic mixed precision, for all operators in the network, according to
+              the built-in optimization strategy, automatically reduce the precision of some operators to bfloat16.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> ms.device_context.ascend.op_precision.precision_mode("force_fp16")
+    """
+    if not function_status['precision_mode']:
+        function_status['precision_mode'] = True
+        if not _is_supported():
+            return
+    if mode == AscendOpPrecisionConf.get_instance().precision_mode():
+        return
+    # Check the configuration environment whether valid
+    if AscendOpPrecisionConf.get_instance().is_precision_mode_configured():
+        raise RuntimeError("The 'precision_mode' can not be set repeatedly.")
+    supported_modes = [
+        "force_fp16",
+        "allow_fp32_to_fp16",
+        "allow_mix_precision",
+        "must_keep_origin_dtype",
+        "force_fp32",
+        "allow_fp32_to_bf16",
+        "allow_mix_precision_fp16",
+        "allow_mix_precision_bf16",
+    ]
+    if mode not in supported_modes:
+        raise ValueError(f"For 'precision_mode', the value of mode {mode} must be one of "
+                         f"{supported_modes}, but got {mode}.")
+    AscendOpPrecisionConf.get_instance().set_precision_mode(mode)
+
+
+@args_type_check(path=str)
+def op_precision_mode(path):
+    """
+    Path to config file of op precision mode.
+    For detailed information, please refer to `Ascend community
+    <https://www.hiascend.com/document/detail/zh/canncommercial/80RC3/apiref/appdevgapi/aclcppdevg_03_1371.html/>`_ .
+
+    Args:
+        path (str): Directory of the configuration file (.ini format) for setting the operator precision mode.
+            The directory can contain letters, digits, underscores (_), hyphens (-), and periods (.).
+
+    Examples:
+        >>> import mindspore as ms
+        >>> ms.device_context.ascend.op_precision.op_precision_mode("./op_precision_config_file")
+    """
+    if not function_status['op_precision_mode']:
+        function_status['op_precision_mode'] = True
+        if not _is_supported():
+            return
+    if path == AscendOpPrecisionConf.get_instance().op_precision_mode():
+        return
+    # Check the configuration environment whether valid
+    if AscendOpPrecisionConf.get_instance().is_op_precision_mode_configured():
+        raise RuntimeError("The 'op_precision_mode' can not be set repeatedly.")
+    op_precision_path = path
+    real_path = os.path.realpath(op_precision_path)
+    if not os.path.exists(real_path):
+        raise ValueError(
+            f"For 'op_precision_mode', the 'path' is invalid, "
+            f"got '{op_precision_path}'."
+        )
+    AscendOpPrecisionConf.get_instance().set_op_precision_mode(path)
+
+
+def matmul_allow_hf32(value):
+    """
+    Whether to convert FP32 to HF32 for Matmul operators. CANN disables FP32 to HF32
+    for Matmul operators by default.
+    For detailed information, please refer to `Ascend community
+    <https://www.hiascend.com/document/detail/zh/canncommercial/80RC3/apiref/appdevgapi/aclcppdevg_03_1371.html/>`_ .
+
+    Note:
+        - This is an experimental prototype that is subject to change and/or deletion.
+
+    Args:
+        value (bool): Whether to convert FP32 to HF32 for Matmul operators.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> ms.device_context.ascend.op_precision.matmul_allow_hf32(True)
+    """
+    if not function_status['matmul_allow_hf32']:
+        function_status['matmul_allow_hf32'] = True
+        if not _is_supported():
+            return
+    supported_modes = [True, False]
+    if value not in supported_modes:
+        raise ValueError(f"For 'matmul_allow_hf32', the type of input value must be one of "
+                         f"{supported_modes}, but got {value}.")
+    is_enable = "1" if value else "0"
+    if is_enable == AscendOpPrecisionConf.get_instance().matmul_allow_hf32():
+        return
+    # Check the configuration environment whether valid
+    if AscendOpPrecisionConf.get_instance().is_matmul_allow_hf32_configured():
+        raise RuntimeError("The 'matmul_allow_hf32' can not be set repeatedly.")
+    AscendOpPrecisionConf.get_instance().set_matmul_allow_hf32(is_enable)
+
+
+def conv_allow_hf32(value):
+    """
+    Whether to convert FP32 to HF32 for Conv operators. CANN enables FP32 to HF32
+    for Conv operators by default.
+    For detailed information, please refer to `Ascend community
+    <https://www.hiascend.com/document/detail/zh/canncommercial/80RC3/apiref/appdevgapi/aclcppdevg_03_1371.html/>`_ .
+
+    Note:
+        - This is an experimental prototype that is subject to change and/or deletion.
+
+    Args:
+        value (bool): Whether to convert FP32 to HF32 for Conv operators.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> ms.device_context.ascend.op_precision.conv_allow_hf32(True)
+    """
+    if not function_status['conv_allow_hf32']:
+        function_status['conv_allow_hf32'] = True
+        if not _is_supported():
+            return
+    supported_modes = [True, False]
+    if value not in supported_modes:
+        raise ValueError(f"For 'conv_allow_hf32', the type of input value must be one of "
+                         f"{supported_modes}, but got {value}.")
+    is_enable = "1" if value else "0"
+    if is_enable == AscendOpPrecisionConf.get_instance().conv_allow_hf32():
+        return
+    # Check the configuration environment whether valid
+    if AscendOpPrecisionConf.get_instance().is_conv_allow_hf32_configured():
+        raise RuntimeError("The 'conv_allow_hf32' can not be set repeatedly.")
+    AscendOpPrecisionConf.get_instance().set_conv_allow_hf32(is_enable)
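A sketch of the HF32 toggles above (not part of the diff; assumes an Ascend build). Both take a bool and forward "1"/"0" to AscendOpPrecisionConf, overriding the CANN defaults (HF32 off for Matmul, on for Conv):

    import mindspore as ms

    ms.device_context.ascend.op_precision.matmul_allow_hf32(True)   # opt in for Matmul
    ms.device_context.ascend.op_precision.conv_allow_hf32(False)    # opt out for Conv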
@@ -0,0 +1,123 @@
+# Copyright 2024 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Op tuning interfaces."""
+try:
+    from mindspore._c_expression import AscendOpTuningConf
+except ImportError:
+    pass
+from .device import _is_supported
+
+function_status = {'op_compile': False, 'aoe_tune_mode': False,
+                   'aoe_job_type': False}
+
+
+def op_compile(value):
+    """
+    Whether to select online compilation. The default settings by the framework are online compilation for static
+    shape, and compiled operator binary files for dynamic shape. The default settings may change in the future.
+    For detailed information, please refer to `Ascend community
+    <https://www.hiascend.com/document/detail/zh/canncommercial/80RC3/apiref/appdevgapi/aclcppdevg_03_1371.html/>`_ .
+
+    Args:
+        value (bool): Whether to select online compilation or not.
+
+            - ``True``: online compilation is prioritized.
+            - ``False``: compiled operator binary files are prioritized to improve compilation performance.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> ms.device_context.ascend.op_tuning.op_compile(True)
+    """
+    if not function_status['op_compile']:
+        function_status['op_compile'] = True
+        if not _is_supported():
+            return
+    if value == AscendOpTuningConf.get_instance().jit_compile():
+        return
+    # Check the configuration environment whether valid
+    if AscendOpTuningConf.get_instance().is_jit_compile_configured():
+        raise RuntimeError("The 'op_compile' can not be set repeatedly.")
+    supported_modes = [True, False]
+    if value not in supported_modes:
+        raise TypeError(f"For 'op_compile', the type of input value must be one of "
+                        f"{supported_modes}, but got {value}.")
+    is_enable = "1" if value else "0"
+    AscendOpTuningConf.get_instance().set_jit_compile(is_enable)
+
+
+def aoe_tune_mode(tune_mode):
+    """
+    AOE tuning mode setting, which is not set by default.
+
+    Args:
+        tune_mode (str): AOE tuning mode setting.
+
+            - ``"online"``: the online tuning function is turned on.
+            - ``"offline"``: ge graph will be saved for offline tuning.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> ms.device_context.ascend.op_tuning.aoe_tune_mode("online")
+    """
+    if not function_status['aoe_tune_mode']:
+        function_status['aoe_tune_mode'] = True
+        if not _is_supported():
+            return
+    if tune_mode == AscendOpTuningConf.get_instance().aoe_tune_mode():
+        return
+    # Check the configuration environment whether valid
+    if AscendOpTuningConf.get_instance().is_aoe_tune_mode_configured():
+        raise RuntimeError("The 'aoe_tune_mode' can not be set repeatedly.")
+    candidate = ["online", "offline"]
+    if tune_mode not in candidate:
+        raise ValueError(
+            f"For 'device_context.ascend.op_tuning.aoe_tune_mode', the argument 'tune_mode' must be in "
+            f"['online', 'offline'], but got {tune_mode}."
+        )
+    AscendOpTuningConf.get_instance().set_aoe_tune_mode(tune_mode)
+
+
+def aoe_job_type(config):
+    """
+    Set the parameters specific to Ascend Optimization Engine. It needs to be used in
+    conjunction with mindspore.device_context.ascend.op_tuning.aoe_tune_mode(tune_mode).
+    The framework sets it to "2" by default.
+
+    Args:
+        config (str): Choose the tuning type.
+
+            - ``"1"``: Set to subgraph tuning.
+            - ``"2"``: Set to operator tuning.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> ms.device_context.ascend.op_tuning.aoe_job_type("1")
+    """
+    if not function_status['aoe_job_type']:
+        function_status['aoe_job_type'] = True
+        if not _is_supported():
+            return
+    if config == AscendOpTuningConf.get_instance().aoe_job_type():
+        return
+    # Check the configuration environment whether valid
+    if AscendOpTuningConf.get_instance().is_aoe_job_type_configured():
+        raise RuntimeError("The 'aoe_job_type' can not be set repeatedly.")
+    aoe_cfgs = ["1", "2"]
+    if config not in aoe_cfgs:
+        raise ValueError(
+            f"For 'aoe_job_type', the config must be one of {aoe_cfgs}, but got {config}."
+        )
+    AscendOpTuningConf.get_instance().set_aoe_job_type(config)
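A sketch combining the AOE knobs above (not part of the diff; assumes an Ascend build): enable online tuning, then pick the tuning granularity; each knob is set-once per process:

    import mindspore as ms

    ms.device_context.ascend.op_tuning.aoe_tune_mode("online")
    ms.device_context.ascend.op_tuning.aoe_job_type("2")  # "1" = subgraph, "2" = operator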