mindspore-2.5.0-cp310-cp310-win_amd64.whl → mindspore-2.6.0rc1-cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +6 -4
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -33
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +19 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +24 -193
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +97 -74
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +4 -4
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +4 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -1
- mindspore/common/_stub_tensor.py +5 -10
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +1915 -3287
- mindspore/common/api.py +341 -354
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +5 -2
- mindspore/common/dump.py +7 -5
- mindspore/common/file_system.py +3 -0
- mindspore/common/hook_handle.py +5 -3
- mindspore/common/initializer.py +10 -6
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +2 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +106 -39
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +297 -714
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +47 -2
- mindspore/communication/comm_func.py +70 -53
- mindspore/communication/management.py +83 -17
- mindspore/context.py +214 -560
- mindspore/dataset/__init__.py +44 -20
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +102 -120
- mindspore/dataset/engine/datasets_audio.py +22 -22
- mindspore/dataset/engine/datasets_standard_format.py +43 -24
- mindspore/dataset/engine/datasets_text.py +78 -85
- mindspore/dataset/engine/datasets_user_defined.py +108 -76
- mindspore/dataset/engine/datasets_vision.py +111 -108
- mindspore/dataset/engine/iterators.py +5 -3
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/samplers.py +279 -57
- mindspore/dataset/engine/serializer_deserializer.py +2 -1
- mindspore/dataset/engine/validators.py +10 -0
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/device_context/ascend/op_debug.py +60 -1
- mindspore/device_context/ascend/op_tuning.py +0 -4
- mindspore/device_manager.py +39 -3
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +22 -26
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +4 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +40 -22
- mindspore/experimental/optim/radam.py +5 -5
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -81
- mindspore/hal/event.py +38 -55
- mindspore/hal/memory.py +93 -144
- mindspore/hal/stream.py +81 -125
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +40 -2
- mindspore/mindrecord/__init__.py +20 -7
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +131 -700
- mindspore/mint/distributed/__init__.py +5 -1
- mindspore/mint/distributed/distributed.py +194 -109
- mindspore/mint/linalg/__init__.py +2 -0
- mindspore/mint/nn/__init__.py +280 -18
- mindspore/mint/nn/functional.py +282 -64
- mindspore/mint/nn/layer/__init__.py +4 -0
- mindspore/mint/nn/layer/_functions.py +7 -3
- mindspore/mint/nn/layer/activation.py +120 -13
- mindspore/mint/nn/layer/conv.py +218 -24
- mindspore/mint/nn/layer/normalization.py +15 -16
- mindspore/mint/nn/layer/padding.py +1 -1
- mindspore/mint/nn/layer/pooling.py +66 -1
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1250 -176
- mindspore/nn/layer/activation.py +23 -21
- mindspore/nn/layer/basic.py +22 -16
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +22 -17
- mindspore/nn/layer/embedding.py +9 -8
- mindspore/nn/layer/normalization.py +48 -42
- mindspore/nn/layer/pooling.py +75 -31
- mindspore/nn/layer/transformer.py +11 -10
- mindspore/nn/learning_rate_schedule.py +4 -2
- mindspore/nn/loss/loss.py +27 -19
- mindspore/nn/optim/ada_grad.py +6 -5
- mindspore/nn/optim/adadelta.py +9 -7
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +16 -12
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +1 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +9 -7
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +178 -117
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +3 -3
- mindspore/numpy/array_creations.py +3 -3
- mindspore/numpy/array_ops.py +1 -1
- mindspore/numpy/math_ops.py +4 -4
- mindspore/numpy/utils.py +1 -2
- mindspore/numpy/utils_const.py +1 -2
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +3 -2
- mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
- mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
- mindspore/ops/_vmap/vmap_array_ops.py +7 -6
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
- mindspore/ops/_vmap/vmap_math_ops.py +4 -7
- mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +102 -49
- mindspore/ops/auto_generate/gen_extend_func.py +281 -135
- mindspore/ops/auto_generate/gen_ops_def.py +2574 -2326
- mindspore/ops/auto_generate/gen_ops_prim.py +8566 -2755
- mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +19 -24
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -3
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +28 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +1629 -2345
- mindspore/ops/function/clip_func.py +38 -45
- mindspore/ops/function/debug_func.py +36 -44
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +46 -78
- mindspore/ops/function/math_func.py +3035 -3705
- mindspore/ops/function/nn_func.py +676 -241
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +17 -30
- mindspore/ops/function/random_func.py +204 -361
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +5 -5
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +24 -17
- mindspore/ops/functional.py +6 -4
- mindspore/ops/functional_overload.py +547 -4
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +10 -5
- mindspore/ops/operations/_custom_ops_utils.py +247 -0
- mindspore/ops/operations/_grad_ops.py +1 -10
- mindspore/ops/operations/_inner_ops.py +5 -76
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +37 -22
- mindspore/ops/operations/comm_ops.py +150 -107
- mindspore/ops/operations/custom_ops.py +221 -23
- mindspore/ops/operations/debug_ops.py +115 -16
- mindspore/ops/operations/inner_ops.py +1 -1
- mindspore/ops/operations/linalg_ops.py +1 -58
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +746 -79
- mindspore/ops/operations/math_ops.py +21 -18
- mindspore/ops/operations/nn_ops.py +65 -191
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +43 -32
- mindspore/ops/tensor_method.py +232 -13
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
- mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
- mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
- mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
- mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
- mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
- mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
- mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
- mindspore/ops_generate/{template.py → common/template.py} +96 -84
- mindspore/ops_generate/gen_ops.py +23 -325
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
- mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -7
- mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
- mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
- mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
- mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
- mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
- mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
- mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
- mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
- mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
- mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
- mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
- mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
- mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
- mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +6 -2
- mindspore/parallel/_auto_parallel_context.py +133 -6
- mindspore/parallel/_cell_wrapper.py +130 -15
- mindspore/parallel/_parallel_serialization.py +95 -4
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +198 -25
- mindspore/parallel/algo_parameter_config.py +3 -3
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +656 -37
- mindspore/parallel/cluster/process_entity/_api.py +151 -19
- mindspore/parallel/cluster/run.py +1 -1
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +259 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +24 -13
- mindspore/parallel/shard.py +137 -61
- mindspore/parallel/transform_safetensors.py +287 -95
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +9 -5
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
- mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +22 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
- mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/common/constant.py +12 -0
- mindspore/profiler/common/msprof_cmd_tool.py +42 -23
- mindspore/profiler/common/path_manager.py +24 -0
- mindspore/profiler/common/profiler_context.py +26 -2
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_parameters.py +59 -18
- mindspore/profiler/common/profiler_path_manager.py +66 -7
- mindspore/profiler/dynamic_profiler.py +112 -79
- mindspore/profiler/envprofiler.py +26 -1
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +57 -14
- mindspore/profiler/platform/npu_profiler.py +33 -7
- mindspore/profiler/profiler.py +541 -45
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +4 -0
- mindspore/profiler/schedule.py +57 -22
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +25 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +2 -2
- mindspore/runtime/executor.py +40 -11
- mindspore/runtime/memory.py +25 -8
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +35 -7
- mindspore/train/amp.py +1 -1
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +24 -40
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_flops_collector.py +2 -3
- mindspore/train/callback/_history.py +7 -4
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +8 -13
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +179 -103
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +4 -5
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +8 -6
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +19 -12
- mindspore/train/model.py +176 -103
- mindspore/train/serialization.py +246 -988
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +4 -2
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +2 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +483 -438
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_constants.py +0 -190
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
- /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
mindspore/ops_generate/resources/resource_loader.py
ADDED
@@ -0,0 +1,36 @@
+# Copyright 2025 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Module of base class for resource loader."""
+
+from abc import ABC, abstractmethod
+from typing import Dict
+
+from .resource_list import ResourceType
+
+
+class ResourceLoader(ABC):
+    """
+    Abstract class for resource loader.
+    """
+    @abstractmethod
+    def load(self) -> Dict[ResourceType, object]:
+        """
+        Load resource.
+
+        Returns:
+            Dict[ResourceType, object]: The resource type and resource object map.
+        """
+        raise NotImplementedError
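
Note: this ABC defines a one-method plugin contract for the new resources package: every loader returns a dict keyed by ResourceType, and the manager in the next file merges those dicts. A minimal sketch of a concrete loader follows; the class name and payload are hypothetical, while ResourceType.OP_PROTO is a member used by resource_manager.py below.

    # Hypothetical loader sketch; assumes this package's own imports:
    #   from .resource_loader import ResourceLoader
    #   from .resource_list import ResourceType
    from typing import Dict

    class InMemoryProtoLoader(ResourceLoader):
        """Serves a canned payload instead of reading files (illustration only)."""

        def load(self) -> Dict[ResourceType, object]:
            # Each loader returns {resource type: resource object}; the manager
            # merges these maps, so one loader may provide several types.
            return {ResourceType.OP_PROTO: {"add": "op proto placeholder"}}
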
mindspore/ops_generate/resources/resource_manager.py
ADDED
@@ -0,0 +1,64 @@
+# Copyright 2025 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Module managing resource."""
+
+from common.op_proto import OpProtoLoader, DeprecatedOpProtoLoader, FuncOpProtoLoader
+from api.op_api_proto import OpApiProtoLoader
+
+from .resource_loader import ResourceLoader
+from .resource_list import ResourceType
+from .yaml_loader import OpDocYamlLoader, TensorMethodDocYamlLoader, MintFuncDocYamlLoader
+
+
+class ResourceManager():
+    """
+    ResourceManager is a class for managing resources.
+    """
+
+    def __init__(self):
+        self.resource_map = {}
+
+    def register_resource(self, loader: ResourceLoader) -> None:
+        """
+        Register resource.
+        """
+        self.resource_map.update(loader.load())
+
+    def get_resource(self, type: ResourceType) -> object:
+        """
+        Get resource by type.
+        """
+        if type not in self.resource_map:
+            raise ValueError(f"Resource '{type.name}' not registered")
+        return self.resource_map[type]
+
+
+def prepare_resources() -> ResourceManager:
+    """
+    Load needed resources.
+    """
+    resource_mgr = ResourceManager()
+    resource_mgr.register_resource(OpProtoLoader())
+    resource_mgr.register_resource(DeprecatedOpProtoLoader())
+    resource_mgr.register_resource(FuncOpProtoLoader())
+    resource_mgr.register_resource(OpDocYamlLoader())
+    resource_mgr.register_resource(TensorMethodDocYamlLoader())
+    resource_mgr.register_resource(MintFuncDocYamlLoader())
+    resource_mgr.register_resource(OpApiProtoLoader(
+        resource_mgr.get_resource(ResourceType.OP_PROTO),
+        resource_mgr.get_resource(ResourceType.DEPRECATED_OP_PROTO),
+        resource_mgr.get_resource(ResourceType.FUNC_OP_PROTO)))
+    return resource_mgr
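
Note: prepare_resources() above is the composition root; the unconditional loaders run first, then OpApiProtoLoader is constructed from the three proto resources already registered. A sketch of how generator code would consume the manager (all names are from this diff; the YAML directories must exist under the build tree for the loaders below to pass their directory checks):

    # Sketch based on the code above; only runs inside the ops_generate tree.
    resource_mgr = prepare_resources()
    op_protos = resource_mgr.get_resource(ResourceType.OP_PROTO)
    op_docs = resource_mgr.get_resource(ResourceType.OP_DOC_YAML)
    # Asking for a type no loader produced raises:
    #   ValueError: Resource '<TYPE_NAME>' not registered
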
mindspore/ops_generate/resources/yaml_loader.py
ADDED
@@ -0,0 +1,88 @@
+# Copyright 2025 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Module loading ops yaml."""
+
+import os
+from typing import Sequence, Union
+
+from common.gen_utils import safe_load_yaml_from_dir
+import common.gen_constants as K
+
+from .resource_loader import ResourceLoader
+from .resource_list import ResourceType
+
+
+class YamlLoader(ResourceLoader):
+    """
+    YamlLoader is a utility class for loading yaml files.
+    """
+    def __init__(self, resouce_type: ResourceType, yaml_path: Union[Sequence[str], str]):
+        """
+        Initialize YamlLoader.
+
+        Args:
+            resouce_type (ResourceType): The type of the resource.
+            yaml_path (Union[Sequence[str], str]): The path to the yaml file or directory.
+        """
+        self.type = resouce_type
+        if isinstance(yaml_path, str):
+            self.yaml_path = [yaml_path]
+        else:
+            self.yaml_path = yaml_path
+
+    def load(self) -> dict:
+        """
+        Load yaml files.
+
+        Returns:
+            tuple[int, object]: The resource id and the yaml dict.
+        """
+        for yaml_path in self.yaml_path:
+            if not os.path.isdir(yaml_path):
+                raise ValueError(f"yaml path '{yaml_path}' not found")
+
+        yaml_dict = {}
+        for yaml_path in self.yaml_path:
+            yaml_dict.update(safe_load_yaml_from_dir(yaml_path))
+
+        return {self.type: yaml_dict}
+
+
+class OpDocYamlLoader(YamlLoader):
+    """
+    OpDocYamlLoader is a class for loading op primitive doc yaml files.
+    """
+    def __init__(self):
+        op_doc_yaml_path = os.path.join(K.WORK_DIR, K.MS_OP_DEF_YAML_PATH, "doc")
+        super().__init__(ResourceType.OP_DOC_YAML, op_doc_yaml_path)
+
+
+class TensorMethodDocYamlLoader(YamlLoader):
+    """
+    TensorMethodDocYamlLoader is a class for loading tensor method doc yaml files.
+    """
+    def __init__(self):
+        tensor_method_doc_yaml_path = os.path.join(K.WORK_DIR, K.MS_TENSOR_METHOD_DOC_YAML_PATH)
+        super().__init__(ResourceType.TENSOR_METHOD_DOC_YAML, tensor_method_doc_yaml_path)
+
+
+class MintFuncDocYamlLoader(YamlLoader):
+    """
+    MintFuncDocYamlLoader is a class for loading mint func doc yaml files.
+    """
+    def __init__(self):
+        mint_func_doc_yaml_path = os.path.join(K.WORK_DIR, K.MS_MINT_FUNC_DOC_YAML_PATH)
+        super().__init__(ResourceType.MINT_FUNC_DOC_YAML, mint_func_doc_yaml_path)
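
Note: each concrete loader above only pins a ResourceType and a directory; YamlLoader.load() first validates that every path is a directory, then merges all YAML files into a single dict. A sketch of adding another doc source under the same pattern; the class name and "docs/extra" path are hypothetical:

    # Hypothetical: another doc loader following the pattern above.
    class ExtraDocYamlLoader(YamlLoader):
        def __init__(self):
            # Made-up path; reuses OP_DOC_YAML, so its dict would merge with
            # (and can override) entries from OpDocYamlLoader.
            extra_doc_yaml_path = os.path.join(K.WORK_DIR, "docs/extra")
            super().__init__(ResourceType.OP_DOC_YAML, extra_doc_yaml_path)
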
mindspore/ops_generate/tensor_py_cc_generator.py
ADDED
@@ -0,0 +1,122 @@
+# Copyright 2025 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+Generates mindspore/ccsrc/pybind_api/ir/tensor_py.cc which includes the CPython Tensor APIs.
+"""
+
+import os
+import common.gen_constants as K
+from common.gen_utils import save_file
+import common.template as template
+from common.template import Template
+from common.base_generator import BaseGenerator
+from pyboost import pyboost_utils
+
+class TensorPyCppGenerator(BaseGenerator):
+    """
+    This class is responsible for generating mindspore/ccsrc/pybind_api/ir/tensor_register/
+    auto_generate/tensor_py_gen.cc
+    """
+    def __init__(self):
+        self.TENSOR_PY_CC_TEMPLATE = template.TENSOR_PY_CC_TEMPLATE
+        self.TENSOR_PY_H_TEMPLATE = template.TENSOR_PY_H_TEMPLATE
+        self.cpy_wrapper_template = Template(" DEFINE_TENSOR_METHOD_CPYWRAPPER(${pascal_api_name}) \\")
+        self.tensor_api_def_template = Template(
+            '{"${snake_api_name}"'
+            ', (PyCFunction)TensorMethod${pascal_api_name}_CPyWrapper, METH_VARARGS | METH_KEYWORDS},'
+        )
+        self.stubtensor_api_def_template = Template(
+            'py::cpp_function TensorMethod${snake_api_name}_wrapper(\n'
+            '    [](const py::object& self, const py::args& args, const py::kwargs& kwargs) {\n'
+            '        return TensorMethod${pascal_api_name}(self, args, kwargs);\n'
+            '    },\n'
+            '    py::is_method(stubTensorClass)\n'
+            ');\n'
+            'stubTensorClass.attr("${snake_api_name}") = TensorMethod${snake_api_name}_wrapper;'
+        )
+
+    def generate(self, work_path, tensor_method_protos, alias_func_mapping):
+        """
+        Generates the content for the helper file and saves it to the specified path.
+
+        Args:
+            work_path (str): The directory where the generated file will be saved.
+            tensor_method_protos (dict): A dict mapping from Tensor func API names to their proto lists.
+            alias_func_mapping (dict): A dictionary mapping function name to its alias function names.
+
+        Returns:
+            None
+        """
+        wrapper_defs = []
+        tensor_api_defs = []
+        stubtensor_api_defs = []
+        for api_name, _ in tensor_method_protos.items():
+            pascal_api_name = pyboost_utils.format_func_api_name(api_name)
+            snake_api_name = api_name
+            wrapper_defs.append(self.cpy_wrapper_template.replace(pascal_api_name=pascal_api_name))
+            tensor_api_defs.append(
+                self.tensor_api_def_template.replace(
+                    snake_api_name=snake_api_name,
+                    pascal_api_name=pascal_api_name
+                )
+            )
+            stubtensor_api_defs.append(
+                self.stubtensor_api_def_template.replace(
+                    snake_api_name=snake_api_name,
+                    pascal_api_name=pascal_api_name
+                )
+            )
+            if api_name in alias_func_mapping:
+                alias_api_names = alias_func_mapping[api_name]
+                for alias_api_name in alias_api_names:
+                    snake_api_name = alias_api_name
+                    tensor_api_defs.append(
+                        self.tensor_api_def_template.replace(
+                            snake_api_name=snake_api_name,
+                            pascal_api_name=pascal_api_name
+                        )
+                    )
+                    stubtensor_api_defs.append(
+                        self.stubtensor_api_def_template.replace(
+                            snake_api_name=snake_api_name,
+                            pascal_api_name=pascal_api_name
+                        )
+                    )
+
+        # delete the ' \' for the last wrapper macro definition
+        wrapper_defs[-1] = wrapper_defs[-1][:-2]
+
+        file_str = self.TENSOR_PY_CC_TEMPLATE.replace(
+            tensor_api_defs=tensor_api_defs,
+            stubtensor_api_defs=stubtensor_api_defs
+        )
+        save_file(
+            os.path.join(work_path, K.TENSOR_PY_CC_PATH),
+            "tensor_py_gen.cc",
+            file_str
+        )
+
+        file_str = self.TENSOR_PY_H_TEMPLATE.replace(CPyWrapper_defs=wrapper_defs)
+        save_file(
+            os.path.join(work_path, K.TENSOR_PY_CC_PATH),
+            "tensor_py_gen.h",
+            file_str
+        )
+
+def _format_api_name(api_name):
+    has_suffix = api_name.endswith("_")
+    parts = api_name.strip("_").split("_")
+    formatted_api_name = "".join(part.capitalize() for part in parts)
+    return formatted_api_name + '_' if has_suffix else formatted_api_name
mindspore/parallel/__init__.py
CHANGED
@@ -19,7 +19,9 @@ from mindspore.parallel.algo_parameter_config import get_algo_parameters, reset_
     set_algo_parameters
 from mindspore.parallel.checkpoint_transform import rank_list_for_transform, transform_checkpoint_by_rank, \
     transform_checkpoints, merge_pipeline_strategys, sync_pipeline_shared_parameters, \
-    load_segmented_checkpoints, set_op_strategy_config
+    load_segmented_checkpoints, set_op_strategy_config, load_distributed_checkpoint, \
+    merge_sliced_parameter, restore_group_info_list, build_searched_strategy
+from mindspore.parallel.checkpoint_convert import rank_list_for_convert, convert_checkpoint_by_rank, convert_checkpoints
 from mindspore.parallel.parameter_broadcast import parameter_broadcast
 from mindspore.parallel.shard import shard, Layout
 from mindspore.parallel.transform_safetensors import unified_safetensors
@@ -27,4 +29,6 @@ from mindspore.parallel.transform_safetensors import unified_safetensors
 __all__ = ["set_algo_parameters", "reset_algo_parameters", "get_algo_parameters", "rank_list_for_transform",
            "transform_checkpoint_by_rank", "transform_checkpoints", "merge_pipeline_strategys", "shard",
            "sync_pipeline_shared_parameters", "Layout", "parameter_broadcast", "load_segmented_checkpoints",
-           "unified_safetensors", "
+           "unified_safetensors", "load_distributed_checkpoint", "merge_sliced_parameter", "restore_group_info_list",
+           "build_searched_strategy", "set_op_strategy_config", "rank_list_for_convert",
+           "convert_checkpoint_by_rank", "convert_checkpoints"]
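
Note: the net effect of this hunk is that the distributed-checkpoint utilities become importable straight from mindspore.parallel in 2.6.0rc1. A quick check of the new surface, with every name taken from the __all__ additions above:

    # New public imports added by this diff:
    from mindspore.parallel import (
        load_distributed_checkpoint, merge_sliced_parameter,
        restore_group_info_list, build_searched_strategy,
        rank_list_for_convert, convert_checkpoint_by_rank, convert_checkpoints,
    )
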
mindspore/parallel/_auto_parallel_context.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright 2020-
+# Copyright 2020-2025 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@ from mindspore import context
 import mindspore.log as logger
 from mindspore.parallel._dp_allreduce_fusion import _set_fusion_strategy_by_idx, _set_fusion_strategy_by_size
 from mindspore.parallel._ps_context import _is_role_pserver
+from mindspore.parallel.shard import Layout
 from mindspore._c_expression import AutoParallelContext
 from mindspore._checkparam import args_type_check
 from mindspore import _checkparam as Validator
@@ -63,6 +64,7 @@ class _ParallelOptimizerConfig:
     GRADIENT_ACCUMULATION_SHARD = "gradient_accumulation_shard"
     PARALLEL_OPTIMIZER_THRESHOLD = "parallel_optimizer_threshold"
     OPTIMIZER_WEIGHT_SHARD_SIZE = "optimizer_weight_shard_size"
+    OPTIMIZER_LEVEL = "optimizer_level"
 
 
 class _PipelineConfig:
@@ -77,6 +79,8 @@ class _PipelineScheduler:
     PIPELINE_1F1B = "1f1b"
     PIPELINE_GPIPE = "gpipe"
     PIPELINE_SEQPIPE = "seqpipe"
+    PIPELINE_SEQVPP = "seqvpp"
+    PIPELINE_SEQSMARTVPP = "seqsmartvpp"
 
 
 class _AutoParallelContext:
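
Note: the two new constants extend the accepted pipeline_scheduler strings, validated in the set_pipeline_config changes further down in this diff. A hedged sketch of selecting one of the new schedulers, assuming the existing pipeline_config entry point of set_auto_parallel_context:

    import mindspore as ms

    # Hypothetical usage: "seqvpp" / "seqsmartvpp" are the new scheduler
    # strings; pipeline_interleave must be True for any non-"1f1b" scheduler
    # (see the ValueError in the validation hunk below).
    ms.set_auto_parallel_context(
        pipeline_config={"pipeline_interleave": True,
                         "pipeline_scheduler": "seqvpp"})
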
@@ -100,6 +104,7 @@ class _AutoParallelContext:
     def __init__(self):
         self._context_handle = AutoParallelContext.get_instance()
         self._dataset_strategy_using_str = True
+        self._dataset_layout = None
 
     def check_context_handle(self):
         """
@@ -441,6 +446,9 @@ class _AutoParallelContext:
             raise ValueError("The context configuration parameter 'parallel_mode' only support 'stand_alone', "
                              "'data_parallel', 'hybrid_parallel', 'semi_auto_parallel' and 'auto_parallel', "
                              "but got the value : {}.".format(parallel_mode))
+        if run_mode == context.ParallelMode.DATA_PARALLEL and self.get_enable_parallel_optimizer():
+            logger.warning("'enable_parallel_optimizer' is not suggested in 'data_parallel' mode, "
+                           "consider using 'semi_auto_parallel' or 'auto_parallel' mode.")
 
     def get_parallel_mode(self):
         """Get parallel mode."""
@@ -585,6 +593,9 @@ class _AutoParallelContext:
         if not isinstance(dataset_strategy, tuple):
             raise TypeError("For 'set_auto_parallel_context', the argument 'dataset_strategy' "
                             "must be str or tuple type, but got the type : {}.".format(type(dataset_strategy)))
+        if dataset_strategy and isinstance(dataset_strategy[0], Layout):
+            self._set_dataset_strategy_layout(dataset_strategy)
+            return
         for ele in dataset_strategy:
             if not isinstance(ele, tuple):
                 raise TypeError("For 'set_auto_parallel_context', the element of argument "
@@ -599,8 +610,36 @@ class _AutoParallelContext:
         self._dataset_strategy_using_str = False
         self._context_handle.set_dataset_strategy(dataset_strategy)
 
+    def _set_dataset_strategy_layout(self, dataset_strategy):
+        """set dataset layout to c++ by using pybind."""
+        dataset_devmat = []
+        dataset_tensormap = []
+        dataset_alias_name = []
+        self._dataset_layout = dataset_strategy
+        for ele in dataset_strategy:
+            if not isinstance(ele, Layout):
+                raise TypeError(f"All the dataset_strategy elements should be Layout, but got {type(ele)}")
+            layout_to_dict = ele.to_dict()
+            dataset_devmat.append(layout_to_dict["device_matrix"])
+            dataset_alias_name.append(layout_to_dict["alias_name"])
+            if layout_to_dict["interleaved_parallel"]:
+                raise ValueError("For dataset_strategy, layout does not support interleaved_parallel")
+            tensor_map = []
+            for value in layout_to_dict["tensor_map"]:
+                if isinstance(value, tuple):
+                    tensor_map.append(value)
+                elif isinstance(value, int):
+                    tensor_map.append((value,))
+                else:
+                    raise TypeError(f"value in tensor map must be tuple or int, but got {type(value)}")
+            dataset_tensormap.append(tuple(tensor_map))
+        self._context_handle.set_dataset_layout(dataset_devmat, dataset_tensormap, dataset_alias_name)
+
+
     def get_dataset_strategy(self):
         """Get dataset sharding strategy."""
+        if self._dataset_layout is not None:
+            return self._dataset_layout
         self.check_context_handle()
         if self._dataset_strategy_using_str:
             if self._context_handle.get_full_batch():
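
Note: these hunks let dataset_strategy accept Layout objects directly: the tuple is cached on the Python side and its device matrices, tensor maps, and alias names are pushed to C++ through set_dataset_layout. A hedged, illustrative sketch; the 2x4 device matrix and axis names are made up, and interleaved layouts are rejected per the ValueError above:

    import mindspore as ms
    from mindspore.parallel import Layout

    # Illustrative only: an 8-device grid split along ("dp", "mp"); each
    # dataset input gets one per-tensor layout.
    layout = Layout((2, 4), ("dp", "mp"))
    ms.set_auto_parallel_context(dataset_strategy=(layout("dp", "mp"),))
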
@@ -888,6 +927,9 @@ class _AutoParallelContext:
                             "the argument 'enable_parallel_optimizer' must be bool, but got the type : {}."
                             .format(type(enable_parallel_optimizer)))
         self._context_handle.set_enable_parallel_optimizer(enable_parallel_optimizer)
+        if enable_parallel_optimizer and self.get_parallel_mode() == context.ParallelMode.DATA_PARALLEL:
+            logger.warning("'enable_parallel_optimizer' is not suggested in 'data_parallel' mode, "
+                           "consider using 'semi_auto_parallel' or 'auto_parallel' mode.")
 
     def set_force_fp32_communication(self, force_fp32_communication):
         """
@@ -918,7 +960,7 @@ class _AutoParallelContext:
 
         - pipeline_interleave(bool): Setting true enable interleave scheduler for pipeline parallelism. This
           scheduler requires more memory but less bubble.
-        - pipeline_scheduler(
+        - pipeline_scheduler(str): There are two choices, "1f1b" and "gpipe". default is "1f1b"
 
         - 1f1b: It requires less memory and bubble ratio, for it run backward pass when corresponding forward pass
           finished.
@@ -954,7 +996,9 @@ class _AutoParallelContext:
 
         Validator.check_string(pipeline_config[pp_scheduler], [_PipelineScheduler.PIPELINE_1F1B,
                                                                _PipelineScheduler.PIPELINE_GPIPE,
-                                                               _PipelineScheduler.PIPELINE_SEQPIPE
+                                                               _PipelineScheduler.PIPELINE_SEQPIPE,
+                                                               _PipelineScheduler.PIPELINE_SEQVPP,
+                                                               _PipelineScheduler.PIPELINE_SEQSMARTVPP])
         if not pipeline_config[pp_interleave] and pipeline_config[pp_scheduler] != _PipelineScheduler.PIPELINE_1F1B:
             raise ValueError(f"When pipeline_interleave is False, {pp_scheduler} is not supported")
 
@@ -1003,10 +1047,12 @@ class _AutoParallelContext:
         grad_shard_name = _ParallelOptimizerConfig.GRADIENT_ACCUMULATION_SHARD
         threshold_name = _ParallelOptimizerConfig.PARALLEL_OPTIMIZER_THRESHOLD
         optimizer_weight_shard_size_name = _ParallelOptimizerConfig.OPTIMIZER_WEIGHT_SHARD_SIZE
+        optimizer_level_name = _ParallelOptimizerConfig.OPTIMIZER_LEVEL
 
         for config_name in parallel_optimizer_config:
             unknown_config = []
-            if config_name not in [grad_shard_name, threshold_name, optimizer_weight_shard_size_name
+            if config_name not in [grad_shard_name, threshold_name, optimizer_weight_shard_size_name,
+                                   optimizer_level_name]:
                 unknown_config.append(config_name)
 
             if unknown_config:
@@ -1017,6 +1063,10 @@ class _AutoParallelContext:
                 parallel_optimizer_config[grad_shard_name], grad_shard_name, grad_shard_name)
             self._context_handle.set_grad_accumulation_shard(
                 parallel_optimizer_config[grad_shard_name])
+            if optimizer_level_name in parallel_optimizer_config \
+                    and parallel_optimizer_config[optimizer_level_name] != "level2":
+                raise ValueError(f"The optimizer_level is set as {parallel_optimizer_config[optimizer_level_name]}, "
+                                 "thus cannot set grad_accumulation_shard as True.")
 
         if threshold_name in parallel_optimizer_config:
             Validator.check_non_negative_int(
@@ -1029,6 +1079,20 @@ class _AutoParallelContext:
             Validator.check_positive_int(value)
             self.set_optimizer_weight_shard_size(value)
 
+        if optimizer_level_name in parallel_optimizer_config:
+            optimizer_level = parallel_optimizer_config[optimizer_level_name]
+            if optimizer_level not in ["level1", "level2", "level3"]:
+                raise ValueError("Optimizer level should in ['level1', 'level2', 'level3'], but got {}"
+                                 .format(optimizer_level))
+
+            if self._context_handle.get_grad_accumulation_shard() and optimizer_level != "level2":
+                raise ValueError("The grad_accumulation shard is set, thus cannot set optimizer_level != 'level2'")
+            if optimizer_level == "level2":
+                self._context_handle.set_grad_accumulation_shard(True)
+            if optimizer_level == "level3":
+                self._context_handle.set_zero3(True)
+                self._context_handle.set_grad_accumulation_shard(False)
+
     def get_grad_accumulation_shard(self):
         """Get grad accumulation shard."""
         self.check_context_handle()
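
Note: optimizer_level is the new parallel_optimizer_config key. Per the validation above, only "level1", "level2", or "level3" are accepted; "level2" implies gradient-accumulation sharding, "level3" enables zero3 and disables that sharding, and combining an explicit gradient_accumulation_shard with any level other than "level2" raises. A hedged usage sketch through the existing set_auto_parallel_context entry point:

    import mindspore as ms

    # Hypothetical usage of the new key; "level2" turns
    # grad_accumulation_shard on implicitly.
    ms.set_auto_parallel_context(
        enable_parallel_optimizer=True,
        parallel_optimizer_config={"optimizer_level": "level2"})
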
@@ -1136,6 +1200,7 @@ class _AutoParallelContext:
         self.check_context_handle()
         self._context_handle.reset()
         _ParallelFusionConfig.reset()
+        self._dataset_layout = None
 
     def _check_and_default_group(self, group):
         """Validate the given group, if group is empty, returns a default fusion group"""
@@ -1245,6 +1310,36 @@ class _AutoParallelContext:
         self.set_enable_all_gather_fusion(openstate)
         self.set_enable_reduce_scatter_fusion(openstate)
 
+    def set_auto_parallel_new_interface(self, auto_parallel_new_interface):
+        """
+        Set AutoParallel(cell) new interface flag.
+
+        Args:
+            auto_parallel_new_interface (bool): Mark whether to use the new interface.
+        """
+        self.check_context_handle()
+        self._context_handle.set_auto_parallel_new_interface(auto_parallel_new_interface)
+
+    def get_auto_parallel_new_interface(self):
+        """Get auto_parallel_new_interface."""
+        self.check_context_handle()
+        return self._context_handle.get_auto_parallel_new_interface()
+
+    def set_init_param_in_compile(self, init_param_in_compile):
+        """
+        Set flag marking whether to init parameters in compiling process.
+
+        Args:
+            init_param_in_compile (bool): Mark whether to init parameters in compiling process.
+        """
+        self.check_context_handle()
+        self._context_handle.set_init_param_in_compile(init_param_in_compile)
+
+    def get_init_param_in_compile(self):
+        """Get init_param_in_compile."""
+        self.check_context_handle()
+        return self._context_handle.get_init_param_in_compile()
+
 _AUTO_PARALLEL_CONTEXT = None
 
 
@@ -1295,7 +1390,10 @@ _set_auto_parallel_context_func_map = {
     "comm_fusion": auto_parallel_context().set_comm_fusion,
     "dump_local_norm": auto_parallel_context().set_dump_local_norm,
     "dump_local_norm_path": auto_parallel_context().set_dump_local_norm_path,
-    "dump_device_local_norm": auto_parallel_context().set_dump_device_local_norm
+    "dump_device_local_norm": auto_parallel_context().set_dump_device_local_norm,
+    "auto_parallel_new_interface": auto_parallel_context().set_auto_parallel_new_interface,
+    "init_param_in_compile": auto_parallel_context().set_init_param_in_compile}
+
 
 _get_auto_parallel_context_func_map = {
     "device_num": auto_parallel_context().get_device_num,
@@ -1330,7 +1428,9 @@ _get_auto_parallel_context_func_map = {
     "full_batch_is_set": auto_parallel_context().get_full_batch_is_set,
     "dump_local_norm": auto_parallel_context().get_dump_local_norm,
     "dump_local_norm_path": auto_parallel_context().get_dump_local_norm_path,
-    "dump_device_local_norm": auto_parallel_context().get_dump_device_local_norm
+    "dump_device_local_norm": auto_parallel_context().get_dump_device_local_norm,
+    "auto_parallel_new_interface": auto_parallel_context().get_auto_parallel_new_interface,
+    "init_param_in_compile": auto_parallel_context().get_init_param_in_compile}
 
 
 @args_type_check(device_num=int, global_rank=int, gradients_mean=bool, gradient_fp32_sync=bool,
@@ -1472,6 +1572,33 @@ def _get_auto_parallel_context(attr_key):
     return get_func()
 
 
+def _get_all_auto_parallel_context():
+    """get auto parallel context before reset"""
+    _auto_paralell_context_value_map = {}
+    _pipeline_config = {}
+    for key, value in _get_auto_parallel_context_func_map.items():
+        if key == "pipeline_interleave":
+            _pipeline_config[key] = value()
+        elif key == "pipeline_scheduler":
+            _pipeline_config[key] = value()
+        else:
+            _auto_paralell_context_value_map[key] = value()
+    return _auto_paralell_context_value_map, _pipeline_config
+
+
+def _recover_auto_parallel_context(context_value_map, pp_config):
+    """set auto parallel context after transformation"""
+    # set the same auto parallel context after transform
+    from mindspore.context import reset_auto_parallel_context
+    reset_auto_parallel_context()
+    for key, value in context_value_map.items():
+        # list is empty or full_batch_is_set is not needed to set
+        if (isinstance(value, list) and not value) or (key == "full_batch_is_set"):
+            continue
+        _set_auto_parallel_context_func_map[key](value)
+    _set_auto_parallel_context_func_map["pipeline_config"](pp_config)
+
+
 def _reset_auto_parallel_context():
     """
     Reset auto parallel context attributes to the default values: