mindspore-2.5.0-cp310-cp310-win_amd64.whl → mindspore-2.6.0-cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +6 -4
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -33
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +19 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +25 -194
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +109 -75
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +4 -4
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +4 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -1
- mindspore/common/_stub_tensor.py +5 -10
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +2014 -3386
- mindspore/common/api.py +386 -355
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +5 -2
- mindspore/common/dump.py +7 -5
- mindspore/common/file_system.py +3 -0
- mindspore/common/generator.py +3 -0
- mindspore/common/hook_handle.py +5 -3
- mindspore/common/initializer.py +10 -6
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +2 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +106 -39
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +332 -714
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +47 -2
- mindspore/communication/comm_func.py +70 -53
- mindspore/communication/management.py +83 -17
- mindspore/context.py +228 -571
- mindspore/dataset/__init__.py +44 -20
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +102 -120
- mindspore/dataset/engine/datasets_audio.py +22 -22
- mindspore/dataset/engine/datasets_standard_format.py +43 -24
- mindspore/dataset/engine/datasets_text.py +78 -85
- mindspore/dataset/engine/datasets_user_defined.py +109 -77
- mindspore/dataset/engine/datasets_vision.py +111 -108
- mindspore/dataset/engine/iterators.py +5 -3
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/samplers.py +279 -57
- mindspore/dataset/engine/serializer_deserializer.py +2 -1
- mindspore/dataset/engine/validators.py +10 -0
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/device_context/ascend/op_debug.py +60 -1
- mindspore/device_context/ascend/op_tuning.py +0 -4
- mindspore/device_manager.py +39 -3
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -2
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +22 -26
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +4 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +40 -22
- mindspore/experimental/optim/radam.py +5 -5
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -81
- mindspore/hal/event.py +38 -55
- mindspore/hal/memory.py +115 -147
- mindspore/hal/stream.py +81 -125
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +40 -2
- mindspore/mindrecord/__init__.py +20 -7
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +133 -702
- mindspore/mint/distributed/__init__.py +5 -1
- mindspore/mint/distributed/distributed.py +198 -113
- mindspore/mint/linalg/__init__.py +2 -0
- mindspore/mint/nn/__init__.py +280 -18
- mindspore/mint/nn/functional.py +282 -64
- mindspore/mint/nn/layer/__init__.py +4 -0
- mindspore/mint/nn/layer/_functions.py +7 -3
- mindspore/mint/nn/layer/activation.py +120 -13
- mindspore/mint/nn/layer/conv.py +234 -28
- mindspore/mint/nn/layer/normalization.py +15 -16
- mindspore/mint/nn/layer/padding.py +1 -1
- mindspore/mint/nn/layer/pooling.py +66 -1
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1253 -179
- mindspore/nn/layer/activation.py +23 -21
- mindspore/nn/layer/basic.py +22 -16
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +53 -42
- mindspore/nn/layer/embedding.py +9 -8
- mindspore/nn/layer/normalization.py +48 -42
- mindspore/nn/layer/pooling.py +75 -31
- mindspore/nn/layer/transformer.py +11 -10
- mindspore/nn/learning_rate_schedule.py +4 -2
- mindspore/nn/loss/loss.py +27 -19
- mindspore/nn/optim/ada_grad.py +6 -5
- mindspore/nn/optim/adadelta.py +9 -7
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +18 -14
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +3 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +9 -7
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +178 -117
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +3 -3
- mindspore/numpy/array_creations.py +3 -3
- mindspore/numpy/array_ops.py +1 -1
- mindspore/numpy/utils.py +1 -2
- mindspore/numpy/utils_const.py +1 -2
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +3 -2
- mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
- mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
- mindspore/ops/_vmap/vmap_array_ops.py +32 -6
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
- mindspore/ops/_vmap/vmap_math_ops.py +4 -7
- mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +127 -52
- mindspore/ops/auto_generate/gen_extend_func.py +286 -208
- mindspore/ops/auto_generate/gen_ops_def.py +2783 -2335
- mindspore/ops/auto_generate/gen_ops_prim.py +8992 -2686
- mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +19 -24
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +4 -5
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +28 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +1631 -2347
- mindspore/ops/function/clip_func.py +38 -45
- mindspore/ops/function/debug_func.py +36 -44
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +46 -78
- mindspore/ops/function/math_func.py +3024 -3855
- mindspore/ops/function/nn_func.py +678 -274
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +17 -30
- mindspore/ops/function/random_func.py +216 -361
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +5 -5
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +26 -18
- mindspore/ops/functional.py +8 -5
- mindspore/ops/functional_overload.py +655 -4
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +21 -14
- mindspore/ops/operations/_custom_ops_utils.py +235 -0
- mindspore/ops/operations/_grad_ops.py +1 -10
- mindspore/ops/operations/_inner_ops.py +5 -76
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +39 -24
- mindspore/ops/operations/comm_ops.py +150 -107
- mindspore/ops/operations/custom_ops.py +287 -32
- mindspore/ops/operations/debug_ops.py +119 -16
- mindspore/ops/operations/inner_ops.py +1 -1
- mindspore/ops/operations/linalg_ops.py +1 -58
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +746 -79
- mindspore/ops/operations/math_ops.py +21 -18
- mindspore/ops/operations/nn_ops.py +67 -224
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +43 -32
- mindspore/ops/tensor_method.py +243 -17
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
- mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
- mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
- mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
- mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
- mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
- mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
- mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
- mindspore/ops_generate/{template.py → common/template.py} +96 -84
- mindspore/ops_generate/gen_ops.py +23 -325
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
- mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -10
- mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
- mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
- mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
- mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
- mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
- mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
- mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
- mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
- mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
- mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
- mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
- mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
- mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
- mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +6 -2
- mindspore/parallel/_auto_parallel_context.py +140 -12
- mindspore/parallel/_cell_wrapper.py +132 -15
- mindspore/parallel/_parallel_serialization.py +95 -4
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +198 -25
- mindspore/parallel/algo_parameter_config.py +3 -3
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +658 -37
- mindspore/parallel/cluster/process_entity/_api.py +151 -19
- mindspore/parallel/cluster/run.py +1 -1
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +258 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +24 -13
- mindspore/parallel/shard.py +137 -62
- mindspore/parallel/transform_safetensors.py +288 -95
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +9 -5
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
- mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +25 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
- mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/common/constant.py +12 -0
- mindspore/profiler/common/msprof_cmd_tool.py +42 -23
- mindspore/profiler/common/path_manager.py +24 -0
- mindspore/profiler/common/profiler_context.py +26 -2
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_parameters.py +59 -18
- mindspore/profiler/common/profiler_path_manager.py +66 -7
- mindspore/profiler/dynamic_profiler.py +112 -79
- mindspore/profiler/envprofiler.py +26 -1
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +57 -14
- mindspore/profiler/platform/npu_profiler.py +33 -7
- mindspore/profiler/profiler.py +541 -45
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +4 -0
- mindspore/profiler/schedule.py +57 -22
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +25 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +2 -2
- mindspore/runtime/executor.py +40 -11
- mindspore/runtime/memory.py +37 -13
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +43 -9
- mindspore/train/amp.py +1 -1
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +24 -40
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_flops_collector.py +2 -3
- mindspore/train/callback/_history.py +7 -4
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +8 -13
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -105
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +4 -5
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +8 -6
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +19 -12
- mindspore/train/model.py +262 -127
- mindspore/train/serialization.py +246 -988
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +4 -2
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/runtime_execution_order_check.py +2 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/METADATA +2 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/RECORD +485 -440
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_constants.py +0 -190
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
- /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
mindspore/communication/__init__.py

@@ -20,18 +20,20 @@ Note that the APIs in the following list need to preset communication environmen
 For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
 without any third-party or configuration file dependencies.
 Please see the `msrun start up
-<https://www.mindspore.cn/
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 """
 
 from mindspore.communication.management import GlobalComm, init, release, get_rank, \
     get_group_size, get_world_rank_from_group_rank, \
-    get_group_rank_from_world_rank, create_group,
-
+    get_group_rank_from_world_rank, create_group, get_comm_name, \
+    HCCL_WORLD_COMM_GROUP, NCCL_WORLD_COMM_GROUP, MCCL_WORLD_COMM_GROUP, get_local_rank, \
+    get_local_rank_size, destroy_group, get_process_group_ranks
 
 
 __all__ = [
     "GlobalComm", "init", "release", "get_rank", "get_group_size", "get_world_rank_from_group_rank",
-    "get_group_rank_from_world_rank", "create_group", "
-    "
+    "get_group_rank_from_world_rank", "create_group", "get_comm_name",
+    "HCCL_WORLD_COMM_GROUP", "NCCL_WORLD_COMM_GROUP", "MCCL_WORLD_COMM_GROUP", "get_local_rank",
+    "get_local_rank_size", "destroy_group", "get_process_group_ranks"
 ]
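Note: the hunk above widens the public API of mindspore.communication: get_comm_name, the HCCL/NCCL/MCCL world-group constants, get_local_rank, get_local_rank_size, destroy_group, and get_process_group_ranks are now re-exported at package level. A minimal usage sketch, assuming a multi-device job launched with msrun (the no-argument get_comm_name call assumes it defaults to the world group, which this hunk does not show):

from mindspore import communication as comm

comm.init()                               # set up the collective backend (HCCL/NCCL/MCCL)
print(comm.get_rank())                    # global rank in the world group
print(comm.get_local_rank())              # rank within the local node
print(comm.get_local_rank_size())         # number of devices on the local node
print(comm.get_process_group_ranks())     # world ranks making up the default group
print(comm.get_comm_name())               # inner communicator name of the default group
comm.release()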
mindspore/communication/_comm_helper.py

@@ -178,8 +178,17 @@ def check_parameter_available(func):
     Wrapper. If not available, raise Error.
     """
     def wrapper(*args, **kargs):
-
-
+        # This function list indicates these functions will return 0 or 1 value in standalone mode or
+        # not calling 'init' method.
+        standalone_bypass_check_func_list = [
+            "_get_rank_helper",
+            "_get_local_rank_helper",
+            "_get_size_helper",
+            "_get_local_size_helper"
+        ]
+        if not GlobalComm.INITED and func.__name__ not in standalone_bypass_check_func_list:
+            raise RuntimeError(f"Distributed Communication has not been inited."
+                               f"You can't invoke this interface yet. Please call `init()` method first.")
         group = None
         if "group" in kargs.keys():
             group = kargs.get("group")

@@ -264,6 +273,11 @@ def _get_rank_helper(group):
     if _check_bypass_rank_id_and_size():
         rank_id = 0
         return rank_id
+    if not GlobalComm.INITED:
+        # If 'RANK_ID' is not set, return 0 as default value.
+        logger.info(f"You are invoking this interface without calling `init` method."
+                    "Return 'RANK_ID' env value instead. If 'RANK_ID' is not set, return 0 as default value.")
+        return int(os.getenv("RANK_ID", "0"))
     if _hccl_test():
         return hccl.get_rank_id(group)
     rank_id = CollectiveManager.get_instance().get_rank_id(group)

@@ -288,6 +302,11 @@ def _get_local_rank_helper(group):
     if _check_bypass_rank_id_and_size():
         local_rank_id = 0
         return local_rank_id
+    if not GlobalComm.INITED:
+        # If 'LOCAL_RANK' env is not set, return 0 as default value.
+        logger.info(f"You are invoking this interface without calling `init` method."
+                    "Return 'LOCAL_RANK' env value instead. If 'LOCAL_RANK' is not set, return 0 as default value.")
+        return int(os.getenv("LOCAL_RANK", "0"))
     if _hccl_test():
         return hccl.get_local_rank_id(group)
     rank_id = CollectiveManager.get_instance().get_local_rank_id(group)

@@ -312,6 +331,11 @@ def _get_size_helper(group):
     if _check_bypass_rank_id_and_size():
         size = 1
         return size
+    if not GlobalComm.INITED:
+        # If 'LOCAL_RANK' env is not set, return 0 as default value.
+        logger.info(f"You are invoking this interface without calling `init` method."
+                    "Return 'RANK_SIZE' env value instead. If 'RANK_SIZE' is not set, return 1 as default value.")
+        return int(os.getenv("RANK_SIZE", "1"))
     if _hccl_test():
         return hccl.get_rank_size(group)
     size = CollectiveManager.get_instance().get_group_size(group)

@@ -333,6 +357,15 @@ def _get_local_size_helper(group):
     Returns:
         Integer. The local rank size where the calling process is being within specified group.
     """
+    if _check_bypass_rank_id_and_size():
+        size = 1
+        return size
+    if not GlobalComm.INITED:
+        # If 'LOCAL_RANK_SIZE' env is not set, return 0 as default value.
+        logger.info(f"You are invoking this interface without calling `init` method."
+                    "Return 'LOCAL_RANK_SIZE' env value instead. If 'LOCAL_RANK_SIZE' is not set,"
+                    "return 1 as default value.")
+        return int(os.getenv("LOCAL_RANK_SIZE", "1"))
     size = CollectiveManager.get_instance().get_local_group_size(group)
     return size
 
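Note: the four helpers above share one new standalone fallback: when GlobalComm.INITED is false they are exempted from the check_parameter_available guard and read a launcher-set environment variable instead of raising. A sketch of the resulting single-process behavior, assuming the public get_rank/get_group_size/get_local_rank wrappers route to these helpers as their names suggest:

import os
from mindspore import communication as comm

# No comm.init() here: the helpers fall back to environment variables.
os.environ["RANK_ID"] = "3"        # consumed by _get_rank_helper (default "0")
os.environ["RANK_SIZE"] = "8"      # consumed by _get_size_helper (default "1")
print(comm.get_rank())             # expected: 3
print(comm.get_group_size())       # expected: 8
print(comm.get_local_rank())       # expected: 0, since LOCAL_RANK is unset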
@@ -501,6 +534,18 @@ def _destroy_group_helper(group):
     CollectiveManager.get_instance().destroy_group(group)
 
 
+@check_parameter_available
+def _get_comm_name_helper(group):
+    """
+    The Helper to get inner_comm_name.
+
+    Args:
+        group (str): The user communication group.
+
+    """
+    return CollectiveManager.get_instance().get_comm_name(group)
+
+
 def _get_group_map():
     """Get the group map"""
     return CollectiveManager.get_instance().get_group_map()
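Note: _get_comm_name_helper is the private backing for the get_comm_name export added in the __init__.py hunk; it keeps the check_parameter_available decorator and is not on the bypass list, so it still requires init(). A hedged sketch of the presumed public call path:

from mindspore.communication import init, get_comm_name, GlobalComm

init()
# CollectiveManager resolves the user group to its inner communicator name
# (e.g. the HCCL comm name on Ascend); passing the world group explicitly is
# an assumption about the public wrapper's signature.
print(get_comm_name(GlobalComm.WORLD_COMM_GROUP))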
mindspore/communication/comm_func.py

@@ -20,7 +20,7 @@ from mindspore.communication import GlobalComm, get_group_rank_from_world_rank,
 from mindspore.communication.management import _get_group
 from mindspore.communication._comm_helper import _get_group_rank_from_world_rank_from_cache_helper
 from mindspore.common.tensor import Tensor
-from mindspore._c_expression import
+from mindspore._c_expression import TensorPy as Tensor_
 from mindspore.ops import ReduceOp, cat
 from mindspore.ops._primitive_cache import _get_cache_prim
 from mindspore.ops.primitive import _primexpr

@@ -30,7 +30,7 @@ from mindspore.ops.auto_generate.gen_ops_prim import (inner_comm_all_reduce_op,
 from mindspore._c_expression import CommHandle as CommHandle_
 from mindspore._c_expression.typing import Type
 from mindspore import jit_class
-
+import mindspore as ms
 
 __all__ = [
     'all_reduce',

@@ -63,6 +63,12 @@ class CommHandle(CommHandle_):
     handles will be created using Python.
     """
 
+    def __init__(self, handle=None, exec_sync=False):
+        super(CommHandle, self).__init__()
+        self.handle = handle
+        self.exec_sync = exec_sync
+
+
     def wait(self):
         r"""
         The wait for asynchronous handles will not take effect for handles created on the Python side.

@@ -80,6 +86,10 @@ class CommHandle(CommHandle_):
         [[2. 2. 2. 2. 2. 2. 2. 2.]
          [2. 2. 2. 2. 2. 2. 2. 2.]]
         """
+        if self.handle:
+            self.handle.wait()
+        if self.exec_sync:
+            ms.runtime.synchronize()
 
 
 default_handle = CommHandle()
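Note: with the new constructor, a Python-side CommHandle can wrap an inner C++ handle and optionally follow wait() with a full ms.runtime.synchronize() (exec_sync). Combined with the _deal_comm_outputs hunk further down, the handle returned by an async collective now actually blocks. Illustrative 2-device sketch:

import numpy as np
import mindspore as ms
from mindspore import communication as comm

comm.init()
x = ms.Tensor(np.ones([2, 4]).astype(np.float32))
out, handle = comm.comm_func.all_reduce(x, async_op=True)
# ... independent computation could overlap with the collective here ...
handle.wait()    # waits on the wrapped inner handle; synchronizes the device if exec_sync was set
print(out)       # element-wise sum across both ranks -> all 2.0
comm.release()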
@@ -220,7 +230,7 @@ def all_reduce(tensor, op=ReduceOp.SUM, group=GlobalComm.WORLD_COMM_GROUP, async
 For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
 without any third-party or configuration file dependencies.
 Please see the `msrun start up
-<https://www.mindspore.cn/
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 
 This example should be run with 2 devices.

@@ -285,7 +295,7 @@ def all_gather_into_tensor(tensor, group=GlobalComm.WORLD_COMM_GROUP, async_op=F
 For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
 without any third-party or configuration file dependencies.
 Please see the `msrun start up
-<https://www.mindspore.cn/
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 
 This example should be run with 2 devices.

@@ -355,7 +365,7 @@ def reduce_scatter_tensor(tensor, op=ReduceOp.SUM, group=GlobalComm.WORLD_COMM_G
 For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
 without any third-party or configuration file dependencies.
 Please see the `msrun start up
-<https://www.mindspore.cn/
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 
 This example should be run with 2 devices.

@@ -424,7 +434,7 @@ def reduce(tensor, dst, op=ReduceOp.SUM, group=GlobalComm.WORLD_COMM_GROUP):
 without any third-party or configuration file dependencies.
 
 Please see the `msrun start up
-<https://www.mindspore.cn/
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 
 This example should be run with 4 devices.

@@ -561,7 +571,7 @@ def batch_isend_irecv(p2p_op_list):
 For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
 without any third-party or configuration file dependencies.
 Please see the `msrun start up
-<https://www.mindspore.cn/
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 
 This example should be run with 2 devices.

@@ -679,7 +689,7 @@ def scatter_tensor(tensor, src=0, group=GlobalComm.WORLD_COMM_GROUP):
 For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
 without any third-party or configuration file dependencies.
 Please see the `msrun start up
-<https://www.mindspore.cn/
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 
 This example should be run with 2 devices.

@@ -744,7 +754,7 @@ def gather_into_tensor(tensor, dst=0, group=GlobalComm.WORLD_COMM_GROUP):
 For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
 without any third-party or configuration file dependencies.
 Please see the `msrun start up
-<https://www.mindspore.cn/
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 
 This example should be run with 2 devices.

@@ -805,7 +815,7 @@ def broadcast(tensor, src=0, group=GlobalComm.WORLD_COMM_GROUP):
 For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
 without any third-party or configuration file dependencies.
 Please see the `msrun start up
-<https://www.mindspore.cn/
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 
 This example should be run with 2 devices.

@@ -819,6 +829,7 @@ def broadcast(tensor, src=0, group=GlobalComm.WORLD_COMM_GROUP):
 >>> comm.init()
 >>> data = ms.Tensor(np.arange(8).reshape([2, 4]).astype(np.float32))
 >>> out = comm.comm_func.broadcast(tensor=data, src=0)
+>>> print(out)
 [[0. 1. 2. 3.]
 [4. 5. 6. 7.]]
 

@@ -858,7 +869,7 @@ def barrier(group=GlobalComm.WORLD_COMM_GROUP):
 For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
 without any third-party or configuration file dependencies.
 Please see the `msrun start up
-<https://www.mindspore.cn/
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 
 This example should be run with 2 devices.

@@ -869,6 +880,8 @@ def barrier(group=GlobalComm.WORLD_COMM_GROUP):
 >>> # Launch 2 processes.
 >>> comm.init()
 >>> comm.comm_func.barrier()
+>>> print("barrier finish!")
+barrier finish!
 
 Tutorial Examples:
 - `Distributed Set Communication Primitives - Barrier

@@ -888,9 +901,9 @@ def _deal_comm_outputs(output, async_op, exec_sync=False):
         if not async_op:
             output[1].wait()
             if exec_sync:
-
+                ms.runtime.synchronize()
             return (output[0], None)
-        return output
+        return (output[0], CommHandle(output[1], exec_sync))
 
     if not async_op:
         return (output, None)
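Note: the rewritten tail of _deal_comm_outputs makes the two call shapes uniform: a synchronous call waits (optionally followed by ms.runtime.synchronize()) and returns (tensor, None), while an asynchronous call now returns (tensor, CommHandle(inner_handle, exec_sync)) instead of the raw backend tuple, so the caller's wait() takes effect. Sketch of both shapes (illustrative values):

import numpy as np
import mindspore as ms
from mindspore import communication as comm

comm.init()
x = ms.Tensor(np.ones([8]).astype(np.float32))
out_sync, h_sync = comm.comm_func.all_reduce(x)                   # h_sync is None
out_async, h_async = comm.comm_func.all_reduce(x, async_op=True)  # h_async wraps the inner handle
h_async.wait()
print(out_sync, out_async)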
@@ -926,7 +939,7 @@ def send(tensor, dst=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
 For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
 without any third-party or configuration file dependencies.
 Please see the `msrun start up
-<https://www.mindspore.cn/
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 
 This example should be run with 2 devices.

@@ -946,12 +959,13 @@ def send(tensor, dst=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
 >>>
 >>>
 >>> if rank < size / 2:
-
-
-
-
-
-
+...     _x = ms.Tensor(x)
+...     send(_x, rank + size // 2)
+... else:
+...     _x2 = ms.Tensor(x2)
+...     output = recv(_x2, rank - size // 2)
+...     print(output)
+rank1:
 [[0.01 0.01]
 [0.01 0.01]]
 """

@@ -1000,7 +1014,7 @@ def recv(tensor, src=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
 For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
 without any third-party or configuration file dependencies.
 Please see the `msrun start up
-<https://www.mindspore.cn/
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 
 This example should be run with 2 devices.

@@ -1020,12 +1034,13 @@ def recv(tensor, src=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
 >>>
 >>>
 >>> if rank < size / 2:
-
-
-
-
-
-
+...     _x = ms.Tensor(x)
+...     send(_x, rank + size // 2)
+... else:
+...     _x2 = ms.Tensor(x2)
+...     output = recv(_x2, rank - size // 2)
+...     print(output)
+rank1:
 [[0.01 0.01]
 [0.01 0.01]]
 """

@@ -1075,7 +1090,7 @@ def isend(tensor, dst=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
 For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
 without any third-party or configuration file dependencies.
 Please see the `msrun start up
-<https://www.mindspore.cn/
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 
 This example should be run with 2 devices.

@@ -1095,13 +1110,14 @@ def isend(tensor, dst=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
 >>>
 >>>
 >>> if rank < size / 2:
-
-
-
-
-
-
-
+...     _x = ms.Tensor(x)
+...     isend(_x, rank + size // 2)
+... else:
+...     _x2 = ms.Tensor(x2)
+...     output, handle = irecv(_x2, rank - size // 2)
+...     handle.wait()
+...     print(output)
+rank1:
 [[0.01 0.01]
 [0.01 0.01]]
 """

@@ -1153,7 +1169,7 @@ def irecv(tensor, src=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
 For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
 without any third-party or configuration file dependencies.
 Please see the `msrun start up
-<https://www.mindspore.cn/
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 
 This example should be run with 2 devices.

@@ -1173,13 +1189,14 @@ def irecv(tensor, src=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
 >>>
 >>>
 >>> if rank < size / 2:
-
-
-
-
-
-
-
+...     _x = ms.Tensor(x)
+...     isend(_x, rank + size // 2)
+... else:
+...     _x2 = ms.Tensor(x2)
+...     output, handle = irecv(_x2, rank - size // 2)
+...     handle.wait()
+...     print(output)
+rank1:
 [[0.01 0.01]
 [0.01 0.01]]
 """

@@ -1229,7 +1246,7 @@ def all_to_all_with_output_shape(output_shape_list, input_tensor_list, group=Non
 For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
 without any third-party or configuration file dependencies.
 Please see the `msrun start up
-<https://www.mindspore.cn/
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 
 This example should be run with 2 devices.

@@ -1241,11 +1258,11 @@ def all_to_all_with_output_shape(output_shape_list, input_tensor_list, group=Non
 >>> comm.init()
 >>> this_rank = comm.get_rank()
 >>> if this_rank == 0:
-
-
+...     send_tensor_list = [ms.Tensor(1.), ms.Tensor([[2, 3], [4, 5.]])]
+...     recv_tensor_list = [(), (2,)]
 >>> if this_rank == 1:
-
-
+...     send_tensor_list = [ms.Tensor([2, 2.]), ms.Tensor([4, 5, 6, 7.])]
+...     recv_tensor_list = [(2, 2), (4,)]
 >>> output, _ = comm.comm_func.all_to_all_with_output_shape(recv_tensor_list, send_tensor_list)
 >>> print(output)
 rank 0:

@@ -1280,7 +1297,6 @@ def all_to_all_with_output_shape(output_shape_list, input_tensor_list, group=Non
         recv_shape_list.append(_shape)
 
     send_flatten_tensor = cat(send_flatten_tensor)
-    send_flatten_tensor = _contiguous(send_flatten_tensor)
     group = GlobalComm.WORLD_COMM_GROUP if group is None else _get_group(group)
     global _GROPU_SIZE_CACHE
     if group not in _GROPU_SIZE_CACHE:
@@ -1345,7 +1361,8 @@ _ALL_TO_ALL_CACHE = {}
 def all_to_all_single_with_output_shape(output_shape, tensor, output_split_sizes=None,
                                         input_split_sizes=None, group=None, async_op=False):
     """
-
+    Based on the slice size of the user input, the input `tensor` is sliced and sent to other devices
+    and receives the sliced chunks from the other devices, which are then merged into an output Tensor.
 
     Note:
         'output_shape' and 'tensor' shape should be match across ranks.

@@ -1365,8 +1382,8 @@ def all_to_all_single_with_output_shape(output_shape, tensor, output_split_sizes
 
     Returns:
         Tuple(Tensor, CommHandle), the output tensor is gathered concatenated from remote ranks.
-        If the numel of tensor gathered from remote is zero, it will return a Tensor
-
+        If the numel of tensor gathered from remote is zero, it will return a Tensor with shape `()`,
+        and value has no actual meanning. CommHandle is an async work handle, if `async_op` is set to True.
         CommHandle will be None, when `async_op` is False.
 
     Raises:

@@ -1383,7 +1400,7 @@ def all_to_all_single_with_output_shape(output_shape, tensor, output_split_sizes
 For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
 without any third-party or configuration file dependencies.
 Please see the `msrun start up
-<https://www.mindspore.cn/
+<https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
 for more details.
 
 This example should be run with 2 devices.
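Note: tying the last three hunks together, all_to_all_single_with_output_shape slices `tensor` by input_split_sizes, exchanges the slices across the group, and concatenates what it receives into a tensor of output_shape; with async_op left False the returned handle is None. A 2-device sketch (split sizes chosen for illustration):

import numpy as np
import mindspore as ms
from mindspore import communication as comm

comm.init()
rank = comm.get_rank()
# Each rank sends 2 rows to each of the 2 ranks and receives 4 rows back.
tensor = ms.Tensor(np.full((4, 2), rank, dtype=np.float32))
output, handle = comm.comm_func.all_to_all_single_with_output_shape(
    (4, 2), tensor, output_split_sizes=[2, 2], input_split_sizes=[2, 2])
print(output)    # rows 0-1 received from rank 0, rows 2-3 from rank 1
print(handle)    # None, because async_op defaults to False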