mindspore 2.5.0-cp39-cp39-win_amd64.whl → 2.6.0rc1-cp39-cp39-win_amd64.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +6 -4
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -33
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +19 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +24 -193
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +97 -74
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +4 -4
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +4 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -1
- mindspore/common/_stub_tensor.py +5 -10
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +1915 -3287
- mindspore/common/api.py +341 -354
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +5 -2
- mindspore/common/dump.py +7 -5
- mindspore/common/file_system.py +3 -0
- mindspore/common/hook_handle.py +5 -3
- mindspore/common/initializer.py +10 -6
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +2 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +106 -39
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +297 -714
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +47 -2
- mindspore/communication/comm_func.py +70 -53
- mindspore/communication/management.py +83 -17
- mindspore/context.py +214 -560
- mindspore/dataset/__init__.py +44 -20
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +102 -120
- mindspore/dataset/engine/datasets_audio.py +22 -22
- mindspore/dataset/engine/datasets_standard_format.py +43 -24
- mindspore/dataset/engine/datasets_text.py +78 -85
- mindspore/dataset/engine/datasets_user_defined.py +108 -76
- mindspore/dataset/engine/datasets_vision.py +111 -108
- mindspore/dataset/engine/iterators.py +5 -3
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/samplers.py +279 -57
- mindspore/dataset/engine/serializer_deserializer.py +2 -1
- mindspore/dataset/engine/validators.py +10 -0
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/device_context/ascend/op_debug.py +60 -1
- mindspore/device_context/ascend/op_tuning.py +0 -4
- mindspore/device_manager.py +39 -3
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +22 -26
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +4 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +40 -22
- mindspore/experimental/optim/radam.py +5 -5
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -81
- mindspore/hal/event.py +38 -55
- mindspore/hal/memory.py +93 -144
- mindspore/hal/stream.py +81 -125
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +40 -2
- mindspore/mindrecord/__init__.py +20 -7
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +131 -700
- mindspore/mint/distributed/__init__.py +5 -1
- mindspore/mint/distributed/distributed.py +194 -109
- mindspore/mint/linalg/__init__.py +2 -0
- mindspore/mint/nn/__init__.py +280 -18
- mindspore/mint/nn/functional.py +282 -64
- mindspore/mint/nn/layer/__init__.py +4 -0
- mindspore/mint/nn/layer/_functions.py +7 -3
- mindspore/mint/nn/layer/activation.py +120 -13
- mindspore/mint/nn/layer/conv.py +218 -24
- mindspore/mint/nn/layer/normalization.py +15 -16
- mindspore/mint/nn/layer/padding.py +1 -1
- mindspore/mint/nn/layer/pooling.py +66 -1
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1250 -176
- mindspore/nn/layer/activation.py +23 -21
- mindspore/nn/layer/basic.py +22 -16
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +22 -17
- mindspore/nn/layer/embedding.py +9 -8
- mindspore/nn/layer/normalization.py +48 -42
- mindspore/nn/layer/pooling.py +75 -31
- mindspore/nn/layer/transformer.py +11 -10
- mindspore/nn/learning_rate_schedule.py +4 -2
- mindspore/nn/loss/loss.py +27 -19
- mindspore/nn/optim/ada_grad.py +6 -5
- mindspore/nn/optim/adadelta.py +9 -7
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +16 -12
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +1 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +9 -7
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +178 -117
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +3 -3
- mindspore/numpy/array_creations.py +3 -3
- mindspore/numpy/array_ops.py +1 -1
- mindspore/numpy/math_ops.py +4 -4
- mindspore/numpy/utils.py +1 -2
- mindspore/numpy/utils_const.py +1 -2
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +3 -2
- mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
- mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
- mindspore/ops/_vmap/vmap_array_ops.py +7 -6
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
- mindspore/ops/_vmap/vmap_math_ops.py +4 -7
- mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +102 -49
- mindspore/ops/auto_generate/gen_extend_func.py +281 -135
- mindspore/ops/auto_generate/gen_ops_def.py +2574 -2326
- mindspore/ops/auto_generate/gen_ops_prim.py +8566 -2755
- mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +19 -24
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -3
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +28 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +1629 -2345
- mindspore/ops/function/clip_func.py +38 -45
- mindspore/ops/function/debug_func.py +36 -44
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +46 -78
- mindspore/ops/function/math_func.py +3035 -3705
- mindspore/ops/function/nn_func.py +676 -241
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +17 -30
- mindspore/ops/function/random_func.py +204 -361
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +5 -5
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +24 -17
- mindspore/ops/functional.py +6 -4
- mindspore/ops/functional_overload.py +547 -4
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +10 -5
- mindspore/ops/operations/_custom_ops_utils.py +247 -0
- mindspore/ops/operations/_grad_ops.py +1 -10
- mindspore/ops/operations/_inner_ops.py +5 -76
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +37 -22
- mindspore/ops/operations/comm_ops.py +150 -107
- mindspore/ops/operations/custom_ops.py +221 -23
- mindspore/ops/operations/debug_ops.py +115 -16
- mindspore/ops/operations/inner_ops.py +1 -1
- mindspore/ops/operations/linalg_ops.py +1 -58
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +746 -79
- mindspore/ops/operations/math_ops.py +21 -18
- mindspore/ops/operations/nn_ops.py +65 -191
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +43 -32
- mindspore/ops/tensor_method.py +232 -13
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
- mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
- mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
- mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
- mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
- mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
- mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
- mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
- mindspore/ops_generate/{template.py → common/template.py} +96 -84
- mindspore/ops_generate/gen_ops.py +23 -325
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
- mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -7
- mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
- mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
- mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
- mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
- mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
- mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
- mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
- mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
- mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
- mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
- mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
- mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
- mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
- mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +6 -2
- mindspore/parallel/_auto_parallel_context.py +133 -6
- mindspore/parallel/_cell_wrapper.py +130 -15
- mindspore/parallel/_parallel_serialization.py +95 -4
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +198 -25
- mindspore/parallel/algo_parameter_config.py +3 -3
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +656 -37
- mindspore/parallel/cluster/process_entity/_api.py +151 -19
- mindspore/parallel/cluster/run.py +1 -1
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +259 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +24 -13
- mindspore/parallel/shard.py +137 -61
- mindspore/parallel/transform_safetensors.py +287 -95
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +9 -5
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
- mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +22 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
- mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/common/constant.py +12 -0
- mindspore/profiler/common/msprof_cmd_tool.py +42 -23
- mindspore/profiler/common/path_manager.py +24 -0
- mindspore/profiler/common/profiler_context.py +26 -2
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_parameters.py +59 -18
- mindspore/profiler/common/profiler_path_manager.py +66 -7
- mindspore/profiler/dynamic_profiler.py +112 -79
- mindspore/profiler/envprofiler.py +26 -1
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +57 -14
- mindspore/profiler/platform/npu_profiler.py +33 -7
- mindspore/profiler/profiler.py +541 -45
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +4 -0
- mindspore/profiler/schedule.py +57 -22
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +25 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +2 -2
- mindspore/runtime/executor.py +40 -11
- mindspore/runtime/memory.py +25 -8
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +35 -7
- mindspore/train/amp.py +1 -1
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +24 -40
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_flops_collector.py +2 -3
- mindspore/train/callback/_history.py +7 -4
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +8 -13
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +179 -103
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +4 -5
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +8 -6
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +19 -12
- mindspore/train/model.py +176 -103
- mindspore/train/serialization.py +246 -988
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +4 -2
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +2 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +483 -438
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_constants.py +0 -190
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
- /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
--- mindspore/ops/operations/comm_ops.py (2.5.0)
+++ mindspore/ops/operations/comm_ops.py (2.6.0rc1)
@@ -55,10 +55,10 @@ class ReduceOp:
     For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
     without any third-party or configuration file dependencies.
     Please see the `msrun start up
-    <https://www.mindspore.cn/
+    <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.

-    This example should be run with
+    This example should be run with 2 devices.

     >>> import numpy as np
     >>> import mindspore
@@ -144,7 +144,7 @@ class AllReduce(Primitive):
     For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
     without any third-party or configuration file dependencies.
     Please see the `msrun start up
-    <https://www.mindspore.cn/
+    <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.

     This example should be run with 2 devices.
@@ -234,7 +234,7 @@ class Reduce(PrimitiveWithInfer):
     For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method without any third-party
     or configuration file dependencies.
     Please see the `msrun start up
-    <https://www.mindspore.cn/
+    <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.

     This example should be run with 4 devices.
@@ -247,13 +247,13 @@ class Reduce(PrimitiveWithInfer):
     >>> # Launch 4 processes.
     >>> init()
     >>> class ReduceNet(nn.Cell):
-
-
-
-
-
-
-
+    ...     def __init__(self):
+    ...         super(ReduceNet, self).__init__()
+    ...         self.reduce = ops.Reduce(dest_rank=1)
+    ...
+    ...     def construct(self, x):
+    ...         out = self.reduce(x)
+    ...         return out
     >>> input = Tensor(np.ones([2, 8]).astype(np.float32))
     >>> net = ReduceNet()
     >>> output = net(input)
@@ -318,7 +318,7 @@ class AllGather(PrimitiveWithInfer):
     For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
     without any third-party or configuration file dependencies.
     Please see the `msrun start up
-    <https://www.mindspore.cn/
+    <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.

     This example should be run with 2 devices.
@@ -541,7 +541,7 @@ class ReduceScatter(Primitive):
     For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
     without any third-party or configuration file dependencies.
     Please see the `msrun start up
-    <https://www.mindspore.cn/
+    <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.

     This example should be run with 2 devices.
@@ -679,7 +679,7 @@ class Broadcast(PrimitiveWithInfer):
     For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
     without any third-party or configuration file dependencies.
     Please see the `msrun start up
-    <https://www.mindspore.cn/
+    <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.

     This example should be run with 2 devices.
@@ -883,7 +883,7 @@ class AlltoAll(PrimitiveWithInfer):
         split_count (int): On each process, divide blocks into split_count number.
         split_dim (int): On each process, split blocks along the split_dim.
         concat_dim (int): On each process, gather the received blocks along the concat_dimension.
-        group (str): The communication group to work on. Default: ``GlobalComm.WORLD_COMM_GROUP`` .
+        group (str, optional): The communication group to work on. Default: ``GlobalComm.WORLD_COMM_GROUP`` .

     Inputs:
         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
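Aside (not part of the diff): the three AlltoAll attributes quoted above determine the output shape completely; each rank keeps 1/split_count of its data along `split_dim` and concatenates the `split_count` received blocks along `concat_dim`. A single-process sketch of that shape rule (the function name and sample values are illustrative, not MindSpore code):

# Shape rule implied by AlltoAll's split_count/split_dim/concat_dim attributes.
def alltoall_out_shape(shape, split_count, split_dim, concat_dim):
    out = list(shape)
    out[split_dim] //= split_count   # each rank keeps one of the split_count blocks
    out[concat_dim] *= split_count   # and concatenates the blocks it receives
    return tuple(out)

# e.g. 8 ranks exchanging (32, 8) tensors with split_dim=0, concat_dim=1:
print(alltoall_out_shape((32, 8), split_count=8, split_dim=0, concat_dim=1))  # (4, 64)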
@@ -909,7 +909,7 @@ class AlltoAll(PrimitiveWithInfer):
     For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
     without any third-party or configuration file dependencies.
     Please see the `msrun start up
-    <https://www.mindspore.cn/
+    <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.

     This example should be run with 8 devices.
@@ -1031,7 +1031,7 @@ class NeighborExchangeV2(Primitive):
     For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
     without any third-party or configuration file dependencies.
     Please see the `msrun start up
-    <https://www.mindspore.cn/
+    <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.

     This example should be run with 2 devices.
@@ -1054,7 +1054,7 @@ class NeighborExchangeV2(Primitive):
     ...     def construct(self, x):
     ...         out = self.neighbor_exchangev2(x)
     ...         return out
-
+    >>> class Net1(nn.Cell):
     ...     def __init__(self):
     ...         super(Net1, self).__init__()
     ...         self.neighbor_exchangev2 = ops.NeighborExchangeV2(send_rank_ids=[0, -1, -1, -1, -1, -1, -1, -1],
@@ -1070,16 +1070,19 @@ class NeighborExchangeV2(Primitive):
     >>> init()
     >>> rank_id = int(os.getenv("RANK_ID"))
     >>> if (rank_id % 2 == 0):
-
-
-
-
-
-
-
-
-
+    ...     input_x = ms.Tensor(np.ones([1, 1, 2, 2]), dtype = ms.float32)
+    ...     net = Net0()
+    ...     output = net(input_x)
+    ...     print(output)
+    ... else:
+    ...     input_x = ms.Tensor(np.ones([1, 1, 2, 2]) * 2, dtype = ms.float32)
+    ...     net = Net1()
+    ...     output = net(input_x)
+    ...     print(output)
+    rank 0:
     [[[[1. 1.], [1. 1.], [2. 2.]]]]
+    rank 1:
+    [[[[1. 1.], [2. 2.], [2. 2.]]]]

     Tutorial Examples:
         - `Distributed Set Communication Primitives - NeighborExchangeV2
@@ -1114,7 +1117,7 @@ class NeighborExchangeV2(Primitive):

 class CollectiveScatter(Primitive):
     r"""
-    Scatter
+    Scatter input data evently across the processes in the specified communication group.

     Note:
         The interface behavior only support Tensor input and scatter evenly.
@@ -1148,7 +1151,7 @@ class CollectiveScatter(Primitive):
     For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
     without any third-party or configuration file dependencies.
     Please see the `msrun start up
-    <https://www.mindspore.cn/
+    <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.

     This example should be run with 2 devices.
@@ -1161,12 +1164,12 @@ class CollectiveScatter(Primitive):
     >>> # Launch 2 processes.
     >>> init()
     >>> class CollectiveScatterNet(nn.Cell):
-
-
-
-
-
-
+    ...     def __init__(self):
+    ...         super(CollectiveScatterNet, self).__init__()
+    ...         self.collective_scatter = ops.CollectiveScatter(src_rank=0)
+    ...
+    ...     def construct(self, x):
+    ...         return self.collective_scatter(x)
     >>>
     >>> input = Tensor(np.arange(8).reshape([4, 2]).astype(np.float32))
     >>> net = CollectiveScatterNet()
@@ -1233,7 +1236,7 @@ class CollectiveGather(Primitive):
     For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
     without any third-party or configuration file dependencies.
     Please see the `msrun start up
-    <https://www.mindspore.cn/
+    <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.

     This example should be run with 4 devices.
@@ -1309,7 +1312,7 @@ class Barrier(PrimitiveWithInfer):
     For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
     without any third-party or configuration file dependencies.
     Please see the `msrun start up
-    <https://www.mindspore.cn/
+    <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.

     This example should be run with 2 devices.
@@ -1322,12 +1325,12 @@ class Barrier(PrimitiveWithInfer):
     >>> # Launch 4 processes.
     >>> init()
     >>> class BarrierNet(nn.Cell):
-
-
-
-
-
-
+    ...     def __init__(self):
+    ...         super(BarrierNet, self).__init__()
+    ...         self.barrier = ops.Barrier()
+    ...
+    ...     def construct(self):
+    ...         self.barrier()
     >>> net = BarrierNet()
     >>> net()

@@ -1354,10 +1357,10 @@ class Send(PrimitiveWithInfer):
     Send tensors to the specified dest_rank.

     Note:
-        Send and Receive must be used in combination and have same sr_tag
+        Send and Receive must be used in combination and have same `sr_tag`.

     Args:
-        sr_tag (int): The tag to identify the send/recv message. The message will
+        sr_tag (int): The tag to identify the send/recv message. The message sent by this operator will
             be received by the Receive op with the same "sr_tag".
         dest_rank (int): A required integer identifying the destination rank.
         group (str, optional): The communication group to work on. Default: ``GlobalComm.WORLD_COMM_GROUP``.
@@ -1383,31 +1386,53 @@ class Send(PrimitiveWithInfer):
     For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
     without any third-party or configuration file dependencies.
     Please see the `msrun start up
-    <https://www.mindspore.cn/
+    <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.

     This example should be run with 2 devices.

+    >>> import os
     >>> import numpy as np
+    >>> import mindspore.ops as ops
     >>> import mindspore.nn as nn
+    >>> import mindspore as ms
     >>> from mindspore.communication import init
     >>> from mindspore import Tensor
-    >>> from mindspore import ops
     >>>
+    >>> ms.set_context(mode=ms.GRAPH_MODE, jit_level="O2")
     >>> init()
+    >>>
     >>> class SendNet(nn.Cell):
-
-
-
-
+    ...     def __init__(self):
+    ...         super(SendNet, self).__init__()
+    ...         self.depend = ops.Depend()
+    ...         self.send = ops.Send(sr_tag=0, dest_rank=1, group="hccl_world_group")
+    ...
+    ...     def construct(self, x):
+    ...         out = self.depend(x, self.send(x))
+    ...         return out
     >>>
-    >>>
-
-
+    >>> class ReceiveNet(nn.Cell):
+    ...     def __init__(self):
+    ...         super(ReceiveNet, self).__init__()
+    ...         self.recv = ops.Receive(sr_tag=0, src_rank=0, shape=[2, 8], dtype=ms.float32,
+    ...                                 group="hccl_world_group")
+    ...
+    ...     def construct(self):
+    ...         out = self.recv()
+    ...         return out
     >>>
-    >>>
-
-
+    >>> if __name__ == "__main__":
+    ...     rank_id = os.environ["RANK_ID"]
+    ...     rank_size = os.environ["RANK_SIZE"]
+    ...     if rank_id == "0":
+    ...         input_ = Tensor(np.ones([2, 8]).astype(np.float32))
+    ...         send_net = SendNet()
+    ...         output = send_net(input_)
+    ...     else:
+    ...         recv_net = ReceiveNet()
+    ...         output = recv_net()
+    ...     print(output.asnumpy())

     Tutorial Examples:
         - `Distributed Set Communication Primitives - Send
@@ -1435,11 +1460,11 @@ class Receive(PrimitiveWithInfer):
     Receive tensors from src_rank.

     Note:
-        Send and Receive must be used in combination and have same sr_tag
+        Send and Receive must be used in combination and have same `sr_tag`.

     Args:
-        sr_tag (int): A required integer identifying the send/recv message tag.
-
+        sr_tag (int): A required integer identifying the send/recv message tag. This operator will receive the tensor
+            sent by the Send operator with the same `sr_tag` tag.
         src_rank (int): A required integer identifying the source rank.
         shape (list[int]): A required list identifying the shape of the tensor to be received.
         dtype (Type): A required Type identifying the type of the tensor to be received. The supported types:
@@ -1452,7 +1477,7 @@ class Receive(PrimitiveWithInfer):
         Tensor, output has the same shape as the Tensor sent by `Send` operation.

     Raises:
-        TypeError: If `group` is not a str.
+        TypeError: If `src_rank` is not an int or `group` is not a str.
         RuntimeError: If device target is invalid, or backend is invalid, or distributed initialization fails.
         ValueError: If the local rank id of the calling process in the group
             is larger than the group's rank size.
@@ -1467,30 +1492,53 @@ class Receive(PrimitiveWithInfer):
     For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
     without any third-party or configuration file dependencies.
     Please see the `msrun start up
-    <https://www.mindspore.cn/
+    <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.

     This example should be run with 2 devices.

+    >>> import os
     >>> import numpy as np
+    >>> import mindspore.ops as ops
     >>> import mindspore.nn as nn
+    >>> import mindspore as ms
     >>> from mindspore.communication import init
     >>> from mindspore import Tensor
-    >>> from mindspore import ops
     >>>
+    >>> ms.set_context(mode=ms.GRAPH_MODE, jit_level="O2")
     >>> init()
-    >>> class ReceiveNet(nn.Cell):
-    >>>     def __init__(self):
-    >>>         super(ReceiveNet, self).__init__()
-    >>>         self.recv = ops.Receive(sr_tag=0, src_rank=0, shape=[2, 8], dtype=ms.float32,
-    >>>                                 group="hccl_world_group")
     >>>
-    >>>
-
-
+    >>> class SendNet(nn.Cell):
+    ...     def __init__(self):
+    ...         super(SendNet, self).__init__()
+    ...         self.depend = ops.Depend()
+    ...         self.send = ops.Send(sr_tag=0, dest_rank=1, group="hccl_world_group")
+    ...
+    ...     def construct(self, x):
+    ...         out = self.depend(x, self.send(x))
+    ...         return out
     >>>
-    >>>
-
+    >>> class ReceiveNet(nn.Cell):
+    ...     def __init__(self):
+    ...         super(ReceiveNet, self).__init__()
+    ...         self.recv = ops.Receive(sr_tag=0, src_rank=0, shape=[2, 8], dtype=ms.float32,
+    ...                                 group="hccl_world_group")
+    ...
+    ...     def construct(self):
+    ...         out = self.recv()
+    ...         return out
+    >>>
+    >>> if __name__ == "__main__":
+    ...     rank_id = os.environ["RANK_ID"]
+    ...     rank_size = os.environ["RANK_SIZE"]
+    ...     if rank_id == "0":
+    ...         input_ = Tensor(np.ones([2, 8]).astype(np.float32))
+    ...         send_net = SendNet()
+    ...         output = send_net(input_)
+    ...     else:
+    ...         recv_net = ReceiveNet()
+    ...         output = recv_net()
+    ...     print(output.asnumpy())

     Tutorial Examples:
         - `Distributed Set Communication Primitives - Receive
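Aside (not part of the diff): in the rewritten Send example, the send is routed through `ops.Depend` because `Send` produces no output that the graph consumes; `Depend` returns its first input while ordering it after the second input's side effect, which keeps graph optimization from pruning the communication op. A minimal sketch of the pattern, with `side_effect` as a stand-in for the real `self.send(x)` call:

import numpy as np
import mindspore as ms
import mindspore.ops as ops

# ops.Depend(value, expr) returns value, recorded as depending on expr's effect.
x = ms.Tensor(np.ones([2, 8]).astype(np.float32))
side_effect = x * 0                 # stand-in for self.send(x), whose result is unused
out = ops.Depend()(x, side_effect)  # out is just x, ordered after the side effect
print(out.shape)                    # (2, 8)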
@@ -1845,7 +1893,7 @@ class BatchISendIRecv(PrimitiveWithInfer):
     without any third-party or configuration file dependencies.

     Please see the `msrun start up
-    <https://www.mindspore.cn/
+    <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.

     This example should be run with 2 devices.
@@ -1924,28 +1972,29 @@ class BatchISendIRecv(PrimitiveWithInfer):

 class AlltoAllV(PrimitiveWithInfer):
     """
-
+    AllToAllV which support uneven scatter and gather compared with AllToAll.

     Note:
         - Only support flatten tensor as input. input tensor should be flattened and
           concatenated before call this primitive.

     Args:
-
-
-
-
-
+        group (str, optional): The communication group to work on. Default: ``GlobalComm.WORLD_COMM_GROUP``, which
+            means ``"hccl_world_group"`` in Ascend.
+        block_size (int, optional): The basic units for scatter and gather numel by `send_numel_list`
+            and `recv_numel_list`.
+            Default: ``1``.

     Inputs:
         - **input_x** (Tensor) - flatten tensor to scatter. The shape of tensor is :math:`(x_1)`.
+        - **send_numel_list** (Union[tuple[int], list[int], Tensor]) - split numel to scatter to different remote rank.
+          The actual distributed data numel is :math:`(send_numel_list * block_size * input_x.dtype)`.
+        - **recv_numel_list** (Union[tuple[int], list[int], Tensor]) - split numel to gather from different remote rank.
+          The actual aggregated data numel is :math:`(recv_numel_list * block_size * input_x.dtype)`.

     Outputs:
-        Tensor
-        If gather result is empty, it will return a Tensor with
-
-    Raises:
-        TypeError: If 'send_numel_list' or 'recv_numel_list' is not type of tuple and list.
+        Tensor, flattened and concatenated tensor gather from remote ranks.
+        If gather result is empty, it will return a Tensor with shape `()`, and value has no actual meaning.

     Supported Platforms:
         ``Ascend``
@@ -1958,13 +2007,11 @@ class AlltoAllV(PrimitiveWithInfer):
     without any third-party or configuration file dependencies.

     Please see the `msrun start up
-    <https://www.mindspore.cn/
+    <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
     for more details.

     This example should be run with 2 devices.

-    >>> import numpy as np
-    >>> import mindspore as ms
     >>> from mindspore import ops
     >>> import mindspore.nn as nn
     >>> from mindspore.communication import init, get_rank
@@ -1975,20 +2022,22 @@ class AlltoAllV(PrimitiveWithInfer):
     >>> class Net(nn.Cell):
     ...     def __init__(self):
     ...         super(Net, self).__init__()
-    ...
-    ...         self.all_to_all = ops.AlltoAllV([1, 2], [1, 2])
-    ...     else:
-    ...         self.all_to_all = ops.AlltoAllV([2, 1], [2, 1])
-    ...
-    ...     def construct(self, x):
-    ...         return self.all_to_all(x)
+    ...         self.all_to_all = ops.AlltoAllV()
     ...
+    ...     def construct(self, x, send_numel_list, recv_numel_list):
+    ...         return self.all_to_all(x, send_numel_list, recv_numel_list)
+    >>> send_numel_list = []
+    >>> recv_numel_list = []
     >>> if rank == 0:
-
+    ...     send_tensor = Tensor([0, 1, 2.])
+    ...     send_numel_list = [1, 2]
+    ...     recv_numel_list = [1, 2]
     >>> elif rank == 1:
-
+    ...     send_tensor = Tensor([3, 4, 5.])
+    ...     send_numel_list = [2, 1]
+    ...     recv_numel_list = [2, 1]
     >>> net = Net()
-    >>> output = net(send_tensor)
+    >>> output = net(send_tensor, send_numel_list, recv_numel_list)
     >>> print(output)
     rank 0:
     [0. 3. 4]
@@ -1998,15 +2047,9 @@ class AlltoAllV(PrimitiveWithInfer):
     """

     @prim_attr_register
-    def __init__(self,
-        validator.check_value_type("send_numel_list", send_numel_list, [tuple, list], self.name)
-        validator.check_value_type("recv_numel_list", recv_numel_list, [tuple, list], self.name)
+    def __init__(self, group=GlobalComm.WORLD_COMM_GROUP, block_size=1):
         self.group = GlobalComm.WORLD_COMM_GROUP if group is None else _get_group(group)
-        self.send_numel_list = send_numel_list
-        self.recv_numel_list = recv_numel_list
-        self.split_sizes_empty = split_sizes_empty
         self.rank_size = get_group_size(self.group)
-
         self.add_prim_attr('group', self.group)
-
-        self.add_prim_attr('
+        validator.check_value_type("block_size", block_size, [int], self.name)
+        self.add_prim_attr('block_size', self.block_size)