mindspore 2.5.0__cp310-cp310-win_amd64.whl → 2.6.0rc1__cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +6 -4
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -33
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +19 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +24 -193
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +97 -74
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +4 -4
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +4 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -1
- mindspore/common/_stub_tensor.py +5 -10
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +1915 -3287
- mindspore/common/api.py +341 -354
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +5 -2
- mindspore/common/dump.py +7 -5
- mindspore/common/file_system.py +3 -0
- mindspore/common/hook_handle.py +5 -3
- mindspore/common/initializer.py +10 -6
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +2 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +106 -39
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +297 -714
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +47 -2
- mindspore/communication/comm_func.py +70 -53
- mindspore/communication/management.py +83 -17
- mindspore/context.py +214 -560
- mindspore/dataset/__init__.py +44 -20
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +102 -120
- mindspore/dataset/engine/datasets_audio.py +22 -22
- mindspore/dataset/engine/datasets_standard_format.py +43 -24
- mindspore/dataset/engine/datasets_text.py +78 -85
- mindspore/dataset/engine/datasets_user_defined.py +108 -76
- mindspore/dataset/engine/datasets_vision.py +111 -108
- mindspore/dataset/engine/iterators.py +5 -3
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/samplers.py +279 -57
- mindspore/dataset/engine/serializer_deserializer.py +2 -1
- mindspore/dataset/engine/validators.py +10 -0
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/device_context/ascend/op_debug.py +60 -1
- mindspore/device_context/ascend/op_tuning.py +0 -4
- mindspore/device_manager.py +39 -3
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +22 -26
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +4 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +40 -22
- mindspore/experimental/optim/radam.py +5 -5
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -81
- mindspore/hal/event.py +38 -55
- mindspore/hal/memory.py +93 -144
- mindspore/hal/stream.py +81 -125
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +40 -2
- mindspore/mindrecord/__init__.py +20 -7
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +131 -700
- mindspore/mint/distributed/__init__.py +5 -1
- mindspore/mint/distributed/distributed.py +194 -109
- mindspore/mint/linalg/__init__.py +2 -0
- mindspore/mint/nn/__init__.py +280 -18
- mindspore/mint/nn/functional.py +282 -64
- mindspore/mint/nn/layer/__init__.py +4 -0
- mindspore/mint/nn/layer/_functions.py +7 -3
- mindspore/mint/nn/layer/activation.py +120 -13
- mindspore/mint/nn/layer/conv.py +218 -24
- mindspore/mint/nn/layer/normalization.py +15 -16
- mindspore/mint/nn/layer/padding.py +1 -1
- mindspore/mint/nn/layer/pooling.py +66 -1
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1250 -176
- mindspore/nn/layer/activation.py +23 -21
- mindspore/nn/layer/basic.py +22 -16
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +22 -17
- mindspore/nn/layer/embedding.py +9 -8
- mindspore/nn/layer/normalization.py +48 -42
- mindspore/nn/layer/pooling.py +75 -31
- mindspore/nn/layer/transformer.py +11 -10
- mindspore/nn/learning_rate_schedule.py +4 -2
- mindspore/nn/loss/loss.py +27 -19
- mindspore/nn/optim/ada_grad.py +6 -5
- mindspore/nn/optim/adadelta.py +9 -7
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +16 -12
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +1 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +9 -7
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +178 -117
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +3 -3
- mindspore/numpy/array_creations.py +3 -3
- mindspore/numpy/array_ops.py +1 -1
- mindspore/numpy/math_ops.py +4 -4
- mindspore/numpy/utils.py +1 -2
- mindspore/numpy/utils_const.py +1 -2
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +3 -2
- mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
- mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
- mindspore/ops/_vmap/vmap_array_ops.py +7 -6
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
- mindspore/ops/_vmap/vmap_math_ops.py +4 -7
- mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +102 -49
- mindspore/ops/auto_generate/gen_extend_func.py +281 -135
- mindspore/ops/auto_generate/gen_ops_def.py +2574 -2326
- mindspore/ops/auto_generate/gen_ops_prim.py +8566 -2755
- mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +19 -24
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -3
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +28 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +1629 -2345
- mindspore/ops/function/clip_func.py +38 -45
- mindspore/ops/function/debug_func.py +36 -44
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +46 -78
- mindspore/ops/function/math_func.py +3035 -3705
- mindspore/ops/function/nn_func.py +676 -241
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +17 -30
- mindspore/ops/function/random_func.py +204 -361
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +5 -5
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +24 -17
- mindspore/ops/functional.py +6 -4
- mindspore/ops/functional_overload.py +547 -4
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +10 -5
- mindspore/ops/operations/_custom_ops_utils.py +247 -0
- mindspore/ops/operations/_grad_ops.py +1 -10
- mindspore/ops/operations/_inner_ops.py +5 -76
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +37 -22
- mindspore/ops/operations/comm_ops.py +150 -107
- mindspore/ops/operations/custom_ops.py +221 -23
- mindspore/ops/operations/debug_ops.py +115 -16
- mindspore/ops/operations/inner_ops.py +1 -1
- mindspore/ops/operations/linalg_ops.py +1 -58
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +746 -79
- mindspore/ops/operations/math_ops.py +21 -18
- mindspore/ops/operations/nn_ops.py +65 -191
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +43 -32
- mindspore/ops/tensor_method.py +232 -13
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
- mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
- mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
- mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
- mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
- mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
- mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
- mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
- mindspore/ops_generate/{template.py → common/template.py} +96 -84
- mindspore/ops_generate/gen_ops.py +23 -325
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
- mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -7
- mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
- mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
- mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
- mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
- mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
- mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
- mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
- mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
- mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
- mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
- mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
- mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
- mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
- mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +6 -2
- mindspore/parallel/_auto_parallel_context.py +133 -6
- mindspore/parallel/_cell_wrapper.py +130 -15
- mindspore/parallel/_parallel_serialization.py +95 -4
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +198 -25
- mindspore/parallel/algo_parameter_config.py +3 -3
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +656 -37
- mindspore/parallel/cluster/process_entity/_api.py +151 -19
- mindspore/parallel/cluster/run.py +1 -1
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +259 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +24 -13
- mindspore/parallel/shard.py +137 -61
- mindspore/parallel/transform_safetensors.py +287 -95
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +9 -5
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
- mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +22 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
- mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/common/constant.py +12 -0
- mindspore/profiler/common/msprof_cmd_tool.py +42 -23
- mindspore/profiler/common/path_manager.py +24 -0
- mindspore/profiler/common/profiler_context.py +26 -2
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_parameters.py +59 -18
- mindspore/profiler/common/profiler_path_manager.py +66 -7
- mindspore/profiler/dynamic_profiler.py +112 -79
- mindspore/profiler/envprofiler.py +26 -1
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +57 -14
- mindspore/profiler/platform/npu_profiler.py +33 -7
- mindspore/profiler/profiler.py +541 -45
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +4 -0
- mindspore/profiler/schedule.py +57 -22
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +25 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +2 -2
- mindspore/runtime/executor.py +40 -11
- mindspore/runtime/memory.py +25 -8
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +35 -7
- mindspore/train/amp.py +1 -1
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +24 -40
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_flops_collector.py +2 -3
- mindspore/train/callback/_history.py +7 -4
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +8 -13
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +179 -103
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +4 -5
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +8 -6
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +19 -12
- mindspore/train/model.py +176 -103
- mindspore/train/serialization.py +246 -988
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +4 -2
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +2 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +483 -438
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_constants.py +0 -190
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
- /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
@@ -36,7 +36,7 @@ class Poisson(Distribution):
     where :math:`\lambda` is the rate of the distribution.
 
     Args:
-        rate (list, numpy.ndarray, Tensor): The rate of the Poisson distribution. Default: ``None`` .
+        rate (list, numpy.ndarray, Tensor): The rate of the Poisson distribution. :math:`\lambda` in the formula. Default: ``None`` .
         seed (int): The seed used in sampling. The global seed is used if it is ``None`` . Default: ``None`` .
         dtype (mindspore.dtype): The type of the event samples. Default: ``mstype.float32`` .
         name (str): The name of the distribution. Default: ``'Poisson'`` .

@@ -36,8 +36,10 @@ class Uniform(Distribution):
     Where :math:`a, b` are the lower and upper bound respectively.
 
     Args:
-        low (int, float, list, numpy.ndarray, Tensor): The lower bound of the distribution.
-
+        low (int, float, list, numpy.ndarray, Tensor): The lower bound of the distribution.
+            :math:`a` in the formula. Default: ``None`` .
+        high (int, float, list, numpy.ndarray, Tensor): The upper bound of the distribution.
+            :math:`b` in the formula. Default: ``None`` .
         seed (int): The seed uses in sampling. The global seed is used if it is ``None`` . Default: ``None`` .
         dtype (mindspore.dtype): The type of the event samples. Default: ``mstype.float32`` .
         name (str): The name of the distribution. Default: ``'Uniform'`` .

@@ -49,7 +51,7 @@ class Uniform(Distribution):
 
     Raises:
         ValueError: When high <= low.
-        TypeError: When the input `dtype` is not a subclass of float.
+        TypeError: When the input `dtype` is not a float or a subclass of float.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``

@@ -34,7 +34,7 @@ class TensorsQueue(Cell):
        dtype (mindspore.dtype): the data type in the TensorsQueue. Each tensor should have the same dtype.
        shapes (tuple[int64]): the shape of each element in TensorsQueue.
        size (int): the size of the TensorsQueue.
-       name (
+       name (str): the name of this TensorsQueue. Default: "TQ".
 
     Raises:
        TypeError: If `dtype` is not mindspore number type.

@@ -34,7 +34,7 @@ class TensorArray(Cell):
        element_shape (tuple[int]): the shape of each tensor in a TensorArray.
        dynamic_size (bool): if ``true`` , the size of TensorArray can be increased. Default: ``True`` .
        size (int): if dynamic_size=False, `size` means the max_size of the TensorArray.
-       name (
+       name (str): the name of this TensorArray. Default: ``"TA"`` .
 
     Supported Platforms:
        ``GPU`` ``CPU``
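The two docstring tweaks above tie the constructor arguments back to the symbols in the formulas (lambda for Poisson, a and b for Uniform). A minimal usage sketch, assuming the standard `mindspore.nn.probability.distribution` import path:

```python
import mindspore as ms
import mindspore.nn.probability.distribution as msd

# a = 0.0 and b = 3.0 in the Uniform pdf; lambda = 0.5 for Poisson.
uniform = msd.Uniform(low=0.0, high=3.0, seed=5, dtype=ms.float32)
poisson = msd.Poisson(rate=[0.5], seed=5, dtype=ms.float32)

print(uniform.prob(ms.Tensor(1.0, ms.float32)))   # 1 / (b - a) = 0.333...
```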
mindspore/nn/wrap/__init__.py  CHANGED

@@ -20,8 +20,8 @@ Use the Wrapper to combine the loss or build the training steps.
 from __future__ import absolute_import
 
 from mindspore.nn.wrap.cell_wrapper import ForwardValueAndGrad, TrainOneStepCell, WithLossCell, WithGradCell, \
-    WithEvalCell, ParameterUpdate, GetNextSingleOp, VirtualDatasetCellTriple,
-
+    WithEvalCell, ParameterUpdate, GetNextSingleOp, VirtualDatasetCellTriple, GradAccumulationCell, \
+    MicroBatchInterleaved, PipelineCell
 from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell,\
     DynamicLossScaleUpdateCell, FixedLossScaleUpdateCell
 from mindspore.nn.wrap.grad_reducer import DistributedGradReducer, PipelineGradReducer

@@ -34,16 +34,16 @@ __all__ = [
     "TrainOneStepCell",
     "WithLossCell",
     "WithGradCell",
-    "MicroBatchInterleaved",
-    "PipelineCell",
     "WithEvalCell",
     "GetNextSingleOp",
     "TrainOneStepWithLossScaleCell",
     "DistributedGradReducer",
-    "PipelineGradReducer",
     "ParameterUpdate",
     "DynamicLossScaleUpdateCell",
     "FixedLossScaleUpdateCell",
     "VirtualDatasetCellTriple",
-    "GradAccumulationCell"
+    "GradAccumulationCell",
+    "MicroBatchInterleaved",
+    "PipelineCell",
+    "PipelineGradReducer"
 ]
@@ -329,9 +329,11 @@ class TrainOneStepCell(Cell):
     Args:
         network (Cell): The training network. The network only supports single output.
         optimizer (Union[Cell]): Optimizer for updating the network parameters.
-        sens (numbers.Number): The scaling number to be filled as the input of backpropagation.
+        sens (numbers.Number, optional): The scaling number to be filled as the input of backpropagation.
+            Default value is
             ``None`` , which is ``1.0`` .
-        return_grad (bool): Whether to return gradient. If ``True``,
+        return_grad (bool, optional): Whether to return gradient. If ``True``,
+            it will return the gradient in the form of a dict
             while returning loss. The key of the dict is the parameter name corresponding to the gradient, and value
             is the gradient value. Default value is ``False`` .
 
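Per the reworded docstring above, `sens` defaults to ``None`` (treated as 1.0) and `return_grad=True` makes the wrapper hand back the gradients as a {parameter_name: gradient} dict alongside the loss. A hedged sketch of that usage (small throwaway network, PyNative defaults assumed):

```python
import numpy as np
import mindspore as ms
from mindspore import nn, Tensor

net = nn.Dense(4, 2)
loss_net = nn.WithLossCell(net, nn.MSELoss())
opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
train_step = nn.TrainOneStepCell(loss_net, opt, return_grad=True)

data = Tensor(np.ones((8, 4)), ms.float32)
label = Tensor(np.zeros((8, 2)), ms.float32)
loss, grads = train_step(data, label)      # grads is keyed by parameter name
print(loss, list(grads.keys()))
```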
@@ -530,6 +532,20 @@ class _VirtualDatasetCell(Cell):
         return self._backbone(*output)
 
 
+def _pipeline_clear_grad(accu_grad, grad):
+    accu_grad = F.depend(accu_grad, grad)
+    zeros = F.zeros_like(accu_grad)
+    return F.assign(accu_grad, zeros)
+
+def grad_scale(scale, grad):
+    """grad_scale"""
+    new_grad = scale * grad
+    grad = ops.depend(grad, new_grad)
+    zeros = F.zeros_like(grad)
+    new_grad = ops.depend(new_grad, F.assign(grad, zeros))
+    return new_grad
+
+
 @_primexpr
 def _check_shape_value_on_axis_divided_by_target_value(input_shape, micro_size):
     if F.isconstant(input_shape[0]) is False:
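Both helpers added above lean on `depend` to pin the execution order of their side effects (produce the scaled gradient first, then zero the accumulation buffer) once the graph is compiled. A minimal sketch of that ordering primitive, run eagerly just to show the values:

```python
import mindspore as ms
from mindspore import Tensor, Parameter, ops

accu = Parameter(Tensor([2.0, 4.0], ms.float32), name="accu")
scaled = 0.5 * accu                       # corresponds to `new_grad = scale * grad`
# ops.depend(a, b) returns `a` but forces `b` (here the assign that clears the
# buffer) to be evaluated before `a` is consumed when the graph is compiled.
scaled = ops.depend(scaled, ops.assign(accu, ops.zeros_like(accu)))
print(scaled)         # [1. 2.]  -- the scaled gradient survives
print(accu.value())   # [0. 0.]  -- the accumulation buffer is cleared
```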
@@ -572,119 +588,13 @@ class _MicroBatch(Cell):
         return micro_inputs
 
 
-class MicroBatchInterleaved(Cell):
-    """
-    This function splits the input at the 0th into interleave_num pieces and then performs
-    the computation of the wrapped cell. Application scenario: When there is model parallelism in semi-automatic mode
-    and network, if the first slice data is calculating forward, the second slice data will execute the
-    communication operators at the same time, to achieve the performance acceleration of communication and computing
-    concurrency.
-
-    Args:
-        network (Cell): The target network to wrap.
-        interleave_num (int, optional): split num of batch size. Default: ``2`` .
-
-    Inputs:
-        tuple[Tensor]. It's the same with the input of the `network` .
-
-    Outputs:
-        The wrapped input. The output of the input `network` should be a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU``
-
-    Examples:
-        >>> import mindspore.nn as nn
-        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
-        >>> net = LeNet5()
-        >>> net = nn.MicroBatchInterleaved(net, 2)
-    """
-    def __init__(self, network, interleave_num=2):
-        super(MicroBatchInterleaved, self).__init__(auto_prefix=False)
-        if not isinstance(interleave_num, int):
-            raise TypeError("For 'MicroBatchInterleaved', the argument 'interleave_num' must be integer, "
-                            "but got the type : {}.".format(type(interleave_num)))
-        if interleave_num <= 0:
-            raise ValueError("For 'MicroBatchInterleaved', the argument 'interleave_num' must be large than 0, "
-                             "but got {}.".format(interleave_num))
-        self.network = network
-        self.interleave_num = interleave_num
-        self.interleave_inputs = nn.CellList()
-        self.add = P.Add().add_prim_attr("micro_interleaved_add_flag", True)
-        for _ in range(interleave_num):
-            interleave_data = _MicroBatch(interleave_num)
-            interleave_data.strided_slice.add_prim_attr("strided_slice_flag", True)
-            interleave_data.strided_slice.add_prim_attr("interleave_num", interleave_num)
-            self.interleave_inputs.append(interleave_data)
-        self._get_attr_from_cell(network)
-
-    def construct(self, *inputs):
-        output = 0.0
-        for i in range(self.interleave_num):
-            interleave_input = self.interleave_inputs[i](i, *inputs)
-            output = self.add(output, self.network(*interleave_input))
-        return output
-
-
-class PipelineCell(Cell):
-    """
-    Slice MiniBatch into finer-grained MicroBatch for use in pipeline-parallel training.
-
-    Note:
-        micro_size must be greater or equal to pipeline stages.
-
-    Args:
-        network (Cell): The target network to wrap.
-        micro_size (int): MicroBatch size.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU``
-
-    Examples:
-        >>> import mindspore.nn as nn
-        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
-        >>> net = LeNet5()
-        >>> net = nn.PipelineCell(net, 4)
-    """
-    def __init__(self, network, micro_size):
-        super(PipelineCell, self).__init__(auto_prefix=False)
-        self.network = network
-        self.micro_inputs = nn.CellList()
-        self.micro_size = micro_size
-        self.add_list = []
-        if not isinstance(network, Cell):
-            raise TypeError("For 'PipelineCell', the argument 'network' must cell type, "
-                            "but got the type : {}.".format(type(network)))
-        if not isinstance(micro_size, int):
-            raise TypeError("For 'PipelineCell', the argument 'micro_size' must be integer, "
-                            "but got the type : {}.".format(type(micro_size)))
-        if micro_size <= 0:
-            raise ValueError("For 'PipelineCell', the argument 'micro_size' must be large than 0, "
-                             "but got {}.".format(micro_size))
-        for i in range(micro_size):
-            micro_input = _MicroBatch(micro_size)
-            self.micro_inputs.append(micro_input)
-            self.add = P.Add().add_prim_attr("pipeline_end", i)
-            self.add_list.append(self.add)
-        self._get_attr_from_cell(network)
-
-    def construct(self, *inputs):
-        ret = None
-        for i in range(self.micro_size):
-            micro_input = self.micro_inputs[i](i, *inputs)
-            output = self.network(*micro_input)
-            if ret is not None:
-                ret = self.add_list[i](ret, output)
-            else:
-                ret = output
-        return ret
-
 class GradAccumulationCell(Cell):
     """
     Wrap the network with Micro Batch to enable the grad accumulation in semi_auto_parallel/auto_parallel mode.
 
+    Note:
+        The api will be deprecated, please use the api :class:`mindspore.parallel.nn.GradAccumulation` instead.
+
     Args:
         network (Cell): The target network to wrap.
         micro_size (int): MicroBatch size.
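The hunk above removes MicroBatchInterleaved and PipelineCell from their old position (they are re-added further down in this file) and marks GradAccumulationCell as slated for deprecation in favour of mindspore.parallel.nn.GradAccumulation. A hedged migration sketch; the replacement import path comes from the note above, while the replacement constructor signature is an assumption:

```python
from mindspore import nn

net = nn.Dense(16, 16)                                # stand-in for a real training network
legacy = nn.GradAccumulationCell(net, micro_size=4)   # still exported from mindspore.nn in 2.6.0rc1

# Assumed replacement per the deprecation note; verify the exact signature
# against the mindspore.parallel.nn documentation before relying on it.
# from mindspore.parallel.nn import GradAccumulation
# replacement = GradAccumulation(net, micro_size=4)
```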
@@ -734,12 +644,6 @@ class GradAccumulationCell(Cell):
         return ret
 
 
-def _pipeline_clear_grad(accu_grad, grad):
-    accu_grad = F.depend(accu_grad, grad)
-    zeros = F.zeros_like(accu_grad)
-    return F.assign(accu_grad, zeros)
-
-
 class _TrainGradAccuStepCell(TrainOneStepCell):
     """
     Wraps the network with an optimizer in pipeline mode.

@@ -751,6 +655,13 @@ class _TrainGradAccuStepCell(TrainOneStepCell):
         self.opt_shard = _get_enable_parallel_optimizer()
         self._get_attr_from_cell(network)
         self.enable_tft = False
+        if not self.sense_flag:
+            micro_size = 1.0
+            for _, cell in network.cells_and_names():
+                if hasattr(cell, 'micro_size'):
+                    micro_size = cell.micro_size
+                    break
+            self.sens = 1 / micro_size
 
     def construct(self, *inputs):
         if not self.sense_flag:
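The new default above derives `sens` from the wrapped cell's `micro_size`, so the summed accumulation gradient is averaged back to a per-mini-batch scale. A small worked sketch of that arithmetic in plain Python (the numbers are illustrative only):

```python
micro_size = 4                               # picked up from the wrapped cell above
micro_grads = [0.8, 1.2, 1.0, 1.0]           # gradient of one parameter, per micro-batch
accumulated = sum(micro_grads)               # 4.0 -- what the accumulation buffer holds
sens = 1 / micro_size                        # 0.25 -- the value assigned to self.sens
print(accumulated * sens)                    # 1.0 -- the averaged gradient the optimizer applies
```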
@@ -774,8 +685,10 @@ class _TrainGradAccuStepCell(TrainOneStepCell):
         grads = self.grad_no_sens(self.network, self.weights)(*inputs)
         accu_grads = ops.depend(self.accu_grads, grads)
         if self.opt_shard:
+            grads = self.hyper_map(F.partial(grad_scale, self.sens), grads)
             succ = self.optimizer(grads)
         else:
+            accu_grads = self.hyper_map(F.partial(grad_scale, self.sens), accu_grads)
             succ = self.optimizer(accu_grads)
         loss = ops.depend(loss, succ)
         clear = self.hyper_map(_pipeline_clear_grad, accu_grads, grads)
@@ -964,3 +877,151 @@ class _BroadCastCell(Cell):
         params = self.broadcast(params)
         new_params = self.map_(F.partial(_cast_datatype), datatypes, params)
         return new_params
+
+
+class PipelineCell(Cell):
+    """
+    Slice MiniBatch into finer-grained MicroBatch for use in pipeline-parallel training.
+
+    Note:
+        - micro_size must be greater or equal to pipeline stages.
+        - The api will be deprecated, please use the api :class:`mindspore.parallel.nn.Pipeline` instead.
+
+    Args:
+        network (Cell): The target network to wrap.
+        micro_size (int): MicroBatch size.
+        stage_config (dict, optional): The stage configuration for each cell's execution in pipeline parallel.
+            Default ``None``.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
+    Examples:
+        >>> import mindspore.nn as nn
+        >>> # Define the network structure of LeNet5. Refer to
+        >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
+        >>> net = LeNet5()
+        >>> net = nn.PipelineCell(net, 4)
+    """
+    def __init__(self, network, micro_size, stage_config=None):
+        super(PipelineCell, self).__init__(auto_prefix=False)
+        self.network = network
+        self.micro_inputs = nn.CellList()
+        self.micro_size = micro_size
+        self.add_list = []
+        if not isinstance(network, Cell):
+            raise TypeError("For 'PipelineCell', the argument 'network' must cell type, "
+                            "but got the type : {}.".format(type(network)))
+        if not isinstance(micro_size, int):
+            raise TypeError("For 'PipelineCell', the argument 'micro_size' must be integer, "
+                            "but got the type : {}.".format(type(micro_size)))
+        if micro_size <= 0:
+            raise ValueError("For 'PipelineCell', the argument 'micro_size' must be large than 0, "
+                             "but got {}.".format(micro_size))
+        for i in range(micro_size):
+            micro_input = _MicroBatch(micro_size)
+            self.micro_inputs.append(micro_input)
+            self.add = P.Add().add_prim_attr("pipeline_end", i)
+            self.add_list.append(self.add)
+        self._get_attr_from_cell(network)
+
+        # prase stage_config
+        config_dict = {}
+        if stage_config is not None:
+            for cell_name, stage_num in stage_config.items():
+                config_cell_name = cell_name
+                config_stage_num = stage_num
+                config_dict[config_cell_name] = config_stage_num
+
+        # set cell.stage_config
+        for cell_name, cell in self.network.cells_and_names():
+            for config_cell_name, config_stage_num in config_dict.copy().items():
+                if not cell_name or not config_cell_name:
+                    continue
+                if cell_name == config_cell_name:
+                    setattr(cell, "pipeline_stage", config_stage_num)
+                    del config_dict[config_cell_name]
+
+        for config_cell_name, config_stage_num in config_dict.copy().items():
+            if str(network) == config_cell_name:
+                setattr(network, "pipeline_stage", config_stage_num)
+                del config_dict[config_cell_name]
+
+        # if there are any config elements left, print them
+        if config_dict:
+            for config_cell_name, config_stage_num in config_dict.items():
+                print("pipeline_cell stage_config set pipeline_stage fail!")
+                print("config cell name:" + str(config_cell_name) +
+                      " config stage num:" + str(config_stage_num))
+            print("network:" + str(self.network))
+            print("cell name available:")
+            for cell_name, cell in self.network.cells_and_names():
+                print(cell_name)
+            raise KeyError("For 'PipelineCell', the argument 'stage_config' : {} is not "
+                           "found in 'network' : {}".format(config_dict, network))
+
+    def construct(self, *inputs):
+        ret = None
+        for i in range(self.micro_size):
+            micro_input = self.micro_inputs[i](i, *inputs)
+            output = self.network(*micro_input)
+            if ret is not None:
+                ret = self.add_list[i](ret, output)
+            else:
+                ret = output
+        return ret
+
+
+class MicroBatchInterleaved(Cell):
+    """
+    This function splits the input at the 0th into interleave_num pieces and then performs
+    the computation of the wrapped cell. Application scenario: When there is model parallelism in semi-automatic mode
+    and network, if the first slice data is calculating forward, the second slice data will execute the
+    communication operators at the same time, to achieve the performance acceleration of communication and computing
+    concurrency.
+
+    Args:
+        network (Cell): The target network to wrap.
+        interleave_num (int, optional): split num of batch size. Default: ``2`` .
+
+    Inputs:
+        tuple[Tensor]. It's the same with the input of the `network` .
+
+    Outputs:
+        The wrapped input. The output of the input `network` should be a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
+    Examples:
+        >>> import mindspore.nn as nn
+        >>> # Define the network structure of LeNet5. Refer to
+        >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
+        >>> net = LeNet5()
+        >>> net = nn.MicroBatchInterleaved(net, 2)
+    """
+    def __init__(self, network, interleave_num=2):
+        super(MicroBatchInterleaved, self).__init__(auto_prefix=False)
+        if not isinstance(interleave_num, int):
+            raise TypeError("For 'MicroBatchInterleaved', the argument 'interleave_num' must be integer, "
+                            "but got the type : {}.".format(type(interleave_num)))
+        if interleave_num <= 0:
+            raise ValueError("For 'MicroBatchInterleaved', the argument 'interleave_num' must be large than 0, "
+                             "but got {}.".format(interleave_num))
+        self.network = network
+        self.interleave_num = interleave_num
+        self.interleave_inputs = nn.CellList()
+        self.add = P.Add().add_prim_attr("micro_interleaved_add_flag", True)
+        for _ in range(interleave_num):
+            interleave_data = _MicroBatch(interleave_num)
+            interleave_data.strided_slice.add_prim_attr("strided_slice_flag", True)
+            interleave_data.strided_slice.add_prim_attr("interleave_num", interleave_num)
+            self.interleave_inputs.append(interleave_data)
+        self._get_attr_from_cell(network)
+
+    def construct(self, *inputs):
+        output = 0.0
+        for i in range(self.interleave_num):
+            interleave_input = self.interleave_inputs[i](i, *inputs)
+            output = self.add(output, self.network(*interleave_input))
+        return output
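The stage_config handling added above matches keys against the names yielded by network.cells_and_names() and assigns each matched cell's pipeline_stage; unmatched keys raise the KeyError shown. A small sketch with hypothetical block names, constructed standalone without launching an actual pipeline job:

```python
from mindspore import nn

class TwoStage(nn.Cell):
    def __init__(self):
        super().__init__()
        self.block0 = nn.Dense(16, 16)
        self.block1 = nn.Dense(16, 4)

    def construct(self, x):
        return self.block1(self.block0(x))

net = TwoStage()
pipe = nn.PipelineCell(net, micro_size=4,
                       stage_config={"block0": 0, "block1": 1})
print(net.block0.pipeline_stage, net.block1.pipeline_stage)   # 0 1
```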
@@ -1,4 +1,4 @@
-# Copyright
+# Copyright 2025 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,6 @@ from __future__ import absolute_import
 from mindspore import context
 from mindspore import log as logger
 from mindspore.nn.cell import Cell
-from mindspore.nn.layer import Identity
 from mindspore.communication.management import GlobalComm, get_group_size
 from mindspore.common.sparse_tensor import RowTensorInner
 from mindspore.ops import functional as F, composite as C, operations as P
@@ -28,30 +27,13 @@ import mindspore.common.dtype as mstype
 from mindspore.common.sparse_tensor import Tensor
 from mindspore.common.api import jit
 from mindspore.common.parameter import Parameter
+from mindspore.nn.layer import Identity
 from mindspore.parallel._utils import _get_enable_parallel_optimizer
 
-
-grad_scale = C.MultitypeFuncGraph("grad_scale")
-shard_grad_scale = C.MultitypeFuncGraph("shard_grad_scale")
-reciprocal = P.Reciprocal()
+__all__ = ['DistributedGradReducer']
 
 
-
-def tensor_grad_scale_pipeline(scale, grad, accu_grad):
-    accu_grad = F.depend(accu_grad, grad)
-    new_grad = accu_grad * reciprocal(scale)
-    accu_grad = F.depend(accu_grad, new_grad)
-    zeros = F.tensor_mul(accu_grad, 0.0)
-    new_grad = F.depend(new_grad, F.assign(accu_grad, zeros))
-    return new_grad
-
-
-@shard_grad_scale.register("Tensor", "Tensor", "Tensor")
-def tensor_shard_grad_scale_pipeline(scale, grad, accu_grad):
-    new_grad = grad * reciprocal(scale)
-    accu_grad = F.depend(accu_grad, new_grad)
-    new_grad = F.depend(new_grad, F.assign(accu_grad, F.zeros_like(accu_grad)))
-    return new_grad
+reduce_opt = C.MultitypeFuncGraph("reduce_opt")
 
 
 def _init_allreduce_operators(length, split_indices, group=GlobalComm.WORLD_COMM_GROUP):
@@ -335,14 +317,14 @@ class DistributedGradReducer(Cell):
 
         For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
         Please see the `rank table Startup
-        <https://www.mindspore.cn/
+        <https://www.mindspore.cn/tutorials/en/master/parallel/rank_table.html>`_
        for more details.
 
         For the GPU devices, users need to prepare the host file and mpi, please see the `mpirun Startup
-        <https://www.mindspore.cn/
+        <https://www.mindspore.cn/tutorials/en/master/parallel/mpirun.html>`_ .
 
         For the CPU device, users need to write a dynamic cluster startup script, please see the `Dynamic Cluster
-        Startup <https://www.mindspore.cn/
+        Startup <https://www.mindspore.cn/tutorials/en/master/parallel/dynamic_cluster.html>`_ .
 
         This example should be run with multiple devices.
 
@@ -427,7 +409,8 @@ class DistributedGradReducer(Cell):
         self.degree = degree
         self.degree = Tensor(1.0 / self.degree, mstype.float32)
 
-        self.allreduce_filter = tuple((x.layerwise_parallel is False) and
+        self.allreduce_filter = tuple((x.layerwise_parallel is False) and
+                                      (not x.param_info.is_in_pynative_shard) for x in parameters)
         is_parallel_optimizer = context.get_auto_parallel_context("enable_parallel_optimizer")
         split_indices = auto_parallel_context().get_all_reduce_fusion_split_indices()
         if is_parallel_optimizer and split_indices:
@@ -447,7 +430,7 @@ class DistributedGradReducer(Cell):
         self.mode = context.get_context("mode")
         self.enable_tuple_broaden = True
 
-    @jit
+    @jit(backend="ms_backend")
     def construct(self, grads):
         """
         Under certain circumstances, the data precision of grads could be mixed with float16 and float32. Thus, the
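The construct method above now pins the compilation backend explicitly. A hedged sketch of the same decorator applied to a free function; beyond the "ms_backend" string used in this diff, the set of accepted backend values is an assumption:

```python
import mindspore as ms
from mindspore import Tensor

@ms.jit(backend="ms_backend")   # same keyword argument as in the hunk above
def scaled_sum(x, y):
    return (x + y) * 0.5

print(scaled_sum(Tensor([2.0], ms.float32), Tensor([4.0], ms.float32)))   # [3.]
```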
@@ -488,13 +471,39 @@ class DistributedGradReducer(Cell):
             raise RuntimeError("{} can not use DistributedGradReducer in graph mode".format(parallel_mode))
 
 
+grad_scale = C.MultitypeFuncGraph("grad_scale")
+shard_grad_scale = C.MultitypeFuncGraph("shard_grad_scale")
+reciprocal = P.Reciprocal()
+
+
+@grad_scale.register("Tensor", "Tensor", "Tensor")
+def tensor_grad_scale_pipeline(scale, grad, accu_grad):
+    accu_grad = F.depend(accu_grad, grad)
+    new_grad = accu_grad * reciprocal(scale)
+    accu_grad = F.depend(accu_grad, new_grad)
+    zeros = F.tensor_mul(accu_grad, 0.0)
+    new_grad = F.depend(new_grad, F.assign(accu_grad, zeros))
+    return new_grad
+
+
+@shard_grad_scale.register("Tensor", "Tensor", "Tensor")
+def tensor_shard_grad_scale_pipeline(scale, grad, accu_grad):
+    new_grad = grad * reciprocal(scale)
+    accu_grad = F.depend(accu_grad, new_grad)
+    new_grad = F.depend(new_grad, F.assign(accu_grad, F.zeros_like(accu_grad)))
+    return new_grad
+
+
 class PipelineGradReducer(Cell):
     """
     PipelineGradReducer is a gradient reducer for pipeline parallelism.
 
+    Note:
+        The api will be deprecated, please use the api :class:`mindspore.parallel.nn.PipelineGradReducer` instead.
+
     Args:
         parameters (list): the parameters to be updated.
-        scale_sense (float): the scale sense of the gradient. Default: 1.0
+        scale_sense (float, optional): the scale sense of the gradient. Default: ``1.0``.
 
     Raise:
         RuntimeError: If the mode is not graph mode.

@@ -509,11 +518,11 @@ class PipelineGradReducer(Cell):
 
         For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
         Please see the `rank table Startup
-        <https://www.mindspore.cn/
+        <https://www.mindspore.cn/tutorials/en/master/parallel/rank_table.html>`_
        for more details.
 
         For the GPU devices, users need to prepare the host file and mpi, please see the `mpirun Startup
-        <https://www.mindspore.cn/
+        <https://www.mindspore.cn/tutorials/en/master/parallel/mpirun.html>`_ .
 
         This example should be run with multiple devices.
 
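grad_scale and shard_grad_scale above are MultitypeFuncGraph objects with one overload registered per ("Tensor", "Tensor", "Tensor") signature, and PipelineGradReducer applies them across the gradient tuple with C.HyperMap. A minimal sketch of that dispatch pattern, using a single-tensor signature and eager execution for brevity:

```python
import mindspore as ms
from mindspore import Tensor
from mindspore.ops import composite as C

halve = C.MultitypeFuncGraph("halve")

@halve.register("Tensor")
def _halve_tensor(grad):
    # One Python overload per registered type signature, as above.
    return grad * 0.5

hyper_map = C.HyperMap()
grads = (Tensor([4.0], ms.float32), Tensor([6.0], ms.float32))
print(hyper_map(halve, grads))    # (Tensor [2.], Tensor [3.])
```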
@@ -554,7 +563,7 @@ class PipelineGradReducer(Cell):
         >>> net.layer3.pipeline_stage = 1
         >>> loss_fn = nn.CrossEntropyLoss()
         >>> optimizer = nn.SGD(net.trainable_params(), 1e-2)
-        >>> net_with_loss = nn.
+        >>> net_with_loss = nn.Pipeline(nn.WithLossCell(net, loss_fn), 2)
         >>> net_with_loss.set_train()
         >>> def forward_fn(inputs, target):
         ...     loss = net_with_loss(inputs, target)

@@ -576,7 +585,7 @@ class PipelineGradReducer(Cell):
         >>> print(loss)
         46.36721
     """
-    def __init__(self, parameters, scale_sense=1.0):
+    def __init__(self, parameters, scale_sense=1.0, opt_shard=None):
         super(PipelineGradReducer, self).__init__(auto_prefix=False)
         self._check_mode()
         self.accu_grads = parameters.clone(prefix="accu_grads", init="zeros")

@@ -584,7 +593,10 @@ class PipelineGradReducer(Cell):
         self.degree = Tensor(1, mstype.float32)
         self.scale_sense = Parameter(scale_sense, name='scale_sense')
         self.hyper_map = C.HyperMap()
-
+        if opt_shard is None:
+            self.opt_shard = _get_enable_parallel_optimizer()
+        else:
+            self.opt_shard = opt_shard
 
     @jit
     def construct(self, grads):

@@ -603,6 +615,3 @@ class PipelineGradReducer(Cell):
         mode = context.get_context('mode')
         if mode != context.GRAPH_MODE:
             raise RuntimeError(f"PipelineGradReducer only support graph mode, but get {mode}")
-        parallel_mode = context.get_auto_parallel_context('parallel_mode')
-        if parallel_mode not in (context.ParallelMode.SEMI_AUTO_PARALLEL, context.ParallelMode.AUTO_PARALLEL):
-            raise RuntimeError(f"{parallel_mode} can not use PipelineGradReducer in graph mode")
mindspore/nn/wrap/loss_scale.py  CHANGED

@@ -92,8 +92,8 @@ class DynamicLossScaleUpdateCell(Cell):
     Dynamic Loss scale update cell.
 
     For loss scaling training, the initial loss scaling value will be set to be `loss_scale_value`.
-    In each training step, the loss scaling value will be decreased by `
-    when there is an overflow. And it will be increased by
+    In each training step, the loss scaling value will be decreased by :math:`loss\_scale/scale\_factor`
+    when there is an overflow. And it will be increased by :math:`loss\_scale * scale\_factor` if there is no
     overflow for a continuous `scale_window` steps.
 
     `get_update_cell` method of :class:`mindspore.amp.DynamicLossScaleManager` will return this class. It will be called
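The reworded docstring above pins down the update rule: divide the scale by scale_factor on overflow, multiply it by scale_factor after scale_window consecutive clean steps. A plain-Python sketch of that rule (the floor of 1 on the downscaled value is an assumption, not stated in this docstring):

```python
def update_loss_scale(loss_scale, overflow, good_steps, scale_factor=2, scale_window=1000):
    if overflow:
        return max(loss_scale / scale_factor, 1), 0        # shrink and reset the clean-step counter
    good_steps += 1
    if good_steps >= scale_window:
        return loss_scale * scale_factor, 0                # grow after scale_window clean steps
    return loss_scale, good_steps

print(update_loss_scale(2.0 ** 24, overflow=True, good_steps=10))    # (8388608.0, 0)
```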
@@ -506,7 +506,7 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
         overflow = AllFinite()(compute_output)
 
         if self.is_distributed:
-            overflow = P.Cast()(overflow, mstype.
+            overflow = P.Cast()(overflow, mstype.float32)
             overflow = P.Cast()(self.allreduce(overflow), mstype.bool_)
         return overflow
 
@@ -30,7 +30,7 @@ from mindspore.ops.primitive import constexpr, _primexpr
 from mindspore.ops.function.random_func import _get_seed
 from mindspore.nn.layer.basic import tril as nn_tril
 from mindspore.nn.layer.basic import triu as nn_triu
-from mindspore._c_expression import
+from mindspore._c_expression import TensorPy as Tensor_
 
 from mindspore.numpy.utils import _check_input_for_asarray, _deep_list, _deep_tensor_to_nparray, \
     _check_input_tensor, _convert_64_to_32, _get_dtype_from_scalar, \
@@ -133,7 +133,7 @@ def asarray_const(a, dtype=None):
     elif dtype == mstype.int64:
         dtype = mstype.int32
     if a.size == 0:
-        a =
+        a = Tensor(a)
 
     if isinstance(a, onp.ndarray) and dtype is None:
         if a.dtype is onp.dtype('object'):
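asarray_const above is an internal helper behind the public mindspore.numpy conversion functions; the changed branch covers zero-size inputs. A hedged usage sketch of the public entry point:

```python
import mindspore.numpy as mnp

x = mnp.asarray([[1, 2], [3, 4]])    # int64 input is narrowed to int32, as handled above
print(x.shape, x.dtype)              # (2, 2) Int32

empty = mnp.asarray([])              # exercises the a.size == 0 branch
print(empty.shape)                   # (0,)
```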
@@ -945,7 +945,7 @@ def identity(n, dtype=mstype.float32):
 @constexpr
 def empty_compile(dtype, shape):
     """Returns an empty Tensor."""
-    return
+    return Tensor(dtype=dtype, shape=shape)
 
 
 def empty(shape, dtype=mstype.float32):
mindspore/numpy/array_ops.py  CHANGED