mindspore 2.5.0__cp39-cp39-win_amd64.whl → 2.6.0rc1__cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore has been flagged as potentially problematic by the registry.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +6 -4
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -33
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +19 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +24 -193
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +97 -74
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +4 -4
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +4 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -1
- mindspore/common/_stub_tensor.py +5 -10
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +1915 -3287
- mindspore/common/api.py +341 -354
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +5 -2
- mindspore/common/dump.py +7 -5
- mindspore/common/file_system.py +3 -0
- mindspore/common/hook_handle.py +5 -3
- mindspore/common/initializer.py +10 -6
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +2 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +106 -39
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +297 -714
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +47 -2
- mindspore/communication/comm_func.py +70 -53
- mindspore/communication/management.py +83 -17
- mindspore/context.py +214 -560
- mindspore/dataset/__init__.py +44 -20
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +102 -120
- mindspore/dataset/engine/datasets_audio.py +22 -22
- mindspore/dataset/engine/datasets_standard_format.py +43 -24
- mindspore/dataset/engine/datasets_text.py +78 -85
- mindspore/dataset/engine/datasets_user_defined.py +108 -76
- mindspore/dataset/engine/datasets_vision.py +111 -108
- mindspore/dataset/engine/iterators.py +5 -3
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/samplers.py +279 -57
- mindspore/dataset/engine/serializer_deserializer.py +2 -1
- mindspore/dataset/engine/validators.py +10 -0
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/device_context/ascend/op_debug.py +60 -1
- mindspore/device_context/ascend/op_tuning.py +0 -4
- mindspore/device_manager.py +39 -3
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +22 -26
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +4 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +40 -22
- mindspore/experimental/optim/radam.py +5 -5
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -81
- mindspore/hal/event.py +38 -55
- mindspore/hal/memory.py +93 -144
- mindspore/hal/stream.py +81 -125
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +40 -2
- mindspore/mindrecord/__init__.py +20 -7
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +131 -700
- mindspore/mint/distributed/__init__.py +5 -1
- mindspore/mint/distributed/distributed.py +194 -109
- mindspore/mint/linalg/__init__.py +2 -0
- mindspore/mint/nn/__init__.py +280 -18
- mindspore/mint/nn/functional.py +282 -64
- mindspore/mint/nn/layer/__init__.py +4 -0
- mindspore/mint/nn/layer/_functions.py +7 -3
- mindspore/mint/nn/layer/activation.py +120 -13
- mindspore/mint/nn/layer/conv.py +218 -24
- mindspore/mint/nn/layer/normalization.py +15 -16
- mindspore/mint/nn/layer/padding.py +1 -1
- mindspore/mint/nn/layer/pooling.py +66 -1
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1250 -176
- mindspore/nn/layer/activation.py +23 -21
- mindspore/nn/layer/basic.py +22 -16
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +22 -17
- mindspore/nn/layer/embedding.py +9 -8
- mindspore/nn/layer/normalization.py +48 -42
- mindspore/nn/layer/pooling.py +75 -31
- mindspore/nn/layer/transformer.py +11 -10
- mindspore/nn/learning_rate_schedule.py +4 -2
- mindspore/nn/loss/loss.py +27 -19
- mindspore/nn/optim/ada_grad.py +6 -5
- mindspore/nn/optim/adadelta.py +9 -7
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +16 -12
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +1 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +9 -7
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +178 -117
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +3 -3
- mindspore/numpy/array_creations.py +3 -3
- mindspore/numpy/array_ops.py +1 -1
- mindspore/numpy/math_ops.py +4 -4
- mindspore/numpy/utils.py +1 -2
- mindspore/numpy/utils_const.py +1 -2
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +3 -2
- mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
- mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
- mindspore/ops/_vmap/vmap_array_ops.py +7 -6
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
- mindspore/ops/_vmap/vmap_math_ops.py +4 -7
- mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +102 -49
- mindspore/ops/auto_generate/gen_extend_func.py +281 -135
- mindspore/ops/auto_generate/gen_ops_def.py +2574 -2326
- mindspore/ops/auto_generate/gen_ops_prim.py +8566 -2755
- mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +19 -24
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -3
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +28 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +1629 -2345
- mindspore/ops/function/clip_func.py +38 -45
- mindspore/ops/function/debug_func.py +36 -44
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +46 -78
- mindspore/ops/function/math_func.py +3035 -3705
- mindspore/ops/function/nn_func.py +676 -241
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +17 -30
- mindspore/ops/function/random_func.py +204 -361
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +5 -5
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +24 -17
- mindspore/ops/functional.py +6 -4
- mindspore/ops/functional_overload.py +547 -4
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +10 -5
- mindspore/ops/operations/_custom_ops_utils.py +247 -0
- mindspore/ops/operations/_grad_ops.py +1 -10
- mindspore/ops/operations/_inner_ops.py +5 -76
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +37 -22
- mindspore/ops/operations/comm_ops.py +150 -107
- mindspore/ops/operations/custom_ops.py +221 -23
- mindspore/ops/operations/debug_ops.py +115 -16
- mindspore/ops/operations/inner_ops.py +1 -1
- mindspore/ops/operations/linalg_ops.py +1 -58
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +746 -79
- mindspore/ops/operations/math_ops.py +21 -18
- mindspore/ops/operations/nn_ops.py +65 -191
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +43 -32
- mindspore/ops/tensor_method.py +232 -13
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
- mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
- mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
- mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
- mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
- mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
- mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
- mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
- mindspore/ops_generate/{template.py → common/template.py} +96 -84
- mindspore/ops_generate/gen_ops.py +23 -325
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
- mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -7
- mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
- mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
- mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
- mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
- mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
- mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
- mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
- mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
- mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
- mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
- mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
- mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
- mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
- mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +6 -2
- mindspore/parallel/_auto_parallel_context.py +133 -6
- mindspore/parallel/_cell_wrapper.py +130 -15
- mindspore/parallel/_parallel_serialization.py +95 -4
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +198 -25
- mindspore/parallel/algo_parameter_config.py +3 -3
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +656 -37
- mindspore/parallel/cluster/process_entity/_api.py +151 -19
- mindspore/parallel/cluster/run.py +1 -1
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +259 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +24 -13
- mindspore/parallel/shard.py +137 -61
- mindspore/parallel/transform_safetensors.py +287 -95
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +9 -5
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
- mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +22 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
- mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/common/constant.py +12 -0
- mindspore/profiler/common/msprof_cmd_tool.py +42 -23
- mindspore/profiler/common/path_manager.py +24 -0
- mindspore/profiler/common/profiler_context.py +26 -2
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_parameters.py +59 -18
- mindspore/profiler/common/profiler_path_manager.py +66 -7
- mindspore/profiler/dynamic_profiler.py +112 -79
- mindspore/profiler/envprofiler.py +26 -1
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +57 -14
- mindspore/profiler/platform/npu_profiler.py +33 -7
- mindspore/profiler/profiler.py +541 -45
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +4 -0
- mindspore/profiler/schedule.py +57 -22
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +25 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +2 -2
- mindspore/runtime/executor.py +40 -11
- mindspore/runtime/memory.py +25 -8
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +35 -7
- mindspore/train/amp.py +1 -1
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +24 -40
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_flops_collector.py +2 -3
- mindspore/train/callback/_history.py +7 -4
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +8 -13
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +179 -103
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +4 -5
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +8 -6
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +19 -12
- mindspore/train/model.py +176 -103
- mindspore/train/serialization.py +246 -988
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +4 -2
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +2 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +483 -438
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_constants.py +0 -190
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
- /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
mindspore/_extends/parse/standard_method.py
CHANGED
@@ -19,7 +19,7 @@
 from __future__ import absolute_import
 from mindspore import Tensor, CSRTensor, COOTensor, Parameter
 from mindspore import dtype as mstype
-from mindspore._c_expression import
+from mindspore._c_expression import TensorPy as Tensor_
 from mindspore.common import mutable
 from mindspore.common.generator import default_generator
 import mindspore.common._monad as monad
@@ -28,7 +28,7 @@ from mindspore.ops.composite.base import _append, _insert, _pop, _list_clear, _r
     _extend, _dict_setitem, _dict_clear, _haskey, _update, _fromkeys
 from mindspore.ops.operations._sequence_ops import TensorToTuple
 from mindspore.ops.auto_generate import trace_v2_op, inplace_addmm_op, inplace_index_put_op, inplace_normal_op, inplace_index_add_op
-from mindspore.ops.auto_generate import inplace_copy_op
+from mindspore.ops.auto_generate import inplace_copy_op, inplace_uniform_op, inplace_erfinv_op
 from mindspore.ops.auto_generate import inplace_scatter_add as inplace_scatter_add_
 
 from ... import _checkparam as validator
@@ -39,7 +39,7 @@ from ...ops import operations as P
 from ...ops import composite
 from ...ops.operations import array_ops
 from ...ops.composite import MultitypeFuncGraph, env_get, hyper_add, \
-    zeros_like, ones_like,
+    zeros_like, ones_like, multitype_ops, _ones_like_for_grad
 from ...ops.composite.multitype_ops import _constexpr_utils as const_utils
 from ...ops.composite.multitype_ops import _compile_utils as compile_utils
 from ...ops.operations._inner_ops import Format
@@ -52,7 +52,7 @@ from ...ops.operations._sequence_ops import ListAppend, ListInsert, SequenceMax,
     SequenceIndex
 
 __all__ = ['MultitypeFuncGraph', 'env_get',
-           'hyper_add', 'zeros_like', 'ones_like']
+           'hyper_add', 'zeros_like', 'ones_like', '_ones_like_for_grad']
 
 shape_ = P.Shape()
 dtype_ = P.DType()
@@ -421,7 +421,7 @@ def hasattr(x, attr):  # pylint: disable=redefined-builtin
 
     Args:
         x (object): Input object.
-        attr (
+        attr (str): The name of attribute
 
     Returns:
         Boolean value, indicates whether the object x has attribute attr.
@@ -1232,6 +1232,13 @@ def pow(x, y):  # pylint: disable=redefined-builtin
     return F.pow(x, y)
 
 
+def put_(x, index, source, accumulate=False):  # pylint: disable=redefined-builtin
+    """
+    Copies the elements from source into the positions specified by index.
+    """
+    return F.put_(x, index, source, accumulate)
+
+
 def log(x):
     """
     Calculate the logarithm of Tensor.
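For orientation, a minimal usage sketch of the new put_ helper, assuming it is also exposed as Tensor.put_ like the other methods in this file and that index addresses the flattened tensor; tensors and values below are illustrative only:

    import numpy as np
    from mindspore import Tensor

    x = Tensor(np.array([[1, 2], [3, 4]], np.int32))
    index = Tensor(np.array([0, 3], np.int64))
    source = Tensor(np.array([9, 8], np.int32))
    x.put_(index, source)                    # writes 9 and 8 at flattened positions 0 and 3
    x.put_(index, source, accumulate=True)   # with accumulate=True the values are added instead of overwritten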
@@ -1274,12 +1281,12 @@ def logcumsumexp(input, axis):
     return F.logcumsumexp(input, axis)
 
 
-def logsumexp(input,
+def logsumexp(input, dim, keepdim=False):
     """
     Reduces a dimension of a tensor by calculating exponential for all elements in the dimension,
     then calculate logarithm of the sum.
     """
-    return F.logsumexp(input,
+    return F.logsumexp(input, dim, keepdim)
 
 
 def round_(x):
@@ -1289,12 +1296,11 @@ def round_(x):
     return F.round(x)
 
 
-def roll(x, shifts, dims):
+def roll(x, shifts, dims=None):
     """
     Rolls the elements of a tensor along an axis.
     """
-
-    return F.Roll(shifts, dims)(x)
+    return F.roll(x, shifts, dims)
 
 
 def rot90(x, k, dims):
@@ -1362,11 +1368,11 @@ def remainder(input, divisor):
    return F.remainder(input, divisor)
 
 
-def unique_consecutive(input,
+def unique_consecutive(input, return_inverse=False, return_counts=False, dim=None):
     """
     Returns the elements that are unique in each consecutive group of equivalent elements in the input tensor.
     """
-    return F.unique_consecutive(input,
+    return F.unique_consecutive(input, return_inverse, return_counts, dim)
 
 
 def unique_with_pad(x, pad_num):
@@ -1850,7 +1856,7 @@ def searchsorted(x, v, side='left', sorter=None):
 
     if side not in ('left', 'right'):
         raise ValueError(f"For 'Tensor.searchsorted', the argument 'side' should be one of in "
-
+                         f"['left', 'right'], but got {side}.")
     if not isinstance(v, Tensor):
         v = const_utils.make_tensor(v)
     if sorter is not None:
@@ -2177,71 +2183,53 @@ def nanmedian(input, axis=-1, keepdims=False):
     return F.nanmedian(input, axis, keepdims)
 
 
-def repeat(x,
+def repeat(x, *args, repeats=None):
     """
     Repeat elements of an array.
 
     Args:
         x (Tensor): Input tensor.
-
-
-
-            use the flattened input tensor, and return a flat output tensor.
+        args (*int): To simulate an overload like ``repeat(x, *repeats: int)``.
+        repeats (Union[int, tuple[int], list[int]]): The number of repetitions of `a` along
+            each axis. Requires that ``len(repeats) >= x.rank``.
 
     Returns:
-        Tensor,
+        Tensor, the repeated output array.
 
     Raises:
-        ValueError: if axis is out of range.
         TypeError: if input is not a Tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>>
-        >>>
-        >>>
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        if
-
-
-
-
-
-
-
-
-
-    if len(repeats) == 1:
-        repeats = repeats[0]
-        if repeats == 0:
-            return empty_tensor(x.dtype)
-        return repeat_elements(x, repeats, axis)
-    size = x.shape[axis]
-    if len(repeats) != size:
-        const_utils.raise_value_error(
-            'operands could not be broadcast together')
-    subs = P.Split(axis, size)(x)
-    repeated_subs = []
-    for sub_item, rep in zip(subs, repeats):
-        if rep != 0:
-            repeated_subs.append(repeat_elements(sub_item, rep, axis))
-    return P.Concat(axis)(repeated_subs)
+        >>> from mindspore import Tensor
+        >>> a = tensor([0, 1, 2])
+        >>> output = a.repeat(2, 2)  # same as a.repeat((2, 2))
+        >>> print(output)
+        [[0 1 2 0 1 2]
+         [0 1 2 0 1 2]]
+    """
+    # only simulate 2 overload of repeat. Further check by F.tile
+    if repeats is None:
+        # no `repeats`: called by positional arguments like ``x.repeat(...)``
+        if len(args) == 1 and isinstance(args[0], (list, tuple)):
+            repeats = tuple(args[0])  # transform ``x.repeat([x0, x1, ...])`` (list type) to tuple
+        else:
+            repeats = args  # called as variable-length parameter like ``x.repeat(x0, x1, ...)``
+    else:
+        if args:  # simulate an exception thrown by Python interpreter
+            raise TypeError("repeat() got multiple values for argument 'repeat'")
+        # transform named argument with list type like ``x.repeat(repeats=[x0, x1, ...])`` to tuple
+        if isinstance(repeats, list):
+            repeats = tuple(repeats)
+    x_rank = F.rank(x)
+    if len(repeats) < x_rank:
+        raise ValueError(
+            "For repeat, number of items of repeats can not be smaller than the number of "
+            f"dimensions of self tensor, but got repeats with {len(repeats)}"
+            f" items and rank of self Tensor is {x_rank}.")
+    return F.tile(x, repeats)
 
 
 def repeat_interleave(x, repeats, dim=None):
@@ -2395,7 +2383,7 @@ def bool_func(*data):
 def cast_to_int(*data):
     target = data[0]
     if isinstance(target, (Tensor, Tensor_)):
-        target = Tensor(target
+        target = Tensor(target)
     if len(data) == 1:
         return int(target)
     return int(target, data[1])
@@ -2433,7 +2421,7 @@ def int_func(*data):
 @constexpr
 def cast_to_float(data):
     if isinstance(data, (Tensor, Tensor_)):
-        data = Tensor(data
+        data = Tensor(data)
     return float(data)
 
 
@@ -2496,6 +2484,18 @@ def tuple_func(data):
     return ret
 
 
+def dict_func(data):
+    """Implementation of `dict`."""
+    if isinstance(data, (tuple, list)):
+        keys = F.make_tuple()
+        values = F.make_tuple()
+        for pair in data:
+            keys = keys + F.make_tuple(pair[0])
+            values = values + F.make_tuple(pair[1])
+        return F.make_dict(keys, values)
+    raise TypeError('Currently, dict() only supports tuple or list input.')
+
+
 def ms_zip(*data):
     """Packs elements in the corresponding positions in multiple sequences into tuples."""
     x = ()
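A rough sketch of what dict_func enables inside compiled code, assuming dict() over a list of (key, value) pairs is routed to this helper during graph compilation; the jit-decorated function below is illustrative only:

    from mindspore import Tensor, jit

    @jit
    def to_dict(a, b):
        # handled by dict_func: build a dict from a sequence of key/value pairs
        return dict([("a", a), ("b", b)])

    out = to_dict(Tensor(1), Tensor(2))   # expected: {'a': Tensor(1), 'b': Tensor(2)}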
@@ -2605,7 +2605,7 @@ def ms_max_one_element(x):
 def ms_max(*data):
     """Implementation of `max`."""
     len_data = get_max_min_data_len(data)
-    if len_data <= 0:
+    if len_data <= 0:  # pylint: disable=no-else-raise
         raise TypeError("max() requires 1 argument at least.")
     elif len_data == 1:
         x = data[0]
@@ -2681,7 +2681,7 @@ def ms_min_one_element(x):
 def ms_min(*data):
     """Implementation of `min`."""
     len_data = get_max_min_data_len(data)
-    if len_data <= 0:
+    if len_data <= 0:  # pylint: disable=no-else-raise
         raise TypeError("min() requires 1 argument at least.")
     elif len_data == 1:
         x = data[0]
@@ -3214,7 +3214,7 @@ def random_categorical(x, num_sample, seed=0, dtype=mstype.int64):
 @constexpr
 def empty_tensor(dtype):
     """Return empty tensor"""
-    return
+    return Tensor([], dtype)
 
 
 @constexpr
@@ -3291,7 +3291,7 @@ check_bool = constexpr(validator.check_bool)
 @constexpr
 def empty_compile(dtype, shape):
     """Returns an empty Tensor."""
-    return
+    return Tensor(dtype=dtype, shape=shape)
 
 
 def tensor_bool(x):
@@ -3414,8 +3414,8 @@ def normal_(input, mean=0, std=1, *, generator=None):
     """
     if generator is None:
         generator = default_generator
-
-
+
+    seed, offset = generator._step(generator_step_)
     return inplace_normal_op(input, mean, std, seed, offset)
 
 
@@ -3698,11 +3698,11 @@ def sparse_ndim_(x):
     return F.tuple_len(x.shape)
 
 
-def bernoulli(input,
+def bernoulli(input, *, generator=None):
     """
     Randomly draws binary numbers from a Bernoulli distribution.
     """
-    return F.
+    return F.bernoulli_ext(input, generator=generator)
 
 
 def gather_nd(input_x, indices):
@@ -3935,7 +3935,7 @@ def atanh(x):
     return F.atanh(x)
 
 
-def baddbmm(x, batch1, batch2, beta=1, alpha=1):
+def baddbmm(x, batch1, batch2, *, beta=1, alpha=1):
     r"""
     For details, please refer to :func:`mindspore.ops.baddbmm`.
     """
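Note that beta and alpha become keyword-only in this code path; a hedged illustration of the call style this implies (shapes are arbitrary, and whether a given call reaches this helper depends on how the Tensor method dispatches):

    import numpy as np
    from mindspore import Tensor

    x = Tensor(np.zeros((2, 3, 5), np.float32))
    b1 = Tensor(np.ones((2, 3, 4), np.float32))
    b2 = Tensor(np.ones((2, 4, 5), np.float32))
    y = x.baddbmm(b1, b2, beta=1, alpha=0.5)   # beta/alpha passed as keywords
    # x.baddbmm(b1, b2, 1, 0.5) would now raise TypeError in this code path (positional beta/alpha)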
@@ -4103,6 +4103,16 @@ def erfinv(input):
     return F.erfinv(input)
 
 
+def erfinv_(input):
+    r"""
+    For details, please refer to :func:`mindspore.Tensor.erfinv_`.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+    """
+    return inplace_erfinv_op(input)
+
+
 def less_equal(input, other):
     r"""
     Computes the boolean value of :math:`input\_x <= other` element-wise.
@@ -4467,6 +4477,19 @@ def uniform(input, from_=0., to=1., generator=None):
     return F.uniform_ext(input, from_, to, generator)
 
 
+def uniform_(input, from_=0, to=1, *, generator=None):
+    r"""
+    For details, please refer to :func:`mindspore.Tensor.uniform_`.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+    """
+    if generator is None:
+        generator = default_generator
+    seed, offset = generator._step(generator_step_)  # pylint: disable=protected-access
+    return inplace_uniform_op(input, from_, to, seed, offset)
+
+
 def amin(input, axis=None, keep_dims=False):
     r"""
     For details, please refer to :func:`mindspore.ops.amin`.
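A short usage sketch for the new in-place helpers above, assuming they surface as Tensor.uniform_ and Tensor.erfinv_ (both are flagged experimental in the diff; values are illustrative):

    import numpy as np
    from mindspore import Tensor

    x = Tensor(np.zeros((2, 3), np.float32))
    x.uniform_(0.0, 1.0)    # fill x in place with samples from U(0, 1)
    x.erfinv_()             # apply the inverse error function to x in place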
mindspore/_extends/pijit/__init__.py
CHANGED
@@ -18,6 +18,6 @@ Helper module for pijit analyze
 
 
 from .pijit_func_white_list import _func_map as pijit_func_white_list_map
+from .tensor_func_list import get_tensor_method_name
 
-
-__all__ = ['pijit_func_white_list_map']
+__all__ = ['pijit_func_white_list_map', "get_tensor_method_name"]
mindspore/_extends/pijit/pijit_func_white_list.py
CHANGED
@@ -29,7 +29,7 @@ from mindspore.common.api import jit
 from mindspore.common.tensor import Tensor
 from mindspore.common._register_for_tensor import Registry
 from mindspore._c_expression import MetaFuncGraph_, function_id
-from mindspore._c_expression import
+from mindspore._c_expression import TensorPy as Tensor_
 from mindspore._extends.parse.resources import convert_object_map
 from mindspore import _checkparam as validator
 from mindspore import Parameter, ParameterTuple
@@ -49,11 +49,12 @@ from mindspore.train.data_sink import _init_sink_dataset
 from mindspore.train.summary import SummaryRecord
 from mindspore.train._utils import _exec_datagraph
 from mindspore.train.summary.writer import BaseWriter
-from mindspore.train.serialization import _exec_save, load, export_split_mindir,
+from mindspore.train.serialization import _exec_save, load, export_split_mindir, _parse_ckpt_proto, \
     _generate_front_info_for_param_data_file, _get_data_file, _encrypt_data, _split_save, _save_mindir_together, \
     _load_into_param_dict
 from mindspore.parallel import _cost_model_context
 from mindspore.parallel._offload_context import offload_context
+from mindspore.parallel._utils import _is_in_data_parallel_mode
 from mindspore.run_check._check_version import check_version_and_env_config
 from mindspore.dataset.callback.ds_callback import DSCallback, WaitedDSCallback
 from mindspore.dataset.transforms.c_transforms import TensorOperation as CTensorOperation, OneHot as COneHot, \
@@ -360,6 +361,7 @@ FUNC_KEY_DICT_ITEMS = 22  # dict.items
 FUNC_KEY_PRIMITIVE_ASSIGN = 23  # mindspore.ops.assign, Primitive("Assign")
 FUNC_KEY_TENSOR_SETITEM = 24  # Tensor.__setitem__
 FUNC_KEY_TENSOR_ASSIGN_VALUE = 25  # Tensor.assign_value
+FUNC_KEY_TENSOR_IS_CONTIGUOUS = 26  # Tensor.is_contiguous
 
 # Initialized only once. This map will initialize by c++ when start pijit.
 # key is customer if fuzzy match. (Primitive, constexpr, primexpr, MetaFuncGraph)
@@ -376,19 +378,19 @@ _func_map = {
     constexpr_key: FUNC_KEY_CONSTEXPR,
     primexpr_key: FUNC_KEY_PRIMEXPR,
     meta_func_graph_key: FUNC_KEY_META_FUNCG_RAPH,
-
+    function_id(GraphCell.__call__): FUNC_KEY_GRAPH_CELL,
     id(psjit_code): FUNC_KEY_PSJIT_CODE,
-
-
+    function_id(_get_cache_prim): FUNC_KEY_GET_CACHE_PRIM,
+    function_id(Registry.get): FUNC_KEY_REGISTRY_GET,
 
     # tensor side-effect
     primitive_assign_key: FUNC_KEY_PRIMITIVE_ASSIGN,
-
-
-
+    function_id(F.assign): FUNC_KEY_PRIMITIVE_ASSIGN,
+    function_id(Tensor.assign_value): FUNC_KEY_TENSOR_ASSIGN_VALUE,
+    function_id(Tensor.__setitem__): FUNC_KEY_TENSOR_SETITEM,
 
     # Tensor method
-
+    function_id(Tensor.astype): FUNC_KEY_TENSOR_ASTYPE,
 
     # types.BuiltinFunctionType
     function_id(isinstance): FUNC_KEY_BUILTIN_FUNC,
@@ -448,6 +450,7 @@ _func_map = {
     function_id(str.isalnum): FUNC_KEY_BUILTIN_FUNC,
     function_id(str.isidentifier): FUNC_KEY_BUILTIN_FUNC,
     function_id(str.isprintable): FUNC_KEY_BUILTIN_FUNC,
+    function_id(str.replace): FUNC_KEY_BUILTIN_FUNC,
     function_id(str.format): FUNC_KEY_BUILTIN_FUNC,
     function_id(str.format_map): FUNC_KEY_BUILTIN_FUNC,
     function_id(str.__format__): FUNC_KEY_BUILTIN_FUNC,
@@ -472,7 +475,7 @@ _func_map = {
     function_id(Tensor_.getitem_index_info): FUNC_KEY_BUILTIN_FUNC,
     function_id(Tensor_.get_bytes): FUNC_KEY_BUILTIN_FUNC,
     function_id(Tensor_.is_init): FUNC_KEY_BUILTIN_FUNC,
-    function_id(Tensor_.is_contiguous):
+    function_id(Tensor_.is_contiguous): FUNC_KEY_TENSOR_IS_CONTIGUOUS,
     function_id(Tensor_.stride): FUNC_KEY_BUILTIN_FUNC,
     # Tensor_.asnumpy need real tensor value
 
@@ -488,6 +491,7 @@ _func_map = {
     function_id(validator.check_number_range): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(validator.check_is_int): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(validator.check_is_number): FUNC_KEY_PIJIT_CONSTEXPR,
+    function_id(validator.check_positive_int_sequence): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(np_version_valid): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(_is_initialized): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(_set_elegant_exit_handle): FUNC_KEY_PIJIT_CONSTEXPR,
@@ -496,7 +500,9 @@ _func_map = {
     function_id(get_rank_size): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(get_rank_id): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(offload_context): FUNC_KEY_PIJIT_CONSTEXPR,
+    function_id(_is_in_data_parallel_mode): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(check_version_and_env_config): FUNC_KEY_PIJIT_CONSTEXPR,
+    function_id(Tensor.tolist): FUNC_KEY_PIJIT_CONSTEXPR,
 
     # inner function
     function_id(type_size_in_bytes): FUNC_KEY_BUILTIN_FUNC,
@@ -530,7 +536,6 @@ _func_map = {
     function_id(_exec_save): FUNC_KEY_PIJIT_FORBIDDEN,
     function_id(load): FUNC_KEY_PIJIT_FORBIDDEN,
     function_id(export_split_mindir): FUNC_KEY_PIJIT_FORBIDDEN,
-    function_id(obfuscate_model): FUNC_KEY_PIJIT_FORBIDDEN,
     function_id(_parse_ckpt_proto): FUNC_KEY_PIJIT_FORBIDDEN,
     function_id(_generate_front_info_for_param_data_file): FUNC_KEY_PIJIT_FORBIDDEN,
     function_id(_get_data_file): FUNC_KEY_PIJIT_FORBIDDEN,
mindspore/_extends/pijit/tensor_func_list.py
ADDED
@@ -0,0 +1,27 @@
+# Copyright 2025 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Store and get tensor method"""
+from mindspore import Tensor
+from mindspore._c_expression import function_id
+
+tensor_method_id_to_name = {}
+for method_name in dir(Tensor):
+    method_id = function_id(getattr(Tensor, method_name))
+    tensor_method_id_to_name[method_id] = method_name
+
+
+def get_tensor_method_name(id):
+    """Get method name by function id"""
+    return tensor_method_id_to_name.get(id, None)
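A small sketch of how this internal pijit lookup is meant to be used; the ids below are obtained the same way the module builds its table (illustrative only):

    from mindspore import Tensor
    from mindspore._c_expression import function_id
    from mindspore._extends.pijit.tensor_func_list import get_tensor_method_name

    fid = function_id(Tensor.astype)
    print(get_tensor_method_name(fid))   # expected: 'astype'
    print(get_tensor_method_name(-1))    # unknown id -> None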
mindspore/_extends/utils.py
CHANGED
mindspore/amp.py
CHANGED
@@ -99,12 +99,12 @@ def _grad_scale(scale, grad):
     return grad * scale.astype(grad.dtype)
 
 
-@jit
+@jit(backend="ms_backend")
 def _grad_scale_map(scale_value, inputs):
     return _hypermap(_partial(_grad_scale, scale_value), inputs)
 
 
-@jit
+@jit(backend="ms_backend")
 def _grad_unscale_map(scale_value, inputs):
     return _hypermap(_partial(_grad_unscale, scale_value), inputs)
 
@@ -116,7 +116,7 @@ def _overflow(inputs):
     return 1 - status.all()
 
 
-@jit
+@jit(backend="ms_backend")
 def _all_finite(inputs, check_overflow_mode, enable_allfinite):
     """all finite check"""
     if _ascend_target():
@@ -325,7 +325,7 @@ class StaticLossScaler(LossScaler):
 
 class DynamicLossScaler(LossScaler):
     r"""
-
+    Manager for dynamically adjusting the loss scaling factor.
 
     Dynamic loss scaling tries to determine the largest loss scale value that
     will keep gradients finite. It does this by increasing the loss scale every
mindspore/atlprov.dll
CHANGED
Binary file
mindspore/avcodec-59.dll
CHANGED
Binary file
mindspore/avdevice-59.dll
CHANGED
Binary file
mindspore/avfilter-8.dll
CHANGED
Binary file
mindspore/avformat-59.dll
CHANGED
Binary file
mindspore/avutil-57.dll
CHANGED
Binary file
mindspore/boost/__init__.py
CHANGED
@@ -13,8 +13,8 @@
 # limitations under the License.
 # ============================================================================
 """
-Boost
-
+Boost is able to automatically optimize network performance, e.g., by reducing BN, gradient freezing,
+and accumulating gradients to achieve network acceleration.
 
 Note:
     This feature is a beta feature, and we are still improving its functionality.
mindspore/boost/base.py
CHANGED
@@ -21,15 +21,12 @@ import math
 import copy
 import numpy as np
 from scipy import linalg as la
-from mindspore.context import ParallelMode
 import mindspore.nn as nn
 from mindspore.nn.optim import LARS
 from mindspore import log as logger
 from mindspore.common import Parameter
-from mindspore.communication.management import get_group_size
+from mindspore.communication.management import get_rank, get_group_size
 from mindspore.train.serialization import load_checkpoint
-from mindspore.parallel._utils import _get_global_rank
-from mindspore.parallel._auto_parallel_context import auto_parallel_context
 from mindspore.boost.less_batch_normalization import CommonHeadLastFN
 
 
@@ -329,7 +326,7 @@ def _get_local_pca_mat_path(weight_load_dir, pca_mat_path, n_component, device_n
     if os.path.exists(save_pca_end_path):
         os.remove(save_pca_end_path)
 
-    rank =
+    rank = get_rank()
     local_pca_mat_path = full_pca_mat_path[:-4] + "_rank_" + str(rank) + ".npy"
     if os.path.exists(local_pca_mat_path):
         os.remove(local_pca_mat_path)
@@ -498,8 +495,7 @@ def _save_local_pca_mat(pca_mat, full_pca_mat_path, n_component):
         full_pca_mat_path (str): the path of full pca mat.
         n_component (int): pca component.
     """
-
-    rank_size = 1 if parallel_mode == ParallelMode.STAND_ALONE else get_group_size()
+    rank_size = get_group_size()
     local_dim = math.ceil(n_component // rank_size)
     for rank_id in range(rank_size):
         start_index = rank_id * local_dim
mindspore/boost/boost_cell_wrapper.py
CHANGED
@@ -21,7 +21,7 @@ from mindspore.nn.wrap import TrainOneStepCell
 import mindspore.context as context
 from mindspore.context import ParallelMode
 from mindspore.parallel._utils import _get_global_rank, _get_device_num, _get_gradients_mean
-from mindspore.communication.management import get_group_size, create_group
+from mindspore.communication.management import get_rank, get_group_size, create_group
 from mindspore.nn.cell import Cell
 from mindspore.nn import SequentialCell
 from mindspore.common import Tensor
@@ -388,7 +388,7 @@ class BoostTrainOneStepCell(TrainOneStepCell):
         gamma = self.auto_boost.gamma
         alpha = self.auto_boost.alpha
         sigma = self.auto_boost.sigma
-        _rank =
+        _rank = get_rank()
         _rank_size = 1 if self.parallel_mode == ParallelMode.STAND_ALONE else get_group_size()
         n_components = self.auto_boost.n_components
         timeout = self.auto_boost.timeout
mindspore/c1.dll
CHANGED
Binary file
mindspore/c1xx.dll
CHANGED
Binary file
mindspore/c2.dll
CHANGED
Binary file
mindspore/common/__init__.py
CHANGED
@@ -15,7 +15,7 @@
 """Top-level reference to dtype of common module."""
 from __future__ import absolute_import
 from mindspore.common import dtype
-from mindspore.common.api import
+from mindspore.common.api import ms_memory_recycle, jit, jit_class, _no_grad, \
     flops_collection, set_recursion_limit
 from mindspore.common.dtype import Type, int8, byte, int16, short, int32, intc, int64, intp, \
     uint8, ubyte, uint16, ushort, uint32, uintc, uint64, uintp, float16, half, \
@@ -39,6 +39,7 @@ from mindspore.common import generator
 from mindspore.common.generator import (
     Generator, default_generator, seed, manual_seed, initial_seed, get_rng_state, set_rng_state)
 from mindspore.ops.function.array_func import is_tensor, from_numpy
+from mindspore.common._grad_function import _Function
 
 # symbols from dtype
 __all__ = [
@@ -70,7 +71,7 @@ __all__ = [
 
 __all__.extend([
     "tensor", "Tensor", "RowTensor", "SparseTensor", "COOTensor", "CSRTensor",  # tensor
-
+    'jit', 'jit_class', '_no_grad',  # api
     "Parameter", "ParameterTuple",  # parameter
     "dtype",
     "set_seed", "get_seed", "manual_seed",  # random seed
@@ -83,6 +84,6 @@ __all__.extend([
     "no_inline",
     "Symbol",
     "recompute",
-    "is_tensor", "from_numpy",
+    "is_tensor", "from_numpy", "_Function"
 ])
 __all__.extend(generator.__all__)