mindspore 2.5.0-cp310-cp310-win_amd64.whl → 2.6.0-cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +6 -4
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -33
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +19 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +25 -194
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +109 -75
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +4 -4
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +4 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -1
- mindspore/common/_stub_tensor.py +5 -10
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +2014 -3386
- mindspore/common/api.py +386 -355
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +5 -2
- mindspore/common/dump.py +7 -5
- mindspore/common/file_system.py +3 -0
- mindspore/common/generator.py +3 -0
- mindspore/common/hook_handle.py +5 -3
- mindspore/common/initializer.py +10 -6
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +2 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +106 -39
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +332 -714
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +47 -2
- mindspore/communication/comm_func.py +70 -53
- mindspore/communication/management.py +83 -17
- mindspore/context.py +228 -571
- mindspore/dataset/__init__.py +44 -20
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +102 -120
- mindspore/dataset/engine/datasets_audio.py +22 -22
- mindspore/dataset/engine/datasets_standard_format.py +43 -24
- mindspore/dataset/engine/datasets_text.py +78 -85
- mindspore/dataset/engine/datasets_user_defined.py +109 -77
- mindspore/dataset/engine/datasets_vision.py +111 -108
- mindspore/dataset/engine/iterators.py +5 -3
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/samplers.py +279 -57
- mindspore/dataset/engine/serializer_deserializer.py +2 -1
- mindspore/dataset/engine/validators.py +10 -0
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/device_context/ascend/op_debug.py +60 -1
- mindspore/device_context/ascend/op_tuning.py +0 -4
- mindspore/device_manager.py +39 -3
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -2
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +22 -26
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +4 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +40 -22
- mindspore/experimental/optim/radam.py +5 -5
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -81
- mindspore/hal/event.py +38 -55
- mindspore/hal/memory.py +115 -147
- mindspore/hal/stream.py +81 -125
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +40 -2
- mindspore/mindrecord/__init__.py +20 -7
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +133 -702
- mindspore/mint/distributed/__init__.py +5 -1
- mindspore/mint/distributed/distributed.py +198 -113
- mindspore/mint/linalg/__init__.py +2 -0
- mindspore/mint/nn/__init__.py +280 -18
- mindspore/mint/nn/functional.py +282 -64
- mindspore/mint/nn/layer/__init__.py +4 -0
- mindspore/mint/nn/layer/_functions.py +7 -3
- mindspore/mint/nn/layer/activation.py +120 -13
- mindspore/mint/nn/layer/conv.py +234 -28
- mindspore/mint/nn/layer/normalization.py +15 -16
- mindspore/mint/nn/layer/padding.py +1 -1
- mindspore/mint/nn/layer/pooling.py +66 -1
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1253 -179
- mindspore/nn/layer/activation.py +23 -21
- mindspore/nn/layer/basic.py +22 -16
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +53 -42
- mindspore/nn/layer/embedding.py +9 -8
- mindspore/nn/layer/normalization.py +48 -42
- mindspore/nn/layer/pooling.py +75 -31
- mindspore/nn/layer/transformer.py +11 -10
- mindspore/nn/learning_rate_schedule.py +4 -2
- mindspore/nn/loss/loss.py +27 -19
- mindspore/nn/optim/ada_grad.py +6 -5
- mindspore/nn/optim/adadelta.py +9 -7
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +18 -14
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +3 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +9 -7
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +178 -117
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +3 -3
- mindspore/numpy/array_creations.py +3 -3
- mindspore/numpy/array_ops.py +1 -1
- mindspore/numpy/utils.py +1 -2
- mindspore/numpy/utils_const.py +1 -2
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +3 -2
- mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
- mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
- mindspore/ops/_vmap/vmap_array_ops.py +32 -6
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
- mindspore/ops/_vmap/vmap_math_ops.py +4 -7
- mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +127 -52
- mindspore/ops/auto_generate/gen_extend_func.py +286 -208
- mindspore/ops/auto_generate/gen_ops_def.py +2783 -2335
- mindspore/ops/auto_generate/gen_ops_prim.py +8992 -2686
- mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +19 -24
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +4 -5
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +28 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +1631 -2347
- mindspore/ops/function/clip_func.py +38 -45
- mindspore/ops/function/debug_func.py +36 -44
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +46 -78
- mindspore/ops/function/math_func.py +3024 -3855
- mindspore/ops/function/nn_func.py +678 -274
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +17 -30
- mindspore/ops/function/random_func.py +216 -361
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +5 -5
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +26 -18
- mindspore/ops/functional.py +8 -5
- mindspore/ops/functional_overload.py +655 -4
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +21 -14
- mindspore/ops/operations/_custom_ops_utils.py +235 -0
- mindspore/ops/operations/_grad_ops.py +1 -10
- mindspore/ops/operations/_inner_ops.py +5 -76
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +39 -24
- mindspore/ops/operations/comm_ops.py +150 -107
- mindspore/ops/operations/custom_ops.py +287 -32
- mindspore/ops/operations/debug_ops.py +119 -16
- mindspore/ops/operations/inner_ops.py +1 -1
- mindspore/ops/operations/linalg_ops.py +1 -58
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +746 -79
- mindspore/ops/operations/math_ops.py +21 -18
- mindspore/ops/operations/nn_ops.py +67 -224
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +43 -32
- mindspore/ops/tensor_method.py +243 -17
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
- mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
- mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
- mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
- mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
- mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
- mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
- mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
- mindspore/ops_generate/{template.py → common/template.py} +96 -84
- mindspore/ops_generate/gen_ops.py +23 -325
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
- mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -10
- mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
- mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
- mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
- mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
- mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
- mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
- mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
- mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
- mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
- mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
- mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
- mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
- mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
- mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +6 -2
- mindspore/parallel/_auto_parallel_context.py +140 -12
- mindspore/parallel/_cell_wrapper.py +132 -15
- mindspore/parallel/_parallel_serialization.py +95 -4
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +198 -25
- mindspore/parallel/algo_parameter_config.py +3 -3
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +658 -37
- mindspore/parallel/cluster/process_entity/_api.py +151 -19
- mindspore/parallel/cluster/run.py +1 -1
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +258 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +24 -13
- mindspore/parallel/shard.py +137 -62
- mindspore/parallel/transform_safetensors.py +288 -95
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +9 -5
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
- mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +25 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
- mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/common/constant.py +12 -0
- mindspore/profiler/common/msprof_cmd_tool.py +42 -23
- mindspore/profiler/common/path_manager.py +24 -0
- mindspore/profiler/common/profiler_context.py +26 -2
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_parameters.py +59 -18
- mindspore/profiler/common/profiler_path_manager.py +66 -7
- mindspore/profiler/dynamic_profiler.py +112 -79
- mindspore/profiler/envprofiler.py +26 -1
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +57 -14
- mindspore/profiler/platform/npu_profiler.py +33 -7
- mindspore/profiler/profiler.py +541 -45
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +4 -0
- mindspore/profiler/schedule.py +57 -22
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +25 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +2 -2
- mindspore/runtime/executor.py +40 -11
- mindspore/runtime/memory.py +37 -13
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +43 -9
- mindspore/train/amp.py +1 -1
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +24 -40
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_flops_collector.py +2 -3
- mindspore/train/callback/_history.py +7 -4
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +8 -13
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -105
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +4 -5
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +8 -6
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +19 -12
- mindspore/train/model.py +262 -127
- mindspore/train/serialization.py +246 -988
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +4 -2
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/runtime_execution_order_check.py +2 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/METADATA +2 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/RECORD +485 -440
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_constants.py +0 -190
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
- /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
mindspore/mint/__init__.py
CHANGED
@@ -54,7 +54,7 @@ from mindspore.ops.functional_overload import min
 from mindspore.ops.auto_generate import sinc
 from mindspore.ops.auto_generate import sinh
 from mindspore.ops.auto_generate import cosh
-from mindspore.ops.
+from mindspore.ops.functional_overload import xlogy
 # 5
 from mindspore.ops.auto_generate import cumsum_ext as cumsum
 # 6
@@ -63,7 +63,7 @@ from mindspore.ops.auto_generate import stack_ext as stack
 # 7
 from mindspore.ops.function.array_func import unsqueeze
 # 8
-from mindspore.ops.auto_generate import
+from mindspore.ops.auto_generate import transpose_ext_view as transpose
 from mindspore.ops.auto_generate import batch_norm_elemt
 from mindspore.ops.auto_generate import batch_norm_gather_stats_with_counts
 from mindspore.ops.auto_generate import batch_norm_stats
@@ -116,7 +116,7 @@ from mindspore.ops.functional import searchsorted
 # 31

 # 32
-
+from mindspore.ops.function.math_func import einsum_ext as einsum
 # 33

 # 34
@@ -156,7 +156,7 @@ from mindspore.ops.functional import tile
 # 51

 # 52
-
+from mindspore.ops.functional_overload import addcdiv
 # 53

 # 54
@@ -168,7 +168,8 @@ from mindspore.ops.function.math_func import norm_ext as norm
 # 57
 from mindspore.ops.functional import broadcast_to
 # 58
-from mindspore.ops.
+from mindspore.ops.functional_overload import greater_equal, ge
+
 # 59
 from mindspore.ops.functional import square
 # 60
@@ -307,6 +308,8 @@ from mindspore.ops.function.math_func import arccos_ext as arccos
 from mindspore.ops.function.math_func import acosh_ext as acosh
 from mindspore.ops.function.math_func import arccosh_ext as arccosh
 # 172
+from mindspore.ops.function.math_func import addcmul_ext as addcmul
+
 from mindspore.ops.function.math_func import asin_ext as asin
 from mindspore.ops.function.math_func import arcsin_ext as arcsin
 # 173
@@ -328,6 +331,9 @@ from mindspore.ops.function.math_func import round
 # 182
 from mindspore.ops.function.math_func import bernoulli_ext as bernoulli

+# 201
+from mindspore.ops.auto_generate import diag_ext as diag
+
 # 204
 from mindspore.ops.auto_generate import erfc
 # 207
@@ -358,7 +364,7 @@ from mindspore.ops.function.array_func import scatter_add_ext as scatter_add
 # 289
 from mindspore.ops.auto_generate import sign

-from mindspore.ops.auto_generate import
+from mindspore.ops.auto_generate import select_ext_view as select

 # 301
 from mindspore.ops.function.math_func import tan
@@ -411,12 +417,14 @@ from mindspore.ops.function.math_func import frac_ext as frac

 # 538
 from mindspore.ops.function.math_func import histc_ext as histc
-
+# 549
+from mindspore.ops.functional_overload import kthvalue
 # 552
 from mindspore.ops.auto_generate import log10_ext as log10

 # 553
 from mindspore.ops.auto_generate import logaddexp_ext as logaddexp
+from mindspore.ops.auto_generate import logaddexp2

 # 557
 from mindspore.ops.auto_generate import logsumexp_ext as logsumexp
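
Note: the newly exported `logaddexp2` complements the existing `logaddexp`. As a reference for what these functions compute (assuming mindspore follows the NumPy/PyTorch convention that the names suggest; the scalar helpers below are illustrative, not mindspore code), a minimal sketch:

    import math

    # Numerically stable log(exp(x) + exp(y)) and log2(2**x + 2**y); the
    # max-shift trick avoids overflow for large |x| and |y|. Semantics
    # assumed from the NumPy/PyTorch convention, not taken from this diff.
    def logaddexp(x, y):
        m = max(x, y)
        return m + math.log1p(math.exp(-abs(x - y)))

    def logaddexp2(x, y):
        m = max(x, y)
        return m + math.log2(1.0 + 2.0 ** (-abs(x - y)))

    print(logaddexp(0.0, 0.0))   # log(2) ~= 0.693147
    print(logaddexp2(1.0, 1.0))  # log2(2 + 2) = 2.0
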
@@ -424,6 +432,9 @@ from mindspore.ops.auto_generate import logsumexp_ext as logsumexp
 # 582
 from mindspore.ops.function.math_func import std_mean_ext as std_mean

+# 584
+from mindspore.ops.function.array_func import take
+
 # 588
 from mindspore.ops.function.math_func import var_mean_ext as var_mean

@@ -433,6 +444,9 @@ from mindspore.ops.function.math_func import nan_to_num
 # 613
 from mindspore.ops.functional_overload import nansum

+# 615
+from mindspore.ops.auto_generate import triangular_solve
+
 # 664
 from mindspore.ops.function.array_func import meshgrid_ext as meshgrid

@@ -445,12 +459,27 @@ from mindspore.ops.function.math_func import float_power_ext as float_power
 # 708
 from mindspore.ops.function.math_func import std_ext as std

+# 719
+from mindspore.ops.functional_overload import add
+
+# 720
+from mindspore.ops.functional_overload import sub
+
+# 739
+from mindspore.ops.function.array_func import hstack
+
+# 826
+from mindspore.ops.functional_overload import floor_divide
+
 # 887
 from mindspore.ops.auto_generate import log2_ext as log2

 # 889
 from mindspore.ops.function.math_func import isnan_ext as isnan

+# 916
+from mindspore.ops.auto_generate import index_add_ext as index_add
+
 # 1007
 from mindspore.ops.auto_generate import t_ext as t
 from mindspore.ops.auto_generate.pyboost_inner_prim import squeeze_impl
@@ -461,120 +490,42 @@ from mindspore.ops.auto_generate.gen_ops_prim import equal_ext_op
 from mindspore.ops.function.array_func import unbind_ext as unbind


-def add(input, other, *, alpha=1):
-    r"""
-    Adds scaled other value to input Tensor.
-
-    .. math::
-
-        out_{i} = input_{i} + alpha \times other_{i}
-
-    Note:
-        - When the two inputs have different shapes,
-          they must be able to broadcast to a common shape.
-        - The two inputs and alpha comply with the implicit type conversion rules to make the data types
-          consistent.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
-        other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
-
-    Keyword Args:
-        alpha (number.Number): A scaling factor applied to `other`, default: ``1``.
-
-    Returns:
-        Tensor with a shape that is the same as the broadcasted shape of the input `input` and `other`,
-        and the data type is the one with higher precision or higher digits among the two inputs and alpha.
-
-    Raises:
-        TypeError: If the type of `input`, `other`, or `alpha` is not one of the following: Tensor, number.Number, bool.
-        TypeError: If `alpha` is of type float but `input` and `other` are not of type float.
-        TypeError: If `alpha` is of type bool but `input` and `other` are not of type bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> import mindspore
-        >>> from mindspore import Tensor
-        >>> from mindspore import mint
-        >>> x = Tensor(1, mindspore.int32)
-        >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
-        >>> alpha = 0.5
-        >>> output = mint.add(x, y, alpha=alpha)
-        >>> print(output)
-        [3. 3.5 4.]
-        >>> # the data type of x is int32, the data type of y is float32,
-        >>> # alpha is a float, and the output is the data format of higher precision float32.
-        >>> print(output.dtype)
-        Float32
-    """
-    return ops.auto_generate.add_ext(input, other, alpha)
-
-
 def any(input, dim=None, keepdim=False):
     r"""
-
-    reduce a dimension of `input` along the `dim`. Determine whether the dimensions of the output and input are the
-    same by controlling `keepdim`.
-
-    Note:
-        The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
+    Tests if any element in `input` evaluates to `True` along the given axes.

     Args:
-        input (Tensor):
-
-
-
-            Default: ``None`` , all dimensions are reduced.
-        keepdim (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
-            If ``False`` , don't keep these dimensions. Default : ``False`` .
+        input (Tensor): The input tensor.
+        dim (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce. If ``None`` ,
+            all dimensions are reduced. Default ``None`` .
+        keepdim (bool, optional): Whether the output tensor has dim retained or not. Default ``False`` .

     Returns:
-        Tensor
-
-        - If `dim` is ``None`` , and `keepdim` is ``False`` ,
-          the output is a 0-D Tensor representing the "logical OR" of all elements in the input Tensor.
-        - If `dim` is int, such as 2, and `keepdim` is ``False`` ,
-          the shape of output is :math:`(input_1, input_3, ..., input_R)`.
-        - If `dim` is tuple(int) or list(int), such as (2, 3), and `keepdim` is ``False`` ,
-          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
-        - If `dim` is 1-D Tensor, such as [2, 3], and `keepdim` is ``False`` ,
-          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
-
-    Raises:
-        TypeError: If `keepdim` is not a bool.
-        TypeError: If `input` is not a Tensor.
-        TypeError: If `dim` is not one of the following: int, tuple, list or Tensor.
+        Tensor

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> import
-        >>>
-        >>>
-        >>> # case 1:
-        >>>
-
-
-        >>>
-        (
-
-        >>>
-        >>>
-
-
-        >>>
-        >>>
-        [ True
+        >>> import mindspore
+        >>> input = mindspore.tensor([[True, False], [True, True]])
+        >>>
+        >>> # case 1: By default, mindspore.mint.any tests along all the axes.
+        >>> mindspore.mint.any(input)
+        Tensor(shape=[], dtype=Bool, value= True)
+        >>>
+        >>> # case 2: Reduces a dimension along dim 1, with keepdim False.
+        >>> mindspore.mint.any(input, dim=1)
+        Tensor(shape=[2], dtype=Bool, value= [ True, True])
+        >>>
+        >>> # case 3: Reduces a dimension along dim (0, 1), with keepdim False.
+        >>> mindspore.mint.any(input, dim=(0,1))
+        Tensor(shape=[], dtype=Bool, value= True)
+        >>>
+        >>> # case 4: Reduces a dimension along dim [0, 1], with keepdim True.
+        >>> mindspore.mint.any(input, dim=[0,1], keepdim=True)
+        Tensor(shape=[1, 1], dtype=Bool, value=
+        [[ True]])
     """
     return ops.functional.any(input, dim, keepdim)

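
Note: the hand-written `mint.add` wrapper removed above is replaced by the direct `from mindspore.ops.functional_overload import add` (and `sub`) imports added earlier in this diff (the `# 719`/`# 720` entries). Assuming the overload keeps the wrapper's documented semantics, out_i = input_i + alpha * other_i with implicit type promotion, the removed docstring's example still illustrates the call:

    import numpy as np
    import mindspore
    from mindspore import Tensor, mint

    x = Tensor(1, mindspore.int32)
    y = Tensor(np.array([4, 5, 6]).astype(np.float32))
    # out_i = x_i + alpha * y_i; int32 + float32 promotes to float32
    output = mint.add(x, y, alpha=0.5)
    print(output)        # [3.  3.5 4. ]
    print(output.dtype)  # Float32
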
@@ -583,78 +534,57 @@ def all(input, dim=None, keepdim=False):
     r"""
     all(input) -> Tensor

-
+    Tests if all element in `input` evaluates to `True`.

     Args:
-        input (Tensor):
-            any number of additional dimensions.
+        input (Tensor): The input Tensor.

     Returns:
-        Tensor
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
+        Tensor

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> import
-        >>>
-        >>>
-        >>> # case 1: Reduces a dimension by the "logicalAND" of all elements in the dimension.
-        >>> output = mint.all(x)
+        >>> import mindspore
+        >>> input = mindspore.tensor([[True, False], [True, True]])
+        >>> output = mindspore.mint.all(input)
         >>> print(output)
         False

     .. function:: all(input, dim, keepdim=False) -> Tensor
     :noindex:

-
-    reduce a dimension of `input` along the `dim`. Determine whether the dimensions of the output and input are the
-    same by controlling `keepdim`.
-
-    Note:
-        The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
+    Tests if all element in `input` evaluates to `True` along the given axes.

     Args:
-        input (Tensor):
-
-
-
-        keepdim (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
-            If ``False`` , don't keep these dimensions. Default : ``False`` .
+        input (Tensor): The input tensor.
+        dim (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. If ``None`` ,
+            all dimensions are reduced. Default ``None`` .
+        keepdim (bool, optional): Whether the output tensor has dim retained or not. Default ``False`` .

     Returns:
-        Tensor
-
-        - If `dim` is int, such as 2, and `keepdim` is ``False`` ,
-          the shape of output is :math:`(input_1, input_3, ..., input_R)`.
-        - If `dim` is tuple(int) or list(int), such as (2, 3), and `keepdim` is ``False`` ,
-          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
-        - If `dim` is 1-D Tensor, such as [2, 3], and `keepdim` is ``False`` ,
-          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
-
-    Raises:
-        TypeError: If `keepdim` is not a bool.
-        TypeError: If `input` is not a Tensor.
-        TypeError: If `dim` is not one of the following: int, tuple, list or Tensor.
+        Tensor

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> import
-        >>>
-        >>>
-        >>> # case 1: Reduces a dimension along
-        >>>
-
-
-        >>> # case 2: Reduces a dimension along
-        >>>
-
-
+        >>> import mindspore
+        >>> input = mindspore.tensor([[True, False], [True, True]])
+        >>>
+        >>> # case 1: Reduces a dimension along dim 1, with keepdim False.
+        >>> mindspore.mint.all(input, dim=1)
+        Tensor(shape=[2], dtype=Bool, value= [False, True])
+        >>>
+        >>> # case 2: Reduces a dimension along dim (0, 1), with keepdim False.
+        >>> mindspore.mint.all(input, dim=(0,1))
+        Tensor(shape=[], dtype=Bool, value= False)
+        >>>
+        >>> # case 3: Reduces a dimension along dim [0, 1], with keepdim True.
+        >>> mindspore.mint.all(input, dim=[0,1], keepdim=True)
+        Tensor(shape=[1, 1], dtype=Bool, value=
+        [[False]])
     """
     return ops.function.math_func.all(input, dim, keepdim)

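
Note: the next hunk removes the pure-Python einsum implementation from this module; `mint.einsum` is instead imported as `einsum_ext` from `mindspore.ops.function.math_func` (see the line-119 change above). For reference, the sublist format described in the removed docstring maps integers in [0, 52) to subscript letters. A minimal sketch of that mapping (`sublist_to_label` is a hypothetical name, mirroring the removed `_einsum_convert_sublist_to_label` helper):

    # Maps a sublist integer to an einsum subscript label:
    # 0..25 -> 'A'..'Z', 26..51 -> 'a'..'z', Ellipsis -> '...'
    def sublist_to_label(num):
        if num is Ellipsis:
            return '...'
        if 0 <= num < 26:
            return chr(num + ord('A'))
        if 26 <= num < 52:
            return chr(num + ord('a') - 26)
        raise ValueError(f"sublist numbers must be in [0, 52), got {num}")

    # einsum(a, [0, 1], b, [1, 2], [0, 2]) corresponds to einsum('AB,BC->AC', a, b)
    print(''.join(sublist_to_label(n) for n in [0, 1]))  # AB
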
@@ -847,487 +777,6 @@ def softmax(input, dim, *, dtype=None):
|
|
|
847
777
|
return softmax_ext(input, dim, dtype)
|
|
848
778
|
|
|
849
779
|
|
|
850
|
-
def _einsum_convert_sublist_to_label(num, ell_num=False):
|
|
851
|
-
"""Convert sublist to label."""
|
|
852
|
-
if num == Ellipsis or ell_num and num == 52:
|
|
853
|
-
return '...'
|
|
854
|
-
if 0 <= num < 26:
|
|
855
|
-
return chr(num + ord('A'))
|
|
856
|
-
if 26 <= num < 52:
|
|
857
|
-
return chr(num + ord('a') - 26)
|
|
858
|
-
raise ValueError(
|
|
859
|
-
f'For einsum, the number in sublist must be in range [0, 52), but got {num}')
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
def _einsum_convert_label_to_index(label):
|
|
863
|
-
"""Convert label to index."""
|
|
864
|
-
label_num = ord(label)
|
|
865
|
-
if ord('A') <= label_num <= ord('Z'):
|
|
866
|
-
return label_num - ord('A')
|
|
867
|
-
if ord('a') <= label_num <= ord('z'):
|
|
868
|
-
return label_num - ord('a') + 26
|
|
869
|
-
if label_num == ord('.'):
|
|
870
|
-
return 52
|
|
871
|
-
raise ValueError(
|
|
872
|
-
f'For einsum, the label in equation must be in [a-zA-Z] or ., but got {label}')
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
def _einsum_convert_sublist(equation, *operands):
|
|
876
|
-
"""Convert the sublist to an equation operand if the received input is a sublist format."""
|
|
877
|
-
if isinstance(equation, Tensor):
|
|
878
|
-
equation_tmp = ''
|
|
879
|
-
for i, lst in enumerate(operands):
|
|
880
|
-
if i % 2 == 0:
|
|
881
|
-
for _, num in enumerate(lst):
|
|
882
|
-
equation_tmp += _einsum_convert_sublist_to_label(num)
|
|
883
|
-
if i in (len(operands) - 1, len(operands) - 2):
|
|
884
|
-
continue
|
|
885
|
-
equation_tmp += ','
|
|
886
|
-
if len(operands) % 2 == 0:
|
|
887
|
-
equation_tmp += '->'
|
|
888
|
-
for _, num in enumerate(operands[-1]):
|
|
889
|
-
equation_tmp += _einsum_convert_sublist_to_label(num)
|
|
890
|
-
operands_tmp = list([equation]) + list(operands[1:-1:2])
|
|
891
|
-
else:
|
|
892
|
-
operands_tmp = list([equation]) + list(operands[1::2])
|
|
893
|
-
equation = equation_tmp
|
|
894
|
-
operands = tuple(operands_tmp)
|
|
895
|
-
if len(operands) == 0: # pylint: disable=len-as-condition
|
|
896
|
-
raise ValueError(
|
|
897
|
-
"For einsum, the 'operands' must have at least one operand.")
|
|
898
|
-
return equation, operands
|
|
899
|
-
|
|
900
|
-
|
|
901
|
-
def _einsum_check_inputargs(equation, operands):
|
|
902
|
-
"""Check equation and operands."""
|
|
903
|
-
if not isinstance(equation, str):
|
|
904
|
-
raise TypeError(
|
|
905
|
-
f"For einsum, 'equation' must be a str, but got {type(equation)}.")
|
|
906
|
-
for operand in operands:
|
|
907
|
-
if not isinstance(operand, Tensor):
|
|
908
|
-
raise TypeError(
|
|
909
|
-
f"For einsum, members of 'operands' must be Tensor, but got {type(operand)}.")
|
|
910
|
-
|
|
911
|
-
|
|
912
|
-
@constexpr
|
|
913
|
-
def _einsum_parse_equation(equation):
|
|
914
|
-
"""Parse equation."""
|
|
915
|
-
l_equation = ''
|
|
916
|
-
r_equation = ''
|
|
917
|
-
equation = equation.replace(' ', '')
|
|
918
|
-
|
|
919
|
-
if '->' in equation:
|
|
920
|
-
l_equation, r_equation = equation.split('->', 1)
|
|
921
|
-
if l_equation == '':
|
|
922
|
-
raise ValueError(
|
|
923
|
-
'For einsum, equation must contain characters to the left fo the arrow.')
|
|
924
|
-
else:
|
|
925
|
-
l_equation = equation
|
|
926
|
-
|
|
927
|
-
if ',' in l_equation:
|
|
928
|
-
l_equationlst = l_equation.split(",")
|
|
929
|
-
else:
|
|
930
|
-
l_equationlst = [l_equation]
|
|
931
|
-
|
|
932
|
-
l_equationlst = []
|
|
933
|
-
|
|
934
|
-
for subequation in l_equation.split(','):
|
|
935
|
-
if '.' in subequation and ('...' not in subequation or subequation.count('.') != 3):
|
|
936
|
-
raise ValueError(f"For einsum, an ellipsis in the equation must include three continuous \'.\', "
|
|
937
|
-
f"and can only be found once.")
|
|
938
|
-
subequation_lst = [_einsum_convert_label_to_index(
|
|
939
|
-
label) for label in subequation.replace('...', '.')]
|
|
940
|
-
l_equationlst.append(subequation_lst)
|
|
941
|
-
|
|
942
|
-
if "." in r_equation and ('...' not in r_equation or r_equation.count('.') != 3):
|
|
943
|
-
raise ValueError(f"For einsum, an ellipsis in the equation must include three continuous \'.\', "
|
|
944
|
-
f"and can only be found once.")
|
|
945
|
-
r_equationlst = [_einsum_convert_label_to_index(
|
|
946
|
-
label) for label in r_equation.replace('...', '.')]
|
|
947
|
-
|
|
948
|
-
return l_equationlst, r_equationlst, ('->' in equation)
|
|
949
|
-
|
|
950
|
-
|
|
951
|
-
def _einsum_parse_labels(l_equationlst, operands):
|
|
952
|
-
"""Parse left script of equation."""
|
|
953
|
-
align_rank = 0
|
|
954
|
-
max_labels = 53
|
|
955
|
-
ellipsis_dimnum = 0
|
|
956
|
-
labels_count = [0] * max_labels
|
|
957
|
-
|
|
958
|
-
if len(operands) != len(l_equationlst):
|
|
959
|
-
raise ValueError(f"For einsum, 'operands' is not equal to specified in the 'equation', "
|
|
960
|
-
f"but got {len(operands)} and {len(l_equationlst)}.")
|
|
961
|
-
|
|
962
|
-
for idx, sub_equ in enumerate(l_equationlst):
|
|
963
|
-
start_dim = 0
|
|
964
|
-
label_num = 0
|
|
965
|
-
operand_shape = list(operands[idx].shape)
|
|
966
|
-
for label in sub_equ:
|
|
967
|
-
dim_num = 1
|
|
968
|
-
label_num += 1
|
|
969
|
-
end_dim = start_dim + 1
|
|
970
|
-
|
|
971
|
-
# Label is ellipsis
|
|
972
|
-
if label == 52:
|
|
973
|
-
end_dim = len(operand_shape) - len(sub_equ) + label_num
|
|
974
|
-
dim_num = end_dim - start_dim
|
|
975
|
-
if ellipsis_dimnum != 0 and ellipsis_dimnum != dim_num:
|
|
976
|
-
raise ValueError(f"For einsum, an ellipsis in 'equation' can only represent the same numbers of "
|
|
977
|
-
f"dimensions in 'operands'.")
|
|
978
|
-
ellipsis_dimnum = dim_num
|
|
979
|
-
if labels_count[label] == 0:
|
|
980
|
-
align_rank += dim_num
|
|
981
|
-
labels_count[label] += 1
|
|
982
|
-
start_dim += dim_num
|
|
983
|
-
if label_num != len(sub_equ) or start_dim != len(operand_shape):
|
|
984
|
-
raise ValueError(f"For einsum, the numbers of labels specified in the 'equation' does not match "
|
|
985
|
-
f"'operands[{idx}]'.")
|
|
986
|
-
return ellipsis_dimnum, labels_count, align_rank
|
|
987
|
-
|
|
988
|
-
|
|
989
|
-
def _einsum_infer_output(r_equationlst, arrow_exist, ellipsis_dimnum, labels_count):
|
|
990
|
-
"""Parse right script of equation and infer output shape."""
|
|
991
|
-
idx = 0
|
|
992
|
-
idle_idx = -1
|
|
993
|
-
output_rank = 0
|
|
994
|
-
labels_perm_idx = [idle_idx] * 53
|
|
995
|
-
|
|
996
|
-
if arrow_exist:
|
|
997
|
-
for label in r_equationlst:
|
|
998
|
-
if labels_count[label] != 0:
|
|
999
|
-
if labels_perm_idx[label] != idle_idx:
|
|
1000
|
-
raise ValueError(f"For einsum, '{_einsum_convert_sublist_to_label(label, True)}' or {label} in "
|
|
1001
|
-
f"sublist format has appears more than once in output subscript.")
|
|
1002
|
-
dimnum = 1
|
|
1003
|
-
if label == 52:
|
|
1004
|
-
dimnum = ellipsis_dimnum
|
|
1005
|
-
labels_perm_idx[label] = idx
|
|
1006
|
-
output_rank += dimnum
|
|
1007
|
-
idx += dimnum
|
|
1008
|
-
else:
|
|
1009
|
-
raise ValueError(f"For einsum, the label to the right of arrow in the 'equation' must appear on "
|
|
1010
|
-
f"left, but '{_einsum_convert_sublist_to_label(label, True)}' does not.")
|
|
1011
|
-
else:
|
|
1012
|
-
if labels_count[52] != 0:
|
|
1013
|
-
output_rank += ellipsis_dimnum
|
|
1014
|
-
labels_perm_idx[52] = idx
|
|
1015
|
-
idx += ellipsis_dimnum
|
|
1016
|
-
for label, count in enumerate(labels_count):
|
|
1017
|
-
if count == 1:
|
|
1018
|
-
output_rank += 1
|
|
1019
|
-
labels_perm_idx[label] = idx
|
|
1020
|
-
idx += 1
|
|
1021
|
-
|
|
1022
|
-
for label, count in enumerate(labels_count):
|
|
1023
|
-
if count != 0 and labels_perm_idx[label] == idle_idx:
|
|
1024
|
-
labels_perm_idx[label] = idx
|
|
1025
|
-
idx += 1
|
|
1026
|
-
|
|
1027
|
-
return output_rank, labels_perm_idx
|
|
1028
|
-
|
|
1029
|
-
|
|
1030
|
-
def _einsum_adjust_operands(operands, l_equationlst, ellipsis_dimnum, labels_perm_idx, align_rank):
|
|
1031
|
-
"""Align operands to output as possible."""
|
|
1032
|
-
# Unsqueeze miss dimensions to make all operands has same rank, compute diagonal if operand has same label.
|
|
1033
|
-
# Then use _labels_perm_idx to transpose all operands to align dimensions with output.
|
|
1034
|
-
adjust_operands = []
|
|
1035
|
-
for idx, operand in enumerate(operands):
|
|
1036
|
-
idle_dim = -1
|
|
1037
|
-
align_axis = [idle_dim] * align_rank
|
|
1038
|
-
label_dims = [idle_dim] * 53
|
|
1039
|
-
dim = 0
|
|
1040
|
-
|
|
1041
|
-
for label in l_equationlst[idx]:
|
|
1042
|
-
if label_dims[label] != idle_dim:
|
|
1043
|
-
operand = ops.diagonal(operand, 0, label_dims[label], dim)
|
|
1044
|
-
diag_perm = []
|
|
1045
|
-
diag_dim = 0
|
|
1046
|
-
for i in range(len(operand.shape)):
|
|
1047
|
-
if i == label_dims[label]:
|
|
1048
|
-
diag_perm.append(len(operand.shape) - 1)
|
|
1049
|
-
else:
|
|
1050
|
-
diag_perm.append(diag_dim)
|
|
1051
|
-
diag_dim += 1
|
|
1052
|
-
operand = permute(operand, tuple(diag_perm))
|
|
1053
|
-
else:
|
|
1054
|
-
label_dims[label] = dim
|
|
1055
|
-
if label == 52:
|
|
1056
|
-
for ell_idx in range(ellipsis_dimnum):
|
|
1057
|
-
align_axis[labels_perm_idx[label] + ell_idx] = dim
|
|
1058
|
-
dim += 1
|
|
1059
|
-
else:
|
|
1060
|
-
align_axis[labels_perm_idx[label]] = dim
|
|
1061
|
-
dim += 1
|
|
1062
|
-
if len(operand.shape) < align_rank:
|
|
1063
|
-
for i, axis in enumerate(align_axis):
|
|
1064
|
-
if axis == idle_dim:
|
|
1065
|
-
align_axis[i] = dim
|
|
1066
|
-
dim += 1
|
|
1067
|
-
missing_dims = [1] * (align_rank - len(operand.shape))
|
|
1068
|
-
operand_shape = list(operand.shape) + missing_dims
|
|
1069
|
-
operand = reshape(operand, operand_shape)
|
|
1070
|
-
operand = permute(operand, tuple(align_axis))
|
|
1071
|
-
adjust_operands.append(operand)
|
|
1072
|
-
return adjust_operands
|
|
1073
|
-
|
|
1074
|
-
|
|
1075
|
-
def _einsum_find_dimlastop(align_rank, operands, adjust_operands):
|
|
1076
|
-
"""Find dim last operand."""
|
|
1077
|
-
dim_last_op = [0] * align_rank
|
|
1078
|
-
has_zero_dim = False
|
|
1079
|
-
for dim in range(align_rank):
|
|
1080
|
-
broadcast_dim = adjust_operands[0].shape[dim]
|
|
1081
|
-
for idx in range(1, len(adjust_operands)):
|
|
1082
|
-
other_dim = adjust_operands[idx].shape[dim]
|
|
1083
|
-
if broadcast_dim != other_dim and broadcast_dim != 1 and other_dim != 1:
|
|
1084
|
-
err_msg = "For einsum, operands do not broadcast after align to output [shapes :origin -> adjust]:"
|
|
1085
|
-
for i in range(len(operands)):
|
|
1086
|
-
err_msg += f" {operands[i].shape} -> {adjust_operands[i].shape}"
|
|
1087
|
-
raise ValueError(err_msg)
|
|
1088
|
-
if other_dim != 1:
|
|
1089
|
-
dim_last_op[dim] = idx
|
|
1090
|
-
broadcast_dim = other_dim
|
|
1091
|
-
has_zero_dim = has_zero_dim or broadcast_dim == 0
|
|
1092
|
-
return dim_last_op, has_zero_dim
|
|
1093
|
-
|
|
1094
|
-
|
|
1095
|
-
def _einsum_multiplication(sum_dims, l_tensor, r_tensor):
|
|
1096
|
-
"""Compute bmm for einsum."""
|
|
1097
|
-
batch_dims = []
|
|
1098
|
-
lonly_dims = []
|
|
1099
|
-
ronly_dims = []
|
|
1100
|
-
batch_size = 1
|
|
1101
|
-
lonly_size = 1
|
|
1102
|
-
ronly_size = 1
|
|
1103
|
-
sum_size = 1
|
|
1104
|
-
|
|
1105
|
-
l_shape = l_tensor.shape
|
|
1106
|
-
r_shape = r_tensor.shape
|
|
1107
|
-
|
|
1108
|
-
# Compute sum if dim is in sum_dims and get shapes for bmm
|
|
1109
|
-
for i in range(len(l_shape)):
|
|
1110
|
-
sum_l = l_shape[i] > 1
|
|
1111
|
-
sum_r = r_shape[i] > 1
|
|
1112
|
-
if i in sum_dims:
|
|
1113
|
-
if sum_l and sum_r:
|
|
1114
|
-
sum_size *= l_shape[i]
|
|
1115
|
-
elif sum_l:
|
|
1116
|
-
l_tensor = sum(l_tensor, i, True)
|
|
1117
|
-
elif sum_r:
|
|
1118
|
-
r_tensor = sum(r_tensor, i, True)
|
|
1119
|
-
elif sum_l and sum_r:
|
|
1120
|
-
batch_dims.append(i)
|
|
1121
|
-
batch_size *= l_shape[i]
|
|
1122
|
-
elif sum_l:
|
|
1123
|
-
lonly_dims.append(i)
|
|
1124
|
-
lonly_size *= l_shape[i]
|
|
1125
|
-
else:
|
|
1126
|
-
ronly_dims.append(i)
|
|
1127
|
-
ronly_size *= r_shape[i]
|
|
1128
|
-
|
|
1129
|
-
# Compute the einsum bmm operators pipeline.
|
|
1130
|
-
# The whole operators pipeline is transpose(in) -> reshape(in) -> bmm(in) -> reshape(out) -> transpose(out).
|
|
1131
|
-
l_reshape_shape = (batch_size, lonly_size, sum_size)
|
|
1132
|
-
r_reshape_shape = (batch_size, sum_size, ronly_size)
|
|
1133
|
-
|
|
1134
|
-
out_reshape_shape = [l_shape[dim] for dim in batch_dims]
|
|
1135
|
-
out_reshape_shape += [l_shape[dim] for dim in lonly_dims]
|
|
1136
|
-
out_reshape_shape += [1 for _ in sum_dims]
|
|
1137
|
-
out_reshape_shape += [r_shape[dim] for dim in ronly_dims]
|
|
1138
|
-
|
|
1139
|
-
l_perm_axis = batch_dims + lonly_dims + sum_dims + ronly_dims
|
|
1140
|
-
r_perm_axis = batch_dims + sum_dims + ronly_dims + lonly_dims
|
|
1141
|
-
out_perm_axis = [-1] * len(out_reshape_shape)
|
|
1142
|
-
|
|
1143
|
-
out_dim = 0
|
|
1144
|
-
for idx in range(len(l_perm_axis)):
|
|
1145
|
-
out_perm_axis[l_perm_axis[idx]] = out_dim
|
|
1146
|
-
out_dim += 1
|
|
1147
|
-
|
|
1148
|
-
l_tensor = permute(l_tensor, tuple(l_perm_axis))
|
|
1149
|
-
l_tensor = reshape(l_tensor, l_reshape_shape)
|
|
1150
|
-
|
|
1151
|
-
r_tensor = permute(r_tensor, tuple(r_perm_axis))
|
|
1152
|
-
r_tensor = reshape(r_tensor, r_reshape_shape)
|
|
1153
|
-
|
|
1154
|
-
output = bmm(l_tensor, r_tensor)
|
|
1155
|
-
output = reshape(output, out_reshape_shape)
|
|
1156
|
-
output = permute(output, tuple(out_perm_axis))
|
|
1157
|
-
|
|
1158
|
-
output_origin_shape = output.shape
|
|
1159
|
-
output_squeeze_shape = []
|
|
1160
|
-
for dim in range(len(output_origin_shape)):
|
|
1161
|
-
if dim not in sum_dims:
|
|
1162
|
-
output_squeeze_shape.append(output_origin_shape[dim])
|
|
1163
|
-
|
|
1164
|
-
return reshape(output, output_squeeze_shape)
|
|
1165
|
-
|
|
1166
|
-
|
|
1167
|
-
-def _einsum(equation, operands):
-    '''Einsum main process'''
-    _l_equationlst, _r_equationlst, _arrow_exist = _einsum_parse_equation(
-        equation)
-    _ellipsis_dimnum, _labels_count, _align_rank = _einsum_parse_labels(
-        _l_equationlst, operands)
-    _output_rank, _labels_perm_idx = _einsum_infer_output(
-        _r_equationlst, _arrow_exist, _ellipsis_dimnum, _labels_count)
-    _adjust_operands = _einsum_adjust_operands(operands, _l_equationlst, _ellipsis_dimnum, _labels_perm_idx,
-                                               _align_rank)
-    _dim_last_op, _has_zero_dim = _einsum_find_dimlastop(
-        _align_rank, operands, _adjust_operands)
-    _result = _adjust_operands[0]
-
-    # Fast path if operands has zero dim.
-    if _has_zero_dim:
-        output_shape = []
-        for dim in range(_output_rank):
-            output_shape.append(_adjust_operands[_dim_last_op[dim]].shape[dim])
-        return zeros(output_shape, dtype=_result.dtype)
-
-    # Sum or squeeze dimensions that is 1 for all rest operands.
-    _reduce_dim = _output_rank
-    for dim in range(_output_rank, _align_rank):
-        if _dim_last_op[dim] == 0:
-            if _result.shape[_reduce_dim] == 1:
-                _result = squeeze(_result, _reduce_dim)
-            else:
-                _result = sum(_result, _reduce_dim)
-        else:
-            _reduce_dim += 1
-
-    # Compute multiplication if operands are more than two.
-    for i in range(1, len(_adjust_operands)):
-        operand = _adjust_operands[i]
-        dim = _output_rank
-        sum_dims = []
-        for j in range(_output_rank, _align_rank):
-            if _dim_last_op[j] < i:
-                operand = squeeze(operand, dim)
-            elif _dim_last_op[j] == i:
-                if _result.shape[dim] == 1:
-                    operand = sum(operand, dim)
-                    _result = squeeze(_result, dim)
-                else:
-                    sum_dims.append(dim)
-                    dim += 1
-            else:
-                dim += 1
-
-        if sum_dims == []:
-            _result = mul(_result, operand)
-        elif len(sum_dims) == len(_result.shape):
-            _result = ops.auto_generate.dot(flatten(_result), flatten(operand))
-        else:
-            _result = _einsum_multiplication(sum_dims, _result, operand)
-
-    return _result
-
-
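Note: the loop at the end of the removed _einsum reduces an n-operand contraction to a chain of pairwise products. The effect, checked with NumPy on a three-operand case:

    import numpy as np

    a, b, c = np.random.rand(2, 3), np.random.rand(3, 4), np.random.rand(4, 5)
    # "ij,jk,kl->il" evaluated pairwise, left to right, as two matrix products.
    assert np.allclose(np.einsum('ij,jk,kl->il', a, b, c), (a @ b) @ c)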
-def einsum(equation, *operands):
-    r"""
-    According to the Einstein summation Convention (Einsum),
-    the product of the input tensor elements is summed along the specified dimension.
-    You can use this operator to perform diagonal, reducesum, transpose, matmul, mul, inner product operations, etc.
-
-    Note:
-        The sublist format is also supported. For example, mint.einsum(op1, sublist1, op2, sublist2, ..., sublist_out).
-        In this format, equation can be derived by the sublists which are made up of Python's Ellipsis and list of
-        integers in [0, 52). Each operand is followed by a sublist and an output sublist is at the end.
-        Dynamic shape, dynamic rank input is not supported in `graph mode (mode=mindspore.GRAPH_MODE)
-        <https://www.mindspore.cn/docs/en/master/model_train/program_form/static_graph.html>`_.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Args:
-        equation (str): Notation based on the Einstein summation convention, represent the operation you want to do.
-            the value can contain only letters, commas, ellipsis and arrow. The letters(must be in [a-zA-Z]) represent
-            input tensor dimension, commas(,) represent separate tensors, ellipsis indicates the tensor dimension that
-            you do not care about, the left of the arrow indicates the input tensors, and the right of it indicates the
-            desired output dimension. If there are no arrows in the equation, the letters that appear exactly once in
-            the equation will be part of the output, sorted in increasing alphabetical order. The output is computed by
-            multiplying the input operands element-wise, with their dimensions aligned based on the letters, and then
-            summing out the dimensions whose letters are not part of the output. If there is one arrow in the equation,
-            the output letters must appear at least once for some input operand and at most once for the output.
-        operands (Tensor): Input tensor used for calculation. The dtype of the tensor must be the same.
-
-    Returns:
-        Tensor, the shape of it can be obtained from the `equation` , and the dtype is the same as input tensors.
-
-    Raises:
-        TypeError: If `equation` is invalid, or the `equation` does not match the input tensor.
-        ValueError: If the number in sublist is not in [0, 52) in sublist format.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, mint
-        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
-        >>> equation = "i->"
-        >>> output = mint.einsum(equation, x)
-        >>> print(output)
-        [7.]
-        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
-        >>> y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
-        >>> equation = "i,i->i"
-        >>> output = mint.einsum(equation, x, y)
-        >>> print(output)
-        [ 2. 8. 12.]
-        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
-        >>> y = Tensor(np.array([[2.0, 3.0], [1.0, 2.0], [4.0, 5.0]]), mindspore.float32)
-        >>> equation = "ij,jk->ik"
-        >>> output = mint.einsum(equation, x, y)
-        >>> print(output)
-        [[16. 22.]
-         [37. 52.]]
-        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
-        >>> equation = "ij->ji"
-        >>> output = mint.einsum(equation, x)
-        >>> print(output)
-        [[1. 4.]
-         [2. 5.]
-         [3. 6.]]
-        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
-        >>> equation = "ij->j"
-        >>> output = mint.einsum(equation, x)
-        >>> print(output)
-        [5. 7. 9.]
-        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
-        >>> equation = "...->"
-        >>> output = mint.einsum(equation, x)
-        >>> print(output)
-        [21.]
-        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
-        >>> y = Tensor(np.array([2.0, 4.0, 1.0]), mindspore.float32)
-        >>> equation = "j,i->ji"
-        >>> output = mint.einsum(equation, x, y)
-        >>> print(output)
-        [[ 2. 4. 1.]
-         [ 4. 8. 2.]
-         [ 6. 12. 3.]]
-        >>> x = mindspore.Tensor([1, 2, 3, 4], mindspore.float32)
-        >>> y = mindspore.Tensor([1, 2], mindspore.float32)
-        >>> output = mint.einsum(x, [..., 1], y, [..., 2], [..., 1, 2])
-        >>> print(output)
-        [[1. 2.]
-         [2. 4.]
-         [3. 6.]
-         [4. 8.]]
-    """
-    _equation, _operands = _einsum_convert_sublist(equation, *operands)
-    _einsum_check_inputargs(_equation, _operands)
-
-    for operand in _operands:
-        if ops.is_sequence_shape_unknown(operand.shape) or ops.is_sequence_value_unknown(operand.shape):
-            raise ValueError(f"For einsum, the element of 'operands' can't be dynamic shape or dynamic rank.")
-
-    return _einsum(_equation, _operands)
-
-
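Note: in the sublist format described by the removed docstring, each integer plays the role of one subscript label. Assuming the usual NumPy-style correspondence between sublists and equation letters, the two calls below are equivalent (demonstrated with np.einsum, which accepts the same interleaved form):

    import numpy as np

    x, y = np.random.rand(2, 3), np.random.rand(3, 4)
    assert np.allclose(np.einsum(x, [0, 1], y, [1, 2], [0, 2]),
                       np.einsum('ij,jk->ik', x, y))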
 def equal(input, other):
     r"""
     Computes the equivalence between two tensors.
@@ -1909,64 +1358,6 @@ def squeeze(input, dim):
     return squeeze_impl(input, dim)


-def sub(input, other, *, alpha=1):
-    r"""
-    Subtracts scaled other value from input Tensor.
-
-    .. math::
-
-        out_{i} = input_{i} - alpha \times other_{i}
-
-    Note:
-        - When the two inputs have different shapes,
-          they must be able to broadcast to a common shape.
-        - The two inputs and alpha comply with the implicit type conversion rules to make the data types
-          consistent.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
-        other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
-
-    Keyword Args:
-        alpha (number.Number, optional): A scaling factor applied to `other`, default ``1``.
-
-    Returns:
-        Tensor with a shape that is the same as the broadcasted shape of the input `input` and `other`,
-        and the data type is the one with higher precision or higher digits among the two inputs and alpha.
-
-    Raises:
-        TypeError: If the type of `input`, `other`, or `alpha` is not one of the following: Tensor, number.Number, bool.
-        TypeError: If `alpha` is of type float but `input` and `other` are not of type float.
-        TypeError: If `alpha` is of type bool but `input` and `other` are not of type bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> import mindspore
-        >>> from mindspore import Tensor
-        >>> from mindspore import mint
-        >>> x = Tensor(np.array([4, 5, 6]).astype(np.float32))
-        >>> y = Tensor(1, mindspore.int32)
-        >>> alpha = 0.5
-        >>> output = mint.sub(x, y, alpha=alpha)
-        >>> print(output)
-        [3.5 4.5 5.5]
-        >>> # the data type of x is float32, the data type of y is int32,
-        >>> # alpha is a float, and the output is the data format of higher precision float32.
-        >>> print(output.dtype)
-        Float32
-    """
-    return ops.auto_generate.sub_ext(input, other, alpha)
-
-
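Note: the removed sub computes input - alpha * other; its docstring's worked example can be reproduced with plain NumPy arithmetic (the mint call itself is gone from this module):

    import numpy as np

    x = np.array([4.0, 5.0, 6.0])
    print(x - 0.5 * 1)  # [3.5 4.5 5.5], matching the removed example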
 def swapaxes(input, axis0, axis1):
     '''
     Alias for :func:`mindspore.mint.transpose` . The `input` corresponds to the `input` in the reference interface,
@@ -2008,7 +1399,7 @@ def unique_consecutive(input, return_inverse=False, return_counts=False, dim=Non
             maps to the position in the output. Default: ``False`` .
         return_counts (bool, optional): Whether to return the counts of each unique element. Default: ``False`` .
         dim (int, optional): The dimension to apply unique. If ``None`` , the unique of the flattened input is
-            returned. If specified, it must be int32 or int64. Default: ``None`` .
+            returned. If the dimension is specified, it must be int32 or int64. Default: ``None`` .

     Returns:
         A tensor or a tuple of tensors containing tensor objects (`output`, `inverse_indices`, `counts`).
@@ -2175,8 +1566,6 @@ def cdist(x1, x2, p=2.0, compute_mode='use_mm_for_euclid_dist_if_necessary'):

     Note:
         On Ascend, the supported dtypes are float16 and float32.
-        On CPU, the supported dtypes are float16 and float32.
-        On GPU, the supported dtypes are float32 and float64.

     Args:
         x1 (Tensor): Input tensor of shape :math:`(B, P, M)`.
@@ -2351,7 +1740,7 @@ __all__ = [
     # 51
     'permute',
     # 52
-
+    'addcdiv',
     # 53

     # 54
@@ -2536,7 +1925,8 @@ __all__ = [

     # 182
     'bernoulli',
-
+    # 201
+    'diag',
     # 207
     'expm1',
     # 204
@@ -2620,11 +2010,15 @@ __all__ = [
     # 538
     'histc',

+    # 549
+    'kthvalue',
+
     # 552
     'log10',

     # 553
     'logaddexp',
+    'logaddexp2',

     # 557
     'logsumexp',
@@ -2632,6 +2026,9 @@ __all__ = [
     # 582
     'std_mean',

+    # 584
+    'take',
+
     # 588
     'var_mean',

@@ -2644,6 +2041,9 @@ __all__ = [
     # 613
     'nansum',

+    # 615
+    'triangular_solve',
+
     # 664
     'meshgrid',

@@ -2656,12 +2056,43 @@ __all__ = [
     # 708
     'std',

+    # 739
+    'hstack',
+
+    # 826
+    'floor_divide',
+
     # 887
     'log2',

     # 889
     'isnan',

+    # 890
+
+    # 891
+
+    # 892
+
+    # 893
+
+    # 894
+
+    # 895
+
+    # 896
+
+    # 897
+
+    # 898
+
+    # 899
+
+    # 900
+
+    # 916
+    'index_add',
+
     # 1007
     't',
