mindspore-2.5.0-cp311-cp311-win_amd64.whl → mindspore-2.6.0-cp311-cp311-win_amd64.whl
This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +6 -4
- mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -33
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +19 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +25 -194
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +109 -75
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +4 -4
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +4 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -1
- mindspore/common/_stub_tensor.py +5 -10
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +2014 -3386
- mindspore/common/api.py +386 -355
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +5 -2
- mindspore/common/dump.py +7 -5
- mindspore/common/file_system.py +3 -0
- mindspore/common/generator.py +3 -0
- mindspore/common/hook_handle.py +5 -3
- mindspore/common/initializer.py +10 -6
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +2 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +106 -39
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +332 -714
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +47 -2
- mindspore/communication/comm_func.py +70 -53
- mindspore/communication/management.py +83 -17
- mindspore/context.py +228 -571
- mindspore/dataset/__init__.py +44 -20
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +102 -120
- mindspore/dataset/engine/datasets_audio.py +22 -22
- mindspore/dataset/engine/datasets_standard_format.py +43 -24
- mindspore/dataset/engine/datasets_text.py +78 -85
- mindspore/dataset/engine/datasets_user_defined.py +109 -77
- mindspore/dataset/engine/datasets_vision.py +111 -108
- mindspore/dataset/engine/iterators.py +5 -3
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/samplers.py +279 -57
- mindspore/dataset/engine/serializer_deserializer.py +2 -1
- mindspore/dataset/engine/validators.py +10 -0
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/device_context/ascend/op_debug.py +60 -1
- mindspore/device_context/ascend/op_tuning.py +0 -4
- mindspore/device_manager.py +39 -3
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -2
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +22 -26
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +4 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +40 -22
- mindspore/experimental/optim/radam.py +5 -5
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -81
- mindspore/hal/event.py +38 -55
- mindspore/hal/memory.py +115 -147
- mindspore/hal/stream.py +81 -125
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +40 -2
- mindspore/mindrecord/__init__.py +20 -7
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +133 -702
- mindspore/mint/distributed/__init__.py +5 -1
- mindspore/mint/distributed/distributed.py +198 -113
- mindspore/mint/linalg/__init__.py +2 -0
- mindspore/mint/nn/__init__.py +280 -18
- mindspore/mint/nn/functional.py +282 -64
- mindspore/mint/nn/layer/__init__.py +4 -0
- mindspore/mint/nn/layer/_functions.py +7 -3
- mindspore/mint/nn/layer/activation.py +120 -13
- mindspore/mint/nn/layer/conv.py +234 -28
- mindspore/mint/nn/layer/normalization.py +15 -16
- mindspore/mint/nn/layer/padding.py +1 -1
- mindspore/mint/nn/layer/pooling.py +66 -1
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1253 -179
- mindspore/nn/layer/activation.py +23 -21
- mindspore/nn/layer/basic.py +22 -16
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +53 -42
- mindspore/nn/layer/embedding.py +9 -8
- mindspore/nn/layer/normalization.py +48 -42
- mindspore/nn/layer/pooling.py +75 -31
- mindspore/nn/layer/transformer.py +11 -10
- mindspore/nn/learning_rate_schedule.py +4 -2
- mindspore/nn/loss/loss.py +27 -19
- mindspore/nn/optim/ada_grad.py +6 -5
- mindspore/nn/optim/adadelta.py +9 -7
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +18 -14
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +3 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +9 -7
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +178 -117
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +3 -3
- mindspore/numpy/array_creations.py +3 -3
- mindspore/numpy/array_ops.py +1 -1
- mindspore/numpy/utils.py +1 -2
- mindspore/numpy/utils_const.py +1 -2
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +3 -2
- mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
- mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
- mindspore/ops/_vmap/vmap_array_ops.py +32 -6
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
- mindspore/ops/_vmap/vmap_math_ops.py +4 -7
- mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +127 -52
- mindspore/ops/auto_generate/gen_extend_func.py +286 -208
- mindspore/ops/auto_generate/gen_ops_def.py +2783 -2335
- mindspore/ops/auto_generate/gen_ops_prim.py +8992 -2686
- mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +19 -24
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +4 -5
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +28 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +1631 -2347
- mindspore/ops/function/clip_func.py +38 -45
- mindspore/ops/function/debug_func.py +36 -44
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +46 -78
- mindspore/ops/function/math_func.py +3024 -3855
- mindspore/ops/function/nn_func.py +678 -274
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +17 -30
- mindspore/ops/function/random_func.py +216 -361
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +5 -5
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +26 -18
- mindspore/ops/functional.py +8 -5
- mindspore/ops/functional_overload.py +655 -4
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +21 -14
- mindspore/ops/operations/_custom_ops_utils.py +235 -0
- mindspore/ops/operations/_grad_ops.py +1 -10
- mindspore/ops/operations/_inner_ops.py +5 -76
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +39 -24
- mindspore/ops/operations/comm_ops.py +150 -107
- mindspore/ops/operations/custom_ops.py +287 -32
- mindspore/ops/operations/debug_ops.py +119 -16
- mindspore/ops/operations/inner_ops.py +1 -1
- mindspore/ops/operations/linalg_ops.py +1 -58
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +746 -79
- mindspore/ops/operations/math_ops.py +21 -18
- mindspore/ops/operations/nn_ops.py +67 -224
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +43 -32
- mindspore/ops/tensor_method.py +243 -17
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
- mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
- mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
- mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
- mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
- mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
- mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
- mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
- mindspore/ops_generate/{template.py → common/template.py} +96 -84
- mindspore/ops_generate/gen_ops.py +23 -325
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
- mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -10
- mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
- mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
- mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
- mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
- mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
- mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
- mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
- mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
- mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
- mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
- mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
- mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
- mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
- mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +6 -2
- mindspore/parallel/_auto_parallel_context.py +140 -12
- mindspore/parallel/_cell_wrapper.py +132 -15
- mindspore/parallel/_parallel_serialization.py +95 -4
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +198 -25
- mindspore/parallel/algo_parameter_config.py +3 -3
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +658 -37
- mindspore/parallel/cluster/process_entity/_api.py +151 -19
- mindspore/parallel/cluster/run.py +1 -1
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +258 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +24 -13
- mindspore/parallel/shard.py +137 -62
- mindspore/parallel/transform_safetensors.py +288 -95
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +9 -5
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
- mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +25 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
- mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/common/constant.py +12 -0
- mindspore/profiler/common/msprof_cmd_tool.py +42 -23
- mindspore/profiler/common/path_manager.py +24 -0
- mindspore/profiler/common/profiler_context.py +26 -2
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_parameters.py +59 -18
- mindspore/profiler/common/profiler_path_manager.py +66 -7
- mindspore/profiler/dynamic_profiler.py +112 -79
- mindspore/profiler/envprofiler.py +26 -1
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +57 -14
- mindspore/profiler/platform/npu_profiler.py +33 -7
- mindspore/profiler/profiler.py +541 -45
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +4 -0
- mindspore/profiler/schedule.py +57 -22
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +25 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +2 -2
- mindspore/runtime/executor.py +40 -11
- mindspore/runtime/memory.py +37 -13
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +43 -9
- mindspore/train/amp.py +1 -1
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +24 -40
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_flops_collector.py +2 -3
- mindspore/train/callback/_history.py +7 -4
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +8 -13
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -105
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +4 -5
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +8 -6
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +19 -12
- mindspore/train/model.py +262 -127
- mindspore/train/serialization.py +246 -988
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +4 -2
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/runtime_execution_order_check.py +2 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/METADATA +2 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/RECORD +485 -440
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_constants.py +0 -190
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
- /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
mindspore/common/tensor.py
CHANGED
@@ -31,9 +31,9 @@ from mindspore.common.hook_handle import _TensorHookHandle
 
 from mindspore.common._utils import get_slice_num
 from mindspore.common._register_for_tensor import tensor_operator_registry
-from mindspore._c_expression import Tensor as Tensor_
+from mindspore._c_expression import TensorPy as TensorPy_
 from mindspore import _checkparam as validator
-from mindspore._checkparam import is_stub_tensor
+from mindspore._checkparam import is_stub_tensor, check_hook_fn
 from mindspore._check_jit_forbidden_api import jit_forbidden_register
 from mindspore.common.symbol import Symbol
 
@@ -45,8 +45,8 @@ np_types = (np.int8, np.int16, np.int32, np.int64,
 
 def _check_input_data_type(input_data):
     """Check the type of input_data for Tensor"""
-    validator.check_value_type('input_data', input_data,
-                               (Tensor_, Tensor, np.ndarray, np.str_, list, tuple, float, int, bool, complex, bytes),
+    validator.check_value_type('input_data', input_data, (TensorPy_, Tensor, np.ndarray, np.str_, list, tuple, float,
+                                                          int, bool, complex, bytes),
                                'Tensor')
     valid_dtypes = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64,
                     np.float16, np.float32, np.float64, np.bool_, np.str_, np.complex64, np.complex128)
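The widened `check_value_type` tuple above is the full set of Python-side inputs the constructor accepts. A minimal sketch of what passes this validation (plain public API; the default-dtype comments restate `_set_default_dtype` from the next hunk):

    import numpy as np
    import mindspore as ms

    ms.Tensor([1, 2, 3])         # list of ints, int64 by default
    ms.Tensor((1.0, 2.0))        # tuple of floats, float32 by default
    ms.Tensor(np.float32(1.5))   # numpy scalar, converted by _convert_numpy_array
    ms.Tensor(True)              # a plain bool is accepted as well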
@@ -72,13 +72,116 @@ def _check_input_data_type(input_data):
             f"For Tensor, the input_data is {input_data} that contain unsupported element.")
 
 
-class _TensorMeta(type(Tensor_), abc.ABCMeta):
+def _set_symbolic_shape(shape):
+    """Set symbolic_shape"""
+    symbolic_shape = None
+    if shape is None:
+        return None, None
+    if isinstance(shape, numbers.Number):
+        shape = (shape,)
+        symbolic_shape = None
+        return shape, symbolic_shape
+    if isinstance(shape, Symbol):
+        symbolic_shape = [shape]
+        shape = (None,)
+        return shape, symbolic_shape
+    if isinstance(shape, (list, tuple)) and any(isinstance(s, Symbol) for s in shape):
+        symbolic_shape = [item.to_dict() if isinstance(item, Symbol) else item for item in shape]
+        shape_without_symbol = (None if isinstance(item, Symbol) else item for item in shape)
+        shape = list(shape_without_symbol) if isinstance(shape, list) else tuple(shape_without_symbol)
+        return shape, symbolic_shape
+    return shape, symbolic_shape
+
+
+def _convert_stub_tensor(input_data):
+    """Convert input to stub tensor"""
+    if not is_stub_tensor(input_data):
+        return input_data
+    return input_data.stub_sync()
+
+
+def _convert_numpy_array(input_data):
+    """Convert inpyt to numpy array"""
+    if not isinstance(input_data, np_types):
+        return input_data
+    return np.array(input_data)
+
+
+def _check_device(device):
+    """Check device"""
+    if device is not None and device != "CPU":
+        raise ValueError(f"Only 'CPU' is supported for device, but got {device}.")
+
+
+def _set_default_dtype(input_data, dtype):
+    """Set tensor default dtype"""
+    if isinstance(input_data, (float, list, tuple)):
+        if np.array(input_data).dtype == np.float64:
+            return mstype.float32
+    if isinstance(input_data, (int, list, tuple)):
+        if np.array(input_data).dtype in (np.int32, np.int64):
+            return mstype.int64
+    return dtype
+
+
+def _set_dtype(input_data, dtype):
+    """Set and check dtype"""
+    if dtype is not None:
+        validator.check_type_name('dtype', dtype, mstype.number_type + (mstype.bool_, mstype.string), "Tensor")
+        return dtype
+    return _set_default_dtype(input_data, dtype)
+
+
+def _init(input_data=None, dtype=None, shape=None, init=None, const_arg=False, device=None):
     """
-    Meta class for Tensor. Used internally.
+    Verifying parameters. Will sink to C++
     """
+    validator.check_value_type('const_arg', const_arg, bool, 'Tensor')
+    _check_device(device)
+
+    if isinstance(input_data, (Tensor, TensorPy_)) and dtype is not None:
+        logger.info("It is suggested to use 'Tensor.astype()' to convert the dtype of a Tensor.")
+        _cast = tensor_operator_registry.get("cast")
+        input_data = _cast(input_data, dtype)
+
+    input_data = _convert_stub_tensor(input_data)
+
+    if input_data is None and shape is None and init is None and dtype is not None:
+        validator.check_type_name('dtype', dtype, mstype.number_type + (mstype.bool_, mstype.string), "Tensor")
+        logger.warning(f"For 'Tensor', if 'dtype' is not None, 'input_data', 'shape' or 'init' must not be None.")
+        return {"dtype": dtype, "shape": [-2], "init": init, "const_arg": const_arg, "device": device}
+
+    # If input data is numpy number, convert it to np array
+    input_data = _convert_numpy_array(input_data)
+    shape, symbolic_shape = _set_symbolic_shape(shape)
+    _check_tensor_input(input_data, dtype, shape, init)
 
+    # If input_data is tuple/list/numpy.ndarray, it's support in check_type method.
+    if (isinstance(shape, (list, tuple)) and None in shape) or init is not None:
+        shape = _check_tensor_dynamic_shape(dtype, shape, init)
+        return {"dtype": dtype, "shape": shape, "init": init, "const_arg": const_arg, "device": device,
+                "symbolic_shape": symbolic_shape}
 
-def tensor(input_data=None, dtype=None, shape=None, init=None, internal=False, const_arg=False):
+    if input_data is None and dtype is not None and shape is not None:
+        validator.check_type_name('dtype', dtype, mstype.number_type + (mstype.bool_, mstype.string), "Tensor")
+        return {"dtype": dtype, "shape": shape, "init": init, "const_arg": const_arg, "device": device,
+                "symbolic_shape": symbolic_shape}
+
+    _check_input_data_type(input_data)
+    dtype = _set_dtype(input_data, dtype)
+
+    if isinstance(input_data, np.ndarray) and (not input_data.flags['FORC']):
+        input_data = np.ascontiguousarray(input_data)
+
+    if dtype is not None:
+        return {"input_data": input_data, "dtype": dtype, "init": init, "const_arg": const_arg, "device": device,
+                "symbolic_shape": symbolic_shape}
+
+    return {"input_data": input_data, "init": init, "const_arg": const_arg, "device": device,
+            "symbolic_shape": symbolic_shape}
+
+
+def tensor(input_data=None, dtype=None, shape=None, init=None, const_arg=False):
     """
     Create a new Tensor in Cell.construct() or function decorated by @jit.
 
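The new `_set_symbolic_shape` helper above splits a shape that mixes concrete dimensions and `Symbol` objects into a plain shape (Symbols replaced by `None`) plus a symbolic-shape list kept as metadata. A hedged sketch of the observable behaviour, assuming the public `mindspore.Symbol` API:

    import mindspore as ms
    from mindspore import Symbol, Tensor

    s = Symbol(divisor=8)   # a dynamic dimension constrained to multiples of 8
    # Per _set_symbolic_shape, the stored shape becomes (None, 32) and the
    # Symbol itself is retained as the tensor's symbolic_shape metadata.
    t = Tensor(shape=(s, 32), dtype=ms.float32)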
@@ -86,11 +189,11 @@ def tensor(input_data=None, dtype=None, shape=None, init=None, internal=False, const_arg=False):
         based on the `dtype` argument.
 
     Please refer to `Creating and Using Tensor
-    <https://www.mindspore.cn/…>`_ .
+    <https://www.mindspore.cn/tutorials/en/master/compile/static_graph.html#mindspore-user-defined-data-types>`_ .
 
     The difference between it and the Tensor class is that it adds
     `Annotation
-    <https://www.mindspore.cn/…>`_
+    <https://www.mindspore.cn/tutorials/en/master/compile/static_graph.html#annotation-type>`_
     which can prevent the generation of AnyType compared to the Tensor class.
 
     The arguments and return values are the same as the Tensor class. Also see: :class:`mindspore.Tensor`.
@@ -110,10 +213,16 @@ def tensor(input_data=None, dtype=None, shape=None, init=None, internal=False, const_arg=False):
     >>> print(y)
     [1. 2. 3.]
     """
-    return Tensor(input_data, dtype, shape, init, internal, const_arg)  # @jit.typing: () -> tensor_type[{dtype}]
+    return Tensor(input_data, dtype, shape, init, const_arg)  # @jit.typing: () -> tensor_type[{dtype}]
 
 
-class Tensor(Tensor_, metaclass=_TensorMeta):
+class _TensorMeta(abc.ABCMeta, type(TensorPy_)):
+    """
+    Meta class for Tensor. Used internally.
+    """
+
+
+class Tensor(TensorPy_, metaclass=_TensorMeta):
     """
     Tensor is a data structure that stores an n-dimensional array.
 
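As the docstring above states, `mindspore.tensor` is meant for creating Tensors inside `Cell.construct` or `@jit` functions, where the trailing `@jit.typing` annotation lets the compiler infer the dtype. A minimal usage sketch (the function body is illustrative, not from the package):

    import mindspore as ms
    from mindspore import jit

    @jit
    def scale(x):
        factor = ms.tensor(2.0, ms.float32)  # created inside the compiled graph
        return x * factor

    out = scale(ms.tensor([1.0, 2.0, 3.0], ms.float32))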
@@ -121,12 +230,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     - If `init` interface is used to initialize `Tensor`, the `Tensor.init_data` API needs to be called to load the
       actual data to `Tensor`.
     - All modes of CPU and GPU, and Atlas training series with `graph mode (mode=mindspore.GRAPH_MODE)
-      <https://www.mindspore.cn/…>`_ do not supported
+      <https://www.mindspore.cn/tutorials/en/master/compile/static_graph.html>`_ do not supported
       in-place operations yet.
 
     Warning:
-        …
-        …
+        To convert dtype of a `Tensor`, it is recommended to use `Tensor.astype()` rather than
+        `Tensor(sourceTensor, dtype=newDtype)`.
 
     Args:
         input_data (Union[Tensor, float, int, bool, tuple, list, numpy.ndarray]): The data to be stored. It can be
@@ -141,10 +250,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         init (Initializer): The information of init data.
             `init` is used for delayed initialization in parallel mode, when using init, `dtype` and `shape` must be
             set. Default: ``None`` .
-        internal (bool): Whether it is created by the framework.
-            ``'True'`` means that the tensor is created by framework.
-            ``'False'`` means that the tensor is created by user.
-            Default: ``False`` .
         const_arg (bool): Whether the tensor is a constant when it is used for the argument of a network.
             Default: ``False`` .
         device(str): This parameter is reserved and does not need to be configured.
@@ -155,8 +260,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     Note:
         The default value ``None`` of `input_data` works as a placeholder,
-        it does not mean that we can create a NoneType
-        Tensor.
+        it does not mean that we can create a NoneType Tensor.
         Tensor with `shape` contains 0 is not fully tested and supported.
 
     Examples:
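The Warning above recommends `Tensor.astype()` over re-wrapping a tensor with a new dtype. Both spellings below work; the first avoids the extra cast branch (and info log) in `_init`:

    import mindspore as ms
    from mindspore import Tensor

    src = Tensor([1, 2, 3], ms.int32)
    a = src.astype(ms.float32)         # recommended conversion
    b = Tensor(src, dtype=ms.float32)  # discouraged: goes through the cast path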
@@ -211,89 +315,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     """
     delta_seed = 0
 
-    def __init__(self, input_data=None, dtype=None, shape=None, init=None, internal=False, const_arg=False,
-                 device=None):
-        self.init_finished = False
-        if isinstance(input_data, (Tensor, Tensor_)) and dtype is not None:
-            logger.info("It is suggested to use 'Tensor.astype()' to convert the dtype of a Tensor.")
-            _cast = tensor_operator_registry.get("cast")
-            input_data = _cast(input_data, dtype)
-
-        if is_stub_tensor(input_data):
-            input_data = input_data.stub_sync()
-
-        if internal:
-            if input_data is not None:
-                Tensor_.__init__(self, input_data)
-        else:
-            if input_data is None and shape is None and init is None and dtype is not None:
-                validator.check_type_name('dtype', dtype, mstype.number_type +
-                                          (mstype.bool_, mstype.string), "Tensor")
-                Tensor_.__init__(self, dtype, [-2])
-                logger.warning(f"For 'Tensor', if 'dtype' is not None, 'input_data', 'shape' "
-                               f"or 'init' must not be None.")
-            else:
-                # If input data is numpy number, convert it to np array
-                if isinstance(input_data, np_types):
-                    input_data = np.array(input_data)
-
-                if shape is not None:
-                    if isinstance(shape, numbers.Number):
-                        shape = (shape,)
-                    elif isinstance(shape, Symbol):
-                        self.symbolic_shape = [shape]
-                        shape = (None,)
-                    elif isinstance(shape, (list, tuple)) and any(isinstance(s, Symbol) for s in shape):
-                        self.symbolic_shape = [item.to_dict() if isinstance(item, Symbol) else item for item in shape]
-                        shape_without_symbol = (None if isinstance(item, Symbol) else item for item in shape)
-                        shape = list(shape_without_symbol) if isinstance(shape, list) else tuple(shape_without_symbol)
-
-                _check_tensor_input(input_data, dtype, shape, init)
-
-                # If input_data is tuple/list/numpy.ndarray, it's support in check_type method.
-                if (isinstance(shape, (list, tuple)) and None in shape) or init is not None:
-                    shape = _check_tensor_dynamic_shape(dtype, shape, init)
-                    Tensor_.__init__(self, dtype, shape)
-                else:
-                    _check_input_data_type(input_data)
-                    if dtype is not None:
-                        validator.check_type_name('dtype', dtype, mstype.number_type +
-                                                  (mstype.bool_, mstype.string), "Tensor")
-                    else:
-                        dtype = self._set_default_dtype(input_data, dtype)
-
-                    if isinstance(input_data, np.ndarray) and (not input_data.flags['FORC']):
-                        input_data = np.ascontiguousarray(input_data)
-
-                    if dtype is not None:
-                        Tensor_.__init__(self, input_data, dtype)
-                    else:
-                        Tensor_.__init__(self, input_data)
-        validator.check_value_type('const_arg', const_arg, bool, 'Tensor')
-
-        if device is not None and device != "CPU":
-            raise ValueError(f"Only 'CPU' is supported for device, but got {device}.")
-
-        self.const_arg = const_arg
-        self.virtual_flag = False
-        self.init = init
-        self.init_finished = True
-
-        # if cur Tensor is a index value of another Tensor,
-        # parent_tensor_ set to another Tensor
-        # index_of_parent_ will set to the index
-        self.parent_tensor_ = None
-        self.index_of_parent_ = None
-
-        self.slice_num_of_persistent_data_ = None
-        self.slice_shape_of_persistent_data_ = None
-
-        # the auto gradient information
-        self._grad = None
-        self._grad_fn = None
-        self._requires_grad = False
-        self._retain_grad = False
-
     @classmethod
     def __subclasshook__(cls, sub):
         """
@@ -304,16 +325,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
                 return True
         return NotImplemented
 
-    @staticmethod
-    def _set_default_dtype(input_data, dtype):
-        """Set tensor default dtype"""
-        if isinstance(input_data, (float, list, tuple)):
-            if np.array(input_data).dtype == np.float64:
-                return mstype.float32
-        if isinstance(input_data, (int, list, tuple)):
-            if np.array(input_data).dtype in (np.int32, np.int64):
-                return mstype.int64
-        return dtype
 
     def __deepcopy__(self, memodict):
         new_obj = Tensor(self)
@@ -324,8 +335,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def __repr__(self):
         if self.init_finished:
-            Tensor_.data_sync(self, True)
-            return Tensor_.__repr__(self)
+            TensorPy_.data_sync(self, True)
+            return TensorPy_.__repr__(self)
         return ''
 
     def __eq__(self, other):
@@ -365,38 +376,32 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         raise ValueError(message)
 
     def __int__(self):
-        data = self.asnumpy()
-        return self._convert_scalar_(data, int, "Only one element tensors can be converted to Python scalars")
+        try:
+            data = self._item()
+            return int(data)
+        except ValueError:
+            raise ValueError("Only one element tensors can be converted to Python scalars")
+
 
     def __float__(self):
-        data = self.asnumpy()
-        return self._convert_scalar_(data, float, "Only one element tensors can be converted to Python scalars")
+        try:
+            data = self._item()
+            return float(data)
+        except ValueError:
+            raise ValueError("Only one element tensors can be converted to Python scalars")
 
     def __index__(self):
-        data = self.asnumpy()
-        …
+        try:
+            data = self._item()
+            if not isinstance(data, (int, bool)):
+                raise ValueError
+            return int(data)
+        except ValueError:
             raise ValueError("Only integer tensors of a single element can be converted to an index.")
-        return self._convert_scalar_(data, int,
-                                     "Only integer tensors of a single element can be converted to an index.")
 
     def __pos__(self):
         return self
 
-    def __and__(self, other):
-        if isinstance(other, (int, bool, float, Tensor)):
-            return tensor_operator_registry.get('bitwise_and')(self, other)
-        raise TypeError("Unsupported operand type(s) for &: 'Tensor' and '{}'".format(type(other)))
-
-    def __xor__(self, other):
-        if isinstance(other, (int, bool, float, Tensor)):
-            return tensor_operator_registry.get('bitwise_xor')(self, other)
-        raise TypeError("Unsupported operand type(s) for ^: 'Tensor' and '{}'".format(type(other)))
-
-    def __or__(self, other):
-        if isinstance(other, (int, bool, float, Tensor)):
-            return tensor_operator_registry.get('bitwise_or')(self, other)
-        raise TypeError("Unsupported operand type(s) for |: 'Tensor' and '{}'".format(type(other)))
-
     def __radd__(self, other):
         return self.__add__(other)
 
@@ -415,9 +420,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def __rmatmul__(self, other):
         return tensor_operator_registry.get('__matmul__')(other, self)
 
-    def __imatmul__(self, other):
-        return self.__matmul__(other)
-
     def __truediv__(self, other):
         return tensor_operator_registry.get('__truediv__')(self, other)
 
@@ -442,9 +444,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def __rfloordiv__(self, other):
         return tensor_operator_registry.get('__floordiv__')(other, self)
 
-    def __ifloordiv__(self, other):
-        return self.__floordiv__(other)
-
     def __lt__(self, other):
         out = tensor_operator_registry.get('__lt__')(self, other)
         return out
@@ -474,7 +473,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def __getstate__(self):
         state = self.__dict__.copy()
-        state["value"] = Tensor_.__getstate__(self)
+        state["value"] = TensorPy_.__getstate__(self)
         return state
 
     def __setstate__(self, state):
@@ -483,7 +482,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         else:
             value = state.pop("value")
         self.__dict__.update(state)
-        Tensor_.__setstate__(self, value)
+        TensorPy_.__setstate__(self, value)
 
     def __array__(self, dtype=None):
         """support create numpy array from tensor."""
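The rewritten `__int__`/`__float__` above convert through `_item()` and keep the familiar error text. The user-visible contract, sketched with the plain public API:

    import mindspore as ms
    from mindspore import Tensor

    print(int(Tensor([7])))    # 7, a single-element tensor converts to a Python scalar
    print(float(Tensor(2.5)))  # 2.5
    try:
        int(Tensor([1, 2]))    # more than one element
    except ValueError as err:
        print(err)             # Only one element tensors can be converted to Python scalars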
@@ -526,6 +525,42 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """__setitem__ process, called by TensorPy::TensorSetItem"""
         return tensor_operator_registry.get('_tensor_setitem')(self, index, value)
 
+    @property
+    def _dtensor_info(self):
+        """
+        Return the distributed tensor information. For details,
+        please refer to :class:`mindspore.parallel.DistributedTensorInfo`.
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> import numpy as np
+            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
+            >>> print(x._dtensor_info)
+            None
+        """
+        if not hasattr(self, '_dist_tensor_info'):
+            self._dist_tensor_info = None
+        return self._dist_tensor_info
+
+    @_dtensor_info.setter
+    def _dtensor_info(self, input_dtensor_info):
+        """
+        Set the distributed tensor information to current tensor.
+
+        Args:
+            input_dtensor_info (DistributedTensorInfo): The distributed tensor information.
+
+        Examples:
+            >>> from mindspore import Tensor, Layout, _DistributedTensorInfo
+            >>> import numpy as np
+            >>> layout = Layout((2, 2), ("dp", "mp"))
+            >>> src_layout = layout("dp", "mp")
+            >>> distributed_info = _DistributedTensorInfo(src_layout)
+            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
+            >>> x._dtensor_info = distributed_info
+        """
+        self._dist_tensor_info = input_dtensor_info
+
     @property
     def shape(self):
         """
@@ -591,83 +626,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return len(self._shape)
 
-    @property
-    def grad(self):
-        r"""
-        Get the gradient value.
-        """
-        return self._grad
-
-    @grad.setter
-    def grad(self, grad):
-        r"""
-        Set the gradient value.
-        """
-        self._grad = grad
-
-    @property
-    def grad_fn(self):
-        r"""
-        The function for backward.
-        """
-        return self._grad_fn
-
-    @grad_fn.setter
-    def grad_fn(self, grad_fn):
-        r"""
-        Set the function for backward.
-        """
-        self._grad_fn = grad_fn
-
-    @property
-    def is_leaf(self):
-        r"""
-        Whether the stub tensor is leaf.
-        They will be a leaf if they have requires_grad and requires_grad is False,
-        Or they were created by user.
-        """
-        return self._requires_grad is False or self._grad_fn is None
-
-    @property
-    def requires_grad(self):
-        r"""
-        Whether the stub tensor need requires grad.
-        """
-        return self._requires_grad
-
-    @requires_grad.setter
-    def requires_grad(self, requires_grad):
-        r"""
-        Mark the stub tensor whether need requires gradient.
-        """
-        self._requires_grad = requires_grad
-
-    def retain_grad(self):
-        r"""
-        Enable the stub tensor which is not non-leaf to have the grad during backward().
-        """
-        if not self._requires_grad:
-            RuntimeError("can't retain_grad on Tensor that has requires_grad = False.")
-        self._retain_grad = self._grad_fn is not None
-
-    @property
-    def retains_grad(self):
-        r"""
-        Is True if the stub tensor is non-leaf and its grad is enabled to be populated during backward().
-        """
-        return self._retain_grad
-
-    def backward(self, grad=None):
-        r"""
-        Calculate the gradient.
-        """
-        if grad is None:
-            grad = Tensor(np.ones(self.shape), self.dtype)
-        if self._grad_fn is not None:
-            self._grad_fn.apply(grad)
-        elif self._requires_grad:
-            self._grad = grad
-
     @property
     def H(self):
         """
@@ -797,7 +755,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         if isinstance(array, np.ndarray) and not array.flags['C_CONTIGUOUS']:
             array = np.ascontiguousarray(array)
 
-        return Tensor(Tensor_.from_numpy(array))
+        return TensorPy_.from_numpy(array)
 
     def ndimension(self):
         r"""
@@ -1010,7 +968,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(x.get_bytes())
         b'\x01\x00\x02\x00\x03\x00'
         """
-        return Tensor_.get_bytes(self)
+        return TensorPy_.get_bytes(self)
 
     def asnumpy(self):
         """
@@ -1033,7 +991,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         if self.has_init:
             self.init_data()
-        return Tensor_.asnumpy(self)
+        return TensorPy_.asnumpy(self)
 
     def numpy(self):
         """
@@ -1050,7 +1008,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Returns:
             True or False
         """
-        return Tensor_.is_persistent_data(self)
+        return TensorPy_.is_persistent_data(self)
 
     def asnumpy_of_slice_persistent_data(self, param_key, slice_index):
         """
@@ -1061,7 +1019,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Returns:
             A numpy ndarray which shares the same underlying storage with the slice of tensor data.
         """
-        return Tensor_.asnumpy_of_slice_persistent_data(self, param_key, slice_index)
+        return TensorPy_.asnumpy_of_slice_persistent_data(self, param_key, slice_index)
 
     def slice_num_of_persistent_data(self):
         """
@@ -1153,7 +1111,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(y.is_contiguous())
         False
         """
-        return Tensor_.is_contiguous(self)
+        return TensorPy_.is_contiguous(self)
 
     def stride(self, dim=None):
         """
@@ -1161,10 +1119,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         When no parameters are passed in, a list of stride for all dimensions is returned.
 
         Args:
-            dim (int): The dim of stride from one element to the next.
+            dim (int, optional): The dim of stride from one element to the next. Default: ``None``.
 
         Returns:
-            Int, the …
+            Int, returns the step size necessary to jump from one element to the next in the specified dimension.
 
         Raises:
             TypeError: `dim` is not an int.
@@ -1175,7 +1133,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> x.stride()
         [5, 1]
         """
-        stride = Tensor_.stride(self)
+        stride = TensorPy_.stride(self)
         if dim is None:
             return stride
         return stride[dim]
@@ -1194,7 +1152,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(ret)
         0
         """
-        return Tensor_.storage_offset(self)
+        return TensorPy_.storage_offset(self)
 
     def register_hook(self, hook):
         """
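`from_numpy`, rewired above to `TensorPy_.from_numpy`, keeps its documented zero-copy behaviour for C-contiguous arrays (the context lines show the `np.ascontiguousarray` fallback for anything else). A short usage sketch:

    import numpy as np
    import mindspore as ms

    arr = np.ascontiguousarray(np.arange(6, dtype=np.float32))
    t = ms.Tensor.from_numpy(arr)  # shares the buffer with arr instead of copying
    print(t.shape)                 # (6,)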
@@ -1205,13 +1163,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
           which may be modified by returning a new output gradient.
         - The `hook` should have the following signature:
           hook(grad) -> New output gradient, but can not return None or not set return value.
+        - Higher-order differentiation does not support tensor `register_hook`.
         - The following constraints must be met under graph mode:
 
           - The `hook` must satisfy the syntax constraints of the graph mode.
-          - Registering `hook` for `Parameter` is not supported in the graph (i.e., function `Cell.construct` or
-            function decorated by `@jit`).
           - It is not supported to delete `hook` inside graph.
-          - …
+          - It is not supported to register `hook` after the `Tensor` is used before.
+          - It is not supported to register multiple `hooks` for a `Tensor` inside graph.
         - Register `hook` in the graph will return then `Tensor` it self.
 
         Args:
@@ -1245,10 +1203,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output)
         (Tensor(shape=[], dtype=Float32, value=8), Tensor(shape=[], dtype=Float32, value=6))
         """
-        if not check_hook_fn("register_hook", hook):
-            return _TensorHookHandle(self)
+        check_hook_fn(hook)
         handle = _TensorHookHandle(self)
-        handle.id = Tensor_.register_hook(self, hook)
+        handle.id = TensorPy_.register_hook(self, hook)
         return handle
 
     def _remove_hook(self):
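`register_hook` now validates the callable with `check_hook_fn` before handing it to the C++ side, and the returned handle's `remove()` detaches it. A minimal PyNative-mode sketch in the spirit of the docstring example (the values here are my own, not the docstring's):

    import mindspore as ms
    from mindspore import Tensor

    ms.set_context(mode=ms.PYNATIVE_MODE)

    def double_grad(grad):
        return grad * 2          # must return the new gradient, never None

    def net(x, y):
        x.register_hook(double_grad)
        return x * y

    grads = ms.grad(net, grad_position=(0, 1))(Tensor(2.0, ms.float32), Tensor(3.0, ms.float32))
    # expected: the gradient w.r.t. x is doubled by the hook, giving (6.0, 2.0)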
@@ -1266,13 +1223,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
1266
1223
|
>>> print(y)
|
|
1267
1224
|
None
|
|
1268
1225
|
"""
|
|
1269
|
-
|
|
1270
|
-
|
|
1271
|
-
def addcdiv(self, tensor1, tensor2, value=1):
|
|
1272
|
-
r"""
|
|
1273
|
-
For details, please refer to :func:`mindspore.ops.addcdiv`.
|
|
1274
|
-
"""
|
|
1275
|
-
return tensor_operator_registry.get('addcdiv')(self, tensor1, tensor2, value)
|
|
1226
|
+
TensorPy_._flush_from_cache(self)
|
|
1276
1227
|
|
|
1277
1228
|
def addcmul(self, tensor1, tensor2, value=1):
|
|
1278
1229
|
r"""
|
|
@@ -1307,12 +1258,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
1307
1258
|
"""
|
|
1308
1259
|
return tensor_operator_registry.get('angle')(self)
|
|
1309
1260
|
|
|
1310
|
-
def baddbmm(self, batch1, batch2, beta=1, alpha=1):
|
|
1311
|
-
r"""
|
|
1312
|
-
For details, please refer to :func:`mindspore.ops.baddbmm`.
|
|
1313
|
-
"""
|
|
1314
|
-
return tensor_operator_registry.get('baddbmm')(self, batch1, batch2, beta=beta, alpha=alpha)
|
|
1315
|
-
|
|
1316
1261
|
def view(self, *shape):
|
|
1317
1262
|
"""
|
|
1318
1263
|
Reshape the tensor according to the input shape. It's the same as :func:`mindspore.Tensor.reshape`,
|
|
@@ -1342,24 +1287,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             shape = shape[0]
         return tensor_operator_registry.get('reshape')(self, shape)

-    def bitwise_and(self, other):
-        """
-        For details, please refer to :func:`mindspore.ops.bitwise_and`.
-        """
-        return tensor_operator_registry.get('bitwise_and')(self, other)
-
-    def bitwise_or(self, other):
-        """
-        For details, please refer to :func:`mindspore.ops.bitwise_or`.
-        """
-        return tensor_operator_registry.get('bitwise_or')(self, other)
-
-    def bitwise_xor(self, other):
-        """
-        For details, please refer to :func:`mindspore.ops.bitwise_xor`.
-        """
-        return tensor_operator_registry.get('bitwise_xor')(self, other)
-
     def bitwise_left_shift(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_left_shift`.
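Note: this and the neighboring hunks delete thin Python wrappers (`addcdiv`, `baddbmm`, `bitwise_and`/`or`/`xor`, `ge`, `floor_divide`, and more below) from `tensor.py`; presumably these methods are re-registered elsewhere in 2.6.0, while the functional forms are unchanged. A sketch of the still-available functional equivalents (assuming standard `mindspore.ops` behavior, which this diff does not show):

```python
import mindspore as ms
from mindspore import ops

a = ms.Tensor([5, 6], ms.int32)
b = ms.Tensor([3, 3], ms.int32)
print(ops.bitwise_and(a, b))  # [1 2]
print(ops.bitwise_or(a, b))   # [7 7]
print(ops.bitwise_xor(a, b))  # [6 5]
```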
@@ -1392,12 +1319,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('ger')(self, vec2)

-    def ge(self, x):
-        """
-        For details, please refer to :func:`mindspore.ops.ge`.
-        """
-        return tensor_operator_registry.get('ge')(self, x)
-
     def broadcast_to(self, shape):
         """
         For details, please refer to :func:`mindspore.ops.broadcast_to`.
@@ -1463,15 +1384,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('floor_')(self)

-    def floor_divide(self, other):
-        """
-        For details, please refer to :func:`mindspore.ops.floor_divide`.
-
-        .. warning::
-            This is an experimental API that is subject to change or deletion.
-        """
-        return tensor_operator_registry.get('floor_divide')(self, other)
-
     # pylint: disable=redefined-builtin
     def norm(self, ord=None, dim=None, keepdim=False, *, dtype=None):
         """
@@ -1508,18 +1420,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         validator.check_value_type('eps', eps, (float,), 'Tensor.logit')
         return tensor_operator_registry.get('logit')(self, eps)

-    def logaddexp(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.logaddexp`.
-        """
-        return tensor_operator_registry.get('logaddexp')(self, other)
-
-    def logaddexp2(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.logaddexp2`.
-        """
-        return tensor_operator_registry.get('logaddexp2')(self, other)
-
     def logcumsumexp(self, axis):
         r"""
         For details, please refer to :func:`mindspore.ops.logcumsumexp`.
@@ -1529,16 +1429,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('logcumsumexp')(self, axis)

-    def logsumexp(self, axis, keepdims=False):
-        r"""
-        For details, please refer to :func:`mindspore.ops.logsumexp`.
-
-        Note:
-            The input parameter `keepdims` of the inputs has the same meaning as the input parameter `keep_dims` in
-            :func:`mindspore.ops.logsumexp`.
-        """
-        return tensor_operator_registry.get('logsumexp')(self, axis, keepdims)
-
     def logdet(self):
         r"""
         For details, please refer to :func:`mindspore.ops.logdet`.
@@ -1563,12 +1453,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('isreal')(self)

-    def is_complex(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.is_complex`.
-        """
-        return tensor_operator_registry.get('is_complex')(self)
-
     def inv(self):
         r"""
         For details, please refer to :func:`mindspore.ops.inv`.
@@ -1674,12 +1558,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         reshape_op = tensor_operator_registry.get('reshape')
         return reshape_op(self, (-1,))

-    def roll(self, shifts, dims):
-        """
-        For details, please refer to :func:`mindspore.ops.roll`.
-        """
-        return tensor_operator_registry.get('roll')(shifts, dims)(self)
-
     def rot90(self, k, dims):
         r"""
         For details, please refer to :func:`mindspore.ops.rot90`.
@@ -1712,23 +1590,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

     def numel(self):
         r"""
-
-
-        Returns:
-            int. A scalar representing the total of elements in the Tensor.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import mindspore
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
-            >>> print(input_x.numel())
-            4
+        For details, please refer to :func:`mindspore.ops.numel`.
         """
-        return self.
+        return self._size

     def permute(self, *axis):
         """
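Note: `numel` now delegates to the internal `self._size` and its docstring defers to :func:`mindspore.ops.numel`; the example deleted above still describes the behavior:

```python
import numpy as np
import mindspore
from mindspore import Tensor

input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
print(input_x.numel())  # 4, the total number of elements
```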
@@ -1830,46 +1694,49 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

     def argmax_with_value(self, axis=0, keep_dims=False):
         """
-
-
-        Note:
-            - In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
-            - If there are multiple maximum values, the index of the first maximum value is used.
-            - The value range of `axis` is [-dims, dims - 1]. `dims` is the dimension length of this tensor.
+        Return the maximum values and their indices along the given axis of the tensor.

         Args:
-            axis (int, optional):
-
-
+            axis (Union[int, None], optional): Specify the axis for computation. If ``None`` , compute all elements in
+                the tensor. Default ``0`` .
+            keep_dims (bool, optional): Whether the output tensor has dim retained. Default ``False`` .

         Returns:
-
-            tensor.
-
-            - **index** (Tensor) - The index for the maximum value of the input tensor.
-              If `keep_dims` is ``true`` , the shape of
-              output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`. Otherwise, the shape is
-              :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
-            - **value** (Tensor) - The maximum value of input tensor, with the same shape as index.
-
-        Raises:
-            TypeError: If `keep_dims` is not a bool.
-            TypeError: If `axis` is not an int.
+            Tuple(max, max_indices) of 2 tensors.

         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``

         Examples:
-            >>> import numpy as np
             >>> import mindspore
-            >>>
-
-
-            >>>
-
-
-
-
+            >>> x = mindspore.tensor([[9, 3, 4, 5],
+            ...                       [5, 2, 7, 4],
+            ...                       [8, 1, 3, 6]])
+            >>> # case 1: By default, compute the maximum along axis 0.
+            >>> x.argmax_with_value()
+            (Tensor(shape=[4], dtype=Int64, value= [9, 3, 7, 6]),
+             Tensor(shape=[4], dtype=Int64, value= [0, 0, 1, 2]))
+            >>>
+            >>> # case 2: Compute the maximum along axis 1.
+            >>> x.argmax_with_value(axis=1)
+            (Tensor(shape=[3], dtype=Int64, value= [9, 7, 8]),
+             Tensor(shape=[3], dtype=Int64, value= [0, 2, 0]))
+            >>>
+            >>> # case 3: If keep_dims=True, the output shape will be same of that of the input.
+            >>> x.argmax_with_value(axis=1, keep_dims=True)
+            (Tensor(shape=[3, 1], dtype=Int64, value=
+             [[9],
+              [7],
+              [8]]),
+             Tensor(shape=[3, 1], dtype=Int64, value=
+             [[0],
+              [2],
+              [0]]))
+            >>>
+            >>> # case 4: If axis=None, compute the maximum of all elements.
+            >>> x.argmax_with_value(axis=None, keep_dims=True)
+            (Tensor(shape=[], dtype=Int64, value= 9),
+             Tensor(shape=[], dtype=Int64, value= 0))
         """
         if self.shape == ():
             return (self, Tensor(0))
@@ -1877,46 +1744,49 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

     def argmin_with_value(self, axis=0, keep_dims=False):
         """
-
-
-        Note:
-            - In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
-            - If there are multiple minimum values, the index of the first minimum value is used.
-            - The value range of `axis` is [-dims, dims - 1]. `dims` is the dimension length of this tensor.
+        Return the minimum values and their indices along the given axis of the tensor.

         Args:
-            axis (int, optional):
-
-
+            axis (Union[int, None], optional): Specify the axis for computation. If ``None`` , compute all elements in
+                the tensor. Default ``0`` .
+            keep_dims (bool, optional): Whether the output tensor has dim retained. Default ``False`` .

         Returns:
-
-            tensor.
-
-            - **index** (Tensor) - The index for the minimum value of the input tensor.
-              If `keep_dims` is true, the shape of
-              output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`. Otherwise, the shape is
-              :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
-            - **value** (Tensor) - The minimum value of input tensor, with the same shape as index.
-
-        Raises:
-            TypeError: If `keep_dims` is not a bool.
-            TypeError: If `axis` is not an int.
+            Tuple(min, min_indices) of 2 tensors.

         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``

         Examples:
-            >>> import numpy as np
             >>> import mindspore
-            >>>
-
-
-            >>>
-
-
-
-
+            >>> x = mindspore.tensor([[2, 5, 1, 6],
+            ...                       [3, -7, -2, 4],
+            ...                       [8, -4, 1, -3]])
+            >>> # case 1: By default, compute the minimum along axis 0.
+            >>> x.argmin_with_value()
+            (Tensor(shape=[4], dtype=Int64, value= [ 2, -7, -2, -3]),
+             Tensor(shape=[4], dtype=Int64, value= [0, 1, 1, 2]))
+            >>>
+            >>> # case 2: Compute the minimum along axis 1.
+            >>> x.argmin_with_value(axis=1)
+            (Tensor(shape=[3], dtype=Int64, value= [ 1, -7, -4]),
+             Tensor(shape=[3], dtype=Int64, value= [2, 1, 1]))
+            >>>
+            >>> # case 3: If keep_dims=True, the output shape will be same of that of the input.
+            >>> x.argmin_with_value(axis=1, keep_dims=True)
+            (Tensor(shape=[3, 1], dtype=Int64, value=
+             [[ 1],
+              [-7],
+              [-4]]),
+             Tensor(shape=[3, 1], dtype=Int64, value=
+             [[2],
+              [1],
+              [1]]))
+            >>>
+            >>> # case 4: If axis=None, compute the minimum of all elements.
+            >>> x.argmin_with_value(axis=None, keep_dims=True)
+            (Tensor(shape=[], dtype=Int64, value= -7),
+             Tensor(shape=[], dtype=Int64, value= 0))
         """
         if self.shape == ():
             return (self, Tensor(0))
@@ -1982,37 +1852,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         x = x.astype(origin_dtype)
         return x

-    def copy_(self, src, non_blocking=False):
-        """
-        Copies the elements from src into self tensor and returns self.
-
-        .. warning::
-            This is an experimental API that is subject to change or deletion.
-            The `src` tensor must be broadcastable with the `self` tensor. It may be of a different data type.
-
-        Args:
-            src (Tensor): the source tensor to copy from.
-            non_blocking (bool): no effect currently.
-
-        Returns:
-            Return self Tensor.
-
-        Supported Platforms:
-            ``Ascend``
-
-        Examples:
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> a = Tensor(np.ones((3,3)).astype("float32"))
-            >>> b = Tensor(np.zeros((3,3)).astype("float32"))
-            >>> a.copy_(b)
-            >>> print(a)
-            [[0. 0. 0.]
-             [0. 0. 0.]
-             [0. 0. 0.]]
-        """
-        return tensor_operator_registry.get("copy_")(self, src)
-
     def scatter_add_(self, dim, index, src):
         """
         Add all elements in `src` to the index specified by `index` to `self` along dimension specified by `dim`,
@@ -2087,48 +1926,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

     def scatter_sub(self, indices, updates):
         """
-
-        `indices`, with values from `updates`. When multiple values are provided for the same
-        index, the result of the update will be to subtract these values respectively. This operation is almost
-        equivalent to using :class:`mindspore.ops.ScatterNdSub` , except that the updates are applied on output `Tensor`
-        instead of input `Parameter`.
-
-        The last axis of `indices` is the depth of each index vectors. For each index vector,
-        there must be a corresponding value in `updates`. The shape of `updates` should be
-        equal to the shape of `self[indices]`. For more details, see Examples.
-
-        Note:
-            On GPU, if some values of the `indices` are out of bound, instead of raising an index error,
-            the corresponding `updates` will not be updated to self tensor. On CPU, if some values of
-            the `indices` are out of bound, raising an index error. On Ascend, out of bound checking is
-            not supported, if some values of the `indices` are out of bound, unknown errors may be caused.
-
-        Args:
-            indices (Tensor): The index of input tensor whose data type is int32 or int64.
-                The rank must be at least 2.
-            updates (Tensor): The tensor to update the input tensor, has the same type as input,
-                and updates.shape should be equal to indices.shape[:-1] + self.shape[indices.shape[-1]:].
-
-        Returns:
-            Tensor, has the same shape and type as self tensor.
-
-        Raises:
-            TypeError: If dtype of `indices` is neither int32 nor int64.
-            ValueError: If length of shape of self tensor is less than the last dimension of shape of `indices`.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]).astype('float32'))
-            >>> indices = Tensor(np.array([[0, 0], [0, 0]]).astype('int32'))
-            >>> updates = Tensor(np.array([1.0, 2.2]).astype('float32'))
-            >>> output = x.scatter_sub(indices, updates)
-            >>> print(output)
-            [[-3.3000002  0.3        3.6      ]
-             [ 0.4        0.5       -3.2      ]]
+        For details, please refer to :func:`mindspore.ops.tensor_scatter_sub`.
         """
         return tensor_operator_registry.get('tensor_scatter_sub')(self, indices, updates)

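Note: the long `scatter_sub` docstring collapses to a cross-reference, but the implementation still calls `tensor_scatter_sub`, so the example deleted above remains valid:

```python
import numpy as np
from mindspore import Tensor

x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]).astype('float32'))
indices = Tensor(np.array([[0, 0], [0, 0]]).astype('int32'))
updates = Tensor(np.array([1.0, 2.2]).astype('float32'))
output = x.scatter_sub(indices, updates)
print(output)
# [[-3.3000002  0.3        3.6      ]
#  [ 0.4        0.5       -3.2      ]]
```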
@@ -2277,7 +2075,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             opt_shard_group(str): Optimizer shard group which is used in auto or semi auto parallel mode
                 to get one shard of a parameter's slice. For more information about optimizer parallel, please refer to:
                 `Optimizer Parallel
-                <https://www.mindspore.cn/
+                <https://www.mindspore.cn/tutorials/en/master/parallel/optimizer_parallel.html>`_.
                 Default: ``None``.

         Returns:
@@ -2358,9 +2156,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

         # At embedding cache scenes. When size of tensor is out of range, we store data to persistent storage
         if slice_num_of_persistent_data > 1:
-            self.assign_value(
+            self.assign_value(TensorPy_.persistent_data_from_numpy(data, slice_num_of_persistent_data))
         else:
-            self.assign_value(
+            self.assign_value(TensorPy_.from_numpy(data))
         return self

     def resize(self, *new_shape):
@@ -2512,73 +2310,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('tracev2')(self, offset, axis1, axis2, dtype)

-    def take(self, indices, axis=None, mode='clip'):
-        """
-        Takes elements from a tensor along an axis.
-
-        Args:
-            indices (Tensor): The indices with shape :math:`(Nj...)` of the values to extract.
-            axis (int, optional): The axis over which to select values. By default,
-                the flattened input tensor is used. Default: ``None`` .
-            mode (str, optional): Support ``'raise'``, ``'wrap'``, ``'clip'``.
-
-                - ``raise``: Raises an error;
-
-                - ``wrap``: Wraps around;
-
-                - ``clip``: Clips to the range. ``'clip'`` mode means that all indices that are
-                  too large are replaced by the index that addresses the last element
-                  along that axis. Note that this disables indexing with negative numbers.
-
-                Default: ``'clip'`` .
-
-        Returns:
-            Tensor, the indexed result.
-
-        Raises:
-            ValueError: If `axis` is out of range, or `mode` has values other than ('raise', 'wrap', 'clip')
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> a = Tensor(np.array([4, 3, 5, 7, 6, 8]))
-            >>> indices = Tensor(np.array([0, 1, 4]))
-            >>> output = a.take(indices)
-            >>> print(output)
-            [4 3 6]
-        """
-        if mode not in ('raise', 'wrap', 'clip'):
-            raise ValueError(f"For 'Tensor.take', the argument 'mode' should be one of in ['raise', 'wrap', 'clip'],"
-                             f" but got {mode}.")
-        if axis is None:
-            a = self.ravel()
-            axis = 0
-        else:
-            a = self
-        ndim = a.ndim
-        validator.check_axis_in_range(axis, ndim)
-        axis = axis + ndim if axis < 0 else axis
-
-        shape_a = a.shape
-        shape_indices = indices.shape
-        size_indices = indices.size
-        indices = tensor_operator_registry.get('check_indices')(shape_a[axis], indices, mode)
-
-        # reshapes indices to shape (Ni..., Nj..., Nk)
-        shape_ni = shape_a[:axis]
-        shape_nk = shape_a[axis + 1:]
-        shape_out = shape_ni + shape_indices + shape_nk
-        shape_indices = tuple(size_indices if i == axis else 1 for i in range(ndim))
-        indices = indices.reshape(shape_indices)
-        shape_indices = shape_ni + (indices.size,) + shape_nk
-        indices = tensor_operator_registry.get('broadcast_to')(indices, shape_indices)
-
-        res = tensor_operator_registry.get('gather_d')(a, axis, indices)
-        return res.reshape(shape_out)
-
     def choose(self, choices, mode='clip'):
         """
         Construct a tensor from an index tensor and a list of tensors to choose from.
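Note: `Tensor.take` is deleted outright in this hunk. The removed body shows it was a composition of `check_indices`, `broadcast_to` and `gather_d`; below is a hedged standalone sketch of the same gather-along-axis idea for the default flattened case, using public ops. The helper name is hypothetical and not part of mindspore:

```python
import numpy as np
from mindspore import Tensor, ops

def take_along_flat(a: Tensor, indices: Tensor) -> Tensor:
    # mimics the removed Tensor.take(indices) default path:
    # flatten the input, then gather along axis 0
    flat = a.reshape((-1,))
    return ops.gather_d(flat, 0, indices)

a = Tensor(np.array([4, 3, 5, 7, 6, 8]))
indices = Tensor(np.array([0, 1, 4]))
print(take_along_flat(a, indices))  # [4 3 6]
```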
@@ -2659,34 +2390,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

     def searchsorted(self, v, side='left', sorter=None):
         """
-
-
-        Args:
-            v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into the tensor.
-            side (str, optional): If 'left', the index of the first suitable
-                location found is given. If 'right', return the last such index. If there is
-                no suitable index, return either 0 or N (where N is the length of the tensor).
-                Default: ``left`` .
-            sorter (Union[int, list, tuple, Tensor]): optional tensor of
-                integer indices that sort the tensor into ascending order on the innermost dimension
-                and the type must be int64. They are typically the result of argsort. Default: ``None`` .
-                CPU and GPU can only use default values
-
-        Returns:
-            Tensor, array of insertion points with the same shape as `v`.
-
-        Raises:
-            ValueError: If argument for `side` or `sorter` is invalid.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> x = Tensor(np.array([1, 2, 3, 4, 5]))
-            >>> print(x.searchsorted(3))
-            2
+        For details, please refer to :func:`mindspore.ops.searchsorted`.
         """
         if side not in ('left', 'right'):
             raise ValueError(f"For 'Tensor.searchsorted', the argument 'side' should be one of in "
@@ -2711,7 +2415,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.gather_nd`.
         """
-        validator.check_value_type('indices', indices, (Tensor,
+        validator.check_value_type('indices', indices, (Tensor, TensorPy_,), 'Tensor.gather_nd')
         return tensor_operator_registry.get('gather_nd')(self, indices)

     def uniform(self, from_=0., to=1., generator=None):
@@ -2748,7 +2452,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

     def uniform_(self, from_=0, to=1, *, generator=None):
         r"""
-        Update the `self`
+        Update the `self` Tensor in place by generating random numbers sampled from uniform distribution in the
         half-open interval :math:`[from\_, to)`.

         .. math::
@@ -2791,6 +2495,41 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('uniform_')(self, from_=from_, to=to, generator=generator)

+
+    def exponential_(self, lambd=1, *, generator=None):
+        r"""
+        Fills `self` tensor with elements drawn from the exponential distribution:
+
+        .. math::
+            f(x) = \lambda \exp(-\lambda x)
+
+        .. warning::
+            - It is only supported on Atlas A2 Training Series Products.
+            - This is an experimental API that is subject to change or deletion.
+
+        Args:
+            lambd (float, optional): Parameters of exponential distribution. Default: ``1``.
+
+        Keyword Args:
+            generator (Generator, optional): a pseudorandom number generator.
+                Default: ``None`` .
+
+        Returns:
+            Tensor, with same shape and same data type with input.
+
+        Supported Platforms:
+            ``Ascend``
+
+        Examples:
+            >>> import mindspore
+            >>> x = mindspore.Tensor([1, 2, 3.0])
+            >>> out = x.exponential_(2)
+            >>> print(out.shape)
+            (3,)
+        """
+        return tensor_operator_registry.get('exponential_')(self, lambd=lambd, generator=generator)
+
+
     def sum_to_size(self, *size):
         r"""
         Sum self Tensor to the `size`. `size` must be expandable to the Tensor size.
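Note: `exponential_` is a newly added in-place sampler, joining `uniform_`, `random_` and `normal_`. A seeded sketch assuming the `mindspore.Generator` API; only the shape assertion comes from the docstring above:

```python
import mindspore as ms

g = ms.Generator()
g.manual_seed(5)  # assumption: Generator supports manual_seed for reproducible draws

x = ms.Tensor([1.0, 2.0, 3.0])
out = x.exponential_(2, generator=g)  # rate lambda = 2, filled in place
print(out.shape)  # (3,)
```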
@@ -2847,84 +2586,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('nanmedian')(self, axis, keepdims)

-    def
-        """
-        Repeat elements of a tensor.
-
-        Args:
-            repeats (Union[int, tuple, list]): The number of repetitions for each element.
-                `repeats` is broadcasted to fit the shape of the given axis.
-            axis (int, optional): The axis along which to repeat values. By default,
-                use the flattened input tensor, and return a flat output tensor. Default: ``None``.
-
-        Returns:
-            Tensor, has the same shape as input tensor except along the given axis.
-
-        Raises:
-            ValueError: If the axis is out of range.
-            TypeError: If arguments have types not specified above.
-
-        See also:
-            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
-            - :func:`mindspore.Tensor.resize`: Changes shape and size of tensor in-place.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> x = Tensor(np.array(3))
-            >>> print(x.repeat(4))
-            [3 3 3 3]
-            >>> x = Tensor(np.array([[1, 2],[3, 4]]))
-            >>> print(x.repeat(2))
-            [1 1 2 2 3 3 4 4]
-            >>> print(x.repeat(3, axis=1))
-            [[1 1 1 2 2 2]
-             [3 3 3 4 4 4]]
-            >>> print(x.repeat([1,2], axis=0))
-            [[1 2]
-             [3 4]
-             [3 4]]
-        """
-        if not isinstance(repeats, (tuple, list)):
-            repeats = (repeats,)
-        for index, element in enumerate(repeats):
-            if not isinstance(element, int):
-                raise TypeError(f"For 'Tensor.repeat', each element in {repeats} should be int, but got "
-                                f"{type(element)} at index {index}.")
-        input_x = self
-        if axis is None:
-            input_x = self.ravel()
-            axis = 0
-        if axis is not None and not isinstance(axis, int):
-            raise TypeError(f"For 'Tensor.repeat', the argument 'axis' should be int, but got {type(axis)}.")
-        validator.check_axis_in_range(axis, input_x.ndim)
-        axis = axis + input_x.ndim if axis < 0 else axis
-
-        if len(repeats) == 1:
-            repeats = repeats[0]
-            if repeats == 0:
-                return Tensor_(input_x.dtype, (0,))
-            return tensor_operator_registry.get('repeat_elements')(input_x, repeats, axis)
-        size = input_x.shape[axis]
-        if len(repeats) != size:
-            raise ValueError(f"For 'Tensor.repeat', the length of 'repeats' must be the same as the shape of the "
-                             f"original tensor in the 'axis' dimension, but got the length of 'repeats' "
-                             f"{len(repeats)}, the shape of the original tensor in the 'axis' dimension {size}.")
-        subs = tensor_operator_registry.get('tensor_split')(input_x, size, axis)
-        repeated_subs = []
-        for sub, rep in zip(subs, repeats):
-            if rep != 0:
-                repeated_subs.append(tensor_operator_registry.get('repeat_elements')(sub, rep, axis))
-        return tensor_operator_registry.get('concatenate')(repeated_subs, axis)
-
-    def bernoulli(self, p=0.5, seed=None):
+    def bernoulli(self, *, generator=None):
         r"""
-        For details, please refer to :func:`mindspore.
+        For details, please refer to :func:`mindspore.mint.bernoulli`.
         """
-        return tensor_operator_registry.get('bernoulli')(self,
+        return tensor_operator_registry.get('bernoulli')(self, generator=generator)

     def random_(self, from_=0, to=None, *, generator=None):
         r"""
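Note: two breaking changes sit in this hunk: the pure-Python `repeat` implementation is deleted, and `bernoulli` changes signature from `bernoulli(self, p=0.5, seed=None)` to `bernoulli(self, *, generator=None)`, now documented against :func:`mindspore.mint.bernoulli`, where the tensor itself presumably supplies the probabilities. A migration sketch (values illustrative; `mindspore.Generator` is assumed):

```python
import mindspore as ms

p = ms.Tensor([0.1, 0.5, 0.9], ms.float32)

# 2.5.0 style (removed):  p.bernoulli(p=0.5, seed=None)
# 2.6.0 style: pass only a generator; probabilities come from the tensor
g = ms.Generator()
g.manual_seed(0)
out = p.bernoulli(generator=g)  # each element is 0 or 1
print(out)
```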
@@ -2936,10 +2602,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

         Args:
             from\_ (Union[number.Number, Tensor], optional): the lower bound of the generated random number.
-                It can be a scalar value or a
+                It can be a scalar value or a Tensor of any dimension with only a single element. Default: 0.
             to (Union[number.Number, Tensor], optional): the upper bound of the generated random number.
                 By default it's the upper limit of the input data type.
-                It can be a scalar value or a
+                It can be a scalar value or a Tensor of any dimension with only a single element.
                 Default: ``None``.

         Keyword Args:
@@ -2978,7 +2644,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.gather_elements`.
         """
-        validator.check_value_type('index', index, (Tensor,
+        validator.check_value_type('index', index, (Tensor, TensorPy_,), 'Tensor.gather_elements')
         return tensor_operator_registry.get('gather_elements')(self, dim, index)

     def nonzero(self, *, as_tuple=False):
@@ -3166,7 +2832,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(out2)
         1
         """
-
+        if self.ndim == 1 and self.size == 0:
+            return []
+        return self._tolist()
+

     def unsorted_segment_min(self, segment_ids, num_segments):
         r"""
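Note: `tolist` gains an explicit guard so an empty 1-D tensor returns a plain `[]` before delegating to the internal `_tolist`. A quick check (sketch; the non-empty case assumes the usual nested-list conversion):

```python
import numpy as np
import mindspore as ms

empty = ms.Tensor(np.array([], dtype=np.float32))
print(empty.tolist())  # [] via the new special case

x = ms.Tensor([[1, 2], [3, 4]])
print(x.tolist())      # [[1, 2], [3, 4]]
```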
@@ -3186,14 +2855,15 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('unsorted_segment_prod')(self, segment_ids, num_segments)

-    def unique_consecutive(self,
+    def unique_consecutive(self, return_inverse=False, return_counts=False, dim=None):
         """
         For details, please refer to :func:`mindspore.ops.unique_consecutive`.
         """
-        output, idx, counts
-
+        output, idx, counts =\
+            tensor_operator_registry.get("unique_consecutive")(return_inverse, return_counts, dim)(self)
+        if return_inverse and return_counts:
             return output, idx, counts
-        if
+        if return_inverse:
             return output, idx
         if return_counts:
             return output, counts
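Note: `unique_consecutive` regains an explicit signature (`return_inverse=False, return_counts=False, dim=None`) and now forwards all three flags to the registered op before unpacking. Usage sketch (input values illustrative; semantics per :func:`mindspore.ops.unique_consecutive`):

```python
import mindspore as ms

x = ms.Tensor([1, 1, 2, 2, 3, 1, 1, 2], ms.int32)
out, inverse, counts = x.unique_consecutive(return_inverse=True, return_counts=True)
print(out)      # consecutive duplicates collapsed: [1 2 3 1 2]
print(inverse)  # index of each input element within `out`
print(counts)   # run lengths: [2 2 1 2 1]
```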
@@ -3205,12 +2875,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get("unique_with_pad")(self, pad_num)

-    def diag(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.diag`.
-        """
-        return tensor_operator_registry.get('diag')(self)
-
     def diagflat(self, offset=0):
         r"""
         For details, please refer to :func:`mindspore.ops.diagflat`.
@@ -3248,13 +2912,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('dsplit')(self, indices_or_sections)

-    def xlogy(self, y):
-        r"""
-        For details, please refer to :func:`mindspore.ops.xlogy`.
-        The parameter `y` of the current interface is the same as the parameter `other` of the reference interface.
-        """
-        return tensor_operator_registry.get("xlogy")(self, y)
-
     def eigvals(self):
         r"""
         For details, please refer to :func:`mindspore.ops.eigvals`.
@@ -3382,7 +3039,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         if self.dtype == other.dtype:
             return self
-        return
+        return TensorPy_.type_as(self, other)


     def bool(self):
@@ -3603,12 +3260,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('conj')(self)

-    def count_nonzero(self, axis=(), keep_dims=False, dtype=mstype.int32):
-        r"""
-        For details, please refer to :func:`mindspore.ops.count_nonzero`.
-        """
-        return tensor_operator_registry.get('count_nonzero')(self, axis, keep_dims, dtype)
-
     def cross(self, other, dim=None):
         r"""
         For details, please refer to :func:`mindspore.ops.cross`.
@@ -3686,16 +3337,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('equal')(self, other)

-    def index_add(self, dim, index, source, *, alpha=1):
-        r"""
-        For details, please refer to :func:`mindspore.ops.index_add`.
-        The corresponding relationships between the parameters of `Tensor.index_add` and :func:`mindspore.ops.index_add`
-        are as follows: `dim` -> `axis`, `index` -> `indices`, `source * alpha` -> `y`.
-        """
-        check_is_number(alpha, (int, float))
-        source = tensor_operator_registry.get('__mul__')(source, alpha)
-        return tensor_operator_registry.get('index_add')(self, indices=index, y=source, axis=dim)
-
     def index_add_(self, dim, index, source, *, alpha=1):
         r"""
         Accumulate the elements of `alpha` times `source` into the `self` by adding to the index
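Note: the out-of-place `index_add` wrapper is removed while the in-place `index_add_` survives. The removed body documents the mapping onto :func:`mindspore.ops.index_add`: `dim` -> `axis`, `index` -> `indices`, `source * alpha` -> `y`. A hedged equivalent sketch (the `Parameter` wrapper reflects my reading of `ops.index_add`'s input requirement, which this diff does not state):

```python
import mindspore as ms
from mindspore import ops, Parameter

x = Parameter(ms.Tensor([[1.0, 2.0], [3.0, 4.0]]))
index = ms.Tensor([0, 1], ms.int32)
source = ms.Tensor([[10.0, 20.0], [30.0, 40.0]])
alpha = 2

# the removed wrapper reduced to: index_add(self, indices=index, y=source * alpha, axis=dim)
out = ops.index_add(x, index, source * alpha, axis=0)
print(out)  # rows 0 and 1 accumulated with 2 * source
```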
@@ -3802,35 +3443,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('is_floating_point')(self)

-    def is_signed(self):
-        """
-        Judge whether the data type of tensor is a signed data type.
-
-        Returns:
-            Bool. If the dtype of `self` is a signed data type, return True. Otherwise, return False.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import mindspore as ms
-            >>> x = ms.Tensor([1, 2, 3], ms.int64)
-            >>> y = ms.Tensor([1, 2, 3], ms.uint64)
-            >>> output = x.is_signed()
-            >>> output2 = y.is_signed()
-            >>> print(output)
-            True
-            >>> print(output2)
-            False
-        """
-        return self.dtype in mstype.signed_type
-
-    def logical_xor(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.logical_xor`.
-        """
-        return tensor_operator_registry.get('logical_xor')(self, other)
-
     def lstsq(self, A):
         r"""
         This interface is deprecated from version 2.4 and will be removed in a future version.
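Note: `is_signed` and `logical_xor` are also dropped from `tensor.py`. The removed `is_signed` body was a one-line dtype check that still works inline (sketch grounded in the deleted code):

```python
import mindspore as ms
from mindspore import dtype as mstype

x = ms.Tensor([1, 2, 3], ms.int64)
y = ms.Tensor([1, 2, 3], ms.uint64)
print(x.dtype in mstype.signed_type)  # True
print(y.dtype in mstype.signed_type)  # False
```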
@@ -4222,7 +3834,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         mode = context.get_context("mode")
         if mode != context.PYNATIVE_MODE:
             raise ValueError(f"The method of 'move_to' only supported in pynative mode, but got: {mode}.")
-        return
+        return TensorPy_.move_to(self, to, blocking)

     def _offload(self):
         r"""
@@ -4237,7 +3849,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> x = ms.Tensor([1, 2, 3], ms.int64)
         >>> x._offload()
         """
-        return
+        return TensorPy_._offload(self, False)

     def _data_ptr(self):
         r"""
@@ -4254,7 +3866,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> x = ms.Tensor([1, 2, 3], ms.int64)
         >>> data_ptr = x._data_ptr()
         """
-        return
+        return TensorPy_._data_ptr(self)

     def normal_(self, mean=0, std=1, *, generator=None):
         r"""
@@ -4296,6 +3908,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         return tensor_operator_registry.get('normal_')(self, mean=mean, std=std, generator=generator)


+    def triangular_solve(self, A, upper=True, transpose=False, unitriangular=False):
+        r"""
+        For details, please refer to :func:`mindspore.mint.triangular_solve`.
+        """
+        return tensor_operator_registry.get('triangular_solve')(self, A, upper, transpose, unitriangular)
+
 def _vm_compare(*args):
     """Implement `vm_compare` for tensor."""
     if args:
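Note: `triangular_solve` is a new Tensor method delegating to :func:`mindspore.mint.triangular_solve`. A hedged call sketch; the signature comes from the hunk above, but the solve semantics in the comments assume the usual triangular-solve contract and are not stated in this diff:

```python
import mindspore as ms

# A: upper-triangular system matrix, b: right-hand side
A = ms.Tensor([[2.0, 1.0],
               [0.0, 3.0]])
b = ms.Tensor([[4.0],
               [9.0]])

# presumably solves A @ x = b for triangular A
result = b.triangular_solve(A, upper=True)
print(result)
```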
|