mindspore 2.4.10-cp310-cp310-win_amd64.whl → 2.6.0-cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +13 -6
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -38
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +83 -0
- mindspore/_extends/parse/deprecated/__init__.py +0 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +394 -0
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +47 -198
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +229 -99
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +17 -12
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +11 -5
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +138 -43
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +6 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -2
- mindspore/common/_stub_tensor.py +30 -14
- mindspore/common/_tensor_cpp_method.py +17 -0
- mindspore/common/_tensor_docs.py +4760 -0
- mindspore/common/api.py +480 -372
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +39 -36
- mindspore/common/dump.py +9 -6
- mindspore/common/file_system.py +9 -1
- mindspore/common/generator.py +5 -0
- mindspore/common/hook_handle.py +6 -2
- mindspore/common/initializer.py +13 -10
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +9 -3
- mindspore/common/mindir_util.py +10 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +135 -52
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +975 -1981
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +52 -2
- mindspore/communication/comm_func.py +240 -181
- mindspore/communication/management.py +95 -26
- mindspore/context.py +324 -573
- mindspore/dataset/__init__.py +65 -37
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/callback/ds_callback.py +2 -1
- mindspore/dataset/core/config.py +87 -6
- mindspore/dataset/engine/cache_admin.py +3 -3
- mindspore/dataset/engine/cache_client.py +6 -5
- mindspore/dataset/engine/datasets.py +292 -267
- mindspore/dataset/engine/datasets_audio.py +22 -8
- mindspore/dataset/engine/datasets_standard_format.py +46 -27
- mindspore/dataset/engine/datasets_text.py +78 -48
- mindspore/dataset/engine/datasets_user_defined.py +183 -117
- mindspore/dataset/engine/datasets_vision.py +120 -44
- mindspore/dataset/engine/iterators.py +283 -63
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/obs/util.py +8 -0
- mindspore/dataset/engine/queue.py +40 -0
- mindspore/dataset/engine/samplers.py +289 -43
- mindspore/dataset/engine/serializer_deserializer.py +3 -2
- mindspore/dataset/engine/validators.py +53 -11
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/py_transforms_util.py +17 -0
- mindspore/dataset/transforms/transforms.py +31 -14
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/dataset/vision/validators.py +1 -2
- mindspore/device_context/__init__.py +21 -0
- mindspore/device_context/ascend/__init__.py +25 -0
- mindspore/device_context/ascend/device.py +72 -0
- mindspore/device_context/ascend/op_debug.py +153 -0
- mindspore/device_context/ascend/op_precision.py +193 -0
- mindspore/device_context/ascend/op_tuning.py +123 -0
- mindspore/{ops_generate/gen_constants.py → device_context/cpu/__init__.py} +6 -17
- mindspore/device_context/cpu/device.py +62 -0
- mindspore/device_context/cpu/op_tuning.py +43 -0
- mindspore/device_context/gpu/__init__.py +21 -0
- mindspore/device_context/gpu/device.py +70 -0
- mindspore/device_context/gpu/op_precision.py +67 -0
- mindspore/device_context/gpu/op_tuning.py +175 -0
- mindspore/device_manager.py +170 -0
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/llm_boost/__init__.py +1 -0
- mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +209 -0
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
- mindspore/experimental/llm_boost/register.py +1 -0
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +6 -6
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +7 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +73 -46
- mindspore/experimental/optim/radam.py +34 -31
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -53
- mindspore/hal/event.py +52 -52
- mindspore/hal/memory.py +179 -120
- mindspore/hal/stream.py +150 -109
- mindspore/include/api/context.h +0 -1
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +50 -0
- mindspore/mindrecord/__init__.py +21 -8
- mindspore/mindrecord/config.py +17 -316
- mindspore/mindrecord/filereader.py +1 -9
- mindspore/mindrecord/filewriter.py +5 -15
- mindspore/mindrecord/mindpage.py +1 -9
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +798 -761
- mindspore/mint/distributed/__init__.py +70 -4
- mindspore/mint/distributed/distributed.py +2679 -44
- mindspore/mint/linalg/__init__.py +8 -0
- mindspore/mint/nn/__init__.py +743 -22
- mindspore/mint/nn/functional.py +716 -23
- mindspore/mint/nn/layer/__init__.py +21 -4
- mindspore/mint/nn/layer/_functions.py +334 -0
- mindspore/mint/nn/layer/activation.py +276 -1
- mindspore/mint/nn/layer/basic.py +123 -0
- mindspore/mint/nn/layer/conv.py +933 -0
- mindspore/mint/nn/layer/normalization.py +223 -28
- mindspore/mint/nn/layer/padding.py +797 -0
- mindspore/mint/nn/layer/pooling.py +235 -0
- mindspore/mint/optim/__init__.py +3 -1
- mindspore/mint/optim/adam.py +223 -0
- mindspore/mint/optim/adamw.py +26 -19
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/mint/special/__init__.py +2 -1
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/multiprocessing/__init__.py +5 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1373 -192
- mindspore/nn/dynamic_lr.py +2 -1
- mindspore/nn/layer/activation.py +29 -27
- mindspore/nn/layer/basic.py +51 -35
- mindspore/nn/layer/channel_shuffle.py +3 -3
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +53 -42
- mindspore/nn/layer/embedding.py +12 -11
- mindspore/nn/layer/normalization.py +56 -49
- mindspore/nn/layer/padding.py +4 -3
- mindspore/nn/layer/pooling.py +120 -42
- mindspore/nn/layer/rnn_cells.py +1 -1
- mindspore/nn/layer/rnns.py +2 -1
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +59 -36
- mindspore/nn/learning_rate_schedule.py +8 -4
- mindspore/nn/loss/loss.py +58 -55
- mindspore/nn/optim/ada_grad.py +7 -5
- mindspore/nn/optim/adadelta.py +11 -9
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +19 -15
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +3 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lars.py +1 -4
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +3 -3
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +11 -9
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/optim/thor.py +2 -1
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/utils/init.py +13 -11
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +181 -122
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +6 -7
- mindspore/numpy/array_creations.py +63 -65
- mindspore/numpy/array_ops.py +149 -144
- mindspore/numpy/logic_ops.py +41 -42
- mindspore/numpy/math_ops.py +361 -359
- mindspore/numpy/utils.py +17 -18
- mindspore/numpy/utils_const.py +5 -6
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +5 -3
- mindspore/ops/_grad_experimental/grad_comm_ops.py +112 -16
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -2
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -65
- mindspore/ops/_vmap/vmap_array_ops.py +52 -25
- mindspore/ops/_vmap/vmap_base.py +0 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +21 -14
- mindspore/ops/_vmap/vmap_math_ops.py +15 -16
- mindspore/ops/_vmap/vmap_nn_ops.py +29 -42
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +258 -46
- mindspore/ops/auto_generate/gen_extend_func.py +757 -185
- mindspore/ops/auto_generate/gen_ops_def.py +4197 -2243
- mindspore/ops/auto_generate/gen_ops_prim.py +16976 -6055
- mindspore/ops/auto_generate/pyboost_inner_prim.py +221 -87
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +20 -25
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +228 -30
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -30
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +40 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +2089 -2403
- mindspore/ops/function/clip_func.py +80 -23
- mindspore/ops/function/debug_func.py +57 -57
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +2 -2
- mindspore/ops/function/linalg_func.py +47 -78
- mindspore/ops/function/math_func.py +4351 -3813
- mindspore/ops/function/nn_func.py +1712 -637
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +18 -84
- mindspore/ops/function/random_func.py +452 -387
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +6 -6
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +26 -18
- mindspore/ops/functional.py +23 -7
- mindspore/ops/functional_overload.py +1548 -0
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +23 -15
- mindspore/ops/operations/_custom_ops_utils.py +235 -0
- mindspore/ops/operations/_embedding_cache_ops.py +4 -4
- mindspore/ops/operations/_grad_ops.py +2 -43
- mindspore/ops/operations/_infer_ops.py +2 -1
- mindspore/ops/operations/_inner_ops.py +43 -84
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +81 -324
- mindspore/ops/operations/comm_ops.py +154 -108
- mindspore/ops/operations/custom_ops.py +298 -87
- mindspore/ops/operations/debug_ops.py +157 -59
- mindspore/ops/operations/inner_ops.py +7 -5
- mindspore/ops/operations/linalg_ops.py +1 -57
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +928 -180
- mindspore/ops/operations/math_ops.py +32 -234
- mindspore/ops/operations/nn_ops.py +212 -531
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +66 -53
- mindspore/ops/tensor_method.py +1895 -0
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +135 -0
- mindspore/ops_generate/aclnn/gen_aclnn_implement.py +257 -0
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/api/add_tensor_docs_generator.py +56 -0
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +105 -0
- mindspore/ops_generate/api/functional_map_cpp_generator.py +504 -0
- mindspore/ops_generate/api/functional_overload_py_generator.py +112 -0
- mindspore/ops_generate/api/functions_cc_generator.py +237 -0
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/api/op_api_proto.py +235 -0
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +461 -0
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/base_generator.py +11 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/common/gen_utils.py +348 -0
- mindspore/ops_generate/common/op_proto.py +473 -0
- mindspore/ops_generate/common/template.py +523 -0
- mindspore/ops_generate/gen_ops.py +22 -1069
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +191 -0
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +296 -0
- mindspore/ops_generate/op_def/ops_def_h_generator.py +74 -0
- mindspore/ops_generate/op_def/ops_name_h_generator.py +83 -0
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/op_def_py/op_def_py_generator.py +132 -0
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +489 -0
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +139 -0
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +93 -0
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +175 -0
- mindspore/ops_generate/pyboost/op_template_parser.py +517 -0
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +407 -0
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +100 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +148 -0
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +155 -0
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +132 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +272 -0
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +938 -0
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +357 -0
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +179 -36
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +7 -3
- mindspore/parallel/_auto_parallel_context.py +159 -40
- mindspore/parallel/_cell_wrapper.py +132 -15
- mindspore/parallel/_parallel_serialization.py +107 -5
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +199 -23
- mindspore/parallel/algo_parameter_config.py +4 -4
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +700 -35
- mindspore/parallel/cluster/process_entity/_api.py +276 -50
- mindspore/parallel/cluster/process_entity/_utils.py +41 -6
- mindspore/parallel/cluster/run.py +21 -4
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +258 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +25 -14
- mindspore/parallel/shard.py +137 -59
- mindspore/parallel/transform_safetensors.py +364 -305
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +22 -5
- mindspore/profiler/analysis/__init__.py +0 -0
- mindspore/profiler/analysis/parser/__init__.py +0 -0
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +170 -0
- mindspore/profiler/analysis/parser/base_parser.py +158 -0
- mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
- mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
- mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +264 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +109 -0
- mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
- mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
- mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
- mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +415 -0
- mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
- mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
- mindspore/profiler/analysis/task_manager.py +131 -0
- mindspore/profiler/analysis/time_converter.py +84 -0
- mindspore/profiler/analysis/viewer/__init__.py +0 -0
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +372 -0
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +250 -0
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +320 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +327 -0
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +376 -0
- mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
- mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +96 -0
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
- mindspore/profiler/analysis/work_flow.py +73 -0
- mindspore/profiler/common/ascend_msprof_exporter.py +139 -0
- mindspore/profiler/common/command_executor.py +90 -0
- mindspore/profiler/common/constant.py +186 -3
- mindspore/profiler/common/file_manager.py +208 -0
- mindspore/profiler/common/log.py +130 -0
- mindspore/profiler/common/msprof_cmd_tool.py +221 -0
- mindspore/profiler/common/path_manager.py +395 -0
- mindspore/profiler/common/process_bar.py +168 -0
- mindspore/profiler/common/process_pool.py +9 -3
- mindspore/profiler/common/profiler_context.py +500 -0
- mindspore/profiler/common/profiler_info.py +304 -0
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_output_path.py +284 -0
- mindspore/profiler/common/profiler_parameters.py +251 -0
- mindspore/profiler/common/profiler_path_manager.py +179 -0
- mindspore/profiler/common/record_function.py +76 -0
- mindspore/profiler/common/tlv_decoder.py +76 -0
- mindspore/profiler/common/util.py +75 -2
- mindspore/profiler/dynamic_profiler.py +341 -75
- mindspore/profiler/envprofiler.py +163 -0
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +242 -0
- mindspore/profiler/platform/__init__.py +21 -0
- mindspore/profiler/platform/base_profiler.py +40 -0
- mindspore/profiler/platform/cpu_profiler.py +124 -0
- mindspore/profiler/platform/gpu_profiler.py +74 -0
- mindspore/profiler/platform/npu_profiler.py +335 -0
- mindspore/profiler/profiler.py +1073 -90
- mindspore/profiler/profiler_action_controller.py +187 -0
- mindspore/profiler/profiler_interface.py +118 -0
- mindspore/profiler/schedule.py +243 -0
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +2 -3
- mindspore/run_check/_check_version.py +27 -20
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +37 -0
- mindspore/runtime/device.py +27 -0
- mindspore/runtime/event.py +209 -0
- mindspore/runtime/executor.py +177 -0
- mindspore/runtime/memory.py +416 -0
- mindspore/runtime/stream.py +460 -0
- mindspore/runtime/thread_bind_core.py +401 -0
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +96 -27
- mindspore/train/amp.py +9 -5
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +53 -55
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_early_stop.py +1 -1
- mindspore/train/callback/_flops_collector.py +103 -68
- mindspore/train/callback/_history.py +8 -5
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +52 -19
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +228 -108
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +15 -16
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +11 -10
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +174 -46
- mindspore/train/model.py +269 -136
- mindspore/train/serialization.py +622 -978
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +2 -3
- mindspore/train/train_thor/model_thor.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +6 -3
- mindspore/utils/dryrun.py +140 -0
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/runtime_execution_order_check.py +552 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/METADATA +3 -3
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/RECORD +587 -418
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +1 -1
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/common/_tensor_overload.py +0 -139
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -197
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_aclnn_implement.py +0 -263
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/gen_pyboost_func.py +0 -1052
- mindspore/ops_generate/gen_utils.py +0 -209
- mindspore/ops_generate/op_proto.py +0 -145
- mindspore/ops_generate/template.py +0 -261
- mindspore/profiler/envprofiling.py +0 -254
- mindspore/profiler/profiling.py +0 -1926
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
mindspore/common/tensor.py
CHANGED
|
@@ -31,14 +31,13 @@ from mindspore.common.hook_handle import _TensorHookHandle
|
|
|
31
31
|
|
|
32
32
|
from mindspore.common._utils import get_slice_num
|
|
33
33
|
from mindspore.common._register_for_tensor import tensor_operator_registry
|
|
34
|
-
from mindspore.
|
|
35
|
-
max_mint, mean_mint, min_mint, split_mint, sub_mint)
|
|
36
|
-
from mindspore._c_expression import Tensor as Tensor_
|
|
34
|
+
from mindspore._c_expression import TensorPy as TensorPy_
|
|
37
35
|
from mindspore import _checkparam as validator
|
|
38
|
-
from mindspore._checkparam import
|
|
36
|
+
from mindspore._checkparam import is_stub_tensor, check_hook_fn
|
|
39
37
|
from mindspore._check_jit_forbidden_api import jit_forbidden_register
|
|
40
38
|
from mindspore.common.symbol import Symbol
|
|
41
39
|
|
|
40
|
+
|
|
42
41
|
np_types = (np.int8, np.int16, np.int32, np.int64,
|
|
43
42
|
np.uint8, np.uint16, np.uint32, np.uint64, np.float16,
|
|
44
43
|
np.float32, np.float64, np.bool_, np.complex64, np.complex128)
|
|
@@ -46,8 +45,8 @@ np_types = (np.int8, np.int16, np.int32, np.int64,
|
|
|
46
45
|
|
|
47
46
|
def _check_input_data_type(input_data):
|
|
48
47
|
"""Check the type of input_data for Tensor"""
|
|
49
|
-
validator.check_value_type('input_data', input_data,
|
|
50
|
-
|
|
48
|
+
validator.check_value_type('input_data', input_data, (TensorPy_, Tensor, np.ndarray, np.str_, list, tuple, float,
|
|
49
|
+
int, bool, complex, bytes),
|
|
51
50
|
'Tensor')
|
|
52
51
|
valid_dtypes = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64,
|
|
53
52
|
np.float16, np.float32, np.float64, np.bool_, np.str_, np.complex64, np.complex128)
|
|
@@ -73,13 +72,116 @@ def _check_input_data_type(input_data):
|
|
|
73
72
|
f"For Tensor, the input_data is {input_data} that contain unsupported element.")
|
|
74
73
|
|
|
75
74
|
|
|
76
|
-
|
|
75
|
+
def _set_symbolic_shape(shape):
|
|
76
|
+
"""Set symbolic_shape"""
|
|
77
|
+
symbolic_shape = None
|
|
78
|
+
if shape is None:
|
|
79
|
+
return None, None
|
|
80
|
+
if isinstance(shape, numbers.Number):
|
|
81
|
+
shape = (shape,)
|
|
82
|
+
symbolic_shape = None
|
|
83
|
+
return shape, symbolic_shape
|
|
84
|
+
if isinstance(shape, Symbol):
|
|
85
|
+
symbolic_shape = [shape]
|
|
86
|
+
shape = (None,)
|
|
87
|
+
return shape, symbolic_shape
|
|
88
|
+
if isinstance(shape, (list, tuple)) and any(isinstance(s, Symbol) for s in shape):
|
|
89
|
+
symbolic_shape = [item.to_dict() if isinstance(item, Symbol) else item for item in shape]
|
|
90
|
+
shape_without_symbol = (None if isinstance(item, Symbol) else item for item in shape)
|
|
91
|
+
shape = list(shape_without_symbol) if isinstance(shape, list) else tuple(shape_without_symbol)
|
|
92
|
+
return shape, symbolic_shape
|
|
93
|
+
return shape, symbolic_shape
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def _convert_stub_tensor(input_data):
|
|
97
|
+
"""Convert input to stub tensor"""
|
|
98
|
+
if not is_stub_tensor(input_data):
|
|
99
|
+
return input_data
|
|
100
|
+
return input_data.stub_sync()
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def _convert_numpy_array(input_data):
|
|
104
|
+
"""Convert inpyt to numpy array"""
|
|
105
|
+
if not isinstance(input_data, np_types):
|
|
106
|
+
return input_data
|
|
107
|
+
return np.array(input_data)
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def _check_device(device):
|
|
111
|
+
"""Check device"""
|
|
112
|
+
if device is not None and device != "CPU":
|
|
113
|
+
raise ValueError(f"Only 'CPU' is supported for device, but got {device}.")
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def _set_default_dtype(input_data, dtype):
|
|
117
|
+
"""Set tensor default dtype"""
|
|
118
|
+
if isinstance(input_data, (float, list, tuple)):
|
|
119
|
+
if np.array(input_data).dtype == np.float64:
|
|
120
|
+
return mstype.float32
|
|
121
|
+
if isinstance(input_data, (int, list, tuple)):
|
|
122
|
+
if np.array(input_data).dtype in (np.int32, np.int64):
|
|
123
|
+
return mstype.int64
|
|
124
|
+
return dtype
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
def _set_dtype(input_data, dtype):
|
|
128
|
+
"""Set and check dtype"""
|
|
129
|
+
if dtype is not None:
|
|
130
|
+
validator.check_type_name('dtype', dtype, mstype.number_type + (mstype.bool_, mstype.string), "Tensor")
|
|
131
|
+
return dtype
|
|
132
|
+
return _set_default_dtype(input_data, dtype)
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def _init(input_data=None, dtype=None, shape=None, init=None, const_arg=False, device=None):
|
|
77
136
|
"""
|
|
78
|
-
|
|
137
|
+
Verifying parameters. Will sink to C++
|
|
79
138
|
"""
|
|
139
|
+
validator.check_value_type('const_arg', const_arg, bool, 'Tensor')
|
|
140
|
+
_check_device(device)
|
|
141
|
+
|
|
142
|
+
if isinstance(input_data, (Tensor, TensorPy_)) and dtype is not None:
|
|
143
|
+
logger.info("It is suggested to use 'Tensor.astype()' to convert the dtype of a Tensor.")
|
|
144
|
+
_cast = tensor_operator_registry.get("cast")
|
|
145
|
+
input_data = _cast(input_data, dtype)
|
|
146
|
+
|
|
147
|
+
input_data = _convert_stub_tensor(input_data)
|
|
148
|
+
|
|
149
|
+
if input_data is None and shape is None and init is None and dtype is not None:
|
|
150
|
+
validator.check_type_name('dtype', dtype, mstype.number_type + (mstype.bool_, mstype.string), "Tensor")
|
|
151
|
+
logger.warning(f"For 'Tensor', if 'dtype' is not None, 'input_data', 'shape' or 'init' must not be None.")
|
|
152
|
+
return {"dtype": dtype, "shape": [-2], "init": init, "const_arg": const_arg, "device": device}
|
|
153
|
+
|
|
154
|
+
# If input data is numpy number, convert it to np array
|
|
155
|
+
input_data = _convert_numpy_array(input_data)
|
|
156
|
+
shape, symbolic_shape = _set_symbolic_shape(shape)
|
|
157
|
+
_check_tensor_input(input_data, dtype, shape, init)
|
|
158
|
+
|
|
159
|
+
# If input_data is tuple/list/numpy.ndarray, it's support in check_type method.
|
|
160
|
+
if (isinstance(shape, (list, tuple)) and None in shape) or init is not None:
|
|
161
|
+
shape = _check_tensor_dynamic_shape(dtype, shape, init)
|
|
162
|
+
return {"dtype": dtype, "shape": shape, "init": init, "const_arg": const_arg, "device": device,
|
|
163
|
+
"symbolic_shape": symbolic_shape}
|
|
164
|
+
|
|
165
|
+
if input_data is None and dtype is not None and shape is not None:
|
|
166
|
+
validator.check_type_name('dtype', dtype, mstype.number_type + (mstype.bool_, mstype.string), "Tensor")
|
|
167
|
+
return {"dtype": dtype, "shape": shape, "init": init, "const_arg": const_arg, "device": device,
|
|
168
|
+
"symbolic_shape": symbolic_shape}
|
|
169
|
+
|
|
170
|
+
_check_input_data_type(input_data)
|
|
171
|
+
dtype = _set_dtype(input_data, dtype)
|
|
172
|
+
|
|
173
|
+
if isinstance(input_data, np.ndarray) and (not input_data.flags['FORC']):
|
|
174
|
+
input_data = np.ascontiguousarray(input_data)
|
|
80
175
|
|
|
176
|
+
if dtype is not None:
|
|
177
|
+
return {"input_data": input_data, "dtype": dtype, "init": init, "const_arg": const_arg, "device": device,
|
|
178
|
+
"symbolic_shape": symbolic_shape}
|
|
81
179
|
|
|
82
|
-
|
|
180
|
+
return {"input_data": input_data, "init": init, "const_arg": const_arg, "device": device,
|
|
181
|
+
"symbolic_shape": symbolic_shape}
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
def tensor(input_data=None, dtype=None, shape=None, init=None, const_arg=False):
|
|
83
185
|
"""
|
|
84
186
|
Create a new Tensor in Cell.construct() or function decorated by @jit.
|
|
85
187
|
|
|
@@ -87,11 +189,11 @@ def tensor(input_data=None, dtype=None, shape=None, init=None, internal=False, c
|
|
|
87
189
|
based on the `dtype` argument.
|
|
88
190
|
|
|
89
191
|
Please refer to `Creating and Using Tensor
|
|
90
|
-
<https://www.mindspore.cn/
|
|
192
|
+
<https://www.mindspore.cn/tutorials/en/master/compile/static_graph.html#mindspore-user-defined-data-types>`_ .
|
|
91
193
|
|
|
92
194
|
The difference between it and the Tensor class is that it adds
|
|
93
195
|
`Annotation
|
|
94
|
-
<https://www.mindspore.cn/
|
|
196
|
+
<https://www.mindspore.cn/tutorials/en/master/compile/static_graph.html#annotation-type>`_
|
|
95
197
|
which can prevent the generation of AnyType compared to the Tensor class.
|
|
96
198
|
|
|
97
199
|
The arguments and return values are the same as the Tensor class. Also see: :class:`mindspore.Tensor`.
|
|
@@ -111,20 +213,29 @@ def tensor(input_data=None, dtype=None, shape=None, init=None, internal=False, c
|
|
|
111
213
|
>>> print(y)
|
|
112
214
|
[1. 2. 3.]
|
|
113
215
|
"""
|
|
114
|
-
return Tensor(input_data, dtype, shape, init,
|
|
216
|
+
return Tensor(input_data, dtype, shape, init, const_arg) # @jit.typing: () -> tensor_type[{dtype}]
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
class _TensorMeta(abc.ABCMeta, type(TensorPy_)):
|
|
220
|
+
"""
|
|
221
|
+
Meta class for Tensor. Used internally.
|
|
222
|
+
"""
|
|
115
223
|
|
|
116
224
|
|
|
117
|
-
class Tensor(
|
|
225
|
+
class Tensor(TensorPy_, metaclass=_TensorMeta):
|
|
118
226
|
"""
|
|
119
227
|
Tensor is a data structure that stores an n-dimensional array.
|
|
120
228
|
|
|
121
229
|
Note:
|
|
122
|
-
If `init` interface is used to initialize `Tensor`, the `Tensor.init_data` API needs to be called to load the
|
|
123
|
-
|
|
230
|
+
- If `init` interface is used to initialize `Tensor`, the `Tensor.init_data` API needs to be called to load the
|
|
231
|
+
actual data to `Tensor`.
|
|
232
|
+
- All modes of CPU and GPU, and Atlas training series with `graph mode (mode=mindspore.GRAPH_MODE)
|
|
233
|
+
<https://www.mindspore.cn/tutorials/en/master/compile/static_graph.html>`_ do not supported
|
|
234
|
+
in-place operations yet.
|
|
124
235
|
|
|
125
236
|
Warning:
|
|
126
|
-
|
|
127
|
-
|
|
237
|
+
To convert dtype of a `Tensor`, it is recommended to use `Tensor.astype()` rather than
|
|
238
|
+
`Tensor(sourceTensor, dtype=newDtype)`.
|
|
128
239
|
|
|
129
240
|
Args:
|
|
130
241
|
input_data (Union[Tensor, float, int, bool, tuple, list, numpy.ndarray]): The data to be stored. It can be
|
|
@@ -139,10 +250,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
139
250
|
init (Initializer): The information of init data.
|
|
140
251
|
`init` is used for delayed initialization in parallel mode, when using init, `dtype` and `shape` must be
|
|
141
252
|
set. Default: ``None`` .
|
|
142
|
-
internal (bool): Whether it is created by the framework.
|
|
143
|
-
``'True'`` means that the tensor is created by framework.
|
|
144
|
-
``'False'`` means that the tensor is created by user.
|
|
145
|
-
Default: ``False`` .
|
|
146
253
|
const_arg (bool): Whether the tensor is a constant when it is used for the argument of a network.
|
|
147
254
|
Default: ``False`` .
|
|
148
255
|
device(str): This parameter is reserved and does not need to be configured.
|
|
@@ -153,8 +260,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
153
260
|
|
|
154
261
|
Note:
|
|
155
262
|
The default value ``None`` of `input_data` works as a placeholder,
|
|
156
|
-
it does not mean that we can create a NoneType
|
|
157
|
-
Tensor.
|
|
263
|
+
it does not mean that we can create a NoneType Tensor.
|
|
158
264
|
Tensor with `shape` contains 0 is not fully tested and supported.
|
|
159
265
|
|
|
160
266
|
Examples:
|
|
@@ -209,89 +315,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
209
315
|
"""
|
|
210
316
|
delta_seed = 0
|
|
211
317
|
|
|
212
|
-
def __init__(self, input_data=None, dtype=None, shape=None, init=None, internal=False, const_arg=False,
|
|
213
|
-
device=None):
|
|
214
|
-
self.init_finished = False
|
|
215
|
-
if isinstance(input_data, (Tensor, Tensor_)) and dtype is not None:
|
|
216
|
-
logger.info("It is suggested to use 'Tensor.astype()' to convert the dtype of a Tensor.")
|
|
217
|
-
_cast = tensor_operator_registry.get("cast")
|
|
218
|
-
input_data = _cast(input_data, dtype)
|
|
219
|
-
|
|
220
|
-
if is_stub_tensor(input_data):
|
|
221
|
-
input_data = input_data.stub_sync()
|
|
222
|
-
|
|
223
|
-
if internal:
|
|
224
|
-
if input_data is not None:
|
|
225
|
-
Tensor_.__init__(self, input_data)
|
|
226
|
-
else:
|
|
227
|
-
if input_data is None and shape is None and init is None and dtype is not None:
|
|
228
|
-
validator.check_type_name('dtype', dtype, mstype.number_type +
|
|
229
|
-
(mstype.bool_, mstype.string), "Tensor")
|
|
230
|
-
Tensor_.__init__(self, dtype, [-2])
|
|
231
|
-
logger.warning(f"For 'Tensor', if 'dtype' is not None, 'input_data', 'shape' "
|
|
232
|
-
f"or 'init' must not be None.")
|
|
233
|
-
else:
|
|
234
|
-
# If input data is numpy number, convert it to np array
|
|
235
|
-
if isinstance(input_data, np_types):
|
|
236
|
-
input_data = np.array(input_data)
|
|
237
|
-
|
|
238
|
-
if shape is not None:
|
|
239
|
-
if isinstance(shape, numbers.Number):
|
|
240
|
-
shape = (shape,)
|
|
241
|
-
elif isinstance(shape, Symbol):
|
|
242
|
-
self.symbolic_shape = [shape]
|
|
243
|
-
shape = (None,)
|
|
244
|
-
elif isinstance(shape, (list, tuple)) and any(isinstance(s, Symbol) for s in shape):
|
|
245
|
-
self.symbolic_shape = [item.to_dict() if isinstance(item, Symbol) else item for item in shape]
|
|
246
|
-
shape_without_symbol = (None if isinstance(item, Symbol) else item for item in shape)
|
|
247
|
-
shape = list(shape_without_symbol) if isinstance(shape, list) else tuple(shape_without_symbol)
|
|
248
|
-
|
|
249
|
-
_check_tensor_input(input_data, dtype, shape, init)
|
|
250
|
-
|
|
251
|
-
# If input_data is tuple/list/numpy.ndarray, it's support in check_type method.
|
|
252
|
-
if (isinstance(shape, (list, tuple)) and None in shape) or init is not None:
|
|
253
|
-
shape = _check_tensor_dynamic_shape(dtype, shape, init)
|
|
254
|
-
Tensor_.__init__(self, dtype, shape)
|
|
255
|
-
else:
|
|
256
|
-
_check_input_data_type(input_data)
|
|
257
|
-
if dtype is not None:
|
|
258
|
-
validator.check_type_name('dtype', dtype, mstype.number_type +
|
|
259
|
-
(mstype.bool_, mstype.string), "Tensor")
|
|
260
|
-
else:
|
|
261
|
-
dtype = self._set_default_dtype(input_data, dtype)
|
|
262
|
-
|
|
263
|
-
if isinstance(input_data, np.ndarray) and (not input_data.flags['FORC']):
|
|
264
|
-
input_data = np.ascontiguousarray(input_data)
|
|
265
|
-
|
|
266
|
-
if dtype is not None:
|
|
267
|
-
Tensor_.__init__(self, input_data, dtype)
|
|
268
|
-
else:
|
|
269
|
-
Tensor_.__init__(self, input_data)
|
|
270
|
-
validator.check_value_type('const_arg', const_arg, bool, 'Tensor')
|
|
271
|
-
|
|
272
|
-
if device is not None and device != "CPU":
|
|
273
|
-
raise ValueError(f"Only 'CPU' is supported for device, but got {device}.")
|
|
274
|
-
|
|
275
|
-
self.const_arg = const_arg
|
|
276
|
-
self.virtual_flag = False
|
|
277
|
-
self.init = init
|
|
278
|
-
self.init_finished = True
|
|
279
|
-
|
|
280
|
-
# if cur Tensor is a index value of another Tensor,
|
|
281
|
-
# parent_tensor_ set to another Tensor
|
|
282
|
-
# index_of_parent_ will set to the index
|
|
283
|
-
self.parent_tensor_ = None
|
|
284
|
-
self.index_of_parent_ = None
|
|
285
|
-
|
|
286
|
-
self.slice_num_of_persistent_data_ = None
|
|
287
|
-
self.slice_shape_of_persistent_data_ = None
|
|
288
|
-
|
|
289
|
-
# the auto gradient information
|
|
290
|
-
self._grad = None
|
|
291
|
-
self._grad_fn = None
|
|
292
|
-
self._requires_grad = False
|
|
293
|
-
self._retain_grad = False
|
|
294
|
-
|
|
295
318
|
@classmethod
|
|
296
319
|
def __subclasshook__(cls, sub):
|
|
297
320
|
"""
|
|
@@ -302,16 +325,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
302
325
|
return True
|
|
303
326
|
return NotImplemented
|
|
304
327
|
|
|
305
|
-
@staticmethod
|
|
306
|
-
def _set_default_dtype(input_data, dtype):
|
|
307
|
-
"""Set tensor default dtype"""
|
|
308
|
-
if isinstance(input_data, (float, list, tuple)):
|
|
309
|
-
if np.array(input_data).dtype == np.float64:
|
|
310
|
-
return mstype.float32
|
|
311
|
-
if isinstance(input_data, (int, list, tuple)):
|
|
312
|
-
if np.array(input_data).dtype in (np.int32, np.int64):
|
|
313
|
-
return mstype.int64
|
|
314
|
-
return dtype
|
|
315
328
|
|
|
316
329
|
def __deepcopy__(self, memodict):
|
|
317
330
|
new_obj = Tensor(self)
|
|
@@ -322,8 +335,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
322
335
|
|
|
323
336
|
def __repr__(self):
|
|
324
337
|
if self.init_finished:
|
|
325
|
-
|
|
326
|
-
return
|
|
338
|
+
TensorPy_.data_sync(self, True)
|
|
339
|
+
return TensorPy_.__repr__(self)
|
|
327
340
|
return ''
|
|
328
341
|
|
|
329
342
|
def __eq__(self, other):
|
|
@@ -352,12 +365,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
352
365
|
return out
|
|
353
366
|
|
|
354
367
|
def __bool__(self):
|
|
355
|
-
|
|
356
|
-
if data.shape == ():
|
|
357
|
-
return bool(data)
|
|
358
|
-
if data.shape == (1,):
|
|
359
|
-
return bool(data[0])
|
|
360
|
-
raise ValueError("The truth value of an array with more than one element is ambiguous.")
|
|
368
|
+
return bool(self._item())
|
|
361
369
|
|
|
362
370
|
@staticmethod
|
|
363
371
|
def _convert_scalar_(data, func, message):
|
|
@@ -368,79 +376,50 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
368
376
|
raise ValueError(message)
|
|
369
377
|
|
|
370
378
|
def __int__(self):
|
|
371
|
-
|
|
372
|
-
|
|
379
|
+
try:
|
|
380
|
+
data = self._item()
|
|
381
|
+
return int(data)
|
|
382
|
+
except ValueError:
|
|
383
|
+
raise ValueError("Only one element tensors can be converted to Python scalars")
|
|
384
|
+
|
|
373
385
|
|
|
374
386
|
def __float__(self):
|
|
375
|
-
|
|
376
|
-
|
|
387
|
+
try:
|
|
388
|
+
data = self._item()
|
|
389
|
+
return float(data)
|
|
390
|
+
except ValueError:
|
|
391
|
+
raise ValueError("Only one element tensors can be converted to Python scalars")
|
|
377
392
|
|
|
378
393
|
def __index__(self):
|
|
379
|
-
|
|
380
|
-
|
|
394
|
+
try:
|
|
395
|
+
data = self._item()
|
|
396
|
+
if not isinstance(data, (int, bool)):
|
|
397
|
+
raise ValueError
|
|
398
|
+
return int(data)
|
|
399
|
+
except ValueError:
|
|
381
400
|
raise ValueError("Only integer tensors of a single element can be converted to an index.")
|
|
382
|
-
return self._convert_scalar_(data, int,
|
|
383
|
-
"Only integer tensors of a single element can be converted to an index.")
|
|
384
401
|
|
|
385
402
|
def __pos__(self):
|
|
386
403
|
return self
|
|
387
404
|
|
|
388
|
-
def __abs__(self):
|
|
389
|
-
return tensor_operator_registry.get('abs')(self)
|
|
390
|
-
|
|
391
|
-
@add_mint
|
|
392
|
-
def __add__(self, other):
|
|
393
|
-
return tensor_operator_registry.get('__add__')(self, other)
|
|
394
|
-
|
|
395
|
-
def __and__(self, other):
|
|
396
|
-
if isinstance(other, (int, bool, float, Tensor)):
|
|
397
|
-
return tensor_operator_registry.get('bitwise_and')(self, other)
|
|
398
|
-
raise TypeError("Unsupported operand type(s) for &: 'Tensor' and '{}'".format(type(other)))
|
|
399
|
-
|
|
400
|
-
def __xor__(self, other):
|
|
401
|
-
if isinstance(other, (int, bool, float, Tensor)):
|
|
402
|
-
return tensor_operator_registry.get('bitwise_xor')(self, other)
|
|
403
|
-
raise TypeError("Unsupported operand type(s) for ^: 'Tensor' and '{}'".format(type(other)))
|
|
404
|
-
|
|
405
|
-
def __or__(self, other):
|
|
406
|
-
if isinstance(other, (int, bool, float, Tensor)):
|
|
407
|
-
return tensor_operator_registry.get('bitwise_or')(self, other)
|
|
408
|
-
raise TypeError("Unsupported operand type(s) for |: 'Tensor' and '{}'".format(type(other)))
|
|
409
|
-
|
|
410
405
|
def __radd__(self, other):
|
|
411
406
|
return self.__add__(other)
|
|
412
407
|
|
|
413
|
-
def __iadd__(self, other):
|
|
414
|
-
return self.__add__(other)
|
|
415
|
-
|
|
416
|
-
@sub_mint
|
|
417
|
-
def __sub__(self, other):
|
|
418
|
-
return tensor_operator_registry.get('__sub__')(self, other)
|
|
419
|
-
|
|
420
408
|
def __rsub__(self, other):
|
|
421
409
|
return tensor_operator_registry.get('__sub__')(other, self)
|
|
422
410
|
|
|
423
|
-
def __isub__(self, other):
|
|
424
|
-
return self.__sub__(other)
|
|
425
|
-
|
|
426
411
|
def __mul__(self, other):
|
|
427
412
|
return tensor_operator_registry.get('__mul__')(self, other)
|
|
428
413
|
|
|
429
414
|
def __rmul__(self, other):
|
|
430
415
|
return self.__mul__(other)
|
|
431
416
|
|
|
432
|
-
def __imul__(self, other):
|
|
433
|
-
return self.__mul__(other)
|
|
434
|
-
|
|
435
417
|
def __matmul__(self, other):
|
|
436
418
|
return tensor_operator_registry.get('__matmul__')(self, other)
|
|
437
419
|
|
|
438
420
|
def __rmatmul__(self, other):
|
|
439
421
|
return tensor_operator_registry.get('__matmul__')(other, self)
|
|
440
422
|
|
|
441
|
-
def __imatmul__(self, other):
|
|
442
|
-
return self.__matmul__(other)
|
|
443
|
-
|
|
444
423
|
def __truediv__(self, other):
|
|
445
424
|
return tensor_operator_registry.get('__truediv__')(self, other)
|
|
446
425
|
|
|
@@ -456,9 +435,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
456
435
|
def __imod__(self, other):
|
|
457
436
|
return self.__mod__(other)
|
|
458
437
|
|
|
459
|
-
def __pow__(self, other):
|
|
460
|
-
return tensor_operator_registry.get('__pow__')(self, other)
|
|
461
|
-
|
|
462
438
|
def __rpow__(self, other):
|
|
463
439
|
return tensor_operator_registry.get('__rpow__')(self, other)
|
|
464
440
|
|
|
@@ -468,9 +444,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
468
444
|
def __rfloordiv__(self, other):
|
|
469
445
|
return tensor_operator_registry.get('__floordiv__')(other, self)
|
|
470
446
|
|
|
471
|
-
def __ifloordiv__(self, other):
|
|
472
|
-
return self.__floordiv__(other)
|
|
473
|
-
|
|
474
447
|
def __lt__(self, other):
|
|
475
448
|
out = tensor_operator_registry.get('__lt__')(self, other)
|
|
476
449
|
return out
|
|
@@ -479,25 +452,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
479
452
|
out = tensor_operator_registry.get('__le__')(self, other)
|
|
480
453
|
return out
|
|
481
454
|
|
|
482
|
-
def __getitem__(self, index):
|
|
483
|
-
out = tensor_operator_registry.get('__getitem__')(self, index)
|
|
484
|
-
if out is not self:
|
|
485
|
-
out.parent_tensor_ = self
|
|
486
|
-
out.index_of_parent_ = index
|
|
487
|
-
return out
|
|
488
|
-
|
|
489
|
-
def __setitem__(self, index, value):
|
|
490
|
-
out = tensor_operator_registry.get('__setitem__')(self, index, value)
|
|
491
|
-
if isinstance(out, tuple):
|
|
492
|
-
if self.parent_tensor_ is not None and self.index_of_parent_ is not None:
|
|
493
|
-
self.parent_tensor_.__setitem__(self.index_of_parent_, out[0])
|
|
494
|
-
return self
|
|
495
|
-
return self
|
|
496
|
-
self.assign_value(out)
|
|
497
|
-
if self.parent_tensor_ is not None and self.index_of_parent_ is not None:
|
|
498
|
-
self.parent_tensor_.__setitem__(self.index_of_parent_, self)
|
|
499
|
-
return self
|
|
500
|
-
|
|
501
455
|
def __gt__(self, other):
|
|
502
456
|
out = tensor_operator_registry.get('__gt__')(self, other)
|
|
503
457
|
return out
|
|
@@ -519,7 +473,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
519
473
|
|
|
520
474
|
def __getstate__(self):
|
|
521
475
|
state = self.__dict__.copy()
|
|
522
|
-
state["value"] =
|
|
476
|
+
state["value"] = TensorPy_.__getstate__(self)
|
|
523
477
|
return state
|
|
524
478
|
|
|
525
479
|
def __setstate__(self, state):
|
|
@@ -528,12 +482,96 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         else:
             value = state.pop("value")
         self.__dict__.update(state)
-
+        TensorPy_.__setstate__(self, value)
+
+    def __array__(self, dtype=None):
+        """Support creating a numpy array from the tensor."""
+        if dtype is None:
+            return self.asnumpy()
+        return self.asnumpy().astype(dtype, copy=False)
+
+    def __contains__(self, element):
+        """Support the 'in' operator."""
+        if isinstance(element, (Tensor, numbers.Number)):
+            return (element == self).any().item()
+        return False
+
+    def _getitem_origin(self, index):
+        """__getitem__ origin process, called by TensorPy::TensorGetItem"""
+        out = tensor_operator_registry.get('_tensor_getitem_origin')(self, index)
+        if out is not self:
+            out.parent_tensor_ = self
+            out.index_of_parent_ = index
+        return out
+
+    def _setitem_origin(self, index, value):
+        """__setitem__ origin process, called by TensorPy::TensorSetItem"""
+        out = tensor_operator_registry.get('_tensor_setitem_origin')(self, index, value)
+        if isinstance(out, tuple):
+            if self.parent_tensor_ is not None and self.index_of_parent_ is not None:
+                self.parent_tensor_.__setitem__(self.index_of_parent_, out[0])
+                return self
+            return self
+        self.assign_value(out)
+        if self.parent_tensor_ is not None and self.index_of_parent_ is not None:
+            self.parent_tensor_.__setitem__(self.index_of_parent_, self)
+        return self
+
+    def _getitem(self, index):
+        """__getitem__ process, called by TensorPy::TensorGetItem"""
+        return tensor_operator_registry.get('_tensor_getitem')(self, index)
+
+    def _setitem(self, index, value):
+        """__setitem__ process, called by TensorPy::TensorSetItem"""
+        return tensor_operator_registry.get('_tensor_setitem')(self, index, value)
+
+    @property
+    def _dtensor_info(self):
+        """
+        Return the distributed tensor information. For details,
+        please refer to :class:`mindspore.parallel.DistributedTensorInfo`.
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> import numpy as np
+            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
+            >>> print(x._dtensor_info)
+            None
+        """
+        if not hasattr(self, '_dist_tensor_info'):
+            self._dist_tensor_info = None
+        return self._dist_tensor_info
+
+    @_dtensor_info.setter
+    def _dtensor_info(self, input_dtensor_info):
+        """
+        Set the distributed tensor information of the current tensor.
+
+        Args:
+            input_dtensor_info (DistributedTensorInfo): The distributed tensor information.
+
+        Examples:
+            >>> from mindspore import Tensor, Layout, _DistributedTensorInfo
+            >>> import numpy as np
+            >>> layout = Layout((2, 2), ("dp", "mp"))
+            >>> src_layout = layout("dp", "mp")
+            >>> distributed_info = _DistributedTensorInfo(src_layout)
+            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
+            >>> x._dtensor_info = distributed_info
+        """
+        self._dist_tensor_info = input_dtensor_info

     @property
     def shape(self):
         """
         For details, please refer to :func:`mindspore.ops.shape`.
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> import numpy as np
+            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
+            >>> print(x.shape)
+            (2, 2)
         """
         return self._shape

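The new `__array__` and `__contains__` hooks give tensors native NumPy conversion and `in` support. A sketch of the behavior the code above implies (exact reprs are illustrative):

>>> import numpy as np
>>> from mindspore import Tensor
>>> t = Tensor(np.array([1, 2, 3]))
>>> np.asarray(t)                        # dispatches to Tensor.__array__
array([1, 2, 3])
>>> np.asarray(t, dtype=np.float32).dtype
dtype('float32')
>>> 2 in t                               # dispatches to Tensor.__contains__
True
>>> 7 in t
False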
@@ -546,7 +584,16 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

     @property
     def dtype(self):
-        """
+        """
+        Return the dtype of the tensor (:class:`mindspore.dtype`).
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> import numpy as np
+            >>> x = Tensor(np.array([1, 2], dtype=np.float32))
+            >>> print(x.dtype)
+            Float32
+        """
         return self._dtype

     @property
@@ -579,83 +626,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return len(self._shape)

-    @property
-    def grad(self):
-        r"""
-        Get the gradient value.
-        """
-        return self._grad
-
-    @grad.setter
-    def grad(self, grad):
-        r"""
-        Set the gradient value.
-        """
-        self._grad = grad
-
-    @property
-    def grad_fn(self):
-        r"""
-        The function for backward.
-        """
-        return self._grad_fn
-
-    @grad_fn.setter
-    def grad_fn(self, grad_fn):
-        r"""
-        Set the function for backward.
-        """
-        self._grad_fn = grad_fn
-
-    @property
-    def is_leaf(self):
-        r"""
-        Whether the stub tensor is leaf.
-        They will be a leaf if they have requires_grad and requires_grad is False,
-        Or they were created by user.
-        """
-        return self._requires_grad is False or self._grad_fn is None
-
-    @property
-    def requires_grad(self):
-        r"""
-        Whether the stub tensor need requires grad.
-        """
-        return self._requires_grad
-
-    @requires_grad.setter
-    def requires_grad(self, requires_grad):
-        r"""
-        Mark the stub tensor whether need requires gradient.
-        """
-        self._requires_grad = requires_grad
-
-    def retain_grad(self):
-        r"""
-        Enable the stub tensor which is not non-leaf to have the grad during backward().
-        """
-        if not self._requires_grad:
-            RuntimeError("can't retain_grad on Tensor that has requires_grad = False.")
-        self._retain_grad = self._grad_fn is not None
-
-    @property
-    def retains_grad(self):
-        r"""
-        Is True if the stub tensor is non-leaf and its grad is enabled to be populated during backward().
-        """
-        return self._retain_grad
-
-    def backward(self, grad=None):
-        r"""
-        Calculate the gradient.
-        """
-        if grad is None:
-            grad = Tensor(np.ones(self.shape), self.dtype)
-        if self._grad_fn is not None:
-            self._grad_fn.apply(grad)
-        elif self._requires_grad:
-            self._grad = grad
-
     @property
     def H(self):
         """
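With the stub-tensor `grad`/`requires_grad`/`backward` helpers removed from `Tensor`, gradients are obtained through the functional interface instead; the sketch below uses `mindspore.grad` and is an assumed migration path, not one stated by this diff:

>>> import mindspore
>>> from mindspore import Tensor
>>> def f(x):
...     return (x * x).sum()
>>> x = Tensor([1.0, 2.0, 3.0], mindspore.float32)
>>> print(mindspore.grad(f)(x))          # d/dx sum(x^2) = 2x
[2. 4. 6.]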
@@ -785,11 +755,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         if isinstance(array, np.ndarray) and not array.flags['C_CONTIGUOUS']:
             array = np.ascontiguousarray(array)

-        return
+        return TensorPy_.from_numpy(array)

     def ndimension(self):
         r"""
-        Alias for :
+        Alias for :attr:`mindspore.Tensor.ndim`.
         """
         return len(self._shape)

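`from_numpy` now hands off to `TensorPy_.from_numpy(array)` after forcing C-contiguity. A sketch of why the `ascontiguousarray` guard matters, using the public `Tensor.from_numpy` constructor (the shape result is illustrative):

>>> import numpy as np
>>> from mindspore import Tensor
>>> a = np.arange(6).reshape(2, 3)[:, ::2]          # a non C-contiguous view
>>> a.flags['C_CONTIGUOUS']
False
>>> t = Tensor.from_numpy(np.ascontiguousarray(a))  # mirrors the internal guard
>>> t.shape
(2, 2)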
@@ -821,30 +791,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self.const_arg = const_arg
         return self

-    def arccosh(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.arccosh`.
-        """
-        return tensor_operator_registry.get('acosh')(self)
-
-    def arcsin(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.arcsin`.
-        """
-        return tensor_operator_registry.get('asin')(self)
-
-    def arctan(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.arctan`.
-        """
-        return tensor_operator_registry.get('atan')(self)
-
-    def arctan2(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.arctan2`.
-        """
-        return tensor_operator_registry.get('atan2')(self, other)
-
     def cauchy(self, median=0.0, sigma=1.0):
         r"""
         Fills the tensor with numbers drawn from the Cauchy distribution. It is
@@ -942,31 +888,17 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self.assign_value_cpp(value)
         return self

-    def
-        r"""
-        For details, please refer to :func:`mindspore.ops.bincount`.
-        """
-        return tensor_operator_registry.get('bincount')(self, weights, minlength)
-
-    def chunk(self, chunks, axis=0):
-        r"""
-        For details, please refer to :func:`mindspore.ops.chunk`.
-        """
-        return tensor_operator_registry.get('chunk')(self, chunks, axis)
-
-    @item_mint
-    def item(self, index=None):
+    def item(self):
         """
-
-
-        Args:
-            index (Union[None, int, tuple(int)]): The index in Tensor. Default: ``None``.
+        Return the value of this tensor as a standard Python number.
+        This only works for tensors with one element.

         Returns:
             A scalar, type is defined by the dtype of the Tensor.

         Raises:
-            ValueError: If the
+            ValueError: If the tensor contains more than one value.
+            TypeError: If the type of the element in the tensor is not supported.

         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
@@ -974,19 +906,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Examples:
             >>> import mindspore as ms
             >>> from mindspore import Tensor
-            >>> x = Tensor([[1, 2, 3], [4, 5, 6]], ms.float32)
-            >>> print(x.item((0, 1)))
-            2.0
             >>> x = Tensor(1.2, ms.float32)
             >>> print(x.item())
             1.2
         """
-
-        if index is not None:
-            output = self.asnumpy().item(index)
-        else:
-            output = self.asnumpy().item()
-        return output
+        return self._item()

     def itemset(self, *args):
         r"""
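`item()` no longer accepts an `index`; the removed implementation was a thin wrapper over `numpy.ndarray.item`, so indexed reads migrate as sketched below (based directly on the removed code):

>>> import mindspore as ms
>>> from mindspore import Tensor
>>> x = Tensor([[1, 2, 3], [4, 5, 6]], ms.float32)
>>> x.asnumpy().item((0, 1))             # what x.item((0, 1)) used to return
2.0
>>> Tensor(1.2, ms.float32).item()       # the only form still supported
1.2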
@@ -998,7 +922,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Args:
             args (Union[(numbers.Number), (int/tuple(int), numbers.Number)]): The arguments that
                 specify the index and value. If `args` contain one argument (a scalar),
-                it is only used in case tensor is of size 1. If `args`
+                it is only used in case tensor is of size 1. If `args` contains two
                 arguments, the last argument is the value to be set and must be a
                 scalar, the first argument specifies a single tensor element location.
                 It is either an int or a tuple.
@@ -1044,7 +968,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(x.get_bytes())
        b'\x01\x00\x02\x00\x03\x00'
        """
-        return
+        return TensorPy_.get_bytes(self)

     def asnumpy(self):
         """
@@ -1067,9 +991,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         if self.has_init:
             self.init_data()
-        return
+        return TensorPy_.asnumpy(self)

-    def numpy(self
+    def numpy(self):
         """
         Alias for :func:`mindspore.Tensor.asnumpy`.
         """
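`numpy()` is now a completed alias of `asnumpy()`, both backed by `TensorPy_.asnumpy`. A quick sketch of the equivalence:

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> x = Tensor([1.0, 2.0], ms.float32)
>>> np.array_equal(x.numpy(), x.asnumpy())
True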
@@ -1084,7 +1008,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Returns:
             True or False
         """
-        return
+        return TensorPy_.is_persistent_data(self)

     def asnumpy_of_slice_persistent_data(self, param_key, slice_index):
         """
@@ -1095,7 +1019,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Returns:
             A numpy ndarray which shares the same underlying storage with the slice of tensor data.
         """
-        return
+        return TensorPy_.asnumpy_of_slice_persistent_data(self, param_key, slice_index)

     def slice_num_of_persistent_data(self):
         """
@@ -1118,14 +1042,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('select_scatter')(self, src, axis, index)

-    def histc(self, bins=100, min=0., max=0.):
-        """
-        For details, please refer to :func:`mindspore.ops.histc`.
-        """
-        validator.check_value_type('min', min, (int, float,), 'Tensor.histc')
-        validator.check_value_type('max', max, (int, float,), 'Tensor.histc')
-        return tensor_operator_registry.get('histc')(self, bins, float(min), float(max))
-
     def geqrf(self):
         """
         For details, please refer to :func:`mindspore.ops.geqrf`.
@@ -1175,6 +1091,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(z.is_contiguous())
         True
         """
+        if not self._need_contiguous():
+            return self
         return tensor_operator_registry.get('contiguous')(self)

     def is_contiguous(self):
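`contiguous()` now returns `self` untouched when `_need_contiguous()` is false. A sketch of the observable effect; whether a given op produces a stride-permuted view depends on backend and mode, and the identity check on an already-contiguous tensor is an assumption that follows from the early return above:

>>> import mindspore as ms
>>> from mindspore import Tensor
>>> x = Tensor([[1, 2, 3], [4, 5, 6]], ms.float32)
>>> y = x.swapaxes(0, 1)                 # may be a view with permuted strides
>>> y.contiguous().is_contiguous()
True
>>> x.contiguous() is x                  # assumed: early return hands back self
True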
@@ -1193,7 +1111,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(y.is_contiguous())
         False
         """
-        return
+        return TensorPy_.is_contiguous(self)

     def stride(self, dim=None):
         """
@@ -1201,10 +1119,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         When no parameters are passed in, a list of stride for all dimensions is returned.

         Args:
-            dim (int): The dim of stride from one element to the next.
+            dim (int, optional): The dim of stride from one element to the next. Default: ``None``.

         Returns:
-            Int, the
+            Int, the step size necessary to jump from one element to the next in the specified dimension.

         Raises:
             TypeError: `dim` is not an int.
@@ -1215,7 +1133,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> x.stride()
         [5, 1]
         """
-        stride =
+        stride = TensorPy_.stride(self)
         if dim is None:
             return stride
         return stride[dim]
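The docstring's `[5, 1]` encodes the row-major rule: `stride[d]` is the product of the dimensions after `d`. A sketch cross-checking it:

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> x = Tensor(np.zeros((3, 5)), ms.float32)
>>> x.stride()                           # [5 * 1, 1] for shape (3, 5)
[5, 1]
>>> x.stride(0), x.stride(1)
(5, 1)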
@@ -1234,28 +1152,35 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(ret)
         0
         """
-        return
+        return TensorPy_.storage_offset(self)

-    def register_hook(self,
+    def register_hook(self, hook):
         """
         Registers a backward hook for tensor.

         Note:
-            - The `
-            - The 'hook_fn' must be defined as the following code. `grad` is the gradient passed to the tensor,
+            - The `hook` must be defined as the following code. `grad` is the gradient passed to the tensor,
               which may be modified by returning a new output gradient.
-            - The
-
+            - The `hook` should have the following signature:
+              hook(grad) -> new output gradient; it must return a gradient and must not return None.
+            - Higher-order differentiation does not support tensor `register_hook`.
+            - The following constraints must be met under graph mode:
+
+              - The `hook` must satisfy the syntax constraints of graph mode.
+              - Deleting a `hook` inside the graph is not supported.
+              - Registering a `hook` on a `Tensor` that has already been used is not supported.
+              - Registering multiple `hooks` for a `Tensor` inside the graph is not supported.
+              - Registering a `hook` in the graph returns the `Tensor` itself.

         Args:
-
+            hook (function): Python function. Tensor backward hook function.

         Returns:
-            A handle corresponding to the `
+            A handle corresponding to the `hook` . The handle can be used to remove the added `hook` by calling
            `handle.remove()` .

         Raises:
-            TypeError: If the `
+            TypeError: If the `hook` is not a Python function.

         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
@@ -1278,12 +1203,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output)
         (Tensor(shape=[], dtype=Float32, value=8), Tensor(shape=[], dtype=Float32, value=6))
         """
-
-
-        handle =
-        handle.id = Tensor_.register_hook(self, hook_fn)
+        check_hook_fn(hook)
+        handle = _TensorHookHandle(self)
+        handle.id = TensorPy_.register_hook(self, hook)
         return handle

+    def _remove_hook(self):
+        pass
+
     def flush_from_cache(self):
         """
         Flush cache data to host if tensor is cache enable.
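A sketch of the new hook contract for `register_hook` above, reconstructed to match the `(value=8, value=6)` output shown in its example (PyNative mode is assumed):

>>> import mindspore as ms
>>> from mindspore import Tensor
>>> def hook_fn(grad):
...     return grad * 2                  # must return the new gradient, never None
>>> def net(x, y):
...     z = x * y
...     z.register_hook(hook_fn)         # fires when z's gradient flows back
...     return z * y
>>> grad_fn = ms.grad(net, grad_position=(0, 1))
>>> print(grad_fn(Tensor(1.0, ms.float32), Tensor(2.0, ms.float32)))
(Tensor(shape=[], dtype=Float32, value= 8), Tensor(shape=[], dtype=Float32, value= 6))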
@@ -1296,13 +1223,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(y)
         None
         """
-
-
-    def addcdiv(self, tensor1, tensor2, value=1):
-        r"""
-        For details, please refer to :func:`mindspore.ops.addcdiv`.
-        """
-        return tensor_operator_registry.get('addcdiv')(self, tensor1, tensor2, value)
+        TensorPy_._flush_from_cache(self)

     def addcmul(self, tensor1, tensor2, value=1):
         r"""
@@ -1310,94 +1231,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('addcmul')(self, tensor1, tensor2, value)

-    @add_mint
-    def add(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.add`.
-        """
-        return tensor_operator_registry.get('add')(self, other)
-
-    def add_(self, other, *, alpha=1):
-        """
-        inplace update self by following compute:
-        self = self + other * alpha.
-
-        .. warning::
-            This is an experimental API that is subject to change or deletion.
-            The `other` tensor must be broadcastable with the `self` tensor. It may be of a different data type.
-
-        Args:
-            other (Tensor): the source tensor Add to self Tensor.
-            alpha (Number): no effect currently.
-
-        Returns:
-            Return self Tensor.
-
-        Supported Platforms:
-            ``Ascend``
-
-        Examples:
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> a = Tensor(np.ones((2,3)).astype("float32"))
-            >>> b = Tensor(np.ones((2,3)).astype("float32"))
-            >>> a.add_(b)
-            >>> print(a)
-            [[2. 2. 2.]
-            [2. 2. 2.]]
-        """
-        if isinstance(other, (int, float)):
-            ret = tensor_operator_registry.get("adds_")(self, other, alpha)
-        else:
-            ret = tensor_operator_registry.get("add_")(self, other, alpha)
-        return ret
-
-    def subtract(self, other, *, alpha=1):
-        r"""
-        For details, please refer to :func:`mindspore.ops.subtract`.
-        """
-        return tensor_operator_registry.get('sub')(self, alpha * other)
-
-    def true_divide(self, value):
-        r"""
-        Alias for Tensor.div() with :math:`rounding\_mode=None`.
-        For details, please refer to :func:`mindspore.ops.div`.
-        """
-        return tensor_operator_registry.get('div')(self, value, rounding_mode=None)
-
-    def triu(self, diagonal=0):
-        r"""
-        For details, please refer to :func:`mindspore.ops.triu`.
-
-        .. warning::
-            This is an experimental API that is subject to change or deletion.
-
-        """
-        validator.check_value_type('diagonal', diagonal, [int], 'triu')
-        return tensor_operator_registry.get('triu')(self, diagonal)
-
-    def addbmm(self, batch1, batch2, *, beta=1, alpha=1):
-        r"""
-        For details, please refer to :func:`mindspore.ops.addbmm`.
-        """
-        return tensor_operator_registry.get('addbmm')(self, batch1, batch2, beta=beta, alpha=alpha)
-
-    def addmm(self, mat1, mat2, *, beta=1, alpha=1):
-        r"""
-        For details, please refer to :func:`mindspore.ops.addmm`.
-        """
-        return tensor_operator_registry.get('addmm')(self, mat1, mat2, beta=beta, alpha=alpha)
-
     def addmm_(self, mat1, mat2, *, beta=1, alpha=1):
         r"""
-
-
-        .. note::
-            The output results are directly updated in the Tensor.
+        In-place version of :func:`mindspore.Tensor.addmm`.

         .. warning::
             This is an experimental API that is subject to change or deletion.
-
         """
         return tensor_operator_registry.get('addmm_')(self, mat1, mat2, beta=beta, alpha=alpha)

@@ -1413,38 +1252,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('adjoint')(self)

-    def all(self, axis=None, keep_dims=False):
-        r"""
-        For details, please refer to :func:`mindspore.ops.all`.
-        """
-        return tensor_operator_registry.get('all')(self, axis, keep_dims)
-
     def angle(self):
         r"""
         For details, please refer to :func:`mindspore.ops.angle`.
         """
         return tensor_operator_registry.get('angle')(self)

-    def any(self, axis=None, keep_dims=False):
-        r"""
-        For details, please refer to :func:`mindspore.ops.any`.
-        """
-        if axis is None:
-            axis = ()
-        return tensor_operator_registry.get('any')(self, axis, keep_dims)
-
-    def atan2(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.atan2`.
-        """
-        return tensor_operator_registry.get('atan2')(self, other)
-
-    def baddbmm(self, batch1, batch2, beta=1, alpha=1):
-        r"""
-        For details, please refer to :func:`mindspore.ops.baddbmm`.
-        """
-        return tensor_operator_registry.get('baddbmm')(self, batch1, batch2, beta=beta, alpha=alpha)
-
     def view(self, *shape):
         """
         Reshape the tensor according to the input shape. It's the same as :func:`mindspore.Tensor.reshape`,
@@ -1474,64 +1287,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         shape = shape[0]
         return tensor_operator_registry.get('reshape')(self, shape)

-    def
-
-
-
-            other(Tensor): The returned Tensor has the same shape as `other`.
-
-        Returns:
-            Tensor, has the same shape as `other`.
-
-        Raises:
-            TypeError: If `other` is not a Tensor.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> from mindspore import Tensor
-            >>> from mindspore import dtype as mstype
-            >>> a = Tensor([[1, 2, 3], [2, 3, 4]], mstype.float32)
-            >>> b = Tensor([1, 1, 1, 1, 1, 1], mstype.float32)
-            >>> output = a.view_as(b)
-            >>> print(output)
-            [1. 2. 3. 2. 3. 4.]
-        """
-        if not isinstance(other, (Tensor, Tensor_)):
-            raise TypeError(f"For view_as, the input other must be a Tensor, but got {type(other)}")
-        return self.view(other.shape)
-
-    def t(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.t`.
-        """
-        return tensor_operator_registry.get("t")(self)
-
-    def bitwise_and(self, other):
-        """
-        For details, please refer to :func:`mindspore.ops.bitwise_and`.
-        """
-        return tensor_operator_registry.get('bitwise_and')(self, other)
-
-    def bitwise_or(self, other):
-        """
-        For details, please refer to :func:`mindspore.ops.bitwise_or`.
-        """
-        return tensor_operator_registry.get('bitwise_or')(self, other)
-
-    def bitwise_xor(self, other):
-        """
-        For details, please refer to :func:`mindspore.ops.bitwise_xor`.
-        """
-        return tensor_operator_registry.get('bitwise_xor')(self, other)
-
-    def bitwise_left_shift(self, other):
-        """
-        For details, please refer to :func:`mindspore.ops.bitwise_left_shift`.
-        """
-        return tensor_operator_registry.get('bitwise_left_shift')(self, other)
+    def bitwise_left_shift(self, other):
+        """
+        For details, please refer to :func:`mindspore.ops.bitwise_left_shift`.
+        """
+        return tensor_operator_registry.get('bitwise_left_shift')(self, other)

     def bitwise_right_shift(self, other):
         """
@@ -1541,12 +1301,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         other = _cast(other, self.dtype)
         return tensor_operator_registry.get('bitwise_right_shift')(self, other)

-    def scatter(self, axis, index, src):
-        """
-        For details, please refer to :func:`mindspore.ops.scatter`.
-        """
-        return tensor_operator_registry.get('scatter')(self, axis, index, src)
-
     def scatter_mul(self, indices, updates):
         """
         For details, please refer to :func:`mindspore.ops.scatter_mul`.
@@ -1565,126 +1319,55 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('ger')(self, vec2)

-    def gt(self, x):
-        """
-        For details, please refer to :func:`mindspore.ops.gt`.
-        """
-        return tensor_operator_registry.get('gt')(self, x)
-
-    def ge(self, x):
-        """
-        For details, please refer to :func:`mindspore.ops.ge`.
-        """
-        return tensor_operator_registry.get('ge')(self, x)
-
     def broadcast_to(self, shape):
         """
         For details, please refer to :func:`mindspore.ops.broadcast_to`.
         """
         return tensor_operator_registry.get('broadcast_to')(self, shape)

-    def expand_as(self, x):
-        """
-        Expand the dimension of target tensor to the dimension of input tensor.
-
-        Args:
-            x (Tensor): The input tensor. The shape of the input tensor must obey
-                the broadcasting rule.
-
-        Returns:
-            Tensor, has the same dimension as input tensor.
-
-        Examples:
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> from mindspore import dtype as mstype
-            >>> x = Tensor([1, 2, 3], dtype=mstype.float32)
-            >>> y = Tensor(np.ones((2, 3)), dtype=mstype.float32)
-            >>> output = x.expand_as(y)
-            >>> print(output)
-            [[1. 2. 3.]
-            [1. 2. 3.]]
-        """
-        return tensor_operator_registry.get('broadcast_to')(self, x.shape)
-
-    def exp(self):
-        """
-        For details, please refer to :func:`mindspore.ops.exp`.
-        """
-        return tensor_operator_registry.get('exp')(self)
-
     def real(self):
         r"""
         For details, please refer to :func:`mindspore.ops.real`.
         """
         return tensor_operator_registry.get('real')(self)

-    def
+    def tanh_(self):
         r"""
-
-        """
-        return tensor_operator_registry.get('rsqrt')(self)
+        Computes the hyperbolic tangent of `self` element-wise, in place. The Tanh function is defined as:

-
-        r"""
-        For details, please refer to :func:`mindspore.ops.reciprocal`.
-        """
-        return tensor_operator_registry.get('reciprocal')(self)
+        .. math::

-
-        """
-        For details, please refer to :func:`mindspore.ops.sqrt`.
-        """
-        return tensor_operator_registry.get('sqrt')(self)
+            tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},

-
-        """
-        For details, please refer to :func:`mindspore.ops.square`.
-        """
-        return tensor_operator_registry.get('square')(self)
+        where :math:`x_i` is an element of the input Tensor.

-
-    def sub(self, y):
-        r"""
-        For details, please refer to :func:`mindspore.ops.sub`.
-        """
-        return tensor_operator_registry.get('sub')(self, y)
+        Tanh Activation Function Graph:

-
-
-        For details, please refer to :func:`mindspore.ops.tan`.
-        """
-        return tensor_operator_registry.get('tan')(self)
+        .. image:: ../../images/Tanh.png
+            :align: center

-
-
-        For details, please refer to :func:`mindspore.ops.tanh`.
-        """
-        return tensor_operator_registry.get('tanh')(self)
+        .. warning::
+            - This is an experimental API that is subject to change or deletion.

-
-
-        For details, please refer to :func:`mindspore.ops.cosh`.
-        """
-        return tensor_operator_registry.get('cosh')(self)
+        Returns:
+            Tensor, with the same type and shape as `self`.

-
-
-        For details, please refer to :func:`mindspore.ops.acos`.
-        """
-        return tensor_operator_registry.get('acos')(self)
+        Raises:
+            TypeError: If `self` is not a Tensor.

-
-
-        Alias for :func:`mindspore.Tensor.acos`.
-        """
-        return self.acos()
+        Supported Platforms:
+            ``Ascend``

-
-
-
+        Examples:
+            >>> import mindspore
+            >>> import numpy as np
+            >>> from mindspore import Tensor
+            >>> x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
+            >>> output = x.tanh_()
+            >>> print(output)
+            [0.7615941 0.9640276 0.9950547 0.9993293 0.9999092]
         """
-        return tensor_operator_registry.get('
+        return tensor_operator_registry.get('tanh_')(self)

     def cov(self, *, correction=1, fweights=None, aweights=None):
         r"""
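The in-place `tanh_` should agree numerically with NumPy's `tanh`; a quick cross-check sketch:

>>> import numpy as np
>>> from mindspore import Tensor
>>> a = np.array([1, 2, 3, 4, 5], dtype=np.float32)
>>> x = Tensor(a)
>>> out = x.tanh_()                      # updates x in place and returns it
>>> bool(np.allclose(out.asnumpy(), np.tanh(a)))
True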
@@ -1692,62 +1375,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('cov')(self, correction=correction, fweights=fweights, aweights=aweights)

-    def
-        """
-        For details, please refer to :func:`mindspore.ops.acosh`.
-        """
-        return tensor_operator_registry.get('acosh')(self)
-
-    def asin(self):
+    def floor_(self):
         r"""
-
-        """
-        return tensor_operator_registry.get('asin')(self)
-
-    def abs(self):
-        """
-        For details, please refer to :func:`mindspore.ops.abs`.
-        """
-        return tensor_operator_registry.get('abs')(self)
-
-    def absolute(self):
-        """
-        Alias for :func:`mindspore.Tensor.abs`.
-        """
-        return self.abs()
-
-    def ceil(self):
-        """
-        For details, please refer to :func:`mindspore.ops.ceil`.
-        """
-        return tensor_operator_registry.get('ceil')(self)
-
-    def floor(self):
-        """
-        For details, please refer to :func:`mindspore.ops.floor`.
-        """
-        return tensor_operator_registry.get('floor')(self)
-
-    def floor_divide(self, other):
-        """
-        For details, please refer to :func:`mindspore.ops.floor_divide`.
+        In-place version of :func:`mindspore.Tensor.floor`.

         .. warning::
             This is an experimental API that is subject to change or deletion.
         """
-        return tensor_operator_registry.get('
-
-    def lerp(self, end, weight):
-        """
-        For details, please refer to :func:`mindspore.ops.lerp`.
-        """
-        return tensor_operator_registry.get('lerp')(self, end, weight)
-
-    def negative(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.negative`.
-        """
-        return tensor_operator_registry.get("negative")(self)
+        return tensor_operator_registry.get('floor_')(self)

     # pylint: disable=redefined-builtin
     def norm(self, ord=None, dim=None, keepdim=False, *, dtype=None):
@@ -1764,7 +1399,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

     def approximate_equal(self, other, tolerance=1e-5):
         r"""
-        For details, please refer to :func:`mindspore.ops.approximate_equal
+        For details, please refer to :func:`mindspore.ops.approximate_equal`.
+        The parameter `other` of the current interface corresponds to the parameter `y` of the reference interface.
         """
         validator.check_isinstance("x", self, Tensor)
         validator.check_isinstance("y", other, Tensor)
@@ -1775,12 +1411,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             tensor_operator_registry.get('__sub__')(input_x, input_y)
         ), tolerance)

-    def log1p(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.log1p`.
-        """
-        return tensor_operator_registry.get('log1p')(self)
-
     def logit(self, eps=None):
         r"""
         For details, please refer to :func:`mindspore.ops.logit`.
@@ -1790,18 +1420,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         validator.check_value_type('eps', eps, (float,), 'Tensor.logit')
         return tensor_operator_registry.get('logit')(self, eps)

-    def logaddexp(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.logaddexp`.
-        """
-        return tensor_operator_registry.get('logaddexp')(self, other)
-
-    def logaddexp2(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.logaddexp2`.
-        """
-        return tensor_operator_registry.get('logaddexp2')(self, other)
-
     def logcumsumexp(self, axis):
         r"""
         For details, please refer to :func:`mindspore.ops.logcumsumexp`.
@@ -1811,12 +1429,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('logcumsumexp')(self, axis)

-    def logsumexp(self, axis, keepdims=False):
-        r"""
-        For details, please refer to :func:`mindspore.ops.logsumexp`.
-        """
-        return tensor_operator_registry.get('logsumexp')(self, axis, keepdims)
-
     def logdet(self):
         r"""
         For details, please refer to :func:`mindspore.ops.logdet`.
@@ -1825,22 +1437,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

     def i0(self):
         r"""
-        For details, please refer to :func:`mindspore.ops.
+        For details, please refer to :func:`mindspore.ops.bessel_i0`.
         """
         return tensor_operator_registry.get('i0')(self)

-    def isclose(self, x2, rtol=1e-05, atol=1e-08, equal_nan=False):
-        """
-        For details, please refer to :func:`mindspore.ops.isclose`.
-        """
-        return tensor_operator_registry.get('isclose')(self, x2, rtol, atol, equal_nan)
-
-    def isneginf(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.isneginf`.
-        """
-        return tensor_operator_registry.get('isneginf')(self)
-
     def isposinf(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isposinf`.
@@ -1853,67 +1453,18 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('isreal')(self)

-    def isfinite(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.isfinite`.
-        """
-        return tensor_operator_registry.get('isfinite')(self)
-
-    def is_complex(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.is_complex`.
-        """
-        return tensor_operator_registry.get('is_complex')(self)
-
     def inv(self):
         r"""
         For details, please refer to :func:`mindspore.ops.inv`.
         """
         return tensor_operator_registry.get('inv')(self)

-    def inverse(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.inverse`.
-        """
-        return tensor_operator_registry.get('inverse')(self)
-
     def invert(self):
         r"""
         For details, please refer to :func:`mindspore.ops.invert`.
         """
         return tensor_operator_registry.get('invert')(self)

-    def pow(self, exponent):
-        r"""
-        For details, please refer to :func:`mindspore.ops.pow`.
-        """
-        return tensor_operator_registry.get('pow')(self, exponent)
-
-    def log(self):
-        """
-        For details, please refer to :func:`mindspore.ops.log`.
-        """
-        return tensor_operator_registry.get('log')(self)
-
-    def log10(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.log10`.
-        """
-        return tensor_operator_registry.get('log10')(self)
-
-    def log2(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.log2`.
-        """
-        return tensor_operator_registry.get('log2')(self)
-
-    @mean_mint
-    def mean(self, axis=None, keep_dims=False):
-        """
-        For details, please refer to :func:`mindspore.ops.mean`.
-        """
-        return tensor_operator_registry.get('mean')(self, axis, keep_dims)
-
     def amin(self, axis=None, keepdims=False, *, initial=None, where=None):
         """
         For details, please refer to :func:`mindspore.ops.amin`.
@@ -1925,6 +1476,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def reverse(self, axis):
         """
         For details, please refer to :func:`mindspore.ops.flip`.
+        The `axis` parameter in `Tensor.reverse` is equivalent to the `dims` parameter in :func:`mindspore.ops.flip`.
         """
         return tensor_operator_registry.get('flip')(self, axis)

@@ -1948,84 +1500,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get("reverse_sequence")(self, seq_lengths, seq_dim, batch_dim)

-    def prod(self, axis=None, keep_dims=False, dtype=None):
-        """
-        For details, please refer to :func:`mindspore.ops.prod`.
-        """
-        return tensor_operator_registry.get('prod')(self, axis, keep_dims, dtype)
-
-    def select(self, condition, y):
-        r"""
-        For details, please refer to :func:`mindspore.ops.select`.
-        """
-        if not isinstance(condition, Tensor):
-            raise TypeError(f"For 'Tensor.select', the argument 'condition' should be Tensor,"
-                            f" but got {type(condition)}.")
-        if not isinstance(y, (Tensor, int, float)):
-            raise TypeError(f"For 'Tensor.select', the argument 'y' should be Tensor, int or float,"
-                            f" but got {type(y)}.")
-        if isinstance(y, int) and self.dtype != mstype.int32:
-            raise TypeError(f"For 'Tensor.select', if the argument 'y' is int,"
-                            f" then the tensor type should be int32 but got {self.dtype}")
-        if isinstance(y, float) and self.dtype != mstype.float32:
-            raise TypeError(f"For 'Tensor.select', if the argument 'y' is float,"
-                            f" then the tensor type should be float32 but got {self.dtype}")
-        input_y = y
-        if isinstance(y, (int, float)):
-            input_y = tensor_operator_registry.get('zeros_like')(self) + y
-            if isinstance(y, int):
-                input_y = tensor_operator_registry.get('cast')(input_y, mstype.int32)
-            else:
-                input_y = tensor_operator_registry.get('cast')(input_y, mstype.float32)
-        return tensor_operator_registry.get('select')(condition, self, input_y)
-
-    def transpose(self, *axes):
-        r"""
-        For details, please refer to :func:`mindspore.ops.transpose`.
-        """
-        perm = validator.check_transpose_axis(axes, self.ndim)
-        return tensor_operator_registry.get('transpose')(self, perm)
-
     def col2im(self, output_size, kernel_size, dilation, padding_value, stride):
         """
         For details, please refer to :func:`mindspore.ops.col2im`.
         """
         return tensor_operator_registry.get('col2im')(self, output_size, kernel_size, dilation, padding_value, stride)

-    def reshape(self, *shape):
-        r"""
-        Rearranges the input Tensor based on the given `shape` .
-
-        The `shape` can only have one -1 at most, in which case it's inferred from the remaining dimensions and
-        the number of elements in the input.
-
-        Args:
-            shape (Union[int, tuple[int], list[int]]): If `shape` is a tuple or list, its elements should be
-                integers, and only constant value is allowed. i.e., :math:`(y_1, y_2, ..., y_S)`.
-
-        Returns:
-            Tensor, If the given `shape` does not contain -1, the `shape` of tensor is :math:`(y_1, y_2, ..., y_S)`.
-            If the k-th position in the given `shape` is -1, the `shape` of tensor is :math:`(y_1, ..., y_{k-1},
-            \frac{\prod_{i=1}^{R}x_{i}}{y_1\times ...\times y_{k-1}\times y_{k+1}\times...\times y_S} , y_{k+1},
-            ..., y_S)`, in where the shape of input tensor is :math:`(x_1, x_2, ..., x_R)`.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import mindspore
-            >>> import numpy as np
-            >>> from mindspore import Tensor, ops
-            >>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
-            >>> output = input.reshape(3, 2)
-            >>> print(output)
-            [[-0.1  0.3]
-            [ 3.6  0.4]
-            [ 0.5 -3.2]]
-        """
-        new_shape = validator.check_reshape_shp(shape)
-        return tensor_operator_registry.get('reshape')(self, new_shape)
-
     def reshape_as(self, other):
         """
         Change the shape of the Tensor to the shape of `other` without changing the data.
@@ -2078,18 +1558,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         reshape_op = tensor_operator_registry.get('reshape')
         return reshape_op(self, (-1,))

-    def round(self, decimals=0):
-        """
-        For details, please refer to :func:`mindspore.ops.round`.
-        """
-        return tensor_operator_registry.get('round')(self, decimals=decimals)
-
-    def roll(self, shifts, dims):
-        """
-        For details, please refer to :func:`mindspore.ops.roll`.
-        """
-        return tensor_operator_registry.get('roll')(shifts, dims)(self)
-
     def rot90(self, k, dims):
         r"""
         For details, please refer to :func:`mindspore.ops.rot90`.
@@ -2102,18 +1570,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('deg2rad')(self)

-    def dot(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.dot`.
-        """
-        return tensor_operator_registry.get('dot')(self, other)
-
-    def outer(self, vec2):
-        r"""
-        For details, please refer to :func:`mindspore.ops.outer`.
-        """
-        return tensor_operator_registry.get('outer')(self, vec2)
-
     def rad2deg(self):
         r"""
         For details, please refer to :func:`mindspore.ops.rad2deg`.
@@ -2130,16 +1586,18 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         Alias for :func:`mindspore.Tensor.numel`.
         """
-        return
+        return self.size

     def numel(self):
         r"""
         For details, please refer to :func:`mindspore.ops.numel`.
         """
-        return
+        return self._size

     def permute(self, *axis):
         """
+        Tensor.permute automatically unpacks `axis` when it is passed as a variable number of positional
+        arguments, which differs slightly from the parameter form of :func:`mindspore.ops.permute`.
         For details, please refer to :func:`mindspore.ops.permute`.
         """
         perm = validator.check_transpose_axis(axis, self.ndim)
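Per the docstring note just added, `permute` accepts the axes either unpacked or as one sequence; `check_transpose_axis` normalizes both forms. A sketch:

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> x = Tensor(np.zeros((2, 3, 4)), ms.float32)
>>> x.permute(2, 0, 1).shape             # variadic axes
(4, 2, 3)
>>> x.permute((2, 0, 1)).shape           # a single tuple also works
(4, 2, 3)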
@@ -2151,19 +1609,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get("positive")(self)

-    def remainder(self, divisor):
-        r"""
-        For details, please refer to :func:`mindspore.ops.remainder`.
-        """
-        return tensor_operator_registry.get('remainder')(self, divisor)
-
-    @flatten_mint
-    def flatten(self, order='C', *, start_dim=0, end_dim=-1):
-        r"""
-        For details, please refer to :func:`mindspore.ops.flatten`.
-        """
-        return tensor_operator_registry.get('flatten')(self, order, start_dim=start_dim, end_dim=end_dim)
-
     def float_power(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.float_power`.
@@ -2178,22 +1623,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

     def fmin(self, other):
         r"""
-
+        This interface is deprecated since version 2.4 and will be removed in a future version.
         """
         return tensor_operator_registry.get('fmin')(self, other)

-    def fmod(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.fmod`.
-        """
-        return tensor_operator_registry.get('fmod')(self, other)
-
-    def narrow(self, axis, start, length):
-        """
-        For details, please refer to :func:`mindspore.ops.narrow`.
-        """
-        return tensor_operator_registry.get('narrow')(self, axis, start, length)
-
     def swapaxes(self, axis0, axis1):
         """
         For details, please refer to :func:`mindspore.ops.swapaxes`.
@@ -2218,20 +1651,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('slogdet')(self)

-    def tril(self, diagonal=0):
-        """
-        For details, please refer to :func:`mindspore.ops.tril`.
-        """
-        return tensor_operator_registry.get('tril')(self, diagonal)
-
-    def unsqueeze(self, dim):
-        """
-        For details, please refer to :func:`mindspore.ops.unsqueeze`.
-        """
-        validator.check_is_int(dim, 'dim')
-        validator.check_int_range(dim, -self.ndim - 1, self.ndim + 1, validator.INC_LEFT, 'dim')
-        return tensor_operator_registry.get('unsqueeze')(self, dim)
-
     def expand_dims(self, axis):
         """
         For details, please refer to :func:`mindspore.ops.expand_dims`.
@@ -2271,66 +1690,53 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         dtype = _check_astype_and_convert(dtype)
         if not copy and dtype == self.dtype:
             return self
-        return
-
-    def argmax(self, axis=None, keepdims=False):
-        """
-        For details, please refer to :func:`mindspore.ops.argmax`.
-        """
-        out = tensor_operator_registry.get('argmax')(self, axis, keepdims)
-        return out
-
-    def argmin(self, axis=None, keepdims=False):
-        """
-        For details, please refer to :func:`mindspore.ops.argmin`.
-        """
-        out = tensor_operator_registry.get('argmin')(self, axis, keepdims)
-        return out
+        return self.to(dtype)

     def argmax_with_value(self, axis=0, keep_dims=False):
         """
-
-
-        Compute the max value of input Tensor on the specified axis, and return the max value and index.
-
-        Note:
-            - In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
-            - If there are multiple maximum values, the index of the first maximum value is used.
-            - The value range of `axis` is [-dims, dims - 1]. `dims` is the dimension length of this tensor.
+        Return the maximum values and their indices along the given axis of the tensor.

         Args:
-            axis (int):
-
-
+            axis (Union[int, None], optional): Specify the axis for computation. If ``None`` , compute all elements in
+                the tensor. Default ``0`` .
+            keep_dims (bool, optional): Whether the output tensor has dim retained. Default ``False`` .

         Returns:
-
-            tensor.
-
-            - **index** (Tensor) - The index for the maximum value of the input tensor.
-              If `keep_dims` is ``true`` , the shape of
-              output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`. Otherwise, the shape is
-              :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
-            - **value** (Tensor) - The maximum value of input tensor, with the same shape as index.
-
-        Raises:
-            TypeError: If `keep_dims` is not a bool.
-            TypeError: If `axis` is not an int.
+            Tuple(max, max_indices) of 2 tensors.

         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``

         Examples:
-            >>> import numpy as np
             >>> import mindspore
-            >>>
-
-
-            >>>
-
-
-
-
+            >>> x = mindspore.tensor([[9, 3, 4, 5],
+            ...                       [5, 2, 7, 4],
+            ...                       [8, 1, 3, 6]])
+            >>> # case 1: By default, compute the maximum along axis 0.
+            >>> x.argmax_with_value()
+            (Tensor(shape=[4], dtype=Int64, value= [9, 3, 7, 6]),
+            Tensor(shape=[4], dtype=Int64, value= [0, 0, 1, 2]))
+            >>>
+            >>> # case 2: Compute the maximum along axis 1.
+            >>> x.argmax_with_value(axis=1)
+            (Tensor(shape=[3], dtype=Int64, value= [9, 7, 8]),
+            Tensor(shape=[3], dtype=Int64, value= [0, 2, 0]))
+            >>>
+            >>> # case 3: If keep_dims=True, the output shape will be the same as that of the input.
+            >>> x.argmax_with_value(axis=1, keep_dims=True)
+            (Tensor(shape=[3, 1], dtype=Int64, value=
+            [[9],
+            [7],
+            [8]]),
+            Tensor(shape=[3, 1], dtype=Int64, value=
+            [[0],
+            [2],
+            [0]]))
+            >>>
+            >>> # case 4: If axis=None, compute the maximum of all elements.
+            >>> x.argmax_with_value(axis=None, keep_dims=True)
+            (Tensor(shape=[], dtype=Int64, value= 9),
+            Tensor(shape=[], dtype=Int64, value= 0))
         """
         if self.shape == ():
             return (self, Tensor(0))
@@ -2338,68 +1744,54 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
2338
1744
|
|
|
2339
1745
|
def argmin_with_value(self, axis=0, keep_dims=False):
|
|
2340
1746
|
"""
|
|
2341
|
-
|
|
2342
|
-
|
|
2343
|
-
Note:
|
|
2344
|
-
- In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
|
|
2345
|
-
- If there are multiple minimum values, the index of the first minimum value is used.
|
|
2346
|
-
- The value range of `axis` is [-dims, dims - 1]. `dims` is the dimension length of this tensor.
|
|
1747
|
+
Return the minimum values and their indices along the given axis of the tensor.
|
|
2347
1748
|
|
|
2348
1749
|
Args:
|
|
2349
|
-
axis (int):
|
|
2350
|
-
|
|
2351
|
-
|
|
1750
|
+
axis (Union[int, None], optional): Specify the axis for computation. If ``None`` , compute all elements in
|
|
1751
|
+
the tensor. Default ``0`` .
|
|
1752
|
+
keep_dims (bool, optional): Whether the output tensor has dim retained. Default ``False`` .
|
|
2352
1753
|
|
|
2353
1754
|
Returns:
|
|
2354
|
-
|
|
2355
|
-
tensor.
|
|
2356
|
-
|
|
2357
|
-
- **index** (Tensor) - The index for the minimum value of the input tensor.
|
|
2358
|
-
If `keep_dims` is true, the shape of
|
|
2359
|
-
output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`. Otherwise, the shape is
|
|
2360
|
-
:math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
|
|
2361
|
-
- **value** (Tensor) - The minimum value of input tensor, with the same shape as index.
|
|
2362
|
-
|
|
2363
|
-
Raises:
|
|
2364
|
-
TypeError: If `keep_dims` is not a bool.
|
|
2365
|
-
TypeError: If `axis` is not an int.
|
|
1755
|
+
Tuple(min, min_indices) of 2 tensors.
|
|
2366
1756
|
|
|
2367
1757
|
Supported Platforms:
|
|
2368
1758
|
``Ascend`` ``GPU`` ``CPU``
|
|
2369
1759
|
|
|
2370
1760
|
Examples:
|
|
2371
|
-
>>> import numpy as np
|
|
2372
1761
|
>>> import mindspore
|
|
2373
|
-
>>>
|
|
2374
|
-
|
|
2375
|
-
|
|
2376
|
-
>>>
|
|
2377
|
-
|
|
2378
|
-
|
|
2379
|
-
|
|
2380
|
-
|
|
1762
|
+
>>> x = mindspore.tensor([[2, 5, 1, 6],
|
|
1763
|
+
... [3, -7, -2, 4],
|
|
1764
|
+
... [8, -4, 1, -3]])
|
|
1765
|
+
>>> # case 1: By default, compute the minimum along axis 0.
|
|
1766
|
+
>>> x.argmin_with_value()
|
|
1767
|
+
(Tensor(shape=[4], dtype=Int64, value= [ 2, -7, -2, -3]),
|
|
1768
|
+
Tensor(shape=[4], dtype=Int64, value= [0, 1, 1, 2]))
|
|
1769
|
+
>>>
|
|
1770
|
+
>>> # case 2: Compute the minimum along axis 1.
|
|
1771
|
+
>>> x.argmin_with_value(axis=1)
|
|
1772
|
+
(Tensor(shape=[3], dtype=Int64, value= [ 1, -7, -4]),
|
|
1773
|
+
Tensor(shape=[3], dtype=Int64, value= [2, 1, 1]))
|
|
1774
|
+
>>>
|
|
1775
|
+
>>> # case 3: If keep_dims=True, the output shape will be same of that of the input.
|
|
1776
|
+
>>> x.argmin_with_value(axis=1, keep_dims=True)
|
|
1777
|
+
(Tensor(shape=[3, 1], dtype=Int64, value=
|
|
1778
|
+
[[ 1],
|
|
1779
|
+
[-7],
|
|
1780
|
+
[-4]]),
|
|
1781
|
+
Tensor(shape=[3, 1], dtype=Int64, value=
|
|
1782
|
+
[[2],
|
|
1783
|
+
[1],
|
|
1784
|
+
[1]]))
|
|
1785
|
+
>>>
|
|
1786
|
+
>>> # case 4: If axis=None, compute the minimum of all elements.
|
|
1787
|
+
>>> x.argmin_with_value(axis=None, keep_dims=True)
|
|
1788
|
+
(Tensor(shape=[], dtype=Int64, value= -7),
|
|
1789
|
+
Tensor(shape=[], dtype=Int64, value= 0))
|
|
2381
1790
|
"""
|
|
2382
1791
|
if self.shape == ():
|
|
2383
1792
|
return (self, Tensor(0))
|
|
2384
1793
|
return tensor_operator_registry.get('argmin_with_value')(self, axis, keep_dims)
|
|
2385
1794
|
|
|
2386
|
-
def cumsum(self, axis=None, dtype=None):
|
|
2387
|
-
"""
|
|
2388
|
-
For details, please refer to :func:`mindspore.ops.cumsum`.
|
|
2389
|
-
"""
|
|
2390
|
-
x = self
|
|
2391
|
-
original_dtype = x.dtype
|
|
2392
|
-
# If original tensor is int, and has precision less then int32, convert to int32
|
|
2393
|
-
if x.dtype in (mstype.bool_, mstype.int8, mstype.int16, mstype.uint8, mstype.int16):
|
|
2394
|
-
x = x.astype(mstype.int32)
|
|
2395
|
-
if axis is None:
|
|
2396
|
-
x = x.ravel()
|
|
2397
|
-
axis = 0
|
|
2398
|
-
validator.check_axis_in_range(axis, x.ndim)
|
|
2399
|
-
if dtype is not None and original_dtype != dtype:
|
|
2400
|
-
return tensor_operator_registry.get('cumsum')()(x, axis).astype(dtype, copy=False)
|
|
2401
|
-
return tensor_operator_registry.get('cumsum')()(x, axis)
|
|
2402
|
-
|
|
2403
1795
|
def cummin(self, axis):
|
|
2404
1796
|
r"""
|
|
2405
1797
|
For details, please refer to :func:`mindspore.ops.cummin`.
|
|
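The rewritten `argmin_with_value` docstring above pairs each minimum with its index along an axis. As a reading aid only (not part of the diff), here is a minimal NumPy sketch of the documented semantics; the helper name `argmin_with_value_np` is hypothetical:

```python
import numpy as np

def argmin_with_value_np(x, axis=0, keep_dims=False):
    """Sketch of Tensor.argmin_with_value semantics: (values, indices) along `axis`."""
    if axis is None:
        # axis=None reduces over all elements; the returned index is 0 (see case 4 above).
        return x.min(), np.int64(0)
    values = x.min(axis=axis, keepdims=keep_dims)
    indices = x.argmin(axis=axis)
    if keep_dims:
        indices = np.expand_dims(indices, axis)
    return values, indices

x = np.array([[2, 5, 1, 6], [3, -7, -2, 4], [8, -4, 1, -3]])
vals, idx = argmin_with_value_np(x, axis=1)
print(vals)  # [ 1 -7 -4]
print(idx)   # [2 1 1]
```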
@@ -2418,12 +1810,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('index_fill')(self, axis, index, value)
 
-    def index_select(self, axis, index):
-        """
-        For details, please refer to :func:`mindspore.ops.index_select`.
-        """
-        return tensor_operator_registry.get('index_select')(self, axis, index)
-
     def inplace_update(self, v, indices):
         """
         For details, please refer to :func:`mindspore.ops.inplace_update`.
@@ -2466,234 +1852,81 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             x = x.astype(origin_dtype)
         return x
 
-    def copy_(self, src, non_blocking=False):
+    def scatter_add_(self, dim, index, src):
         """
-
+        Add all elements in `src` to the index specified by `index` of `self` along the dimension specified
+        by `dim`; `scatter_add_` is an in-place operation.
+        The ranks of `self`, `index` and `src` must be greater than or equal to 1.
 
-
-        This is an experimental API that is subject to change or deletion.
-        The `src` tensor must be broadcastable with the `self` tensor. It may be of a different data type.
+        For a 3-D tensor, the operation updates `self` as follows:
 
-
-            src (Tensor): the source tensor to copy from.
-            non_blocking (bool): no effect currently.
+        .. code-block::
 
-
-            Return self Tensor.
+            self[index[i][j][k]][j][k] += src[i][j][k]  # if dim == 0
 
-
-            ``Ascend``
+            self[i][index[i][j][k]][k] += src[i][j][k]  # if dim == 1
 
-
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> a = Tensor(np.ones((3,3)).astype("float32"))
-            >>> b = Tensor(np.zeros((3,3)).astype("float32"))
-            >>> a.copy_(b)
-            >>> print(a)
-            [[0. 0. 0.]
-             [0. 0. 0.]
-             [0. 0. 0.]]
-        """
-        return tensor_operator_registry.get("copy_")(self, src)
-
-    @max_mint
-    def max(self, axis=None, keepdims=False, *, initial=None, where=True, return_indices=False):
-        """
-        Return the maximum of a tensor or maximum along an axis.
-
-        Note:
-            When `axis` is ``None``, `keepdims` and subsequent parameters
-            have no effect. At the same time, the index is fixed to return 0.
+            self[i][j][index[i][j][k]] += src[i][j][k]  # if dim == 2
 
         Args:
-
-
-
-
-
-
-
-                broadcast correctly against the input array. Default: ``False`` .
-
-        Keyword Args:
-            initial (scalar, optional):
-                The minimum value of an output element. Must be present to allow
-                computation on empty slice. Default: ``None`` .
-            where (bool Tensor, optional):
-                A boolean tensor which is broadcasted to match the dimensions of array,
-                and selects elements to include in the reduction. If non-default value
-                is passed, initial must also be provided. Default: ``True`` .
-            return_indices (bool, optional): Whether to return the index of the maximum value.
-                Default: ``False`` . If `axis` is a list or tuple of ints, it must be ``False`` .
-
-        Returns:
-            Tensor or scalar, maximum of input tensor. If `axis` is ``None`` , the result is a scalar
-            value. If `axis` is given, the result is a tensor of dimension ``self.ndim - 1``.
-
-        Raises:
-            TypeError: If arguments have types not specified above.
-
-        See also:
-            - :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
-            - :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
-            - :func:`mindspore.Tensor.min`: Return the minimum of a tensor or minimum along an axis.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> a = Tensor(np.arange(4).reshape((2, 2)).astype('float32'))
-            >>> output = a.max()
-            >>> print(output)
-            3.0
-            >>> value, indices = a.max(axis=0, return_indices=True)
-            >>> print(value)
-            [2. 3.]
-            >>> print(indices)
-            [1 1]
-        """
-        if isinstance(axis, (list, tuple)):
-            reduce_ = tensor_operator_registry.get("reduce")
-            reduce_max = tensor_operator_registry.get("reduce_max")
-            maximum = tensor_operator_registry.get("maximum")
-            return reduce_(self, reduce_max(keepdims), cmp_fn=maximum, axis=axis, keepdims=keepdims,
-                           initial=initial, where=where)
-        values, indices = tensor_operator_registry.get("max")(self, axis, keepdims, initial=initial, where=where)
-        if not return_indices:
-            return values
-        return values, indices
-
-    @min_mint
-    def min(self, axis=None, keepdims=False, *, initial=None, where=True, return_indices=False):
-        """
-        Return the minimum of a tensor or minimum along an axis.
-
-        Note:
-            When `axis` is ``None``, `keepdims` and subsequent parameters
-            have no effect. At the same time, the index is fixed to return 0.
-
-        Args:
-            axis (Union[None, int, list, tuple of ints], optional): An axis or
-                axes along which to operate. By default, flattened input is used. If
-                `axis` is a tuple of ints, the minimum is selected over multiple axes,
-                instead of a single axis or all the axes as before. Default: ``None`` .
-            keepdims (bool, optional):
-                If ``True`` , the axes which are reduced are left in the
-                result as dimensions with size one. With this option, the result will
-                broadcast correctly against the input array. Default: ``False`` .
-
-        Keyword Args:
-            initial (scalar, optional):
-                The minimum value of an output element. Must be present to allow
-                computation on empty slice. Default: ``None`` .
-            where (Tensor[bool], optional):
-                A boolean tensor which is broadcasted to match the dimensions of array,
-                and selects elements to include in the reduction. If non-default value
-                is passed, initial must also be provided. Default: ``True`` .
-            return_indices (bool, optional): Whether to return the index of the minimum value. Default: ``False`` .
-                If `axis` is a list or tuple of ints, it must be ``False`` .
+            dim (int): Which dim to scatter. Accepted range is [-r, r) where r = rank(`self`).
+            index (Tensor): The index of `self` to do scatter operation whose data type must
+                be int32 or int64. Same rank as `self`. Except for the dimension
+                specified by `dim`, the size of each dimension of `index` must be less than or equal to the size
+                of the corresponding dimension of `self`.
+            src (Tensor): The tensor doing the scatter operation with `self`, has the same type as `self` and
+                the size of each dimension must be greater than or equal to that of `index`.
 
         Returns:
-            Tensor
-            value. If `axis` is given, the result is a tensor of dimension ``self.ndim - 1``.
+            Tensor, has the same shape and type as `self`.
 
         Raises:
-            TypeError: If
-
-
-
-
+            TypeError: If `index` is neither int32 nor int64.
+            ValueError: If any of the ranks of `self`, `index` and `src` is less than 1.
+            ValueError: If the ranks of `self`, `index` and `src` are not the same.
+            ValueError: If the size of any dimension of `index`, except the dimension specified by `dim`, is
+                greater than the size of the corresponding dimension of `self`.
+            ValueError: If the size of any dimension of `src` is less than that of `index`.
 
         Supported Platforms:
-            ``Ascend``
+            ``Ascend``
 
         Examples:
             >>> import numpy as np
+            >>> import mindspore as ms
             >>> from mindspore import Tensor
-            >>>
-            >>>
-            >>>
-
-            >>>
-
-
-            >>>
-            >>>
-
-            >>>
-
-
-
-
-
-
-
-
-
-
-
-
-        if not return_indices:
-            return values
-        return values, indices
-
-    def scatter_add(self, indices, updates):
-        """
-        For details, please refer to :func:`mindspore.ops.scatter_add`.
+            >>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
+            >>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
+            >>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
+            >>> out = input.scatter_add_(1, index, src)
+            >>> print(out)
+            [[1. 2. 11. 4. 13.]]
+            >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
+            >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
+            >>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64)
+            >>> out = input.scatter_add_(0, index, src)
+            >>> print(out)
+            [[1. 2. 3. 0. 0.]
+             [0. 0. 0. 0. 0.]
+             [4. 5. 6. 0. 0.]
+             [0. 0. 0. 0. 0.]
+             [7. 8. 9. 0. 0.]]
+            >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
+            >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
+            >>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64)
+            >>> out = input.scatter_add_(1, index, src)
+            >>> print(out)
+            [[1. 0. 2. 0. 3.]
+             [4. 0. 5. 0. 6.]
+             [7. 0. 8. 0. 9.]
+             [0. 0. 0. 0. 0.]
+             [0. 0. 0. 0. 0.]]
         """
-        return tensor_operator_registry.get("scatter_add")(self, indices, updates)
+        return tensor_operator_registry.get("inplace_scatter_add")(self, dim, index, src)
 
     def scatter_sub(self, indices, updates):
         """
-
-        `indices`, with values from `updates`. When multiple values are provided for the same
-        index, the result of the update will be to subtract these values respectively. This operation is almost
-        equivalent to using :class:`mindspore.ops.ScatterNdSub` , except that the updates are applied on output `Tensor`
-        instead of input `Parameter`.
-
-        The last axis of `indices` is the depth of each index vectors. For each index vector,
-        there must be a corresponding value in `updates`. The shape of `updates` should be
-        equal to the shape of `self[indices]`. For more details, see Examples.
-
-        Note:
-            On GPU, if some values of the `indices` are out of bound, instead of raising an index error,
-            the corresponding `updates` will not be updated to self tensor. On CPU, if some values of
-            the `indices` are out of bound, raising an index error. On Ascend, out of bound checking is
-            not supported, if some values of the `indices` are out of bound, unknown errors may be caused.
-
-        Args:
-            indices (Tensor): The index of input tensor whose data type is int32 or int64.
-                The rank must be at least 2.
-            updates (Tensor): The tensor to update the input tensor, has the same type as input,
-                and updates.shape should be equal to indices.shape[:-1] + self.shape[indices.shape[-1]:].
-
-        Returns:
-            Tensor, has the same shape and type as self tensor.
-
-        Raises:
-            TypeError: If dtype of `indices` is neither int32 nor int64.
-            ValueError: If length of shape of self tensor is less than the last dimension of shape of `indices`.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]).astype('float32'))
-            >>> indices = Tensor(np.array([[0, 0], [0, 0]]).astype('int32'))
-            >>> updates = Tensor(np.array([1.0, 2.2]).astype('float32'))
-            >>> output = x.scatter_sub(indices, updates)
-            >>> print(output)
-            [[-3.3000002  0.3        3.6      ]
-             [ 0.4        0.5       -3.2      ]]
+        For details, please refer to :func:`mindspore.ops.tensor_scatter_sub`.
         """
         return tensor_operator_registry.get('tensor_scatter_sub')(self, indices, updates)
 
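The update rule quoted in the `scatter_add_` docstring above can be checked with a few lines of NumPy. This sketch is illustrative only and not part of the package; `scatter_add_np` is a hypothetical helper covering the 2-D case:

```python
import numpy as np

def scatter_add_np(target, dim, index, src):
    """Sketch of the documented scatter_add_ rule for 2-D tensors."""
    for i in range(index.shape[0]):
        for j in range(index.shape[1]):
            if dim == 0:
                target[index[i][j]][j] += src[i][j]  # self[index[i][j]][j] += src[i][j]
            else:
                target[i][index[i][j]] += src[i][j]  # self[i][index[i][j]] += src[i][j]
    return target

out = scatter_add_np(np.array([[1., 2., 3., 4., 5.]]), 1,
                     np.array([[2, 4]]), np.array([[8., 8.]]))
print(out)  # [[ 1.  2. 11.  4. 13.]] -- matches the first doctest above
```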
@@ -2774,18 +2007,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('fill_diagonal')(fill_value, wrap)(self)
 
-    def masked_fill(self, mask, value):
-        """
-        For details, please refer to :func:`mindspore.ops.masked_fill`.
-        """
-        if isinstance(value, (float, int)):
-            value = tensor_operator_registry.get("scalar_to_tensor")(value, self.dtype)
-        if not isinstance(mask, Tensor):
-            raise TypeError("For 'Tensor.masked_fill', the type of the argument 'mask' must be Tensor, but "
-                            "got {}.".format(type(mask)))
-        validator.check_type_name('mask', mask.dtype, [mstype.bool_], "Tensor")
-        return tensor_operator_registry.get("masked_fill")(self, mask, value)
-
     def ptp(self, axis=None, keepdims=False):
         """
         The name of the function comes from the acronym for "peak to peak". Calculate the difference between the
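The removed `masked_fill` wrapper above only validated a boolean mask and broadcast a scalar before dispatching; its observable behavior is a masked overwrite. A minimal NumPy sketch of that behavior (illustrative, not the package's code):

```python
import numpy as np

def masked_fill_np(x, mask, value):
    """Return a copy of x with positions where mask is True replaced by value."""
    if mask.dtype != np.bool_:
        raise TypeError("mask must be boolean, got {}".format(mask.dtype))
    return np.where(mask, np.asarray(value, dtype=x.dtype), x)

x = np.array([1.0, 2.0, 3.0, 4.0])
print(masked_fill_np(x, np.array([True, False, True, False]), 0.5))
# [0.5 2.  0.5 4. ]
```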
@@ -2827,25 +2048,16 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         validator.check_axis_type(axis, True, True, False)
         axis = validator.check_axis_valid(axis, self.ndim)
 
-        return self.max(axis, keepdims) - self.min(axis, keepdims)
-
-    def minimum(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.minimum`.
-        """
-        return tensor_operator_registry.get('minimum')(self, other)
-
-    def clamp(self, min=None, max=None):
-        r"""
-        For details, please refer to :func:`mindspore.ops.clamp`.
-        """
-        return tensor_operator_registry.get('clamp')(self, min, max)
+        return self.max(axis, keepdims) - self.min(axis, keepdims)
 
-    def
+    def clamp_(self, min=None, max=None):
         r"""
-
+        In-place version of :func:`mindspore.Tensor.clamp`.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
         """
-        return
+        return tensor_operator_registry.get('clamp_')(self, min, max)
 
     def init_data(self, slice_index=None, shape=None, opt_shard_group=None):
         """
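`ptp` above is literally `max - min` along the reduced axes, and the new `clamp_` is the in-place form of clamping into `[min, max]`. A NumPy sketch of both behaviors (illustrative only):

```python
import numpy as np

x = np.array([[4.0, 9.0, 2.0, 10.0],
              [6.0, 9.0, 7.0, 12.0]])

# ptp: peak-to-peak, i.e. max - min along an axis.
print(x.max(axis=1) - x.min(axis=1))  # [8. 6.]

# clamp_: clip values into [min, max]; np.clip(..., out=x) mimics the in-place variant.
np.clip(x, 5.0, 10.0, out=x)
print(x)  # all values now lie in [5, 10]
```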
@@ -2863,7 +2075,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             opt_shard_group(str): Optimizer shard group which is used in auto or semi auto parallel mode
                 to get one shard of a parameter's slice. For more information about optimizer parallel, please refer to:
                 `Optimizer Parallel
-                <https://www.mindspore.cn/
+                <https://www.mindspore.cn/tutorials/en/master/parallel/optimizer_parallel.html>`_.
                 Default: ``None``.
 
         Returns:
@@ -2944,9 +2156,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
         # At embedding cache scenes. When size of tensor is out of range, we store data to persistent storage
         if slice_num_of_persistent_data > 1:
-            self.assign_value(Tensor_.persistent_data_from_numpy(data, slice_num_of_persistent_data))
+            self.assign_value(TensorPy_.persistent_data_from_numpy(data, slice_num_of_persistent_data))
         else:
-            self.assign_value(Tensor_.from_numpy(data))
+            self.assign_value(TensorPy_.from_numpy(data))
         return self
 
     def resize(self, *new_shape):
@@ -3007,7 +2219,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def det(self):
         r"""
-
+        This interface is deprecated from version 2.4 and will be removed in a future version.
         """
         return tensor_operator_registry.get('det')(self)
 
@@ -3017,12 +2229,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('diff')(self, n, axis, prepend, append)
 
-    def frac(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.frac`.
-        """
-        return tensor_operator_registry.get('frac')(self)
-
     def argwhere(self):
         r"""
         For details, please refer to :func:`mindspore.ops.argwhere`.
@@ -3049,13 +2255,15 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def lgamma(self):
         r"""
-
+        This interface is deprecated from version 2.4 and will be removed in a future version.
         """
         return tensor_operator_registry.get('lgamma')(self)
 
     def diagonal(self, offset=0, axis1=0, axis2=1):
         """
         For details, please refer to :func:`mindspore.ops.diagonal`.
+        The parameter `axis1` of the current interface is the same as the parameter `dim1` of the reference interface,
+        and the parameter `axis2` of the current interface is the same as the parameter `dim2` of the reference interface.
         """
         return tensor_operator_registry.get('diagonal')(self, offset, axis1, axis2)
 
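The `axis1`/`axis2` naming note above maps directly onto NumPy's `diagonal`, which uses the same parameter names; a quick illustration (not part of the diff):

```python
import numpy as np

x = np.arange(12).reshape(3, 4)
# offset=1 selects the diagonal one step above the main one;
# axis1/axis2 choose the two axes the diagonal is taken across.
print(x.diagonal(offset=1, axis1=0, axis2=1))  # [ 1  6 11]
```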
@@ -3102,73 +2310,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('tracev2')(self, offset, axis1, axis2, dtype)
 
-    def take(self, indices, axis=None, mode='clip'):
-        """
-        Takes elements from a tensor along an axis.
-
-        Args:
-            indices (Tensor): The indices with shape :math:`(Nj...)` of the values to extract.
-            axis (int, optional): The axis over which to select values. By default,
-                the flattened input tensor is used. Default: ``None`` .
-            mode (str, optional): Support ``'raise'``, ``'wrap'``, ``'clip'``.
-
-                - ``raise``: Raises an error;
-
-                - ``wrap``: Wraps around;
-
-                - ``clip``: Clips to the range. ``'clip'`` mode means that all indices that are
-                  too large are replaced by the index that addresses the last element
-                  along that axis. Note that this disables indexing with negative numbers.
-
-                Default: ``'clip'`` .
-
-        Returns:
-            Tensor, the indexed result.
-
-        Raises:
-            ValueError: If `axis` is out of range, or `mode` has values other than ('raise', 'wrap', 'clip')
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> a = Tensor(np.array([4, 3, 5, 7, 6, 8]))
-            >>> indices = Tensor(np.array([0, 1, 4]))
-            >>> output = a.take(indices)
-            >>> print(output)
-            [4 3 6]
-        """
-        if mode not in ('raise', 'wrap', 'clip'):
-            raise ValueError(f"For 'Tensor.take', the argument 'mode' should be one of in ['raise', 'wrap', 'clip'],"
-                             f" but got {mode}.")
-        if axis is None:
-            a = self.ravel()
-            axis = 0
-        else:
-            a = self
-        ndim = a.ndim
-        validator.check_axis_in_range(axis, ndim)
-        axis = axis + ndim if axis < 0 else axis
-
-        shape_a = a.shape
-        shape_indices = indices.shape
-        size_indices = indices.size
-        indices = tensor_operator_registry.get('check_indices')(shape_a[axis], indices, mode)
-
-        # reshapes indices to shape (Ni..., Nj..., Nk)
-        shape_ni = shape_a[:axis]
-        shape_nk = shape_a[axis + 1:]
-        shape_out = shape_ni + shape_indices + shape_nk
-        shape_indices = tuple(size_indices if i == axis else 1 for i in range(ndim))
-        indices = indices.reshape(shape_indices)
-        shape_indices = shape_ni + (indices.size,) + shape_nk
-        indices = tensor_operator_registry.get('broadcast_to')(indices, shape_indices)
-
-        res = tensor_operator_registry.get('gather_d')(a, axis, indices)
-        return res.reshape(shape_out)
-
     def choose(self, choices, mode='clip'):
         """
         Construct a tensor from an index tensor and a list of tensors to choose from.
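The removed `take` implementation above reshapes and broadcasts `indices` so that a single `gather_d` call can do the selection. The same index gymnastics can be reproduced with NumPy's `take_along_axis`; a sketch under that assumption (not the package's code):

```python
import numpy as np

def take_np(a, indices, axis=None, mode='clip'):
    """Sketch of the removed Tensor.take: flatten when axis is None, clip indices, gather."""
    if axis is None:
        a, axis = a.ravel(), 0
    if mode == 'clip':
        indices = np.clip(indices, 0, a.shape[axis] - 1)
    # Broadcast indices across the non-selected axes, then gather along `axis`,
    # mirroring the broadcast_to + gather_d steps in the removed code.
    shape = [1] * a.ndim
    shape[axis] = indices.size
    idx = np.broadcast_to(indices.reshape(shape),
                          a.shape[:axis] + (indices.size,) + a.shape[axis + 1:])
    out = np.take_along_axis(a, idx, axis=axis)
    return out.reshape(a.shape[:axis] + indices.shape + a.shape[axis + 1:])

a = np.array([4, 3, 5, 7, 6, 8])
print(take_np(a, np.array([0, 1, 4])))  # [4 3 6]
```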
@@ -3249,34 +2390,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def searchsorted(self, v, side='left', sorter=None):
         """
-
-
-        Args:
-            v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into the tensor.
-            side (str, optional): If 'left', the index of the first suitable
-                location found is given. If 'right', return the last such index. If there is
-                no suitable index, return either 0 or N (where N is the length of the tensor).
-                Default: ``left`` .
-            sorter (Union[int, list, tuple, Tensor]): optional tensor of
-                integer indices that sort the tensor into ascending order on the innermost dimension
-                and the type must be int64. They are typically the result of argsort. Default: ``None`` .
-                CPU and GPU can only use default values
-
-        Returns:
-            Tensor, array of insertion points with the same shape as `v`.
-
-        Raises:
-            ValueError: If argument for `side` or `sorter` is invalid.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> x = Tensor(np.array([1, 2, 3, 4, 5]))
-            >>> print(x.searchsorted(3))
-            2
+        For details, please refer to :func:`mindspore.ops.searchsorted`.
         """
         if side not in ('left', 'right'):
             raise ValueError(f"For 'Tensor.searchsorted', the argument 'side' should be one of in "
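The removed docstring above describes the usual binary-search insertion semantics, which NumPy exposes under the same name and `side` convention; for reference (illustrative only):

```python
import numpy as np

x = np.array([1, 2, 3, 4, 5])
print(np.searchsorted(x, 3, side='left'))   # 2 -- first index where 3 could be inserted
print(np.searchsorted(x, 3, side='right'))  # 3 -- index just after the existing 3
```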
@@ -3301,20 +2415,15 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.gather_nd`.
         """
-        validator.check_value_type('indices', indices, (Tensor, Tensor_,), 'Tensor.gather_nd')
+        validator.check_value_type('indices', indices, (Tensor, TensorPy_,), 'Tensor.gather_nd')
         return tensor_operator_registry.get('gather_nd')(self, indices)
 
-    def gather(self, input_indices, axis, batch_dims=0):
-        r"""
-        For details, please refer to :func:`mindspore.ops.gather`.
-        """
-        validator.check_is_int(axis, 'axis')
-        validator.check_is_int(batch_dims, "batch_dims")
-        return tensor_operator_registry.get('gather')(self, input_indices, axis, batch_dims)
-
     def uniform(self, from_=0., to=1., generator=None):
         r"""
-        Generates random numbers
+        Generates random numbers that follow a uniform distribution within the half-open
+        interval :math:`[from\_, to)`.
+
+        .. math::
+            P(x)= \frac{1}{to - from\_}
 
         Args:
             from\_ (number): The lower bound of the interval.
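The density :math:`P(x) = 1/(to - from\_)` quoted above is the standard uniform pdf, so a draw on :math:`[from\_, to)` should have mean near :math:`(from\_ + to)/2`. A quick NumPy sanity check (illustrative, not part of the package):

```python
import numpy as np

rng = np.random.default_rng(0)
from_, to = 1.0, 2.0
sample = rng.uniform(from_, to, size=100_000)

print(sample.min() >= from_, sample.max() < to)  # True True
print(round(sample.mean(), 2))                   # ~1.5 == (from_ + to) / 2
```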
@@ -3341,132 +2450,85 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('uniform')(self, from_, to, generator)
 
-    def var(self, axis=None, ddof=0, keepdims=False):
-        """
-
-
-        The variance is the average of the squared deviations from the mean, i.e.,
-        :math:`var = mean(abs(x - x.mean())**2)`.
+    def uniform_(self, from_=0, to=1, *, generator=None):
+        r"""
+        Update the `self` Tensor in place by generating random numbers sampled from a uniform distribution in
+        the half-open interval :math:`[from\_, to)`.
 
-
-
+        .. math::
+            P(x)= \frac{1}{to - from\_}
 
-
-
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
 
         Args:
-
-
-
-
-
+            from\_ (Union[number.Number, Tensor], optional): The lower bound of the uniform distribution, it can be a
+                scalar value or a tensor of any dimension with a single element. Default: ``0``.
+            to (Union[number.Number, Tensor], optional): The upper bound of the uniform distribution, it can be a
+                scalar value or a tensor of any dimension with a single element. Default: ``1``.
+
+        Keyword Args:
+            generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
+                Default: ``None``, uses the default pseudorandom number generator.
 
         Returns:
-
+            Return `self` Tensor.
 
-
-
-
+        Raises:
+            TypeError: If `from_` or `to` is neither a number nor a Tensor.
+            TypeError: If dtype of `from_` or `to` is not one of: bool, int8, int16, int32, int64, uint8, float32,
+                float64.
+            ValueError: If `from_` or `to` is a Tensor but contains multiple elements.
+            RuntimeError: If `from_` is larger than `to`.
 
         Supported Platforms:
-            ``Ascend``
+            ``Ascend``
 
         Examples:
-            >>> import
-            >>>
-            >>>
-            >>>
-            >>>
-
+            >>> import mindspore
+            >>> x = mindspore.ops.ones((4, 2))
+            >>> generator = mindspore.Generator()
+            >>> generator.manual_seed(100)
+            >>> output = x.uniform_(1., 2., generator=generator)
+            >>> print(output.shape)
+            (4, 2)
         """
-
-            return Tensor(float('nan'), self.dtype)
-        if not isinstance(ddof, int):
-            raise TypeError("For 'Tensor.var', the type of the argument 'ddof' must be int, but got "
-                            "{}.".format(type(ddof)))
-        if not isinstance(keepdims, bool):
-            raise TypeError("For 'Tensor.var', the type of the argument 'keepdims' must be bool, but "
-                            "got {}.".format(type(keepdims)))
+        return tensor_operator_registry.get('uniform_')(self, from_=from_, to=to, generator=generator)
 
-        if axis is None:
-            axis = ()
-        else:
-            axis = validator.check_and_canonicalize_axes(axis, self.ndim)
-        x_mean = tensor_operator_registry.get('mean')(self, axis, True)
-        x_sub = tensor_operator_registry.get('__sub__')(self, x_mean)
-        x_pow = tensor_operator_registry.get('__pow__')(x_sub, 2)
-        x_sum = tensor_operator_registry.get('reducesum')(bool(keepdims))(x_pow, axis)
-        nums = 1
-        if axis == ():
-            nums = self.size
-        else:
-            for ax in axis:
-                nums *= self.shape[ax]
-        return tensor_operator_registry.get('__truediv__')(x_sum, nums - ddof)
 
-    def std(self, axis=None, ddof=0, keepdims=False):
-        """
-
-        """
-        x_var = self.var(axis, ddof, keepdims)
-        return tensor_operator_registry.get('__pow__')(x_var, 0.5)
+    def exponential_(self, lambd=1, *, generator=None):
+        r"""
+        Fills `self` tensor with elements drawn from the exponential distribution:
 
-
-
-        Return sum of tensor elements over a given axis.
+        .. math::
+            f(x) = \lambda \exp(-\lambda x)
 
-
-
-
+        .. warning::
+            - It is only supported on Atlas A2 Training Series Products.
+            - This is an experimental API that is subject to change or deletion.
 
         Args:
-
+            lambd (float, optional): Parameter of the exponential distribution. Default: ``1``.
+
+        Keyword Args:
+            generator (Generator, optional): a pseudorandom number generator.
                 Default: ``None`` .
-                If ``None`` , sum all the elements of the input tensor.
-                If the `axis` is negative, it counts from the last to the first `axis`.
-                If the `axis` is a tuple or list of ints, a sum is performed on all the axes specified in the tuple
-                or list instead of a single `axis` or all the axes as before.
-            dtype (:class:`mindspore.dtype`, optional): defaults to ``None`` . Overrides the dtype of the
-                output Tensor.
-            keepdims (bool): If this is set to ``True`` , the axes which are reduced are left in the result as
-                dimensions with size one. With this option, the result will broadcast correctly against the input
-                array. If the default value is passed, then `keepdims` will not be passed through to the sum method
-                of sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
-                implement `keepdims` any exceptions will be raised. Default: ``False`` .
-            initial (scalar): Starting value for the sum. Default: ``None`` .
 
         Returns:
-            Tensor
-            If the input tensor is a 0-d array, or if the `axis` is ``None`` , a scalar is returned.
-
-        Raises:
-            TypeError: If input is not array_like, or `axis` is not int, tuple of ints, list of ints or Tensor,
-                or `keepdims` is not integer, or `initial` is not scalar.
-            ValueError: If any `axis` is out of range or duplicate axes exist.
-
-        See also:
-            - :func:`mindspore.Tensor.cumsum`: Return the cumulative sum of the elements along a given `axis`.
+            Tensor, with the same shape and data type as the input.
 
         Supported Platforms:
-            ``Ascend``
+            ``Ascend``
 
         Examples:
-            >>> import
-            >>>
-            >>>
-            >>> print(
-
-
-
-        """
-        if initial is None:
-            res = tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype)
-        else:
-            res = tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype) + initial
-        if dtype is not None and (dtype == mstype.bool_):
-            res = res.astype(mstype.bool_)
-        return res
+            >>> import mindspore
+            >>> x = mindspore.Tensor([1, 2, 3.0])
+            >>> out = x.exponential_(2)
+            >>> print(out.shape)
+            (3,)
+        """
+        return tensor_operator_registry.get('exponential_')(self, lambd=lambd, generator=generator)
+
 
     def sum_to_size(self, *size):
         r"""
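The removed `var` implementation above computes :math:`mean(|x - \bar{x}|^2)` by hand and divides by `n - ddof`, with `std` defined as its square root. NumPy's `ddof` argument follows the same convention; a compact check (illustrative only):

```python
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])

# Population variance (ddof=0): mean of squared deviations, as in the removed code.
manual = np.mean(np.abs(x - x.mean()) ** 2)
print(manual, x.var(ddof=0))         # 1.25 1.25

# Sample variance (ddof=1) divides by n - 1 instead; std is its square root.
print(x.var(ddof=1), x.std(ddof=1))  # 1.666... 1.290...
```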
@@ -3512,12 +2574,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             return x.sum(tuple(axes), keepdims=True)
         return x
 
-    def nansum(self, axis=None, keepdims=False, dtype=None):
-        """
-        For details, please refer to :func:`mindspore.ops.nansum`.
-        """
-        return tensor_operator_registry.get('nansum')(self, axis=axis, keepdims=keepdims, dtype=dtype)
-
     def nanmean(self, axis=None, keepdims=False, *, dtype=None):
         r"""
         For details, please refer to :func:`mindspore.ops.nanmean`.
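`nansum` (removed here) and the surviving `nanmean` both reduce while ignoring NaNs, the same contract as NumPy's functions of the same names (illustrative only):

```python
import numpy as np

x = np.array([1.0, np.nan, 3.0])
print(np.nansum(x))   # 4.0 -- NaN treated as absent
print(np.nanmean(x))  # 2.0 -- mean over the two non-NaN values
```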
@@ -3530,91 +2586,51 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('nanmedian')(self, axis, keepdims)
 
-    def repeat(self, repeats, axis=None):
+    def bernoulli(self, *, generator=None):
+        r"""
+        For details, please refer to :func:`mindspore.mint.bernoulli`.
         """
-
+        return tensor_operator_registry.get('bernoulli')(self, generator=generator)
+
+    def random_(self, from_=0, to=None, *, generator=None):
+        r"""
+        Fill the tensor with numbers sampled from a discrete uniform distribution over the
+        interval :math:`[from\_, to-1]`.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
 
         Args:
-
-
-
-
+            from\_ (Union[number.Number, Tensor], optional): the lower bound of the generated random number.
+                It can be a scalar value or a Tensor of any dimension with only a single element. Default: 0.
+            to (Union[number.Number, Tensor], optional): the upper bound of the generated random number.
+                By default it's the upper limit of the input data type.
+                It can be a scalar value or a Tensor of any dimension with only a single element.
+                Default: ``None``.
+
+        Keyword Args:
+            generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
+                Default: ``None``, uses the default pseudorandom number generator.
 
         Returns:
-
+            The input tensor.
 
         Raises:
-
-
-
-        See also:
-            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
-            - :func:`mindspore.Tensor.resize`: Changes shape and size of tensor in-place.
+            TypeError: If `from_` or `to` is not an integer.
+            RuntimeError: If `from_` >= `to`.
 
         Supported Platforms:
-            ``Ascend``
+            ``Ascend``
 
         Examples:
-            >>> import numpy as np
             >>> from mindspore import Tensor
-            >>>
-            >>>
-
-            >>>
-
-            [1 1 2 2 3 3 4 4]
-            >>> print(x.repeat(3, axis=1))
-            [[1 1 1 2 2 2]
-             [3 3 3 4 4 4]]
-            >>> print(x.repeat([1,2], axis=0))
-            [[1 2]
-             [3 4]
-             [3 4]]
+            >>> a = Tensor([[2, 3, 4], [1, 2, 3]])
+            >>> from_ = 0
+            >>> to = 5
+            >>> print(a.random_(from_, to).shape)
+            (2, 3)
         """
-
-            repeats = (repeats,)
-        for index, element in enumerate(repeats):
-            if not isinstance(element, int):
-                raise TypeError(f"For 'Tensor.repeat', each element in {repeats} should be int, but got "
-                                f"{type(element)} at index {index}.")
-        input_x = self
-        if axis is None:
-            input_x = self.ravel()
-            axis = 0
-        if axis is not None and not isinstance(axis, int):
-            raise TypeError(f"For 'Tensor.repeat', the argument 'axis' should be int, but got {type(axis)}.")
-        validator.check_axis_in_range(axis, input_x.ndim)
-        axis = axis + input_x.ndim if axis < 0 else axis
-
-        if len(repeats) == 1:
-            repeats = repeats[0]
-            if repeats == 0:
-                return Tensor_(input_x.dtype, (0,))
-            return tensor_operator_registry.get('repeat_elements')(input_x, repeats, axis)
-        size = input_x.shape[axis]
-        if len(repeats) != size:
-            raise ValueError(f"For 'Tensor.repeat', the length of 'repeats' must be the same as the shape of the "
-                             f"original tensor in the 'axis' dimension, but got the length of 'repeats' "
-                             f"{len(repeats)}, the shape of the original tensor in the 'axis' dimension {size}.")
-        subs = tensor_operator_registry.get('tensor_split')(input_x, size, axis)
-        repeated_subs = []
-        for sub, rep in zip(subs, repeats):
-            if rep != 0:
-                repeated_subs.append(tensor_operator_registry.get('repeat_elements')(sub, rep, axis))
-        return tensor_operator_registry.get('concatenate')(repeated_subs, axis)
-
-    @repeat_interleave_mint
-    def repeat_interleave(self, repeats, dim=None):
-        """
-        For details, please refer to :func:`mindspore.ops.repeat_interleave`.
-        """
-        return tensor_operator_registry.get('repeat_interleave')(self, repeats, dim)
-
-    def bernoulli(self, p=0.5, seed=None):
-        r"""
-        For details, please refer to :func:`mindspore.ops.bernoulli`.
-        """
-        return tensor_operator_registry.get('bernoulli')(self, p, seed)
+        return tensor_operator_registry.get('random_')(self, from_=from_, to=to, generator=generator)
 
     def random_categorical(self, num_sample, seed=0, dtype=mstype.int64):
         r"""
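A discrete uniform draw over :math:`[from\_, to-1]`, as `random_` documents, corresponds to integer sampling with an exclusive upper bound; a NumPy sketch of the in-place fill (illustrative only):

```python
import numpy as np

rng = np.random.default_rng(0)
a = np.zeros((2, 3), dtype=np.int64)

# Fill in place with integers in [from_, to - 1], i.e. an exclusive upper bound of `to`.
from_, to = 0, 5
a[...] = rng.integers(from_, to, size=a.shape)
print(a.shape, a.min() >= from_, a.max() < to)  # (2, 3) True True
```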
@@ -3624,24 +2640,71 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         validator.check_is_int(seed, 'seed')
         return tensor_operator_registry.get('random_categorical')(self, num_sample, seed, dtype)
 
-    def masked_select(self, mask):
-        """
-        For details, please refer to :func:`mindspore.ops.masked_select`.
-        """
-        return tensor_operator_registry.get('masked_select')(self, mask)
-
     def gather_elements(self, dim, index):
         """
         For details, please refer to :func:`mindspore.ops.gather_elements`.
         """
-        validator.check_value_type('index', index, (Tensor, Tensor_,), 'Tensor.gather_elements')
+        validator.check_value_type('index', index, (Tensor, TensorPy_,), 'Tensor.gather_elements')
         return tensor_operator_registry.get('gather_elements')(self, dim, index)
 
-    def nonzero(self, as_tuple=False):
-        """
-
+    def nonzero(self, *, as_tuple=False):
+        r"""
+        Return the positions of all non-zero values.
+
+        Note:
+            Constraints on the rank of `self`:
+
+            - Ascend: its rank can be equal to 0, except in O2 mode.
+            - CPU/GPU: its rank should be greater than or equal to 1.
+
+        Keyword Args:
+            as_tuple (bool, optional): Whether the output is a tuple.
+                If ``False`` , return a Tensor. Default: ``False`` .
+                If ``True`` , return a Tuple of Tensors, only supported on ``Ascend`` .
+
+        Returns:
+            - If `as_tuple` is ``False``, return a 2-D Tensor whose data type is int64,
+              containing the positions of all non-zero values of `self` .
+            - If `as_tuple` is ``True``, return a Tuple of Tensors whose data type is int64.
+              The Tuple length is the dimension of the `self` tensor,
+              and each element is the 1-D tensor of the subscripts of all non-zero elements of
+              the `self` tensor in that dimension.
+
+        Raises:
+            TypeError: If `self` is not a Tensor.
+            TypeError: If `as_tuple` is not bool.
+            RuntimeError: On GPU or CPU or in Ascend O2 mode, if the dim of `input` equals 0.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import mindspore
+            >>> import numpy as np
+            >>> from mindspore import Tensor
+            >>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
+            >>> output = x.nonzero()
+            >>> print(output)
+            [[0 0 0]
+             [0 1 0]]
+            >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
+            >>> output = x.nonzero(as_tuple=False)
+            >>> print(output)
+            [[0]
+             [2]
+             [4]]
+            >>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
+            >>> output = x.nonzero(as_tuple=True)
+            >>> print(output)
+            (Tensor(shape=[2], dtype=Int64, value=[0, 0]),
+             Tensor(shape=[2], dtype=Int64, value=[0, 1]),
+             Tensor(shape=[2], dtype=Int64, value=[0, 0]))
+            >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
+            >>> output = x.nonzero(as_tuple=True)
+            >>> print(output)
+            (Tensor(shape=[3], dtype=Int64, value=[0, 2, 4]), )
         """
-        return tensor_operator_registry.get('nonzero')(self, as_tuple)
+        return tensor_operator_registry.get('nonzero')(self, as_tuple=as_tuple)
 
     def svd(self, full_matrices=False, compute_uv=True):
         """
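NumPy splits the same two behaviors across `np.argwhere` (a 2-D index matrix, like `as_tuple=False`) and `np.nonzero` (a tuple of per-dimension index vectors, like `as_tuple=True`); for comparison (illustrative only):

```python
import numpy as np

x = np.array([1, 0, 2, 0, 3])
print(np.argwhere(x))  # [[0] [2] [4]]        -- matrix form, as_tuple=False
print(np.nonzero(x))   # (array([0, 2, 4]),)  -- tuple form, as_tuple=True
```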
@@ -3654,12 +2717,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         s, _, _ = svd_op(full_matrices, compute_uv)(self)
         return s
 
-    def hardshrink(self, lambd=0.5):
-        r"""
-        For details, please refer to :func:`mindspore.ops.hardshrink`.
-        """
-        return tensor_operator_registry.get('hardshrink')(self, lambd)
-
     def heaviside(self, values):
         r"""
         For details, please refer to :func:`mindspore.ops.heaviside`.
@@ -3775,13 +2832,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(out2)
             1
         """
-
+        if self.ndim == 1 and self.size == 0:
+            return []
+        return self._tolist()
 
-    def unbind(self, dim=0):
-        r"""
-        For details, please refer to :func:`mindspore.ops.unbind`.
-        """
-        return tensor_operator_registry.get('unbind')(self, dim)
 
     def unsorted_segment_min(self, segment_ids, num_segments):
         r"""
@@ -3801,14 +2855,15 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('unsorted_segment_prod')(self, segment_ids, num_segments)
 
-    def unique_consecutive(self,
+    def unique_consecutive(self, return_inverse=False, return_counts=False, dim=None):
         """
         For details, please refer to :func:`mindspore.ops.unique_consecutive`.
         """
-        output, idx, counts
-
+        output, idx, counts = \
+            tensor_operator_registry.get("unique_consecutive")(return_inverse, return_counts, dim)(self)
+        if return_inverse and return_counts:
             return output, idx, counts
-        if
+        if return_inverse:
             return output, idx
         if return_counts:
             return output, counts
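The wrapper above always computes all three outputs and then trims the tuple according to the two flags. A NumPy sketch of the consecutive-duplicate collapse itself, for 1-D input (illustrative; `dim` handling omitted):

```python
import numpy as np

def unique_consecutive_np(x):
    """Collapse runs of equal neighbors; return (values, inverse, counts) as in the 1-D case."""
    x = np.asarray(x)
    # True at the start of every run of equal consecutive elements.
    starts = np.concatenate(([True], x[1:] != x[:-1]))
    values = x[starts]
    inverse = np.cumsum(starts) - 1  # which run each element belongs to
    counts = np.diff(np.append(np.flatnonzero(starts), x.size))
    return values, inverse, counts

v, inv, cnt = unique_consecutive_np([1, 1, 2, 2, 3, 1, 1, 2])
print(v)    # [1 2 3 1 2]
print(inv)  # [0 0 1 1 2 3 3 4]
print(cnt)  # [2 2 1 2 1]
```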
@@ -3820,12 +2875,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get("unique_with_pad")(self, pad_num)
 
-    def diag(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.diag`.
-        """
-        return tensor_operator_registry.get('diag')(self)
-
     def diagflat(self, offset=0):
         r"""
         For details, please refer to :func:`mindspore.ops.diagflat`.
@@ -3838,13 +2887,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get("xdivy")(self, y)
 
-    @split_mint
-    def split(self, split_size_or_sections, axis=0):
-        """
-        For details, please refer to :func:`mindspore.ops.split`.
-        """
-        return tensor_operator_registry.get('split')(self, split_size_or_sections, axis)
-
     def tensor_split(self, indices_or_sections, axis=0):
         """
         For details, please refer to :func:`mindspore.ops.tensor_split`.
@@ -3870,12 +2912,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('dsplit')(self, indices_or_sections)
 
-    def xlogy(self, y):
-        r"""
-        For details, please refer to :func:`mindspore.ops.xlogy`.
-        """
-        return tensor_operator_registry.get("xlogy")(self, y)
-
     def eigvals(self):
         r"""
         For details, please refer to :func:`mindspore.ops.eigvals`.
@@ -3885,30 +2921,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get("eigvals")()(self)
 
-    def erf(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.erf`.
-        """
-        return tensor_operator_registry.get("erf")(self)
-
-    def erfc(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.erfc`.
-        """
-        return tensor_operator_registry.get("erfc")(self)
-
-    def tile(self, reps):
-        r"""
-        For details, please refer to :func:`mindspore.ops.tile`.
-        """
-        return tensor_operator_registry.get('tile')(self, reps)
-
-    def topk(self, k, dim=None, largest=True, sorted=True):
-        r"""
-        For details, please refer to :func:`mindspore.ops.topk`.
-        """
-        return tensor_operator_registry.get("topk")(self, k, dim, largest, sorted)
-
     def top_k(self, k, sorted=True):
         r"""
         `Tensor.top_k` is deprecated, please use `Tensor.topk` instead.
@@ -3917,55 +2929,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         validator.check_bool(sorted, 'sorted')
         return tensor_operator_registry.get("top_k")(self, k, sorted)
 
-    def sigmoid(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.sigmoid`.
-        """
-        return tensor_operator_registry.get("sigmoid")(self)
-
-    def median(self, axis=-1, keepdims=False):
-        r"""
-        For details, please refer to :func:`mindspore.ops.median`.
-        """
-        validator.check_axis_in_range(axis, self.ndim)
-        return tensor_operator_registry.get('median')(False, axis, keepdims)(self)
-
-    def addmv(self, mat, vec, beta=1, alpha=1):
-        r"""
-        For details, please refer to :func:`mindspore.ops.addmv`.
-        """
-        return tensor_operator_registry.get('addmv')(self, mat, vec, beta=beta, alpha=alpha)
-
-    def asinh(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.asinh`.
-        """
-        return tensor_operator_registry.get('asinh')(self)
-
-    def arcsinh(self):
-        r"""
-        Alias for :func:`mindspore.Tensor.asinh`.
-        """
-        return tensor_operator_registry.get('arcsinh')(self)
-
-    def atan(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.atan`.
-        """
-        return tensor_operator_registry.get('atan')(self)
-
-    def atanh(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.atanh`.
-        """
-        return tensor_operator_registry.get('atanh')(self)
-
-    def arctanh(self):
-        r"""
-        Alias for :func:`mindspore.Tensor.atanh`.
-        """
-        return tensor_operator_registry.get('arctanh')(self)
-
     def bmm(self, mat2):
         r"""
         For details, please refer to :func:`mindspore.ops.bmm`.
@@ -3976,8 +2939,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         Performs tensor dtype conversion.
 
+        Note:
+            - If the `self` Tensor already has the correct `mindspore.dtype`, then `self` is returned.
+              Otherwise, the returned tensor is a copy of `self` with the desired `mindspore.dtype`.
+            - When converting complex numbers to boolean type, the imaginary part of the complex number is not
+              taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.
+
         Args:
-            dtype (Number): The valid data type of the output tensor. Only constant value is allowed.
+            dtype (dtype.Number): The valid data type of the output tensor. Only constant value is allowed.
 
         Returns:
             Tensor, converted to the specified `dtype`.
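The note above (return `self` unchanged when the dtype already matches, otherwise copy and convert) is the short-circuit that the one-line implementation in the next hunk encodes. A sketch of that pattern on a toy container (illustrative, not the package's code):

```python
import numpy as np

class ArrayBox:
    """Toy container showing the astype short-circuit: no copy when dtype already matches."""
    def __init__(self, data):
        self.data = np.asarray(data)

    def astype(self, dtype):
        if self.data.dtype == dtype:
            return self  # same object back, zero cost
        return ArrayBox(self.data.astype(dtype))  # otherwise a converted copy

x = ArrayBox(np.ones(3, dtype=np.float32))
print(x.astype(np.float32) is x)  # True  -- dtype matched, self returned
print(x.astype(np.int32) is x)    # False -- converted copy
```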
@@ -3999,7 +2968,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(output.dtype)
             Int32
         """
-        return
+        return self if self.dtype == dtype else self._to(dtype)
 
     def type(self, dtype=None):
         r"""
@@ -4029,29 +2998,49 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             return str(self.dtype)
         return self.astype(dtype)
 
+
     def type_as(self, other):
         r"""
-
+        Returns `self` tensor cast to the type of the given `other` tensor.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+
+        Note:
+            When converting complex numbers to boolean type, the imaginary part of the complex number is not
+            taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.
 
         Args:
-            other (Tensor): The
+            other (Tensor): The tensor whose data type is specified.
+                The shape of the tensor is :math:`(x_0, x_1, ..., x_R)`.
 
         Returns:
-            Tensor,
+            Tensor, the shape of the tensor is the same as `self`, :math:`(x_0, x_1, ..., x_R)`.
+
+        Raises:
+            TypeError: If `other` is not a Tensor.
 
         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import mindspore
+            >>> import numpy as np
             >>> from mindspore import Tensor
-            >>>
-            >>>
-            >>>
-            >>>
+            >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
+            >>> self = Tensor(input_np)
+            >>> other_np = np.random.randn(2, 3, 4).astype(np.int32)
+            >>> other = Tensor(other_np)
+            >>> output = self.type_as(other)
+            >>> print(output.dtype)
             Int32
+            >>> print(output.shape)
+            (2, 3, 4, 5)
         """
-
+        if self.dtype == other.dtype:
+            return self
+        return TensorPy_.type_as(self, other)
+
 
     def bool(self):
         r"""
@@ -4073,14 +3062,56 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(output.dtype)
             Bool
         """
-        return
+        return self.to(mstype.bool_)
+
+    def float(self):
+        r"""
+        Converts input tensor dtype to `float32`.
+
+        Returns:
+            Tensor, converted to the `float32` dtype.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
+            >>> input_x = Tensor(np.ones([2,2]), mindspore.int32)
+            >>> output = input_x.float()
+            >>> print(output.dtype)
+            Float32
+        """
+        return self.to(mstype.float32)
+
+    def bfloat16(self):
+        r"""
+        Converts input tensor dtype to `bfloat16`.
+
+        Returns:
+            Tensor, converted to the `bfloat16` dtype.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
+            >>> input_x = Tensor(np.ones([2,2]), mindspore.int32)
+            >>> output = input_x.bfloat16()
+            >>> print(output.dtype)
+            BFloat16
+        """
+        return self.to(mstype.bfloat16)
 
-    def
+    def double(self):
         r"""
-        Converts input tensor dtype to `
+        Converts input tensor dtype to `float64`.
 
         Returns:
-            Tensor, converted to the `
+            Tensor, converted to the `float64` dtype.
 
         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
@@ -4090,11 +3121,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> import mindspore
             >>> from mindspore import Tensor
             >>> input_x = Tensor(np.ones([2,2]), mindspore.int32)
-            >>> output = input_x.
+            >>> output = input_x.double()
             >>> print(output.dtype)
-
+            Float64
         """
-        return
+        return self.to(mstype.float64)
 
     def half(self):
         r"""
@@ -4115,7 +3146,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(output.dtype)
             Float16
         """
-        return
+        return self.to(mstype.float16)
 
     def int(self):
         r"""
@@ -4136,7 +3167,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(output.dtype)
             Int32
         """
-        return
+        return self.to(mstype.int32)
 
     def byte(self):
         r"""
@@ -4155,9 +3186,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> input_x = Tensor(np.ones([2,2]), mindspore.float32)
             >>> output = input_x.byte()
             >>> print(output.dtype)
-
+            UInt8
         """
-        return
+        return self.to(mstype.uint8)
 
     def long(self):
         r"""
@@ -4178,7 +3209,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(output.dtype)
             Int64
         """
-        return
+        return self.to(mstype.int64)
 
     def short(self):
         r"""
@@ -4200,7 +3231,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> output
             Tensor(shape=[5], dtype=Int16, value= [1, 2, 3, 4, 5])
         """
-        return
+        return self.to(mstype.int16)
 
     def cholesky(self, upper=False):
         r"""
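Every cast helper in the hunks above (`bool`, `float`, `bfloat16`, `double`, `half`, `int`, `byte`, `long`, `short`) now reduces to a single `Tensor.to(mstype.<dtype>)` call. A sketch exercising the family as documented, assuming a MindSpore build that provides these methods:

```python
# Hedged sketch: all of these helpers are thin wrappers over Tensor.to().
import numpy as np
import mindspore
from mindspore import Tensor

x = Tensor(np.ones([2, 2]), mindspore.int32)
for name in ("float", "double", "half", "int", "long", "short", "byte", "bool"):
    out = getattr(x, name)()        # e.g. x.float() -> x.to(mstype.float32)
    print(name, "->", out.dtype)    # Float32, Float64, Float16, Int32, ...
```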
@@ -4210,7 +3241,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def cholesky_inverse(self, upper=False):
         r"""
-
+        This interface is deprecated from version 2.4 and will be removed in a future version.
         """
         return tensor_operator_registry.get('cholesky_inverse')(self, upper=upper)
 
@@ -4229,12 +3260,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('conj')(self)
 
-    def count_nonzero(self, axis=(), keep_dims=False, dtype=mstype.int32):
-        r"""
-        For details, please refer to :func:`mindspore.ops.count_nonzero`.
-        """
-        return tensor_operator_registry.get('count_nonzero')(self, axis, keep_dims, dtype)
-
     def cross(self, other, dim=None):
         r"""
         For details, please refer to :func:`mindspore.ops.cross`.
@@ -4247,11 +3272,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('erfinv')(self)
 
-    def
+    def erfinv_(self):
         r"""
-
+        In-place version of erfinv(), for details, please refer to :func:`mindspore.ops.erfinv`.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
         """
-        return tensor_operator_registry.get('
+        return tensor_operator_registry.get('erfinv_')(self)
 
     def lcm(self, other):
         r"""
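`erfinv_` follows the trailing-underscore convention this release uses for in-place variants. A hedged sketch of the out-of-place vs. in-place split, assuming both methods behave as the docstring describes (the API is flagged experimental):

```python
# Hedged sketch: erfinv() returns a new tensor, erfinv_() mutates in place.
import mindspore
from mindspore import Tensor

x = Tensor([0.0, 0.5, -0.5], mindspore.float32)
y = x.erfinv()   # out-of-place: x is unchanged
x.erfinv_()      # in-place: x now holds erfinv of its old values
print(y)
print(x)
```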
@@ -4284,6 +3312,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def expand(self, size):
         r"""
         For details, please refer to :func:`mindspore.ops.broadcast_to`.
+        The parameter `size` of the current interface is the same as the parameter `shape` of the reference interface.
         """
         if isinstance(size, Tensor):
             size = tensor_operator_registry.get('tensortotuple')()(size)
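The added note maps `expand(size)` onto `ops.broadcast_to(shape)`. The same broadcasting semantics, sketched with NumPy (variable names here are illustrative only):

```python
# Hedged sketch: Tensor.expand(size) has broadcast_to semantics.
import numpy as np

x = np.array([[1.0], [2.0], [3.0]])        # shape (3, 1)
expanded = np.broadcast_to(x, (3, 4))      # `size` plays the role of `shape`
print(expanded.shape)                      # (3, 4)
```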
@@ -4297,59 +3326,80 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def multiply(self, value):
         r"""
-        For details, please refer to :func:`mindspore.ops.
+        For details, please refer to :func:`mindspore.ops.mul`.
+        The parameter `value` of the current interface is the same as the parameter `other` of the reference interface.
         """
         return tensor_operator_registry.get('multiply')(self, value)
 
-    def div(self, value, *, rounding_mode=None):
-        r"""
-        For details, please refer to :func:`mindspore.ops.div`.
-        """
-        return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)
-
-    def divide(self, value, *, rounding_mode=None):
-        r"""
-        Alias for :func:`mindspore.Tensor.div`.
-        """
-        return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)
-
-    def eq(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.eq`.
-        """
-        return tensor_operator_registry.get('equal')(self, other)
-
     def equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.equal`.
         """
         return tensor_operator_registry.get('equal')(self, other)
 
-    def
+    def index_add_(self, dim, index, source, *, alpha=1):
         r"""
-
-
-
+        Accumulate the elements of `alpha` times `source` into the `self` by adding to the index
+        in the order given in `index`. For example, if `dim == 0`, `index[i] == j`, and `alpha = -1`,
+        then the `i` th row of `source` is subtracted from the `j` th row of `self` .
+        The `dim` th dimension of `source` must have the same size as the length of `index` ,
+        and all other dimensions must match `self`, or an error will be raised.
+        For a 3-D tensor the output is defined as follows:
 
-
-
-
-
-
-
-        return tensor_operator_registry.get('index_add')(self, indices=index, y=source, axis=dim)
+        .. math::
+            \begin{array}{ll}
+                self[index[i],\ :,\ :]\ +=\ alpha * src[i,\ :,\ :] \qquad \#if\ dim == 0 \\
+                self[:,\ \ index[i],\ :]\ +=\ alpha * src[:,\ \ i,\ :] \qquad \#if\ dim == 1 \\
+                self[:,\ :,\ \ index[i]]\ +=\ alpha * src[:,\ :,\ \ i] \qquad\#if\ dim == 2 \\
+            \end{array}
 
-
-
-        For details, please refer to :func:`mindspore.ops.greater`.
-        """
-        return tensor_operator_registry.get('greater')(self, other)
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
 
-
-
-
+        Args:
+            dim (int): The dimension along which to index.
+            index (Tensor): Add the value of `self` and `source` along the dimension of the `dim` according to
+                the specified index value, with data type int32. The `index` must be 1D with the same size as
+                the size of `source` in the `dim` dimension. The values of `index` should be in [0, b),
+                where the b is the size of `self` in the `dim` dimension.
+            source (Tensor): The input tensor with the value to add. Must have same data type as `self`.
+                The shape must be the same as `self` except the `dim` th dimension.
+
+        Keyword Args:
+            alpha (number, optional): The scalar multiplier for source. Default: ``1``.
+
+        Returns:
+            Tensor, has the same shape and dtype as `self`.
+
+        Raises:
+            TypeError: If neither `index` nor `source` is a Tensor.
+            ValueError: If dim is out of `self` rank's range.
+            ValueError: If `self` rank is not the same as `source` rank.
+            ValueError: If shape of `index` is not 1D or size of `index` is not equal to dimension
+                of `source[dim]`.
+            ValueError: If `source`'s shape is not the same as `self` except the `dim` th dimension.
+
+        Supported Platforms:
+            ``Ascend``
+
+        Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
+            >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
+            >>> index = Tensor(np.array([0, 2]), mindspore.int32)
+            >>> y = Tensor(np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]]), mindspore.float32)
+            >>> output = x.index_add_(1, index, y, alpha=1)
+            >>> print(output)
+            [[ 1.5  2.   4. ]
+             [ 5.   5.   7.5]
+             [ 9.   8.  11.5]]
+            >>> print(x)
+            [[ 1.5  2.   4. ]
+             [ 5.   5.   7.5]
+             [ 9.   8.  11.5]]
         """
-        return tensor_operator_registry.get('
+        return tensor_operator_registry.get('index_add_')(self, dim, index, source, alpha)
 
     def igamma(self, other):
         r"""
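The math block above can be checked off-device: `index_add_` with `dim == 1` is NumPy's `np.add.at` over a column index. A sketch reproducing the doctest output:

```python
# Hedged sketch: index_add_ semantics via np.add.at (illustrative only; the
# real method dispatches through tensor_operator_registry).
import numpy as np

x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
index = np.array([0, 2])    # target columns (the dim == 1 case)
src = np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]], dtype=np.float32)
alpha = 1

# self[:, index[i]] += alpha * src[:, i]
np.add.at(x, (slice(None), index), alpha * src)
print(x)
# [[ 1.5  2.   4. ]
#  [ 5.   5.   7.5]
#  [ 9.   8.  11.5]]
```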
@@ -4363,18 +3413,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('igammac')(self, other)
 
-    def isinf(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.isinf`.
-        """
-        return tensor_operator_registry.get('isinf')(self)
-
-    @isnan_mint
     def isnan(self):
         r"""
-        For details, please refer to :func:`mindspore.ops.
+        For details, please refer to :func:`mindspore.ops.ne`.
         """
-        return
+        return self.ne(self)
 
     def flip(self, dims):
         """
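The new `isnan` body, `self.ne(self)`, leans on IEEE 754: NaN compares unequal to everything, itself included, so `x != x` is True exactly at NaN positions. A NumPy sketch of the equivalence:

```python
# Hedged sketch: why isnan() can be implemented as self.ne(self).
import numpy as np

x = np.array([1.0, float("nan"), float("inf")])
print(x != x)          # [False  True False]
print(np.isnan(x))     # [False  True False] -- identical
```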
@@ -4400,74 +3443,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('is_floating_point')(self)
 
-    def is_signed(self):
-        """
-        Judge whether the data type of tensor is a signed data type.
-
-        Returns:
-            Bool. If the dtype of `self` is a signed data type, return True. Otherwise, return False.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import mindspore as ms
-            >>> x = ms.Tensor([1, 2, 3], ms.int64)
-            >>> y = ms.Tensor([1, 2, 3], ms.uint64)
-            >>> output = x.is_signed()
-            >>> output2 = y.is_signed()
-            >>> print(output)
-            True
-            >>> print(output2)
-            False
-        """
-        return self.dtype in mstype.signed_type
-
-    def le(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.le`.
-        """
-        return tensor_operator_registry.get('le')(self, other)
-
-    def less(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.less`.
-        """
-        return tensor_operator_registry.get('less')(self, other)
-
-    def lt(self, other):
-        """
-        Alias for :func:`mindspore.Tensor.less`.
-        """
-        return self.less(other)
-
-    def logical_and(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.logical_and`.
-        """
-        return tensor_operator_registry.get('logical_and')(self, other)
-
-    def logical_not(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.logical_not`.
-        """
-        return tensor_operator_registry.get('logical_not')(self)
-
-    def logical_or(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.logical_or`.
-        """
-        return tensor_operator_registry.get('logical_or')(self, other)
-
-    def logical_xor(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.logical_xor`.
-        """
-        return tensor_operator_registry.get('logical_xor')(self, other)
-
     def lstsq(self, A):
         r"""
-
+        This interface is deprecated from version 2.4 and will be removed in a future version.
         """
         return tensor_operator_registry.get('lstsq')(self, A)
 
@@ -4476,6 +3454,15 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         Accessing this property is equivalent to Calling self.adjoint().
         For details, please refer to :func:`mindspore.ops.adjoint`.
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> import numpy as np
+            >>> x = Tensor(np.array([[0. + 0.j, 1. + 1.j], [2. + 2.j, 3. + 3.j]]))
+            >>> output = x.mH
+            >>> print(output)
+            [[0.-0.j 2.-2.j]
+             [1.-1.j 3.-3.j]]
         """
         return self.adjoint()
 
@@ -4485,6 +3472,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Returns the Tensor that exchanges the last two dimensions.
         Accessing the attribute, x.mT, is equal to calling the method, x.swapaxes(-2, -1).
         For details, please refer to :func:`mindspore.Tensor.swapaxes`.
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> import numpy as np
+            >>> x = Tensor(np.ones((2, 3, 4)))
+            >>> output = x.mT
+            >>> print(output.shape)
+            (2, 4, 3)
         """
         return self.swapaxes(-2, -1)
 
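The two property hunks above (`mH`, `mT`) differ only by a conjugation. A NumPy sketch of both, matching the `mH` doctest output:

```python
# Hedged sketch: mT swaps the last two axes; mH additionally conjugates.
import numpy as np

x = np.array([[0 + 0j, 1 + 1j], [2 + 2j, 3 + 3j]])
mT = np.swapaxes(x, -2, -1)   # plain transpose of the last two dims
mH = np.conj(mT)              # conjugate transpose (adjoint)
print(mH)
# [[0.-0.j 2.-2.j]
#  [1.-1.j 3.-3.j]]
```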
@@ -4494,12 +3489,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('mvlgamma')(self, p)
 
-    def matmul(self, tensor2):
-        r"""
-        For details, please refer to :func:`mindspore.ops.matmul`.
-        """
-        return tensor_operator_registry.get('matmul')(self, tensor2)
-
     def inner(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.inner`.
@@ -4514,95 +3503,16 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def matrix_power(self, n):
         r"""
-
-
-        .. warning::
-            This is an experimental API that is subject to change or deletion.
-
+        This interface is deprecated from version 2.4 and will be removed in a future version.
         """
         return tensor_operator_registry.get('matrix_power')(self, n)
 
-    def maximum(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.maximum`.
-        """
-        return tensor_operator_registry.get('maximum')(self, other)
-
-    def mm(self, mat2):
-        r"""
-        For details, please refer to :func:`mindspore.ops.mm`.
-        """
-        return tensor_operator_registry.get('mm')(self, mat2)
-
     def msort(self):
         r"""
         For details, please refer to :func:`mindspore.ops.msort`.
         """
         return tensor_operator_registry.get('msort')(self)
 
-    def mul(self, value):
-        r"""
-        For details, please refer to :func:`mindspore.ops.mul`.
-        """
-        return tensor_operator_registry.get('mul')(self, value)
-
-    def nan_to_num(self, nan=None, posinf=None, neginf=None):
-        """
-        For details, please refer to :func:`mindspore.ops.nan_to_num`.
-        """
-        return tensor_operator_registry.get('nan_to_num')(self, nan, posinf, neginf)
-
-    def neg(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.neg`.
-        """
-        return tensor_operator_registry.get('neg')(self)
-
-    def ne(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.ne`.
-        """
-        return tensor_operator_registry.get('ne')(self, other)
-
-    def not_equal(self, other):
-        r"""
-        For details, please refer to :func:`mindspore.ops.not_equal`.
-        """
-        return tensor_operator_registry.get('not_equal')(self, other)
-
-    def new_zeros(self, size, dtype=None):
-        r"""
-        Return a tensor of `size` filled with zeros.
-
-        .. warning::
-            For argument `size`, Tensor type input will be deprecated in the future version.
-
-        Args:
-            size (Union[int, tuple, list, Tensor]): An int, list or tuple of integers defining the output shape.
-            dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned tensor has
-                thesame dtype as `self`. Default: ``None``.
-
-        Returns:
-            Tensor, the shape and dtype is defined above and filled with zeros.
-
-        Raises:
-            TypeError: If `size` is neither an int nor an tuple/list/Tensor of int.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import numpy as np
-            >>> import mindspore
-            >>> from mindspore import Tensor
-            >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
-            >>> output = x.new_zeros((2, 2))
-            >>> print(output)
-            [[0. 0.]
-             [0. 0.]]
-        """
-        return tensor_operator_registry.get('zeros')(size, dtype)
-
     def zero_(self):
         r"""
         Return a tensor filled with zeros.
@@ -4623,43 +3533,52 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> x = Tensor(np.array([2, 2]))
             >>> output = x.zero_()
             >>> print(output)
-            [
-            [0. 0.]]
+            [0 0]
         """
         return tensor_operator_registry.get('zero_')(self)
 
-    def
+    def new_empty(self, size, *, dtype=None, device=None):
         r"""
-
+        Returns an uninitialized Tensor of `size`. Its dtype is specified by `dtype` and its
+        device is specified by `device`.
 
         .. warning::
-
+            This is an experimental API that is subject to change or deletion.
 
         Args:
-            size (Union[int,
-
-
+            size (Union[tuple[int], list[int], int]): The specified shape of output tensor. Only positive integer or
+                tuple or list containing positive integers are allowed.
+
+        Keyword Args:
+            dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype = None`,
+                the tensor will have the same dtype as `self`. Default ``None``.
+            device (string, optional): The specified device of the output tensor. Support ``CPU`` and ``Ascend``. If
+                `device = None`, the tensor will have the same device as `self` and if the device of `self` is not
+                defined, the value set by :func:`mindspore.set_device` will be used. Default ``None``.
 
         Returns:
-            Tensor, the shape and
+            Tensor, the shape, dtype and device is defined above but with uninitialized data (May be a random value).
 
         Raises:
-            TypeError: If `size` is neither an int nor
+            TypeError: If `size` is neither an int nor a tuple or list of int.
 
         Supported Platforms:
-            ``Ascend``
+            ``Ascend``
 
         Examples:
-            >>> import numpy as np
             >>> import mindspore
             >>> from mindspore import Tensor
-            >>> x = Tensor(
-            >>>
-            >>> print(
-            [[
-            [
+            >>> x = Tensor([[1, 2, 3], [4, 5, 6]])
+            >>> output1 = x.new_empty((2, 3))
+            >>> print(output1)
+            [[0 0 0]
+             [0 0 0]]
+            >>> output2 = x.new_empty((2, 3), dtype=mindspore.float64)
+            >>> print(output2)
+            [[0. 0. 0.]
+             [0. 0. 0.]]
         """
-        return tensor_operator_registry.get('
+        return tensor_operator_registry.get('new_empty')(self, size, dtype, device)
 
     def sign(self):
         r"""
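`new_empty` allocates without initializing, which is why the docstring warns the contents "may be a random value". The NumPy analogue, sketched for illustration:

```python
# Hedged sketch: np.empty mirrors new_empty -- contents are arbitrary
# until written.
import numpy as np

x = np.array([[1, 2, 3], [4, 5, 6]])
out = np.empty((2, 3), dtype=x.dtype)        # like x.new_empty((2, 3))
out64 = np.empty((2, 3), dtype=np.float64)   # like dtype=mindspore.float64
print(out.shape, out.dtype, out64.dtype)     # e.g. (2, 3) int64 float64
```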
@@ -4679,48 +3598,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('sgn')(self)
 
-    def sin(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.sin`.
-        """
-        return tensor_operator_registry.get('sin')(self)
-
-    def sinc(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.sinc`.
-        """
-        return tensor_operator_registry.get('sinc')(self)
-
-    def sinh(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.sinh`.
-        """
-        return tensor_operator_registry.get('sinh')(self)
-
-    def sort(self, axis=-1, descending=False):
-        r"""
-        For details, please refer to :func:`mindspore.ops.sort`.
-        """
-        return tensor_operator_registry.get('sort')(self, axis=axis, descending=descending)
-
-    def argsort(self, axis=-1, descending=False):
-        """
-        For details, please refer to :func:`mindspore.ops.argsort`.
-        """
-        return tensor_operator_registry.get('argsort')(self, axis, descending)
-
-    def trunc(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.trunc`.
-        """
-        return tensor_operator_registry.get('trunc')(self)
-
-    def where(self, condition, y):
-        r"""
-        For details, please refer to :func:`mindspore.ops.where`.
-        """
-        return tensor_operator_registry.get('where')(condition, self, y)
-
     def imag(self):
         r"""
         For details, please refer to :func:`mindspore.ops.imag`.
@@ -4729,13 +3606,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def quantile(self, q, axis=None, keepdims=False):
         r"""
-
+        This interface is deprecated from version 2.4 and will be removed in a future version.
         """
         return tensor_operator_registry.get('quantile')(self, q, axis, keepdims)
 
     def nanquantile(self, q, axis=None, keepdims=False):
         """
-
+        This interface is deprecated from version 2.4 and will be removed in a future version.
         """
         return tensor_operator_registry.get('nanquantile')(self, q, axis, keepdims)
 
@@ -4762,7 +3639,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def qr(self, some=True):
         r"""
-
+        This interface is deprecated from version 2.4 and will be removed in a future version.
         """
         validator.check_value_type('some', some, bool, 'Tensor.qr')
         return tensor_operator_registry.get('qr')(self, 'reduced' if some else 'complete')
@@ -4776,7 +3653,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def masked_scatter(self, mask, x):
         r"""
-
+        Updates the value in the "self Tensor" with the `tensor` value according to the mask, and returns a Tensor.
         The shape of `mask` and the "self Tensor" must be the same or `mask` is broadcastable.
 
         .. warning::
@@ -4816,35 +3693,39 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def index_put(self, indices, values, accumulate=False):
         r"""
-
-
+        Based on the indices in `indices`, replace the corresponding elements in Tensor `self`
+        with the values in `values`. Outplace version of :func:`mindspore.Tensor.index_put_`.
+
+        .. warning::
+            The behavior is unpredictable in the following scenario:
+
+            - If `accumulate` is `False` and `indices` contains duplicate elements.
 
         Args:
-            indices (tuple[Tensor], list[Tensor]): the indices of type int32 or int64, used to index into the
-
+            indices (tuple[Tensor], list[Tensor]): the indices of type int32 or int64, used to index into the `self`.
+                The rank of tensors in indices should be 1-D, size of indices should <= `self.rank`
                 and the tensors in indices should be broadcastable.
-            values (Tensor): 1-D Tensor
-            accumulate (bool): If `accumulate` is True
-
+            values (Tensor): 1-D Tensor with the same type as `self`. `values` should be broadcastable with size 1.
+            accumulate (bool, optional): If `accumulate` is `True`, the elements in `values` will be added to `self`,
+                otherwise the elements in `values` will replace the corresponding elements in the `self`.
                 Default: ``False``.
 
         Returns:
            Tensor, with the same type and shape as the "self Tensor".
 
         Raises:
-            TypeError: If the dtype of the
+            TypeError: If the dtype of the `self` is not equal to the dtype of `values`.
             TypeError: If the dtype of `indices` is not tuple[Tensor], list[Tensor].
             TypeError: If the dtype of tensors in `indices` are not int32 or int64.
            TypeError: If the dtype of tensors in `indices` are inconsistent.
            TypeError: If the dtype of `accumulate` is not bool.
            ValueError: If rank(`values`) is not 1-D.
            ValueError: If size(`values`) is not 1 or max size of the tensors in `indices` when
-                rank(
-            ValueError: If size(`values`) is not 1 or
-                rank("self Tensor") > size(`indices`).
+                rank(`self`) == size(`indices`).
+            ValueError: If size(`values`) is not 1 or `self`.shape[-1] when rank(`self`) > size(`indices`).
            ValueError: If the rank of tensors in `indices` is not 1-D.
            ValueError: If the tensors in `indices` is not be broadcastable.
-            ValueError: If size(`indices`) > rank(
+            ValueError: If size(`indices`) > rank(`self`).
 
         Supported Platforms:
             ``Ascend`` ``CPU``
@@ -4866,6 +3747,60 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         _index_put = tensor_operator_registry.get('index_put')(0 if accumulate is False else 1)
         return _index_put(self, values, indices)
 
+    def index_put_(self, indices, values, accumulate=False):
+        r"""
+        Based on the indices in `indices`, replace the corresponding elements in Tensor `self` with the values
+        in `values`. The expression `Tensor.index_put_(indices, values)` is equivalent to `tensor[indices] = values`.
+        Update and return `self`.
+
+        .. warning::
+            The behavior is unpredictable in the following scenario:
+
+            - If `accumulate` is `False` and `indices` contains duplicate elements.
+
+        Args:
+            indices (tuple[Tensor], list[Tensor]): the indices of type is bool, uint8, int32 or int64,
+                used to index into the `self`. The size of indices should <= the rank of `self`
+                and the tensors in indices should be broadcastable.
+            values (Tensor): Tensor with the same type as `self`. If size == 1, it will be broadcastable.
+            accumulate (bool, optional): If `accumulate` is `True`, the elements in `values` will be added to `self`,
+                otherwise the elements in `values` will replace the corresponding elements in the `self`.
+                Default: ``False``.
+
+        Returns:
+            Tensor `self`.
+
+        Raises:
+            TypeError: If the dtype of the `self` is not equal to the dtype of `values`.
+            TypeError: If the dtype of `indices` is not tuple[Tensor], list[Tensor].
+            TypeError: If the dtype of tensors in `indices` are not bool, uint8, int32 or int64.
+            TypeError: If the dtypes of tensors in `indices` are inconsistent.
+            TypeError: If the dtype of `accumulate` is not bool.
+            ValueError: If size(`values`) is not 1 or max size of the tensors in `indices` when
+                rank(`self`) == size(`indices`).
+            ValueError: If size(`values`) is not 1 or `self`.shape[-1] when rank(`self`) > size(`indices`).
+            ValueError: If the tensors in `indices` is not be broadcastable.
+            ValueError: If size(`indices`) > rank(`self`).
+
+        Supported Platforms:
+            ``Ascend``
+
+        Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
+            >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
+            >>> values = Tensor(np.array([3]).astype(np.int32))
+            >>> indices = [Tensor(np.array([0, 1, 1]).astype(np.int32)), Tensor(np.array([1, 2, 1]).astype(np.int32))]
+            >>> accumulate = True
+            >>> x.index_put_(indices, values, accumulate)
+            >>> print(x)
+            [[1 5 3]
+             [4 8 9]]
+        """
+        index_put_ = tensor_operator_registry.get('index_put_')
+        return index_put_(self, indices, values, accumulate)
+
     def move_to(self, to, blocking=True):
         r"""
         Copy Tensor to target device synchronously or asynchronously, default synchronously. only support PyNative mode.
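The `index_put_` doctest above uses `accumulate=True` with a duplicate index pair, so each occurrence adds. The same behavior replayed with NumPy's `np.add.at`:

```python
# Hedged sketch: index_put_ with accumulate=True; duplicates accumulate.
import numpy as np

x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
rows = np.array([0, 1, 1])
cols = np.array([1, 2, 1])
np.add.at(x, (rows, cols), 3)   # x[indices] += values at each index pair
print(x)
# [[1 5 3]
#  [4 8 9]]
```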
@@ -4899,7 +3834,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         mode = context.get_context("mode")
         if mode != context.PYNATIVE_MODE:
             raise ValueError(f"The method of 'move_to' only supported in pynative mode, but got: {mode}.")
-        return
+        return TensorPy_.move_to(self, to, blocking)
 
     def _offload(self):
         r"""
@@ -4914,9 +3849,71 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> x = ms.Tensor([1, 2, 3], ms.int64)
         >>> x._offload()
         """
-        return
+        return TensorPy_._offload(self, False)
+
+    def _data_ptr(self):
+        r"""
+        Get the data ptr address of tensor, for CPU is host address, GPU/NPU is device address.
+        User should know how to use the data ptr address.
+        Note: this api is an experimental api, users need to understand it before use.
+
+        Supported Platforms:
+            ``CPU/GPU/Ascend``
+
+        Examples:
+            >>> import mindspore as ms
+            >>> from mindspore import Tensor
+            >>> x = ms.Tensor([1, 2, 3], ms.int64)
+            >>> data_ptr = x._data_ptr()
+        """
+        return TensorPy_._data_ptr(self)
+
+    def normal_(self, mean=0, std=1, *, generator=None):
+        r"""
+        Update the `self` tensor in place by generating random numbers sampled from the normal
+        distribution which is constructed by the parameters `mean` and `std`.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+
+        Args:
+            mean (number, optional): the mean of normal distribution. With float data type.
+                Default: ``0``.
+            std (number, optional): the std of normal distribution. With float data type.
+                Default: ``1``.
+
+        Keyword Args:
+            generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
+                Default: ``None``, uses the default pseudorandom number generator.
+
+        Returns:
+            A tensor that is filled with random numbers that follow a normal distribution and
+            that has the same type and shape as the `self` tensor.
+
+        Raises:
+            TypeError: If the dtype of `mean` or `std` is not one of: bool, int, float, complex.
+
+        Supported Platforms:
+            ``Ascend``
+
+        Examples:
+            >>> import mindspore
+            >>> import numpy as np
+            >>> x = mindspore.Tensor(np.array([[1, 2], [3, 4]]), dtype=mindspore.float32)
+            >>> output = x.normal_()
+            >>> print(output)
+            [[0.2788825 1.3305743]
+             [1.244194 1.16303174]]
+        """
+        return tensor_operator_registry.get('normal_')(self, mean=mean, std=std, generator=generator)
 
 
+    def triangular_solve(self, A, upper=True, transpose=False, unitriangular=False):
+        r"""
+        For details, please refer to :func:`mindspore.mint.triangular_solve`.
+        """
+        return tensor_operator_registry.get('triangular_solve')(self, A, upper, transpose, unitriangular)
+
 def _vm_compare(*args):
     """Implement `vm_compare` for tensor."""
     if args:
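`normal_` fills `self` in place while keeping its dtype and shape. An off-device approximation with NumPy's `Generator` (the seed and names are illustrative, not part of the MindSpore API):

```python
# Hedged sketch: in-place normal fill, preserving dtype and shape.
import numpy as np

rng = np.random.default_rng(seed=0)          # plays the role of the generator
x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
x[...] = rng.normal(loc=0.0, scale=1.0, size=x.shape)  # overwrite in place
print(x.dtype, x.shape)   # float32 (2, 2) -- unchanged, values resampled
```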
@@ -4926,12 +3923,16 @@ def _vm_compare(*args):
     if obj_str == "shape":
         fn = getattr(args[0].asnumpy(), obj_str)
         return fn
-    if obj_str == "
-        fn = getattr(args[0].asnumpy(),
+    if obj_str == "_tensor_setitem" or obj_str == "_tensor_setitem_origin":
+        fn = getattr(args[0].asnumpy(), "__setitem__")
         index = args[1].asnumpy() if isinstance(args[1], Tensor) else args[1]
         value = args[2].asnumpy() if isinstance(args[2], Tensor) else args[2]
         fn(index, value)
         return args[0]
+    if obj_str == "_tensor_getitem" or obj_str == "_tensor_getitem_origin":
+        fn = getattr(args[0].asnumpy(), "__getitem__")
+        index = args[1].asnumpy() if isinstance(args[1], Tensor) else args[1]
+        return Tensor(np.array(fn(index)))
     if len(args) == 2:
         fn = getattr(args[0].asnumpy(), obj_str)
         return Tensor(fn())
@@ -4955,20 +3956,13 @@ def _check_tensor_input(input_data=None, dtype=None, shape=None, init=None):
         raise ValueError("init, dtype and shape must have values at the same time.")
 
     if input_data is not None:
-        if isinstance(input_data, np.ndarray) and input_data.ndim >= 1 and input_data.size == 0:
-            raise ValueError("input_data can not contain zero dimension.")
         if isinstance(input_data, (tuple, list)):
             try:
-
+                _ = np.array(input_data)
             except ValueError as e:
                 if "The requested array has an inhomogeneous shape" in str(e):
                     raise TypeError(f"For Tensor, the input_data is {input_data} that contain unsupported element.")
                 raise
-        if np_data.ndim >= 1 and np_data.size == 0:
-            raise ValueError("input_data can not contain zero dimension.")
-
-    if shape is not None and not (hasattr(init, "__enable_zero_dim__") and init.__enable_zero_dim__) and 0 in shape:
-        raise ValueError("Shape can not contain zero value.")
 
 
 def _check_tensor_dynamic_shape(dtype=None, shape=None, init=None):