mindspore 2.2.14-cp39-cp39-win_amd64.whl → 2.4.0-cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +8 -5
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +124 -25
- mindspore/_extends/builtin_operations.py +2 -1
- mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
- mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
- mindspore/_extends/parse/__init__.py +18 -14
- mindspore/_extends/parse/compile_config.py +299 -0
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +182 -68
- mindspore/_extends/parse/resources.py +45 -14
- mindspore/_extends/parse/standard_method.py +192 -252
- mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _extends/pijit/__init__.py} +6 -16
- mindspore/_extends/pijit/pijit_func_white_list.py +669 -0
- mindspore/_extends/remote/kernel_build_server.py +2 -0
- mindspore/_profiler.py +30 -0
- mindspore/amp.py +67 -26
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/boost/less_batch_normalization.py +9 -6
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +20 -7
- mindspore/common/_jit_fallback_utils.py +2 -3
- mindspore/common/_pijit_context.py +190 -0
- mindspore/common/_register_for_adapter.py +7 -0
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_register_for_tensor.py +10 -10
- mindspore/common/_stub_tensor.py +7 -1
- mindspore/common/_tensor_overload.py +139 -0
- mindspore/common/_utils.py +5 -17
- mindspore/common/api.py +449 -129
- mindspore/common/auto_dynamic_shape.py +27 -14
- mindspore/common/dtype.py +17 -10
- mindspore/common/dump.py +8 -11
- mindspore/common/file_system.py +48 -0
- mindspore/common/generator.py +254 -0
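New in `mindspore/common`: a dedicated RNG-state object. A minimal sketch, assuming the `Generator` added here follows the torch-style seed/state API documented for MindSpore 2.3+ (names are assumptions, not confirmed by this diff):

```python
# Hedged sketch: mindspore.Generator is assumed to expose
# manual_seed/get_state/set_state, as in the 2.3+ docs.
import mindspore as ms

g = ms.Generator()        # independent random-number-generator state
g.manual_seed(42)         # deterministic seeding
state = g.get_state()     # snapshot the RNG state
g.set_state(state)        # restore later for reproducible sampling
```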
- mindspore/common/hook_handle.py +65 -30
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +34 -14
- mindspore/common/lazy_inline.py +72 -19
- mindspore/common/mindir_util.py +12 -2
- mindspore/common/mutable.py +79 -14
- mindspore/common/no_inline.py +54 -0
- mindspore/common/np_dtype.py +25 -0
- mindspore/common/parameter.py +73 -21
- mindspore/common/recompute.py +292 -0
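`recompute.py` suggests a functional activation-recomputation entry point alongside the existing `Cell.recompute()`. A hedged sketch, assuming the top-level `mindspore.recompute(cell, *inputs)` form from the 2.3+ docs:

```python
# Hedged sketch: assumes mindspore.recompute(block, *inputs) reruns
# the block's forward during backprop instead of storing activations.
import mindspore as ms
from mindspore import nn, recompute

class Net(nn.Cell):
    def __init__(self):
        super().__init__()
        self.blk = nn.Dense(16, 16)

    def construct(self, x):
        return recompute(self.blk, x)  # trade memory for recompute time
```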
- mindspore/common/seed.py +9 -9
- mindspore/common/sparse_tensor.py +276 -24
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +668 -514
- mindspore/communication/__init__.py +6 -11
- mindspore/communication/_comm_helper.py +43 -3
- mindspore/communication/comm_func.py +1395 -0
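Roughly 1.4k new lines of functional collectives. A hedged sketch, assuming `all_reduce` here takes a tensor directly (no primitive object), as in the 2.3+ `comm_func` docs; it only runs under a distributed launch, so this is illustrative rather than standalone-runnable:

```python
# Hedged sketch of the functional collective API; requires a
# distributed launcher (e.g. msrun or mpirun) to actually execute.
import mindspore as ms
from mindspore.communication import init
from mindspore.communication.comm_func import all_reduce

init()                             # join the default world group
x = ms.Tensor([1.0, 2.0, 3.0])
y = all_reduce(x)                  # summed across all ranks
```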
- mindspore/communication/management.py +117 -104
- mindspore/config/op_info.config +22 -54
- mindspore/context.py +455 -71
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +6 -6
- mindspore/dataset/audio/transforms.py +711 -158
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/core/config.py +7 -0
- mindspore/dataset/core/validator_helpers.py +7 -0
- mindspore/dataset/engine/cache_client.py +2 -2
- mindspore/dataset/engine/datasets.py +201 -116
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +83 -3
- mindspore/dataset/engine/datasets_text.py +39 -39
- mindspore/dataset/engine/datasets_user_defined.py +230 -141
- mindspore/dataset/engine/datasets_vision.py +78 -74
- mindspore/dataset/engine/iterators.py +29 -0
- mindspore/dataset/engine/obs/util.py +7 -0
- mindspore/dataset/engine/offload.py +5 -7
- mindspore/dataset/engine/queue.py +138 -66
- mindspore/dataset/engine/serializer_deserializer.py +2 -2
- mindspore/dataset/engine/validators.py +41 -15
- mindspore/dataset/text/__init__.py +2 -5
- mindspore/dataset/text/transforms.py +408 -121
- mindspore/dataset/text/utils.py +9 -9
- mindspore/dataset/transforms/__init__.py +0 -3
- mindspore/dataset/transforms/transforms.py +261 -76
- mindspore/dataset/utils/browse_dataset.py +9 -9
- mindspore/dataset/utils/line_reader.py +2 -0
- mindspore/dataset/vision/__init__.py +7 -10
- mindspore/dataset/vision/c_transforms.py +10 -10
- mindspore/dataset/vision/py_transforms_util.py +1 -1
- mindspore/dataset/vision/transforms.py +2844 -549
- mindspore/dataset/vision/utils.py +161 -10
- mindspore/dataset/vision/validators.py +16 -3
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/{rewrite/ast_creator_register.py → experimental/es/__init__.py} +5 -20
- mindspore/experimental/es/embedding_service.py +883 -0
- mindspore/experimental/es/embedding_service_layer.py +581 -0
- mindspore/experimental/llm_boost/__init__.py +21 -0
- mindspore/experimental/llm_boost/atb/__init__.py +23 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
- mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
- mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
- mindspore/experimental/llm_boost/register.py +129 -0
- mindspore/experimental/llm_boost/utils.py +31 -0
- mindspore/experimental/optim/__init__.py +12 -2
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +35 -34
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +124 -15
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +66 -121
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +18 -8
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +28 -19
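The experimental optimizer family grows substantially above (Adadelta, Adagrad, Adamax, ASGD, NAdam, RAdam, RMSprop, Rprop are all new files). A hedged usage sketch, assuming the new classes follow the existing `mindspore.experimental.optim` convention of construction from `trainable_params()` plus an `lr_scheduler`:

```python
# Hedged sketch: class names inferred from the new file names; verify
# against the 2.4.0 API reference.
import mindspore.nn as nn
from mindspore.experimental import optim

net = nn.Dense(4, 2)
opt = optim.RMSprop(net.trainable_params(), lr=1e-3)
sched = optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.5)
# per training step: opt(grads); per epoch: sched.step()
```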
- mindspore/hal/__init__.py +40 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/contiguous_tensors_handle.py +175 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/memory.py +326 -0
- mindspore/hal/stream.py +357 -0
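`mindspore.hal` is an entirely new runtime-facing package (device, event, memory, and stream management). A hedged tour, with names taken from the MindSpore 2.3+ `hal` docs and therefore assumed rather than confirmed by this diff:

```python
# Hedged sketch of the hal device/stream surface.
import mindspore as ms

print(ms.hal.device_count())       # devices visible for current target
s = ms.hal.Stream()                # user-created compute stream
with ms.hal.StreamCtx(s):          # kernels below are issued on s
    y = ms.ops.mul(ms.ops.ones((2, 2)), 2)
s.synchronize()                    # block until the stream drains
```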
- mindspore/include/api/data_type.h +2 -2
- mindspore/include/api/dual_abi_helper.h +16 -3
- mindspore/include/api/model.h +4 -3
- mindspore/include/api/model_group.h +13 -1
- mindspore/include/api/status.h +14 -0
- mindspore/include/api/types.h +10 -10
- mindspore/include/c_api/model_c.h +173 -0
- mindspore/include/c_api/types_c.h +19 -0
- mindspore/include/dataset/config.h +2 -2
- mindspore/include/dataset/constants.h +2 -2
- mindspore/include/dataset/execute.h +3 -5
- mindspore/include/dataset/vision.h +58 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +3 -3
- mindspore/mindrecord/__init__.py +5 -1
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +25 -0
- mindspore/mindrecord/filewriter.py +138 -103
- mindspore/mindrecord/mindpage.py +40 -6
- mindspore/mindrecord/shardutils.py +3 -2
- mindspore/mindrecord/shardwriter.py +7 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
- mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
- mindspore/mindrecord/tools/csv_to_mr.py +4 -9
- mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mint/__init__.py +1586 -0
- mindspore/mint/distributed/__init__.py +31 -0
- mindspore/mint/distributed/distributed.py +254 -0
- mindspore/{rewrite/ast_transformers → mint/linalg}/__init__.py +9 -4
- mindspore/mint/nn/__init__.py +757 -0
- mindspore/mint/nn/functional.py +679 -0
- mindspore/mint/nn/layer/__init__.py +39 -0
- mindspore/mint/nn/layer/activation.py +133 -0
- mindspore/mint/nn/layer/normalization.py +477 -0
- mindspore/mint/nn/layer/pooling.py +110 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +206 -0
- mindspore/mint/special/__init__.py +63 -0
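The whole `mindspore.mint` package is new: a torch-flavored functional surface (~1.6k lines in `__init__.py` alone) plus `mint.nn`, `mint.optim`, `mint.distributed`, `mint.linalg`, and `mint.special`. A hedged taste, using names from the 2.3+ mint docs (coverage in 2.4.0 may differ):

```python
# Hedged sketch: mint ops follow torch naming (dim=, keepdim=) rather
# than the classic ops naming (axis=, keep_dims=).
import mindspore as ms
from mindspore import mint

x = mint.arange(6).reshape(2, 3).astype(ms.float32)
w = mint.ones((3, 2))
y = mint.matmul(x, w)
probs = mint.nn.functional.softmax(y, dim=-1)
```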
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/multiprocessing/__init__.py +73 -0
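`mindspore.multiprocessing` is new and small (+73 lines); it is presumably a thin, fork-safe wrapper over Python's standard `multiprocessing`, so standard usage should carry over:

```python
# Hedged sketch: assumes a drop-in multiprocessing-compatible API.
import mindspore.multiprocessing as mp

def worker(rank):
    print("worker", rank)

if __name__ == "__main__":
    p = mp.Process(target=worker, args=(0,))
    p.start()
    p.join()
```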
- mindspore/nn/cell.py +461 -323
- mindspore/nn/dynamic_lr.py +2 -2
- mindspore/nn/layer/activation.py +292 -135
- mindspore/nn/layer/basic.py +288 -83
- mindspore/nn/layer/channel_shuffle.py +3 -16
- mindspore/nn/layer/container.py +3 -3
- mindspore/nn/layer/conv.py +75 -66
- mindspore/nn/layer/embedding.py +221 -45
- mindspore/nn/layer/image.py +4 -7
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +150 -68
- mindspore/nn/layer/padding.py +64 -87
- mindspore/nn/layer/pooling.py +175 -12
- mindspore/nn/layer/rnn_cells.py +6 -16
- mindspore/nn/layer/rnns.py +6 -5
- mindspore/nn/layer/thor_layer.py +1 -2
- mindspore/nn/layer/timedistributed.py +1 -1
- mindspore/nn/layer/transformer.py +55 -53
- mindspore/nn/learning_rate_schedule.py +6 -5
- mindspore/nn/loss/__init__.py +2 -2
- mindspore/nn/loss/loss.py +145 -88
- mindspore/nn/optim/__init__.py +2 -1
- mindspore/nn/optim/ada_grad.py +4 -2
- mindspore/nn/optim/adadelta.py +4 -2
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +102 -181
- mindspore/nn/optim/adamax.py +4 -2
- mindspore/nn/optim/adasum.py +3 -3
- mindspore/nn/optim/asgd.py +4 -2
- mindspore/nn/optim/ftrl.py +31 -61
- mindspore/nn/optim/lamb.py +5 -3
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +6 -4
- mindspore/nn/optim/momentum.py +13 -25
- mindspore/nn/optim/optimizer.py +6 -3
- mindspore/nn/optim/proximal_ada_grad.py +4 -2
- mindspore/nn/optim/rmsprop.py +9 -3
- mindspore/nn/optim/rprop.py +4 -2
- mindspore/nn/optim/sgd.py +5 -3
- mindspore/nn/optim/tft_wrapper.py +127 -0
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
- mindspore/nn/probability/distribution/beta.py +2 -2
- mindspore/nn/probability/distribution/categorical.py +4 -6
- mindspore/nn/probability/distribution/cauchy.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +2 -2
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +13 -1
- mindspore/nn/wrap/__init__.py +2 -1
- mindspore/nn/wrap/cell_wrapper.py +46 -12
- mindspore/nn/wrap/grad_reducer.py +148 -8
- mindspore/nn/wrap/loss_scale.py +44 -7
- mindspore/numpy/__init__.py +2 -0
- mindspore/numpy/array_creations.py +67 -68
- mindspore/numpy/array_ops.py +70 -66
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/fft.py +966 -0
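A new `fft` submodule lands in `mindspore.numpy` (+966 lines). A hedged sketch, assuming the usual NumPy `fft`/`ifft` naming carries over:

```python
# Hedged sketch: function names assumed NumPy-compatible.
import mindspore as ms
import mindspore.numpy as mnp
from mindspore.numpy import fft

sig = mnp.sin(mnp.arange(8).astype(ms.float32))
spec = fft.fft(sig)      # complex spectrum
back = fft.ifft(spec)    # round-trips to the input signal
```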
- mindspore/numpy/logic_ops.py +11 -10
- mindspore/numpy/math_ops.py +147 -152
- mindspore/numpy/utils.py +3 -0
- mindspore/numpy/utils_const.py +4 -4
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +9 -6
- mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
- mindspore/ops/_grad_experimental/grad_comm_ops.py +135 -36
- mindspore/ops/_grad_experimental/grad_math_ops.py +61 -298
- mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
- mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
- mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/__init__.py +0 -1
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
- mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
- mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -3
- mindspore/ops/_op_impl/cpu/adam.py +2 -2
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
- mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
- mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
- mindspore/ops/_vmap/vmap_array_ops.py +162 -101
- mindspore/ops/_vmap/vmap_base.py +8 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
- mindspore/ops/_vmap/vmap_image_ops.py +70 -13
- mindspore/ops/_vmap/vmap_math_ops.py +147 -59
- mindspore/ops/_vmap/vmap_nn_ops.py +292 -117
- mindspore/ops/_vmap/vmap_other_ops.py +1 -1
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +309 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +252 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
- mindspore/ops/auto_generate/gen_extend_func.py +1701 -0
- mindspore/ops/auto_generate/gen_ops_def.py +8482 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +16704 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +549 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +201 -66
- mindspore/ops/composite/math_ops.py +10 -49
- mindspore/ops/composite/multitype_ops/_compile_utils.py +192 -618
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +25 -134
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +8 -3
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
- mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
- mindspore/ops/deprecated.py +14 -3
- mindspore/ops/function/__init__.py +53 -11
- mindspore/ops/function/array_func.py +1269 -1821
- mindspore/ops/function/clip_func.py +19 -31
- mindspore/ops/function/debug_func.py +114 -5
- mindspore/ops/function/fft_func.py +44 -0
- mindspore/ops/function/grad/grad_func.py +30 -22
- mindspore/ops/function/image_func.py +27 -21
- mindspore/ops/function/linalg_func.py +35 -68
- mindspore/ops/function/math_func.py +1170 -2697
- mindspore/ops/function/nn_func.py +2116 -1128
- mindspore/ops/function/other_func.py +8 -8
- mindspore/ops/function/parameter_func.py +5 -93
- mindspore/ops/function/random_func.py +435 -113
- mindspore/ops/function/reshard_func.py +104 -0
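`reshard_func.py` adds a functional resharding entry point. A heavily hedged sketch, assuming the `ops.reshard` plus `Layout` pairing from the 2.4 parallel docs; it only takes effect under graph-mode auto/semi-auto parallel:

```python
# Hedged sketch: Layout/reshard names assumed from the parallel docs.
import mindspore as ms
from mindspore import ops, Layout

layout = Layout((2, 4), ("dp", "mp"))           # 2x4 logical device mesh
x = ops.ones((8, 8), ms.float32)
x_sharded = ops.reshard(x, layout("dp", "mp"))  # re-distribute x
```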
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/function/sparse_unary_func.py +9 -16
- mindspore/ops/function/spectral_func.py +1 -1
- mindspore/ops/function/vmap_func.py +16 -15
- mindspore/ops/functional.py +355 -346
- mindspore/ops/op_info_register.py +18 -45
- mindspore/ops/operations/__init__.py +38 -24
- mindspore/ops/operations/_grad_ops.py +21 -927
- mindspore/ops/operations/_infer_ops.py +19 -0
- mindspore/ops/operations/_inner_ops.py +173 -607
- mindspore/ops/operations/_rl_inner_ops.py +2 -2
- mindspore/ops/operations/_scalar_ops.py +5 -480
- mindspore/ops/operations/_sequence_ops.py +6 -36
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +106 -2837
- mindspore/ops/operations/comm_ops.py +799 -127
- mindspore/ops/operations/custom_ops.py +124 -119
- mindspore/ops/operations/debug_ops.py +142 -41
- mindspore/ops/operations/image_ops.py +1 -217
- mindspore/ops/operations/inner_ops.py +5 -40
- mindspore/ops/operations/linalg_ops.py +1 -49
- mindspore/ops/operations/manually_defined/__init__.py +24 -0
- mindspore/ops/operations/manually_defined/_inner.py +73 -0
- mindspore/ops/operations/manually_defined/ops_def.py +2271 -0
- mindspore/ops/operations/math_ops.py +666 -4972
- mindspore/ops/operations/nn_ops.py +205 -2213
- mindspore/ops/operations/other_ops.py +60 -49
- mindspore/ops/operations/random_ops.py +50 -54
- mindspore/ops/operations/reshard_ops.py +53 -0
- mindspore/ops/operations/sparse_ops.py +4 -4
- mindspore/ops/primitive.py +216 -103
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +252 -0
- mindspore/ops_generate/arg_handler.py +197 -0
- mindspore/ops_generate/gen_aclnn_implement.py +263 -0
- mindspore/ops_generate/gen_constants.py +36 -0
- mindspore/ops_generate/gen_ops.py +1099 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
- mindspore/ops_generate/gen_pyboost_func.py +1052 -0
- mindspore/ops_generate/gen_utils.py +209 -0
- mindspore/ops_generate/op_proto.py +145 -0
- mindspore/ops_generate/pyboost_utils.py +367 -0
- mindspore/ops_generate/template.py +261 -0
- mindspore/parallel/__init__.py +8 -4
- mindspore/parallel/_auto_parallel_context.py +100 -10
- mindspore/parallel/_cell_wrapper.py +99 -9
- mindspore/parallel/_cost_model_context.py +1 -1
- mindspore/parallel/_dp_allreduce_fusion.py +159 -159
- mindspore/parallel/_parallel_serialization.py +67 -23
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +99 -22
- mindspore/parallel/_transformer/__init__.py +1 -1
- mindspore/parallel/_transformer/layers.py +1 -1
- mindspore/parallel/_transformer/loss.py +1 -1
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/op_parallel_config.py +1 -1
- mindspore/parallel/_transformer/transformer.py +2 -2
- mindspore/parallel/_utils.py +173 -6
- mindspore/parallel/algo_parameter_config.py +8 -10
- mindspore/parallel/checkpoint_transform.py +204 -38
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +352 -0
- mindspore/parallel/cluster/process_entity/_utils.py +101 -0
- mindspore/parallel/cluster/run.py +136 -0
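The new `mindspore/parallel/cluster` package appears to implement the dynamic-cluster launcher behind the `msrun` command-line tool (e.g. `msrun --worker_num=8 --local_worker_num=8 train.py`), which would also account for the added console-script entry in `entry_points.txt` near the end of this diff; this is an inference from the file names rather than something the diff states.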
- mindspore/parallel/mpi/__init__.py +1 -1
- mindspore/parallel/mpi/_mpi_config.py +1 -1
- mindspore/parallel/parameter_broadcast.py +151 -0
- mindspore/parallel/shard.py +279 -37
- mindspore/parallel/transform_safetensors.py +993 -0
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +4 -2
- mindspore/profiler/common/constant.py +29 -0
- mindspore/profiler/common/process_pool.py +41 -0
- mindspore/profiler/common/registry.py +47 -0
- mindspore/profiler/common/singleton.py +28 -0
- mindspore/profiler/common/util.py +153 -0
- mindspore/profiler/dynamic_profiler.py +694 -0
- mindspore/profiler/envprofiling.py +18 -20
- mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +71 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +180 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +185 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +136 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +131 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +104 -0
- mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +123 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +75 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
- mindspore/profiler/parser/ascend_flops_generator.py +20 -4
- mindspore/profiler/parser/ascend_hccl_generator.py +29 -278
- mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
- mindspore/profiler/parser/ascend_memory_generator.py +185 -0
- mindspore/profiler/parser/ascend_msprof_exporter.py +148 -146
- mindspore/profiler/parser/ascend_msprof_generator.py +73 -283
- mindspore/profiler/parser/ascend_op_generator.py +92 -42
- mindspore/profiler/parser/ascend_timeline_generator.py +298 -133
- mindspore/profiler/parser/base_timeline_generator.py +25 -25
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +25 -12
- mindspore/profiler/parser/framework_parser.py +4 -393
- mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
- mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
- mindspore/profiler/parser/integrator.py +3 -1
- mindspore/profiler/parser/memory_usage_parser.py +0 -154
- mindspore/profiler/parser/minddata_parser.py +72 -3
- mindspore/profiler/parser/profiler_info.py +94 -7
- mindspore/profiler/profiler.py +153 -0
- mindspore/profiler/profiling.py +631 -508
- mindspore/rewrite/__init__.py +2 -14
- mindspore/rewrite/api/node.py +122 -36
- mindspore/rewrite/api/pattern_engine.py +2 -3
- mindspore/rewrite/api/scoped_value.py +16 -15
- mindspore/rewrite/api/symbol_tree.py +45 -29
- mindspore/rewrite/ast_helpers/__init__.py +3 -6
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
- mindspore/rewrite/common/__init__.py +1 -2
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
- mindspore/rewrite/{namer.py → common/namer.py} +63 -18
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/node/__init__.py +5 -5
- mindspore/rewrite/node/call_function.py +23 -7
- mindspore/rewrite/node/cell_container.py +7 -3
- mindspore/rewrite/node/control_flow.py +53 -28
- mindspore/rewrite/node/node.py +212 -196
- mindspore/rewrite/node/node_manager.py +51 -22
- mindspore/rewrite/node/node_topological_manager.py +3 -23
- mindspore/rewrite/parsers/__init__.py +12 -0
- mindspore/rewrite/parsers/arguments_parser.py +8 -9
- mindspore/rewrite/parsers/assign_parser.py +637 -413
- mindspore/rewrite/parsers/attribute_parser.py +3 -4
- mindspore/rewrite/parsers/class_def_parser.py +115 -148
- mindspore/rewrite/parsers/constant_parser.py +5 -5
- mindspore/rewrite/parsers/container_parser.py +4 -6
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +31 -98
- mindspore/rewrite/parsers/function_def_parser.py +13 -5
- mindspore/rewrite/parsers/if_parser.py +28 -10
- mindspore/rewrite/parsers/module_parser.py +8 -182
- mindspore/rewrite/parsers/parser.py +1 -5
- mindspore/rewrite/parsers/parser_register.py +1 -1
- mindspore/rewrite/parsers/return_parser.py +5 -10
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +705 -186
- mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
- mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
- mindspore/run_check/_check_version.py +40 -115
- mindspore/run_check/run_check.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +597 -263
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +7 -5
- mindspore/train/_utils.py +204 -4
- mindspore/train/amp.py +335 -295
- mindspore/train/anf_ir_pb2.py +14 -2
- mindspore/train/callback/__init__.py +5 -2
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +220 -43
- mindspore/train/callback/_cluster_monitor.py +201 -0
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_flops_collector.py +239 -0
- mindspore/train/callback/_landscape.py +15 -9
- mindspore/train/callback/_loss_monitor.py +5 -5
- mindspore/train/callback/_on_request_exit.py +136 -33
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +12 -12
- mindspore/train/callback/_tft_register.py +352 -0
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/data_sink.py +6 -5
- mindspore/train/dataset_helper.py +66 -23
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/accuracy.py +7 -7
- mindspore/train/metrics/confusion_matrix.py +8 -6
- mindspore/train/metrics/cosine_similarity.py +6 -4
- mindspore/train/metrics/error.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/perplexity.py +2 -1
- mindspore/train/metrics/roc.py +4 -4
- mindspore/train/metrics/topk.py +2 -2
- mindspore/train/mind_ir_pb2.py +116 -37
- mindspore/train/model.py +382 -76
- mindspore/train/serialization.py +787 -288
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +51 -28
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +21 -0
- mindspore/utils/utils.py +60 -0
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/METADATA +8 -4
- mindspore-2.4.0.dist-info/RECORD +1406 -0
- {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/entry_points.txt +1 -0
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
- mindspore/gen_ops.py +0 -273
- mindspore/include/c_api/ms/abstract.h +0 -67
- mindspore/include/c_api/ms/attribute.h +0 -197
- mindspore/include/c_api/ms/base/handle_types.h +0 -43
- mindspore/include/c_api/ms/base/macros.h +0 -32
- mindspore/include/c_api/ms/base/status.h +0 -33
- mindspore/include/c_api/ms/base/types.h +0 -282
- mindspore/include/c_api/ms/context.h +0 -102
- mindspore/include/c_api/ms/graph.h +0 -160
- mindspore/include/c_api/ms/node.h +0 -606
- mindspore/include/c_api/ms/tensor.h +0 -161
- mindspore/include/c_api/ms/value.h +0 -84
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/layer/flash_attention.py +0 -189
- mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +0 -93
- mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +0 -66
- mindspore/ops/_op_impl/cpu/concat.py +0 -39
- mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
- mindspore/ops/_op_impl/tbe/__init__.py +0 -47
- mindspore/ops/_op_impl/tbe/abs.py +0 -38
- mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/acos.py +0 -37
- mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/acosh.py +0 -37
- mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
- mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
- mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
- mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
- mindspore/ops/_op_impl/tbe/add.py +0 -42
- mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/add_n.py +0 -39
- mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
- mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
- mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
- mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
- mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
- mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
- mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
- mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/asin.py +0 -37
- mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/asinh.py +0 -37
- mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/assign.py +0 -79
- mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
- mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
- mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/atan.py +0 -37
- mindspore/ops/_op_impl/tbe/atan2.py +0 -38
- mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/atanh.py +0 -37
- mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
- mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
- mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
- mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
- mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
- mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
- mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
- mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
- mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
- mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
- mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cast.py +0 -55
- mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/cdist.py +0 -38
- mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/ceil.py +0 -37
- mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/celu.py +0 -39
- mindspore/ops/_op_impl/tbe/centralization.py +0 -39
- mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
- mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/concat.py +0 -40
- mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
- mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
- mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
- mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
- mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
- mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/cos.py +0 -37
- mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/cosh.py +0 -37
- mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
- mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cummin.py +0 -41
- mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
- mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
- mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
- mindspore/ops/_op_impl/tbe/diag.py +0 -38
- mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
- mindspore/ops/_op_impl/tbe/dilation.py +0 -40
- mindspore/ops/_op_impl/tbe/div.py +0 -41
- mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
- mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
- mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
- mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
- mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
- mindspore/ops/_op_impl/tbe/elu.py +0 -38
- mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/equal.py +0 -42
- mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/erf.py +0 -37
- mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfc.py +0 -37
- mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
- mindspore/ops/_op_impl/tbe/exp.py +0 -40
- mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
- mindspore/ops/_op_impl/tbe/expm1.py +0 -37
- mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
- mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/fill.py +0 -56
- mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/flatten.py +0 -48
- mindspore/ops/_op_impl/tbe/floor.py +0 -37
- mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
- mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
- mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
- mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
- mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
- mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
- mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/ger.py +0 -43
- mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/greater.py +0 -43
- mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
- mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
- mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
- mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
- mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
- mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
- mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
- mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/im2col.py +0 -42
- mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
- mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
- mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/inv.py +0 -38
- mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/invert.py +0 -37
- mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/iou.py +0 -38
- mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/is_close.py +0 -40
- mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
- mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
- mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
- mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
- mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
- mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
- mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/lerp.py +0 -38
- mindspore/ops/_op_impl/tbe/less.py +0 -41
- mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/log.py +0 -40
- mindspore/ops/_op_impl/tbe/log1p.py +0 -37
- mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
- mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
- mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
- mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
- mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/matmul.py +0 -53
- mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
- mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
- mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
- mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum.py +0 -39
- mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
- mindspore/ops/_op_impl/tbe/minimum.py +0 -40
- mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mish.py +0 -37
- mindspore/ops/_op_impl/tbe/mod.py +0 -41
- mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/mul.py +0 -37
- mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
- mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
- mindspore/ops/_op_impl/tbe/neg.py +0 -39
- mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
- mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
- mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
- mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
- mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
- mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/pack.py +0 -58
- mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
- mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
- mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/pdist.py +0 -36
- mindspore/ops/_op_impl/tbe/pooling.py +0 -46
- mindspore/ops/_op_impl/tbe/population_count.py +0 -38
- mindspore/ops/_op_impl/tbe/pow.py +0 -41
- mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/prelu.py +0 -37
- mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/range.py +0 -39
- mindspore/ops/_op_impl/tbe/real_div.py +0 -38
- mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
- mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
- mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
- mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
- mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6.py +0 -38
- mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/renorm.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
- mindspore/ops/_op_impl/tbe/rint.py +0 -37
- mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roll.py +0 -42
- mindspore/ops/_op_impl/tbe/round.py +0 -38
- mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
- mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
- mindspore/ops/_op_impl/tbe/select.py +0 -38
- mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/selu.py +0 -39
- mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sgd.py +0 -62
- mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sign.py +0 -38
- mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/sin.py +0 -37
- mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sinh.py +0 -37
- mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/slice.py +0 -58
- mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
- mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax.py +0 -37
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
- mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/softplus.py +0 -37
- mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softsign.py +0 -37
- mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sort.py +0 -38
- mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/split_d.py +0 -38
- mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/split_v.py +0 -39
- mindspore/ops/_op_impl/tbe/splitv.py +0 -39
- mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/square.py +0 -38
- mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
- mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
- mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
- mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
- mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
- mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
- mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
- mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
- mindspore/ops/_op_impl/tbe/sub.py +0 -39
- mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tan.py +0 -38
- mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh.py +0 -37
- mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
- mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
- mindspore/ops/_op_impl/tbe/tile.py +0 -37
- mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
- mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
- mindspore/ops/_op_impl/tbe/transpose.py +0 -60
- mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
- mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
- mindspore/ops/_op_impl/tbe/trunc.py +0 -39
- mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/unpack.py +0 -38
- mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
- mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
- mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
- mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
- mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
- mindspore/ops/_tracefunc.py +0 -241
- mindspore/ops/arg_dtype_cast.py +0 -54
- mindspore/ops/silent_check.py +0 -162
- mindspore/profiler/parser/msadvisor_analyzer.py +0 -82
- mindspore/profiler/parser/msadvisor_parser.py +0 -240
- mindspore/rewrite/api/tree_node_helper.py +0 -60
- mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
- mindspore/rewrite/namespace.py +0 -53
- mindspore-2.2.14.dist-info/RECORD +0 -1924
- {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/WHEEL +0 -0
- {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/top_level.txt +0 -0
mindspore/common/tensor.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright 2020-
+# Copyright 2020-2024 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -27,13 +27,17 @@ from mindspore.common.seed import get_seed
 from mindspore import context
 from mindspore import log as logger
 from mindspore.common import dtype as mstype
+from mindspore.common.hook_handle import _TensorHookHandle

 from mindspore.common._utils import get_slice_num
 from mindspore.common._register_for_tensor import tensor_operator_registry
+from mindspore.common._tensor_overload import (repeat_interleave_mint, add_mint, item_mint, isnan_mint, flatten_mint,
+                                               max_mint, mean_mint, min_mint, split_mint, sub_mint)
 from mindspore._c_expression import Tensor as Tensor_
 from mindspore import _checkparam as validator
-from mindspore._checkparam import check_is_number, is_stub_tensor
+from mindspore._checkparam import check_is_number, is_stub_tensor, check_hook_fn
 from mindspore._check_jit_forbidden_api import jit_forbidden_register
+from mindspore.common.symbol import Symbol

 np_types = (np.int8, np.int16, np.int32, np.int64,
             np.uint8, np.uint16, np.uint32, np.uint64, np.float16,
@@ -48,7 +52,8 @@ def _check_input_data_type(input_data):
     valid_dtypes = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64,
                     np.float16, np.float32, np.float64, np.bool_, np.str_, np.complex64, np.complex128)
     if isinstance(input_data, np.ndarray) and input_data.dtype not in valid_dtypes and \
-            input_data.dtype.kind != 'U' and input_data.dtype.kind != 'S'
+            input_data.dtype.kind != 'U' and input_data.dtype.kind != 'S' and \
+            not (input_data.dtype.kind == 'V' and input_data.dtype.char == 'E'):  # Support np.str_ and np.bfloat16
         new_line = '\n'
         for index, x in np.ndenumerate(input_data):
             if np.array(x).dtype not in valid_dtypes:
@@ -82,11 +87,11 @@ def tensor(input_data=None, dtype=None, shape=None, init=None, internal=False, c
     based on the `dtype` argument.

     Please refer to `Creating and Using Tensor
-    <https://www.mindspore.cn/docs/en/
+    <https://www.mindspore.cn/docs/en/master/model_train/program_form/static_graph.html#mindspore-user-defined-data-types>`_ .

     The difference between it and the Tensor class is that it adds
     `Annotation
-    <https://www.mindspore.cn/docs/en/
+    <https://www.mindspore.cn/docs/en/master/model_train/program_form/static_graph.html#annotation-type>`_
     which can prevent the generation of AnyType compared to the Tensor class.

     The arguments and return values are the same as the Tensor class. Also see: :class:`mindspore.Tensor`.
@@ -114,22 +119,25 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     Tensor is a data structure that stores an n-dimensional array.

     Note:
-        If
+        If `init` interface is used to initialize `Tensor`, the `Tensor.init_data` API needs to be called to load the
         actual data to `Tensor`.

+    Warning:
+        To convert dtype of a `Tensor`, it is recommended to use `Tensor.astype()` rather than
+        `Tensor(sourceTensor, dtype=newDtype)`.
+
     Args:
         input_data (Union[Tensor, float, int, bool, tuple, list, numpy.ndarray]): The data to be stored. It can be
             another Tensor, Python number or NumPy ndarray. Default: ``None`` .
         dtype (:class:`mindspore.dtype`): Used to indicate the data type of the output Tensor. The argument should
             be defined in `mindspore.dtype`. If it is ``None`` , the data type of the output Tensor will be the same
             as the `input_data`. Default: ``None`` .
-        shape (Union[tuple, list, int]): Used to indicate the shape of the output Tensor.
-
-
-
-            must be set. Default: ``None`` .
+        shape (Union[tuple, list, int, :class:`mindspore.Symbol`]): Used to indicate the shape of the output Tensor.
+            If `input_data` is available, `shape` doesn't need to be set. If ``None`` or `Symbol` exists in `shape` ,
+            a tensor of dynamic shape is created, `input_data` doesn't need to be set; if only integers exist in
+            `shape`, a tensor of static shape is created, `input_data` or `init` must be set. Default: ``None`` .
         init (Initializer): The information of init data.
-
+            `init` is used for delayed initialization in parallel mode, when using init, `dtype` and `shape` must be
             set. Default: ``None`` .
         internal (bool): Whether it is created by the framework.
             ``'True'`` means that the tensor is created by framework.
@@ -137,14 +145,17 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             Default: ``False`` .
         const_arg (bool): Whether the tensor is a constant when it is used for the argument of a network.
             Default: ``False`` .
+        device(str): This parameter is reserved and does not need to be configured.
+            Default: ``None`` .

     Outputs:
         Tensor.

     Note:
-        The default value None of `input_data` works as a placeholder,
+        The default value ``None`` of `input_data` works as a placeholder,
+        it does not mean that we can create a NoneType
         Tensor.
-        Tensor with shape contains 0 is not fully tested and supported.
+        Tensor with `shape` contains 0 is not fully tested and supported.

     Examples:
         >>> import numpy as np
@@ -198,8 +209,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     """
     delta_seed = 0

-    def __init__(self, input_data=None, dtype=None, shape=None, init=None, internal=False, const_arg=False
+    def __init__(self, input_data=None, dtype=None, shape=None, init=None, internal=False, const_arg=False,
+                 device=None):
         self.init_finished = False
+        if isinstance(input_data, (Tensor, Tensor_)) and dtype is not None:
+            logger.info("It is suggested to use 'Tensor.astype()' to convert the dtype of a Tensor.")
+            _cast = tensor_operator_registry.get("cast")
+            input_data = _cast(input_data, dtype)
+
         if is_stub_tensor(input_data):
             input_data = input_data.stub_sync()

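The hunk above adds a reserved `device` parameter and makes the constructor route `Tensor(sourceTensor, dtype=...)` through the registered cast op while logging a hint to use `Tensor.astype()`, matching the new docstring warning. A minimal sketch of the two conversion paths, assuming a standard mindspore install:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.ones((2, 2), dtype=np.float32))
    y = x.astype(ms.int32)            # recommended: explicit dtype conversion
    z = ms.Tensor(x, dtype=ms.int32)  # still accepted; 2.4.0 casts via the registry and logs a hint
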
@@ -218,8 +235,16 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         if isinstance(input_data, np_types):
             input_data = np.array(input_data)

-        if
-
+        if shape is not None:
+            if isinstance(shape, numbers.Number):
+                shape = (shape,)
+            elif isinstance(shape, Symbol):
+                self.symbolic_shape = [shape]
+                shape = (None,)
+            elif isinstance(shape, (list, tuple)) and any(isinstance(s, Symbol) for s in shape):
+                self.symbolic_shape = [item.to_dict() if isinstance(item, Symbol) else item for item in shape]
+                shape_without_symbol = (None if isinstance(item, Symbol) else item for item in shape)
+                shape = list(shape_without_symbol) if isinstance(shape, list) else tuple(shape_without_symbol)

         _check_tensor_input(input_data, dtype, shape, init)

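With the branch above, `shape` entries may be `Symbol` objects: they are recorded in `symbolic_shape` and replaced by `None` in the static shape, which is how dynamic-shape placeholder tensors are built. A sketch, assuming `mindspore.Symbol` is the public alias of the imported class:

    import mindspore as ms
    from mindspore import Symbol, Tensor

    s = Symbol(divisor=8)                            # a dimension constrained to multiples of 8
    dyn = Tensor(shape=(None, s), dtype=ms.float32)  # no input_data: a dynamic-shape placeholder
    # dyn.symbolic_shape keeps the Symbol info while the static shape becomes (None, None);
    # such placeholders are typically handed to Cell.set_inputs() before compilation.
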
@@ -244,6 +269,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Tensor_.__init__(self, input_data)
         validator.check_value_type('const_arg', const_arg, bool, 'Tensor')

+        if device is not None and device != "CPU":
+            raise ValueError(f"Only 'CPU' is supported for device, but got {device}.")
+
         self.const_arg = const_arg
         self.virtual_flag = False
         self.init = init
@@ -258,6 +286,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self.slice_num_of_persistent_data_ = None
         self.slice_shape_of_persistent_data_ = None

+        # the auto gradient information
+        self._grad = None
+        self._grad_fn = None
+        self._requires_grad = False
+        self._retain_grad = False
+
     @classmethod
     def __subclasshook__(cls, sub):
         """
@@ -295,19 +329,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def __eq__(self, other):
         if not isinstance(other, (int, float, Tensor)):
             return False
-        # bool type is not supported for `Equal` operator in backend.
-        if self.dtype == mstype.bool_ or (isinstance(other, Tensor) and other.dtype == mstype.bool_):
-            if isinstance(other, Tensor):
-                return Tensor(np.array(self.asnumpy() == other.asnumpy()))
-            return Tensor(np.array(self.asnumpy() == other))
         return tensor_operator_registry.get('__eq__')(self, other)

     def __ne__(self, other):
         if not isinstance(other, (int, float, Tensor)):
             return True
-        # bool type is not supported for `NotEqual` operator in backend.
-        if self.dtype == mstype.bool_ or (isinstance(other, Tensor) and other.dtype == mstype.bool_):
-            return Tensor(np.array(self.asnumpy() != other.asnumpy()))
         return tensor_operator_registry.get('__ne__')(self, other)

     def __hash__(self):
@@ -322,7 +348,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         return out

     def __round__(self):
-        out = tensor_operator_registry.get('round')(
+        out = tensor_operator_registry.get('round')(self)
         return out

     def __bool__(self):
@@ -360,9 +386,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         return self

     def __abs__(self):
-        self._init_check()
         return tensor_operator_registry.get('abs')(self)

+    @add_mint
     def __add__(self, other):
         return tensor_operator_registry.get('__add__')(self, other)

@@ -387,6 +413,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def __iadd__(self, other):
         return self.__add__(other)

+    @sub_mint
     def __sub__(self, other):
         return tensor_operator_registry.get('__sub__')(self, other)

@@ -488,8 +515,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def __str__(self):
         if self.dtype == mstype.type_none:
             return "Unknown Tensor type!"
-        if self.dtype == mstype.bfloat16:
-            return str(self.float().asnumpy())
         return str(self.asnumpy())

     def __getstate__(self):
@@ -498,9 +523,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         return state

     def __setstate__(self, state):
-
+        if isinstance(state, tuple):
+            value = state
+        else:
+            value = state.pop("value")
+        self.__dict__.update(state)
         Tensor_.__setstate__(self, value)
-        self.__dict__.update(state)

     @property
     def shape(self):
@@ -509,6 +537,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return self._shape

+    @shape.setter
+    def shape(self, shape_value):
+        r"""
+        Set the shape value.
+        """
+        self._shape = shape_value
+
     @property
     def dtype(self):
         """Return the dtype of the tensor (:class:`mindspore.dtype`)."""
@@ -544,6 +579,83 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return len(self._shape)

+    @property
+    def grad(self):
+        r"""
+        Get the gradient value.
+        """
+        return self._grad
+
+    @grad.setter
+    def grad(self, grad):
+        r"""
+        Set the gradient value.
+        """
+        self._grad = grad
+
+    @property
+    def grad_fn(self):
+        r"""
+        The function for backward.
+        """
+        return self._grad_fn
+
+    @grad_fn.setter
+    def grad_fn(self, grad_fn):
+        r"""
+        Set the function for backward.
+        """
+        self._grad_fn = grad_fn
+
+    @property
+    def is_leaf(self):
+        r"""
+        Whether the stub tensor is leaf.
+        They will be a leaf if they have requires_grad and requires_grad is False,
+        Or they were created by user.
+        """
+        return self._requires_grad is False or self._grad_fn is None
+
+    @property
+    def requires_grad(self):
+        r"""
+        Whether the stub tensor need requires grad.
+        """
+        return self._requires_grad
+
+    @requires_grad.setter
+    def requires_grad(self, requires_grad):
+        r"""
+        Mark the stub tensor whether need requires gradient.
+        """
+        self._requires_grad = requires_grad
+
+    def retain_grad(self):
+        r"""
+        Enable the stub tensor which is not non-leaf to have the grad during backward().
+        """
+        if not self._requires_grad:
+            RuntimeError("can't retain_grad on Tensor that has requires_grad = False.")
+        self._retain_grad = self._grad_fn is not None
+
+    @property
+    def retains_grad(self):
+        r"""
+        Is True if the stub tensor is non-leaf and its grad is enabled to be populated during backward().
+        """
+        return self._retain_grad
+
+    def backward(self, grad=None):
+        r"""
+        Calculate the gradient.
+        """
+        if grad is None:
+            grad = Tensor(np.ones(self.shape), self.dtype)
+        if self._grad_fn is not None:
+            self._grad_fn.apply(grad)
+        elif self._requires_grad:
+            self._grad = grad
+
     @property
     def H(self):
         """
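The block above attaches PyTorch-style autograd state (`grad`, `grad_fn`, `requires_grad`, `retain_grad`) to every Tensor. From the `backward` code alone, a leaf tensor with `requires_grad` set and no `grad_fn` simply stores the incoming gradient; a minimal sketch of that guaranteed path (whether a `grad_fn` gets attached depends on the runtime, so treat anything beyond this as illustrative):

    import numpy as np
    from mindspore import Tensor

    x = Tensor(np.ones((2, 2)).astype(np.float32))
    x.requires_grad = True    # x.is_leaf is True while no grad_fn is attached
    x.backward()              # default gradient: ones with x's shape and dtype
    print(x.grad)             # the stored ones-gradient
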
@@ -607,8 +719,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

     Examples:
         >>> from mindspore import Tensor
+        >>> from mindspore import dtype as mstype
         >>> import numpy as np
-        >>> x = Tensor(np.array([[1, 2], [3, 4]]))
+        >>> x = Tensor(np.array([[1, 2], [3, 4]]), dtype=mstype.int64)
         >>> output = x.strides
         >>> print(output)
         (16, 8)
@@ -644,6 +757,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1 3]
          [2 4]]
         """
+        if self.ndim <= 1:
+            return self
         return self.transpose()

     @staticmethod
@@ -710,28 +825,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.arccosh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('acosh')(self)

     def arcsin(self):
         r"""
         For details, please refer to :func:`mindspore.ops.arcsin`.
         """
-        self._init_check()
         return tensor_operator_registry.get('asin')(self)

     def arctan(self):
         r"""
         For details, please refer to :func:`mindspore.ops.arctan`.
         """
-        self._init_check()
         return tensor_operator_registry.get('atan')(self)

     def arctan2(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.arctan2`.
         """
-        self._init_check()
         return tensor_operator_registry.get('atan2')(self, other)

     def cauchy(self, median=0.0, sigma=1.0):
@@ -766,7 +877,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[8.79836142e-01, 9.37541723e-01]])

         """
-        self._init_check()
         out = tensor_operator_registry.get('cauchy')(list(self.shape), median, sigma)()
         return out.astype(self.dtype)

@@ -804,7 +914,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1.2788825 2.3305743]
          [14.944194 0.16303174]]
         """
-        self._init_check()
         return tensor_operator_registry.get('log_normal')(mean, std)(self)

     @jit_forbidden_register
@@ -837,29 +946,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.bincount`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bincount')(self, weights, minlength)

     def chunk(self, chunks, axis=0):
         r"""
         For details, please refer to :func:`mindspore.ops.chunk`.
         """
-        self._init_check()
         return tensor_operator_registry.get('chunk')(self, chunks, axis)

+    @item_mint
     def item(self, index=None):
         """
         Get the item at the specified index of the tensor.

-        Note:
-            Tensor.item returns a Tensor scalar instead of a Python scalar. And if the tensor is a Tensor scalar,
-            Tensor.item will return the numpy.ndarray.
-
         Args:
             index (Union[None, int, tuple(int)]): The index in Tensor. Default: ``None``.

         Returns:
-            A
+            A scalar, type is defined by the dtype of the Tensor.

         Raises:
             ValueError: If the length of the `index` is not equal to self.ndim.
@@ -877,7 +981,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(x.item())
         1.2
         """
-
+
+        if index is not None:
+            output = self.asnumpy().item(index)
+        else:
+            output = self.asnumpy().item()
         return output

     def itemset(self, *args):
@@ -936,7 +1044,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(x.get_bytes())
         b'\x01\x00\x02\x00\x03\x00'
         """
-        self._init_check()
         return Tensor_.get_bytes(self)

     def asnumpy(self):
@@ -958,10 +1065,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(y)
         [11. 2.]
         """
-        self.
+        if self.has_init:
+            self.init_data()
         return Tensor_.asnumpy(self)

-    def numpy(self):
+    def numpy(self, *, force=False):
         """
         Alias for :func:`mindspore.Tensor.asnumpy`.
         """
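`asnumpy` above now materializes delay-initialized tensors on demand (`has_init` / `init_data`) instead of relying on the old `_init_check` guard, and `numpy` gains a keyword-only `force` flag for NumPy-style call sites. A short sketch, assuming `mindspore.common.initializer.One` as the delayed initializer:

    import mindspore as ms
    from mindspore import Tensor
    from mindspore.common.initializer import One

    t = Tensor(shape=(2, 3), dtype=ms.float32, init=One())  # data not allocated yet
    print(t.asnumpy())   # 2.4.0 calls init_data() implicitly before the conversion
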
@@ -1002,21 +1110,18 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.slice_scatter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('slice_scatter')(self, src, axis, start, end, step)

     def select_scatter(self, src, axis, index):
         """
         For details, please refer to :func:`mindspore.ops.select_scatter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('select_scatter')(self, src, axis, index)

     def histc(self, bins=100, min=0., max=0.):
         """
         For details, please refer to :func:`mindspore.ops.histc`.
         """
-        self._init_check()
         validator.check_value_type('min', min, (int, float,), 'Tensor.histc')
         validator.check_value_type('max', max, (int, float,), 'Tensor.histc')
         return tensor_operator_registry.get('histc')(self, bins, float(min), float(max))
@@ -1025,7 +1130,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.geqrf`.
         """
-        self._init_check()
         return tensor_operator_registry.get('geqrf')(self)

     def slice_shape_of_persistent_data(self):
@@ -1067,14 +1171,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> from mindspore import Tensor, ops
         >>> x = Tensor([[1, 2, 3], [4, 5, 6]], dtype=ms.float32)
         >>> y = ops.transpose(x, (1, 0))
-        >>> y.contiguous()
-        >>>
-
-        [[1. 2. 3.]
-         [4. 5. 6.]]
+        >>> z = y.contiguous()
+        >>> print(z.is_contiguous())
+        True
         """
-
-        return self
+        return tensor_operator_registry.get('contiguous')(self)

     def is_contiguous(self):
         """
@@ -1094,6 +1195,95 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return Tensor_.is_contiguous(self)

+    def stride(self, dim=None):
+        """
+        The stride to jump from one element to the next in the input dim.
+        When no parameters are passed in, a list of stride for all dimensions is returned.
+
+        Args:
+            dim (int): The dim of stride from one element to the next.
+
+        Returns:
+            Int, the stride of tensor.
+
+        Raises:
+            TypeError: `dim` is not an int.
+
+        Examples:
+            >>> import mindspore as ms
+            >>> x = ms.Tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], dtype=ms.float32)
+            >>> x.stride()
+            [5, 1]
+        """
+        stride = Tensor_.stride(self)
+        if dim is None:
+            return stride
+        return stride[dim]
+
+    def storage_offset(self):
+        """
+        Tensor's offset in the underlying storage in terms of the number of storage elements.
+
+        Returns:
+            int, tensor's offset in the underlying storage in terms of number of storage elements.
+
+        Examples:
+            >>> import mindspore as ms
+            >>> x = ms.Tensor([1, 2, 3, 4, 5], dtype=ms.float32)
+            >>> ret = x.storage_offset()
+            >>> print(ret)
+            0
+        """
+        return Tensor_.storage_offset(self)
+
+    def register_hook(self, hook_fn):
+        """
+        Registers a backward hook for tensor.
+
+        Note:
+            - The `register_backward_hook(hook_fn)` does not work in graph mode or functions decorated with 'jit'.
+            - The 'hook_fn' must be defined as the following code. `grad` is the gradient passed to the tensor,
+              which may be modified by returning a new output gradient.
+            - The 'hook_fn' should have the following signature:
+              hook_fn(grad) -> New output gradient, but can not return None or not set return value.
+
+        Args:
+            hook_fn (function): Python function. Tensor backward hook function.
+
+        Returns:
+            A handle corresponding to the `hook_fn` . The handle can be used to remove the added `hook_fn` by calling
+            `handle.remove()` .
+
+        Raises:
+            TypeError: If the `hook_fn` is not a function of python.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import mindspore as ms
+            >>> from mindspore import Tensor
+            >>> ms.set_context(mode=ms.PYNATIVE_MODE)
+            >>> def hook_fn(grad):
+            ...     return grad * 2
+            ...
+            >>> def hook_test(x, y):
+            ...     z = x * y
+            ...     z.register_hook(hook_fn)
+            ...     z = z * y
+            ...     return z
+            ...
+            >>> ms_grad = ms.grad(hook_test, grad_position=(0,1))
+            >>> output = ms_grad(Tensor(1, ms.float32), Tensor(2, ms.float32))
+            >>> print(output)
+            (Tensor(shape=[], dtype=Float32, value=8), Tensor(shape=[], dtype=Float32, value=6))
+        """
+        if not check_hook_fn("register_hook", hook_fn):
+            return _TensorHookHandle()
+        handle = _TensorHookHandle()
+        handle.id = Tensor_.register_hook(self, hook_fn)
+        return handle
+
     def flush_from_cache(self):
         """
         Flush cache data to host if tensor is cache enable.
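Beyond the doctest in the hunk above, a sketch of the gradient-scaling behavior `register_hook` documents (PyNative mode assumed; the returned `_TensorHookHandle` can later detach the hook via `handle.remove()`):

    import mindspore as ms
    from mindspore import Tensor

    ms.set_context(mode=ms.PYNATIVE_MODE)

    def scale_grad(grad):
        return grad * 10          # a hook must return the new gradient

    def net(x):
        y = x * x
        y.register_hook(scale_grad)   # gradient reaching y is multiplied by 10
        return y + 1

    grad_fn = ms.grad(net)
    print(grad_fn(Tensor(3.0, ms.float32)))   # d(y+1)/dx = 2*x = 6, scaled to 60
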
@@ -1106,35 +1296,66 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(y)
         None
         """
-        self._init_check()
         Tensor_._flush_from_cache(self)

     def addcdiv(self, tensor1, tensor2, value=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addcdiv`.
         """
-
-        return tensor_operator_registry.get('addcdiv')()(self, tensor1, tensor2, value)
+        return tensor_operator_registry.get('addcdiv')(self, tensor1, tensor2, value)

     def addcmul(self, tensor1, tensor2, value=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addcmul`.
         """
-
-        return tensor_operator_registry.get('addcmul')()(self, tensor1, tensor2, value)
+        return tensor_operator_registry.get('addcmul')(self, tensor1, tensor2, value)

+    @add_mint
     def add(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.add`.
         """
-
-
+        return tensor_operator_registry.get('add')(self, other)
+
+    def add_(self, other, *, alpha=1):
+        """
+        inplace update self by following compute:
+        self = self + other * alpha.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+            The `other` tensor must be broadcastable with the `self` tensor. It may be of a different data type.
+
+        Args:
+            other (Tensor): the source tensor Add to self Tensor.
+            alpha (Number): no effect currently.
+
+        Returns:
+            Return self Tensor.
+
+        Supported Platforms:
+            ``Ascend``
+
+        Examples:
+            >>> import numpy as np
+            >>> from mindspore import Tensor
+            >>> a = Tensor(np.ones((2,3)).astype("float32"))
+            >>> b = Tensor(np.ones((2,3)).astype("float32"))
+            >>> a.add_(b)
+            >>> print(a)
+            [[2. 2. 2.]
+             [2. 2. 2.]]
+        """
+        if isinstance(other, (int, float)):
+            ret = tensor_operator_registry.get("adds_")(self, other, alpha)
+        else:
+            ret = tensor_operator_registry.get("add_")(self, other, alpha)
+        return ret

     def subtract(self, other, *, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.subtract`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sub')(self, alpha * other)

     def true_divide(self, value):
@@ -1142,7 +1363,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Alias for Tensor.div() with :math:`rounding\_mode=None`.
         For details, please refer to :func:`mindspore.ops.div`.
         """
-        self._init_check()
         return tensor_operator_registry.get('div')(self, value, rounding_mode=None)

     def triu(self, diagonal=0):
@@ -1153,7 +1373,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         This is an experimental API that is subject to change or deletion.

         """
-        self._init_check()
         validator.check_value_type('diagonal', diagonal, [int], 'triu')
         return tensor_operator_registry.get('triu')(self, diagonal)

@@ -1161,65 +1380,69 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.addbmm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('addbmm')(self, batch1, batch2, beta=beta, alpha=alpha)

     def addmm(self, mat1, mat2, *, beta=1, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addmm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('addmm')(self, mat1, mat2, beta=beta, alpha=alpha)

+    def addmm_(self, mat1, mat2, *, beta=1, alpha=1):
+        r"""
+        For details, please refer to :func:`mindspore.ops.addmm`.
+
+        .. note::
+            The output results are directly updated in the Tensor.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+
+        """
+        return tensor_operator_registry.get('addmm_')(self, mat1, mat2, beta=beta, alpha=alpha)
+
     def addr(self, vec1, vec2, beta=1, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addr`.
         """
-        self._init_check()
         return tensor_operator_registry.get('addr')(self, vec1, vec2, beta=beta, alpha=alpha)

     def adjoint(self):
         r"""
         For details, please refer to :func:`mindspore.ops.adjoint`.
         """
-        self._init_check()
         return tensor_operator_registry.get('adjoint')(self)

     def all(self, axis=None, keep_dims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.all`.
         """
-        self._init_check()
         return tensor_operator_registry.get('all')(self, axis, keep_dims)

     def angle(self):
         r"""
         For details, please refer to :func:`mindspore.ops.angle`.
         """
-        self._init_check()
         return tensor_operator_registry.get('angle')(self)

     def any(self, axis=None, keep_dims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.any`.
         """
-        self._init_check()
         if axis is None:
             axis = ()
-        return tensor_operator_registry.get('any')(
+        return tensor_operator_registry.get('any')(self, axis, keep_dims)

     def atan2(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.atan2`.
         """
-        self._init_check()
         return tensor_operator_registry.get('atan2')(self, other)

     def baddbmm(self, batch1, batch2, beta=1, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.baddbmm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('baddbmm')(self, batch1, batch2, beta=beta, alpha=alpha)

     def view(self, *shape):
@@ -1243,7 +1466,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [3. 2.]
         [3. 4.]]
         """
-        self._init_check()
         if not shape:
             raise ValueError("The shape variable should not be empty")
         if isinstance(shape[0], tuple):
@@ -1277,7 +1499,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output)
         [1. 2. 3. 2. 3. 4.]
         """
-        self._init_check()
         if not isinstance(other, (Tensor, Tensor_)):
             raise TypeError(f"For view_as, the input other must be a Tensor, but got {type(other)}")
         return self.view(other.shape)
@@ -1286,42 +1507,36 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.t`.
         """
-        self._init_check()
         return tensor_operator_registry.get("t")(self)

     def bitwise_and(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_and`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bitwise_and')(self, other)

     def bitwise_or(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_or`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bitwise_or')(self, other)

     def bitwise_xor(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_xor`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bitwise_xor')(self, other)

     def bitwise_left_shift(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_left_shift`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bitwise_left_shift')(self, other)

     def bitwise_right_shift(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_right_shift`.
         """
-        self._init_check()
         _cast = tensor_operator_registry.get('cast')
         other = _cast(other, self.dtype)
         return tensor_operator_registry.get('bitwise_right_shift')(self, other)
@@ -1330,50 +1545,43 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.scatter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('scatter')(self, axis, index, src)

     def scatter_mul(self, indices, updates):
         """
         For details, please refer to :func:`mindspore.ops.scatter_mul`.
         """
-        self._init_check()
         return tensor_operator_registry.get('tensor_scatter_mul')(self, indices, updates)

     def scatter_div(self, indices, updates):
         """
         For details, please refer to :func:`mindspore.ops.scatter_div`.
         """
-        self._init_check()
         return tensor_operator_registry.get('tensor_scatter_div')(self, indices, updates)

     def ger(self, vec2):
         """
         For details, please refer to :func:`mindspore.ops.ger`.
         """
-        self._init_check()
         return tensor_operator_registry.get('ger')(self, vec2)

     def gt(self, x):
         """
         For details, please refer to :func:`mindspore.ops.gt`.
         """
-
-        return tensor_operator_registry.get('gt')()(self, x)
+        return tensor_operator_registry.get('gt')(self, x)

     def ge(self, x):
         """
         For details, please refer to :func:`mindspore.ops.ge`.
         """
-
-        return tensor_operator_registry.get('ge')()(self, x)
+        return tensor_operator_registry.get('ge')(self, x)

     def broadcast_to(self, shape):
         """
         For details, please refer to :func:`mindspore.ops.broadcast_to`.
         """
-
-        return tensor_operator_registry.get('broadcast_to')(shape)(self)
+        return tensor_operator_registry.get('broadcast_to')(self, shape)

     def expand_as(self, x):
         """
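A pattern running through this hunk and many neighbors: in 2.2.14 several registry entries held op factories that were instantiated before use, e.g. `get('gt')()(self, x)` or `get('broadcast_to')(shape)(self)`; in 2.4.0 they hold functional ops called once with the tensor first. The public methods behave the same either way, as a quick check shows:

    import mindspore as ms
    from mindspore import Tensor

    a = Tensor([1.0, 2.0], ms.float32)
    b = Tensor([2.0, 1.0], ms.float32)
    print(a.gt(b))                  # [False  True]; only the internal dispatch changed
    print(a.broadcast_to((2, 2)))   # [[1. 2.] [1. 2.]]; one call instead of two
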
@@ -1397,84 +1605,73 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
1397
1605
|
[[1. 2. 3.]
|
|
1398
1606
|
[1. 2. 3.]]
|
|
1399
1607
|
"""
|
|
1400
|
-
|
|
1401
|
-
return tensor_operator_registry.get('broadcast_to')(x.shape)(self)
|
|
1608
|
+
return tensor_operator_registry.get('broadcast_to')(self, x.shape)
|
|
1402
1609
|
|
|
1403
1610
|
def exp(self):
|
|
1404
1611
|
"""
|
|
1405
1612
|
For details, please refer to :func:`mindspore.ops.exp`.
|
|
1406
1613
|
"""
|
|
1407
|
-
self._init_check()
|
|
1408
1614
|
return tensor_operator_registry.get('exp')(self)
|
|
1409
1615
|
|
|
1410
1616
|
def real(self):
|
|
1411
1617
|
r"""
|
|
1412
1618
|
For details, please refer to :func:`mindspore.ops.real`.
|
|
1413
1619
|
"""
|
|
1414
|
-
self._init_check()
|
|
1415
1620
|
return tensor_operator_registry.get('real')(self)
|
|
1416
1621
|
|
|
1417
1622
|
def rsqrt(self):
|
|
1418
1623
|
r"""
|
|
1419
1624
|
For details, please refer to :func:`mindspore.ops.rsqrt`.
|
|
1420
1625
|
"""
|
|
1421
|
-
self._init_check()
|
|
1422
1626
|
return tensor_operator_registry.get('rsqrt')(self)
|
|
1423
1627
|
|
|
1424
1628
|
def reciprocal(self):
|
|
1425
1629
|
r"""
|
|
1426
1630
|
For details, please refer to :func:`mindspore.ops.reciprocal`.
|
|
1427
1631
|
"""
|
|
1428
|
-
self._init_check()
|
|
1429
1632
|
return tensor_operator_registry.get('reciprocal')(self)
|
|
1430
1633
|
|
|
1431
1634
|
def sqrt(self):
|
|
1432
1635
|
"""
|
|
1433
1636
|
For details, please refer to :func:`mindspore.ops.sqrt`.
|
|
1434
1637
|
"""
|
|
1435
|
-
self._init_check()
|
|
1436
1638
|
return tensor_operator_registry.get('sqrt')(self)
|
|
1437
1639
|
|
|
1438
1640
|
def square(self):
|
|
1439
1641
|
"""
|
|
1440
1642
|
For details, please refer to :func:`mindspore.ops.square`.
|
|
1441
1643
|
"""
|
|
1442
|
-
self._init_check()
|
|
1443
1644
|
return tensor_operator_registry.get('square')(self)
|
|
1444
1645
|
|
|
1646
|
+
@sub_mint
|
|
1445
1647
|
def sub(self, y):
|
|
1446
1648
|
r"""
|
|
1447
1649
|
For details, please refer to :func:`mindspore.ops.sub`.
|
|
1448
1650
|
"""
|
|
1449
|
-
self._init_check()
|
|
1450
1651
|
return tensor_operator_registry.get('sub')(self, y)
|
|
1451
1652
|
|
|
1452
1653
|
def tan(self):
|
|
1453
1654
|
"""
|
|
1454
1655
|
For details, please refer to :func:`mindspore.ops.tan`.
|
|
1455
1656
|
"""
|
|
1456
|
-
|
|
1457
|
-
return tensor_operator_registry.get('tan')()(self)
|
|
1657
|
+
return tensor_operator_registry.get('tan')(self)
|
|
1458
1658
|
|
|
1459
1659
|
def tanh(self):
|
|
1460
1660
|
r"""
|
|
1461
1661
|
For details, please refer to :func:`mindspore.ops.tanh`.
|
|
1462
1662
|
"""
|
|
1463
|
-
self._init_check()
|
|
1464
1663
|
return tensor_operator_registry.get('tanh')(self)
|
|
1465
1664
|
|
|
1466
1665
|
def cosh(self):
|
|
1467
1666
|
r"""
|
|
1468
1667
|
For details, please refer to :func:`mindspore.ops.cosh`.
|
|
1469
1668
|
"""
|
|
1470
|
-
|
|
1471
|
-
return tensor_operator_registry.get('cosh')()(self)
|
|
1669
|
+
return tensor_operator_registry.get('cosh')(self)
|
|
1472
1670
|
|
|
1473
1671
|
def acos(self):
|
|
1474
1672
|
r"""
|
|
1475
1673
|
For details, please refer to :func:`mindspore.ops.acos`.
|
|
1476
1674
|
"""
|
|
1477
|
-
self._init_check()
|
|
1478
1675
|
return tensor_operator_registry.get('acos')(self)
|
|
1479
1676
|
|
|
1480
1677
|
def arccos(self):
|
|
@@ -1487,35 +1684,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
1487
1684
|
r"""
|
|
1488
1685
|
For details, please refer to :func:`mindspore.ops.cos`.
|
|
1489
1686
|
"""
|
|
1490
|
-
self._init_check()
|
|
1491
1687
|
return tensor_operator_registry.get('cos')(self)
|
|
1492
1688
|
|
|
1493
1689
|
def cov(self, *, correction=1, fweights=None, aweights=None):
|
|
1494
1690
|
r"""
|
|
1495
1691
|
For details, please refer to :func:`mindspore.ops.cov`.
|
|
1496
1692
|
"""
|
|
1497
|
-
self._init_check()
|
|
1498
1693
|
return tensor_operator_registry.get('cov')(self, correction=correction, fweights=fweights, aweights=aweights)
|
|
1499
1694
|
|
|
1500
1695
|
def acosh(self):
|
|
1501
1696
|
"""
|
|
1502
1697
|
For details, please refer to :func:`mindspore.ops.acosh`.
|
|
1503
1698
|
"""
|
|
1504
|
-
self._init_check()
|
|
1505
1699
|
return tensor_operator_registry.get('acosh')(self)
|
|
1506
1700
|
|
|
1507
1701
|
def asin(self):
|
|
1508
1702
|
r"""
|
|
1509
1703
|
For details, please refer to :func:`mindspore.ops.asin`.
|
|
1510
1704
|
"""
|
|
1511
|
-
self._init_check()
|
|
1512
1705
|
return tensor_operator_registry.get('asin')(self)
|
|
1513
1706
|
|
|
1514
1707
|
def abs(self):
|
|
1515
1708
|
"""
|
|
1516
1709
|
For details, please refer to :func:`mindspore.ops.abs`.
|
|
1517
1710
|
"""
|
|
1518
|
-
self._init_check()
|
|
1519
1711
|
return tensor_operator_registry.get('abs')(self)
|
|
1520
1712
|
|
|
1521
1713
|
def absolute(self):
|
|
@@ -1528,14 +1720,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
1528
1720
|
"""
|
|
1529
1721
|
For details, please refer to :func:`mindspore.ops.ceil`.
|
|
1530
1722
|
"""
|
|
1531
|
-
|
|
1532
|
-
return tensor_operator_registry.get('ceil')()(self)
|
|
1723
|
+
return tensor_operator_registry.get('ceil')(self)
|
|
1533
1724
|
|
|
1534
1725
|
def floor(self):
|
|
1535
1726
|
"""
|
|
1536
1727
|
For details, please refer to :func:`mindspore.ops.floor`.
|
|
1537
1728
|
"""
|
|
1538
|
-
self._init_check()
|
|
1539
1729
|
return tensor_operator_registry.get('floor')(self)
|
|
1540
1730
|
|
|
1541
1731
|
def floor_divide(self, other):
|
|
@@ -1545,21 +1735,18 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
1545
1735
|
.. warning::
|
|
1546
1736
|
This is an experimental API that is subject to change or deletion.
|
|
1547
1737
|
"""
|
|
1548
|
-
self._init_check()
|
|
1549
1738
|
return tensor_operator_registry.get('floor_divide')(self, other)
|
|
1550
1739
|
|
|
1551
1740
|
def lerp(self, end, weight):
|
|
1552
1741
|
"""
|
|
1553
1742
|
For details, please refer to :func:`mindspore.ops.lerp`.
|
|
1554
1743
|
"""
|
|
1555
|
-
self._init_check()
|
|
1556
1744
|
return tensor_operator_registry.get('lerp')(self, end, weight)
|
|
1557
1745
|
|
|
1558
1746
|
def negative(self):
|
|
1559
1747
|
r"""
|
|
1560
1748
|
For details, please refer to :func:`mindspore.ops.negative`.
|
|
1561
1749
|
"""
|
|
1562
|
-
self._init_check()
|
|
1563
1750
|
return tensor_operator_registry.get("negative")(self)
|
|
1564
1751
|
|
|
1565
1752
|
# pylint: disable=redefined-builtin
|
|
@@ -1567,14 +1754,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
1567
1754
|
"""
|
|
1568
1755
|
For details, please refer to :func:`mindspore.ops.norm`.
|
|
1569
1756
|
"""
|
|
1570
|
-
self._init_check()
|
|
1571
1757
|
return tensor_operator_registry.get('norm')(self, ord, dim, keepdim, dtype=dtype)
|
|
1572
1758
|
|
|
1573
1759
|
def renorm(self, p, axis, maxnorm):
|
|
1574
1760
|
"""
|
|
1575
1761
|
For details, please refer to :func:`mindspore.ops.renorm`.
|
|
1576
1762
|
"""
|
|
1577
|
-
self._init_check()
|
|
1578
1763
|
return tensor_operator_registry.get("renorm")(self, p, axis, maxnorm)
|
|
1579
1764
|
|
|
1580
1765
|
def approximate_equal(self, other, tolerance=1e-5):
|
|
@@ -1584,7 +1769,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
1584
1769
|
validator.check_isinstance("x", self, Tensor)
|
|
1585
1770
|
validator.check_isinstance("y", other, Tensor)
|
|
1586
1771
|
validator.check_isinstance("tolerance", tolerance, float)
|
|
1587
|
-
self._init_check()
|
|
1588
1772
|
input_x = self.copy() if self.dtype == mstype.float32 else self.astype(mstype.float16)
|
|
1589
1773
|
input_y = other.copy() if other.dtype == mstype.float32 else other.astype(mstype.float16)
|
|
1590
1774
|
return tensor_operator_registry.get('__lt__')(tensor_operator_registry.get('abs')(
|
|
@@ -1595,14 +1779,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
1595
1779
|
r"""
|
|
1596
1780
|
For details, please refer to :func:`mindspore.ops.log1p`.
|
|
1597
1781
|
"""
|
|
1598
|
-
self._init_check()
|
|
1599
1782
|
return tensor_operator_registry.get('log1p')(self)
|
|
1600
1783
|
|
|
1601
1784
|
def logit(self, eps=None):
|
|
1602
1785
|
r"""
|
|
1603
1786
|
For details, please refer to :func:`mindspore.ops.logit`.
|
|
1604
1787
|
"""
|
|
1605
|
-
self._init_check()
|
|
1606
1788
|
if eps is None:
|
|
1607
1789
|
eps = -1.0
|
|
1608
1790
|
validator.check_value_type('eps', eps, (float,), 'Tensor.logit')
|
|
@@ -1612,14 +1794,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
1612
1794
|
r"""
|
|
1613
1795
|
For details, please refer to :func:`mindspore.ops.logaddexp`.
|
|
1614
1796
|
"""
|
|
1615
|
-
self._init_check()
|
|
1616
1797
|
return tensor_operator_registry.get('logaddexp')(self, other)
|
|
1617
1798
|
|
|
1618
1799
|
def logaddexp2(self, other):
|
|
1619
1800
|
r"""
|
|
1620
1801
|
For details, please refer to :func:`mindspore.ops.logaddexp2`.
|
|
1621
1802
|
"""
|
|
1622
|
-
self._init_check()
|
|
1623
1803
|
return tensor_operator_registry.get('logaddexp2')(self, other)
|
|
1624
1804
|
|
|
1625
1805
|
def logcumsumexp(self, axis):
|
|
@@ -1629,149 +1809,129 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         .. warning::
             This is an experimental API that is subject to change or deletion.
         """
-        self._init_check()
         return tensor_operator_registry.get('logcumsumexp')(self, axis)
 
     def logsumexp(self, axis, keepdims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.logsumexp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logsumexp')(self, axis, keepdims)
 
     def logdet(self):
         r"""
         For details, please refer to :func:`mindspore.ops.logdet`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logdet')(self)
 
     def i0(self):
         r"""
         For details, please refer to :func:`mindspore.ops.i0`.
         """
-        self._init_check()
         return tensor_operator_registry.get('i0')(self)
 
     def isclose(self, x2, rtol=1e-05, atol=1e-08, equal_nan=False):
         """
         For details, please refer to :func:`mindspore.ops.isclose`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isclose')(self, x2, rtol, atol, equal_nan)
 
     def isneginf(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isneginf`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isneginf')(self)
 
     def isposinf(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isposinf`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isposinf')(self)
 
     def isreal(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isreal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isreal')(self)
 
     def isfinite(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isfinite`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('isfinite')()(self)
+        return tensor_operator_registry.get('isfinite')(self)
 
     def is_complex(self):
         r"""
         For details, please refer to :func:`mindspore.ops.is_complex`.
         """
-        self._init_check()
         return tensor_operator_registry.get('is_complex')(self)
 
     def inv(self):
         r"""
         For details, please refer to :func:`mindspore.ops.inv`.
         """
-        self._init_check()
         return tensor_operator_registry.get('inv')(self)
 
     def inverse(self):
         r"""
         For details, please refer to :func:`mindspore.ops.inverse`.
         """
-        self._init_check()
         return tensor_operator_registry.get('inverse')(self)
 
     def invert(self):
         r"""
         For details, please refer to :func:`mindspore.ops.invert`.
         """
-        self._init_check()
         return tensor_operator_registry.get('invert')(self)
 
     def pow(self, exponent):
         r"""
         For details, please refer to :func:`mindspore.ops.pow`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('pow')()(self, exponent)
+        return tensor_operator_registry.get('pow')(self, exponent)
 
     def log(self):
         """
         For details, please refer to :func:`mindspore.ops.log`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log')(self)
 
     def log10(self):
         r"""
         For details, please refer to :func:`mindspore.ops.log10`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log10')(self)
 
     def log2(self):
         r"""
         For details, please refer to :func:`mindspore.ops.log2`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log2')(self)
 
+    @mean_mint
     def mean(self, axis=None, keep_dims=False):
         """
         For details, please refer to :func:`mindspore.ops.mean`.
         """
-        self._init_check()
         return tensor_operator_registry.get('mean')(self, axis, keep_dims)
 
     def amin(self, axis=None, keepdims=False, *, initial=None, where=None):
         """
         For details, please refer to :func:`mindspore.ops.amin`.
         """
-        self._init_check()
         if axis is None:
             axis = ()
         return tensor_operator_registry.get('amin')(self, axis, keepdims, initial=initial, where=where)
 
     def reverse(self, axis):
         """
-        For details, please refer to :func:`mindspore.ops.reverse`.
+        For details, please refer to :func:`mindspore.ops.flip`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('reverse')(axis)(self)
+        return tensor_operator_registry.get('flip')(self, axis)
 
     def amax(self, axis=None, keepdims=False, *, initial=None, where=None):
         """
         For details, please refer to :func:`mindspore.ops.amax`.
         """
-        self._init_check()
         if axis is None:
             axis = ()
         return tensor_operator_registry.get('amax')(self, axis, keepdims, initial=initial, where=where)
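The recurring edit in this hunk removes the lazy `self._init_check()` call and swaps the two-step Primitive pattern (`get('op')(args)(tensor)`) for direct functional calls (`get('op')(tensor, args)`); `Tensor.reverse` now routes to `ops.flip`. A minimal sketch of the user-facing result, assuming MindSpore 2.4.0 behavior as diffed here (printed values are illustrative):

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor(np.array([1.0, float("inf"), 2.0]), ms.float32)
print(x.isfinite())           # [ True False  True]

# Tensor.reverse is now a thin wrapper over ops.flip, so both agree:
print(x.reverse(axis=(0,)))   # [ 2. inf  1.]
print(ops.flip(x, (0,)))      # same result
```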
@@ -1780,28 +1940,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.aminmax`.
         """
-        self._init_check()
         return tensor_operator_registry.get('aminmax')(self, axis=axis, keepdims=keepdims)
 
     def reverse_sequence(self, seq_lengths, seq_dim=0, batch_dim=0):
         """
         For details, please refer to :func:`mindspore.ops.reverse_sequence`.
         """
-        self._init_check()
-        return tensor_operator_registry.get("reverse_sequence")(seq_dim, batch_dim)(self, seq_lengths)
+        return tensor_operator_registry.get("reverse_sequence")(self, seq_lengths, seq_dim, batch_dim)
 
-    def prod(self, axis=None, keep_dims=False):
+    def prod(self, axis=None, keep_dims=False, dtype=None):
         """
         For details, please refer to :func:`mindspore.ops.prod`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('prod')(self, axis, keep_dims)
+        return tensor_operator_registry.get('prod')(self, axis, keep_dims, dtype)
 
     def select(self, condition, y):
         r"""
         For details, please refer to :func:`mindspore.ops.select`.
         """
-        self._init_check()
         if not isinstance(condition, Tensor):
             raise TypeError(f"For 'Tensor.select', the argument 'condition' should be Tensor,"
                             f" but got {type(condition)}.")
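Two signature changes land in the hunk above: `reverse_sequence` becomes fully functional, and `prod` gains a `dtype` parameter forwarded to `ops.prod`. A small usage sketch of the new keyword, assuming 2.4.0 semantics:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor

x = Tensor(np.array([[1, 2], [3, 4]]), ms.int32)
print(x.prod())                          # 24
print(x.prod(axis=1))                    # [ 2 12]
# New optional dtype overrides the result type:
print(x.prod(axis=1, dtype=ms.float32))  # [ 2. 12.]
```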
@@ -1816,7 +1972,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
                                 f" then the tensor type should be float32 but got {self.dtype}")
         input_y = y
         if isinstance(y, (int, float)):
-            input_y = tensor_operator_registry.get('zeros_like')()(self) + y
+            input_y = tensor_operator_registry.get('zeros_like')(self) + y
             if isinstance(y, int):
                 input_y = tensor_operator_registry.get('cast')(input_y, mstype.int32)
             else:
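The `zeros_like` fix above is what handles a scalar `y` in `Tensor.select`: the scalar is expanded to the tensor's shape via `zeros_like(self) + y`. A usage sketch (values illustrative):

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor

x = Tensor(np.array([1.0, 2.0, 3.0]), ms.float32)
cond = Tensor(np.array([True, False, True]))
# Where cond is True keep x; elsewhere use the scalar fill value.
print(x.select(cond, 0.0))   # [1. 0. 3.]
```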
@@ -1827,22 +1983,46 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.transpose`.
         """
-        self._init_check()
         perm = validator.check_transpose_axis(axes, self.ndim)
-        return tensor_operator_registry.get('transpose')()(self, perm)
+        return tensor_operator_registry.get('transpose')(self, perm)
 
     def col2im(self, output_size, kernel_size, dilation, padding_value, stride):
         """
         For details, please refer to :func:`mindspore.ops.col2im`.
         """
-        self._init_check()
         return tensor_operator_registry.get('col2im')(self, output_size, kernel_size, dilation, padding_value, stride)
 
     def reshape(self, *shape):
+        r"""
+        Rearranges the input Tensor based on the given `shape` .
+
+        The `shape` can only have one -1 at most, in which case it's inferred from the remaining dimensions and
+        the number of elements in the input.
+
+        Args:
+            shape (Union[int, tuple[int], list[int]]): If `shape` is a tuple or list, its elements should be
+                integers, and only constant value is allowed. i.e., :math:`(y_1, y_2, ..., y_S)`.
+
+        Returns:
+            Tensor, If the given `shape` does not contain -1, the `shape` of tensor is :math:`(y_1, y_2, ..., y_S)`.
+            If the k-th position in the given `shape` is -1, the `shape` of tensor is :math:`(y_1, ..., y_{k-1},
+            \frac{\prod_{i=1}^{R}x_{i}}{y_1\times ...\times y_{k-1}\times y_{k+1}\times...\times y_S} , y_{k+1},
+            ..., y_S)`, in where the shape of input tensor is :math:`(x_1, x_2, ..., x_R)`.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import mindspore
+            >>> import numpy as np
+            >>> from mindspore import Tensor, ops
+            >>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
+            >>> output = input.reshape(3, 2)
+            >>> print(output)
+            [[-0.1  0.3]
+             [ 3.6  0.4]
+             [ 0.5 -3.2]]
         """
-        For details, please refer to :func:`mindspore.ops.reshape`.
-        """
-        self._init_check()
         new_shape = validator.check_reshape_shp(shape)
         return tensor_operator_registry.get('reshape')(self, new_shape)
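The new `reshape` docstring above documents -1 inference; a short sketch of the rule it describes (one -1 at most, inferred from the element count):

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor

x = Tensor(np.arange(6), ms.float32)   # 6 elements
# 6 / 3 = 2, so -1 is inferred as 2:
print(x.reshape(3, -1).shape)          # (3, 2)
# A tuple or list also matches the documented signature:
print(x.reshape((2, 3)).shape)         # (2, 3)
```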
@@ -1871,7 +2051,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [ 3.6  0.4]
         [ 0.5 -3.2]]
         """
-        self._init_check()
         return tensor_operator_registry.get('reshape')(self, other.shape)
 
     def ravel(self):
@@ -1881,13 +2060,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Returns:
             Tensor, a 1-D tensor, containing the same elements of the input.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
         See also:
-            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.flatten`: Return a copy of the tensor collapsed into one dimension.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -1897,85 +2075,73 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output.shape)
         (24,)
         """
-        self._init_check()
         reshape_op = tensor_operator_registry.get('reshape')
         return reshape_op(self, (-1,))
 
-    def round(self):
+    def round(self, decimals=0):
         """
         For details, please refer to :func:`mindspore.ops.round`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('round')()(self)
+        return tensor_operator_registry.get('round')(self, decimals=decimals)
 
     def roll(self, shifts, dims):
         """
         For details, please refer to :func:`mindspore.ops.roll`.
         """
-        self._init_check()
         return tensor_operator_registry.get('roll')(shifts, dims)(self)
 
     def rot90(self, k, dims):
         r"""
         For details, please refer to :func:`mindspore.ops.rot90`.
         """
-        self._init_check()
         return tensor_operator_registry.get('rot90')(self, k, dims)
 
     def deg2rad(self):
         r"""
         For details, please refer to :func:`mindspore.ops.deg2rad`.
         """
-        self._init_check()
         return tensor_operator_registry.get('deg2rad')(self)
 
     def dot(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.dot`.
         """
-        self._init_check()
         return tensor_operator_registry.get('dot')(self, other)
 
     def outer(self, vec2):
         r"""
         For details, please refer to :func:`mindspore.ops.outer`.
         """
-        self._init_check()
         return tensor_operator_registry.get('outer')(self, vec2)
 
     def rad2deg(self):
         r"""
         For details, please refer to :func:`mindspore.ops.rad2deg`.
         """
-        self._init_check()
         return tensor_operator_registry.get('rad2deg')(self)
 
     def copysign(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.copysign`.
         """
-        self._init_check()
         return tensor_operator_registry.get('copysign')(self, other)
 
     def nelement(self):
         r"""
         Alias for :func:`mindspore.Tensor.numel`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nelement')(self)
 
     def numel(self):
         r"""
         For details, please refer to :func:`mindspore.ops.numel`.
         """
-        self._init_check()
         return tensor_operator_registry.get('numel')(self)
 
     def permute(self, *axis):
         """
         For details, please refer to :func:`mindspore.ops.permute`.
         """
-        self._init_check()
         perm = validator.check_transpose_axis(axis, self.ndim)
         return tensor_operator_registry.get('permute')(self, perm)
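`Tensor.round` now accepts `decimals` and forwards it as a keyword to the functional op. A sketch, assuming 2.4.0 semantics; half-way cases may round to even, so the printed values are approximate:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor

x = Tensor(np.array([0.8154, 2.718, -2.675]), ms.float32)
print(x.round())             # [ 1.  3. -3.]
print(x.round(decimals=2))   # [ 0.82  2.72 -2.67] (approximately)
```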
@@ -1983,98 +2149,85 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.positive`.
         """
-        self._init_check()
         return tensor_operator_registry.get("positive")(self)
 
     def remainder(self, divisor):
         r"""
         For details, please refer to :func:`mindspore.ops.remainder`.
         """
-        self._init_check()
         return tensor_operator_registry.get('remainder')(self, divisor)
 
+    @flatten_mint
     def flatten(self, order='C', *, start_dim=0, end_dim=-1):
         r"""
         For details, please refer to :func:`mindspore.ops.flatten`.
         """
-        self._init_check()
         return tensor_operator_registry.get('flatten')(self, order, start_dim=start_dim, end_dim=end_dim)
 
     def float_power(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.float_power`.
         """
-        self._init_check()
         return tensor_operator_registry.get('float_power')(self, other)
 
     def fmax(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.fmax`.
         """
-        self._init_check()
         return tensor_operator_registry.get('fmax')(self, other)
 
     def fmin(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.fmin`.
         """
-        self._init_check()
         return tensor_operator_registry.get('fmin')(self, other)
 
     def fmod(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.fmod`.
         """
-        self._init_check()
         return tensor_operator_registry.get('fmod')(self, other)
 
     def narrow(self, axis, start, length):
         """
         For details, please refer to :func:`mindspore.ops.narrow`.
         """
-        self._init_check()
         return tensor_operator_registry.get('narrow')(self, axis, start, length)
 
     def swapaxes(self, axis0, axis1):
         """
         For details, please refer to :func:`mindspore.ops.swapaxes`.
         """
-        self._init_check()
         return tensor_operator_registry.get('swapaxes')(self, axis0, axis1)
 
     def swapdims(self, dim0, dim1):
         """
         For details, please refer to :func:`mindspore.ops.swapdims`.
         """
-        self._init_check()
         return tensor_operator_registry.get('swapdims')(self, dim0, dim1)
 
     def squeeze(self, axis=None):
         """
         For details, please refer to :func:`mindspore.ops.squeeze`.
         """
-        self._init_check()
         return tensor_operator_registry.get('squeeze')(self, axis)
 
     def slogdet(self):
         """
         For details, please refer to :func:`mindspore.ops.slogdet`.
         """
-        self._init_check()
         return tensor_operator_registry.get('slogdet')(self)
 
     def tril(self, diagonal=0):
         """
         For details, please refer to :func:`mindspore.ops.tril`.
         """
-        self._init_check()
         return tensor_operator_registry.get('tril')(self, diagonal)
 
     def unsqueeze(self, dim):
         """
         For details, please refer to :func:`mindspore.ops.unsqueeze`.
         """
-        self._init_check()
         validator.check_is_int(dim, 'dim')
         validator.check_int_range(dim, -self.ndim - 1, self.ndim + 1, validator.INC_LEFT, 'dim')
         return tensor_operator_registry.get('unsqueeze')(self, dim)
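`flatten` keeps its keyword-only `start_dim`/`end_dim` range and is now decorated with `@flatten_mint`. A shape-only sketch of the documented signature:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor

x = Tensor(np.zeros((2, 3, 4)), ms.float32)
print(x.flatten().shape)                         # (24,)
# Collapse only a sub-range of axes:
print(x.flatten(start_dim=1).shape)              # (2, 12)
print(x.flatten(start_dim=0, end_dim=1).shape)   # (6, 4)
```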
@@ -2083,7 +2236,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.expand_dims`.
         """
-        self._init_check()
         validator.check_is_int(axis, 'axis')
         validator.check_int_range(axis, -self.ndim - 1, self.ndim + 1, validator.INC_LEFT, 'axis')
         return tensor_operator_registry.get('expand_dims')(self, axis)
@@ -2116,7 +2268,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(x.dtype)
         Int32
         """
-        self._init_check()
         dtype = _check_astype_and_convert(dtype)
         if not copy and dtype == self.dtype:
             return self
@@ -2126,7 +2277,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.argmax`.
         """
-        self._init_check()
         out = tensor_operator_registry.get('argmax')(self, axis, keepdims)
         return out
 
@@ -2134,7 +2284,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.argmin`.
         """
-        self._init_check()
         out = tensor_operator_registry.get('argmin')(self, axis, keepdims)
         return out
 
@@ -2185,7 +2334,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         if self.shape == ():
             return (self, Tensor(0))
-        self._init_check()
         return tensor_operator_registry.get('argmax_with_value')(self, axis, keep_dims)
 
     def argmin_with_value(self, axis=0, keep_dims=False):
@@ -2233,7 +2381,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         if self.shape == ():
             return (self, Tensor(0))
-        self._init_check()
         return tensor_operator_registry.get('argmin_with_value')(self, axis, keep_dims)
 
     def cumsum(self, axis=None, dtype=None):
@@ -2275,15 +2422,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.index_select`.
         """
-        self._init_check()
         return tensor_operator_registry.get('index_select')(self, axis, index)
 
     def inplace_update(self, v, indices):
         """
         For details, please refer to :func:`mindspore.ops.inplace_update`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('inplace_update')()(self, indices, v)
+        return tensor_operator_registry.get('inplace_update')(self, v, indices)
 
     def copy(self):
         """
@@ -2321,6 +2466,38 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             x = x.astype(origin_dtype)
         return x
 
+    def copy_(self, src, non_blocking=False):
+        """
+        Copies the elements from src into self tensor and returns self.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+            The `src` tensor must be broadcastable with the `self` tensor. It may be of a different data type.
+
+        Args:
+            src (Tensor): the source tensor to copy from.
+            non_blocking (bool): no effect currently.
+
+        Returns:
+            Return self Tensor.
+
+        Supported Platforms:
+            ``Ascend``
+
+        Examples:
+            >>> import numpy as np
+            >>> from mindspore import Tensor
+            >>> a = Tensor(np.ones((3,3)).astype("float32"))
+            >>> b = Tensor(np.zeros((3,3)).astype("float32"))
+            >>> a.copy_(b)
+            >>> print(a)
+            [[0. 0. 0.]
+             [0. 0. 0.]
+             [0. 0. 0.]]
+        """
+        return tensor_operator_registry.get("copy_")(self, src)
+
+    @max_mint
     def max(self, axis=None, keepdims=False, *, initial=None, where=True, return_indices=False):
         """
         Return the maximum of a tensor or maximum along an axis.
@@ -2357,15 +2534,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Raises:
             TypeError: If arguments have types not specified above.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
         See also:
-            :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
-
-            :func:`mindspore.Tensor.
+            - :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
+            - :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
+            - :func:`mindspore.Tensor.min`: Return the minimum of a tensor or minimum along an axis.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -2380,7 +2555,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(indices)
         [1 1]
         """
-        self._init_check()
         if isinstance(axis, (list, tuple)):
             reduce_ = tensor_operator_registry.get("reduce")
             reduce_max = tensor_operator_registry.get("reduce_max")
@@ -2392,6 +2566,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             return values
         return values, indices
 
+    @min_mint
     def min(self, axis=None, keepdims=False, *, initial=None, where=True, return_indices=False):
         """
         Return the minimum of a tensor or minimum along an axis.
@@ -2428,15 +2603,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Raises:
             TypeError: If arguments have types not specified above.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
        See also:
-            :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
-
-            :func:`mindspore.Tensor.
+            - :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
+            - :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
+            - :func:`mindspore.Tensor.max`: Return the minimum of a tensor or minimum along an axis.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -2460,12 +2633,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(indices)
         [0 0]
         """
-        self._init_check()
         if isinstance(axis, (list, tuple)):
             reduce_ = tensor_operator_registry.get("reduce")
             reduce_min = tensor_operator_registry.get("reduce_min")
             minimum = tensor_operator_registry.get("minimum")
-            return reduce_(self, reduce_min(keepdims), cmp_fn=minimum
+            return reduce_(self, reduce_min(keepdims), cmp_fn=minimum, axis=axis, keepdims=keepdims,
                            initial=initial, where=where)
         values, indices = tensor_operator_registry.get("min")(self, axis, keepdims, initial=initial, where=where)
         if not return_indices:
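The completed `reduce_` call above is the path taken when `axis` is a list or tuple; a scalar `axis` goes through the `min` registry entry and can also return indices. A sketch, assuming the patched 2.4.0 behavior (printed values illustrative):

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor

x = Tensor(np.arange(12).reshape(3, 4), ms.float32)
values, indices = x.min(axis=1, return_indices=True)
print(values)               # [0. 4. 8.]
# Tuple axis takes the reduce_min path fixed above (values only):
print(x.min(axis=(0, 1)))   # 0.0
```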
@@ -2476,7 +2648,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.scatter_add`.
         """
-        self._init_check()
         return tensor_operator_registry.get("tensor_scatter_add")(self, indices, updates)
 
     def scatter_sub(self, indices, updates):
@@ -2489,7 +2660,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
         The last axis of `indices` is the depth of each index vectors. For each index vector,
         there must be a corresponding value in `updates`. The shape of `updates` should be
-        equal to the shape of `self[indices]`. For more details, see
+        equal to the shape of `self[indices]`. For more details, see Examples.
 
         Note:
             On GPU, if some values of the `indices` are out of bound, instead of raising an index error,
@@ -2524,28 +2695,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[-3.3000002  0.3        3.6      ]
         [ 0.4        0.5       -3.2      ]]
         """
-        self._init_check()
         return tensor_operator_registry.get('tensor_scatter_sub')(self, indices, updates)
 
     def scatter_min(self, indices, updates):
         """
         For details, please refer to :func:`mindspore.ops.scatter_min`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('tensor_scatter_min')()(self, indices, updates)
+        return tensor_operator_registry.get('tensor_scatter_min')(self, indices, updates)
 
     def scatter_max(self, indices, updates):
         """
         For details, please refer to :func:`mindspore.ops.scatter_max`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('tensor_scatter_max')()(self, indices, updates)
+        return tensor_operator_registry.get('tensor_scatter_max')(self, indices, updates)
+
+    def softmax(self, axis, dtype=None):
+        """
+        For details, please refer to :func:`mindspore.ops.softmax`.
+        """
+        return tensor_operator_registry.get('softmax')(self, axis, dtype=dtype)
 
     def fill(self, value):
         """
         `Tensor.fill` is deprecated, please use `ops.fill` instead.
         """
-        self._init_check()
         if value is None:
             if self.dtype not in (mstype.float16, mstype.float32, mstype.float64):
                 raise TypeError("For 'Tensor.fill', if the argument 'value' is None, the type of the original "
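`Tensor.softmax` is new in the hunk above: a thin wrapper over `ops.softmax` with an optional `dtype`. A minimal sketch:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor

x = Tensor(np.array([[1.0, 2.0, 3.0]]), ms.float32)
y = x.softmax(axis=-1)
print(y)         # [[0.09003057 0.24472848 0.66524094]]
print(y.sum())   # ~1.0 along the softmax axis
```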
@@ -2558,7 +2731,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         `Tensor.fills` is deprecated, please use `ops.fill` instead.
         """
-        self._init_check()
         return tensor_operator_registry.get('fills')(self, value)
 
     def fill_diagonal(self, fill_value, wrap=False):
@@ -2600,14 +2772,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [5. 1. 1.]
         [1. 5. 1.]]
         """
-        self._init_check()
         return tensor_operator_registry.get('fill_diagonal')(fill_value, wrap)(self)
 
     def masked_fill(self, mask, value):
         """
         For details, please refer to :func:`mindspore.ops.masked_fill`.
         """
-        self._init_check()
         if isinstance(value, (float, int)):
             value = tensor_operator_registry.get("scalar_to_tensor")(value, self.dtype)
         if not isinstance(mask, Tensor):
@@ -2663,13 +2833,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.minimum`.
         """
-        return tensor_operator_registry.get('minimum')()(self, other)
+        return tensor_operator_registry.get('minimum')(self, other)
 
     def clamp(self, min=None, max=None):
         r"""
         For details, please refer to :func:`mindspore.ops.clamp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('clamp')(self, min, max)
 
     def clip(self, min=None, max=None):
@@ -2678,10 +2847,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return self.clamp(min, max)
 
-    def _init_check(self):
-        if self.has_init:
-            self.init_data()
-
     def init_data(self, slice_index=None, shape=None, opt_shard_group=None):
         """
         Get the tensor format data of this Tensor.
@@ -2698,7 +2863,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             opt_shard_group(str): Optimizer shard group which is used in auto or semi auto parallel mode
                 to get one shard of a parameter's slice. For more information about optimizer parallel, please refer to:
                 `Optimizer Parallel
-                <https://www.mindspore.cn/
+                <https://www.mindspore.cn/docs/en/master/model_train/parallel/optimizer_parallel.html>`_.
                 Default: ``None``.
 
         Returns:
@@ -2776,12 +2941,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             if slice_num_of_persistent_data > 1:
                 self.assign_value(Tensor_.persistent_data_from_numpy(data, slice_num_of_persistent_data))
             else:
-
-                # The dtype of data is np.float32 when mstype is bfloat16,
-                # so we create tensor_ by init func instead of asnumpy
-                self.assign_value(Tensor_(data, self.dtype))
-        else:
-            self.assign_value(Tensor_.from_numpy(data))
+                self.assign_value(Tensor_.from_numpy(data))
         return self
 
     def resize(self, *new_shape):
@@ -2803,13 +2963,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Returns:
             Tensor.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
         See also:
-            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.repeat`: Repeat elements of a tensor.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -2836,7 +2995,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         diff_size = new_size - cur_size
         if diff_size > 0:
             pad_val = tensor_operator_registry.get('fill')(self.dtype, (diff_size,), 0)
-            res = tensor_operator_registry.get('concatenate')(0)((flattened, pad_val))
+            res = tensor_operator_registry.get('concatenate')((flattened, pad_val), 0)
         else:
             res = flattened[:new_size]
         return res.reshape(new_shape)
@@ -2845,70 +3004,60 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.det`.
         """
-        self._init_check()
         return tensor_operator_registry.get('det')(self)
 
     def diff(self, n=1, axis=-1, prepend=None, append=None):
         r"""
         For details, please refer to :func:`mindspore.ops.diff`.
         """
-        self._init_check()
         return tensor_operator_registry.get('diff')(self, n, axis, prepend, append)
 
     def frac(self):
         r"""
         For details, please refer to :func:`mindspore.ops.frac`.
         """
-        self._init_check()
         return tensor_operator_registry.get('frac')(self)
 
     def argwhere(self):
         r"""
         For details, please refer to :func:`mindspore.ops.argwhere`.
         """
-        self._init_check()
         return tensor_operator_registry.get('argwhere')(self)
 
     def moveaxis(self, source, destination):
         r"""
         For details, please refer to :func:`mindspore.ops.moveaxis`.
         """
-        self._init_check()
         return tensor_operator_registry.get('moveaxis')(self, source, destination)
 
     def movedim(self, source, destination):
         r"""
         For details, please refer to :func:`mindspore.ops.movedim`.
         """
-        self._init_check()
         return tensor_operator_registry.get('movedim')(self, source, destination)
 
     def digamma(self):
         r"""
         For details, please refer to :func:`mindspore.ops.digamma`.
         """
-        self._init_check()
         return tensor_operator_registry.get('digamma')(self)
 
     def lgamma(self):
         r"""
         For details, please refer to :func:`mindspore.ops.lgamma`.
         """
-        self._init_check()
         return tensor_operator_registry.get('lgamma')(self)
 
     def diagonal(self, offset=0, axis1=0, axis2=1):
         """
         For details, please refer to :func:`mindspore.ops.diagonal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('diagonal')(self, offset, axis1, axis2)
 
     def diagonal_scatter(self, src, offset=0, dim1=0, dim2=1):
         r"""
         For details, please refer to :func:`mindspore.ops.diagonal_scatter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('diagonal_scatter')(self, src, offset, dim1, dim2)
 
     def trace(self, offset=0, axis1=0, axis2=1, dtype=None):
@@ -2933,12 +3082,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Raises:
             ValueError: If the input tensor has less than two dimensions.
 
+        See also:
+            - :func:`mindspore.Tensor.diagonal`: Return specified diagonals.
+
         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
 
-        See also:
-            :func:`mindspore.Tensor.diagonal`: Return specified diagonals.
-
         Examples:
             >>> import numpy as np
             >>> from mindspore import Tensor
@@ -2946,17 +3095,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(x.trace())
         3.0
         """
-
-        self._init_check()
-        return tensor_operator_registry.get('trace')(self)
-        d = self.diagonal(offset, axis1=axis1, axis2=axis2)
-        shape = d.shape
-        if dtype is None:
-            dtype = d.dtype
-        if shape[-1] == 0:
-            return tensor_operator_registry.get('fill')(dtype, shape[:-1], 0)
-        res = tensor_operator_registry.get('reduce_sum')(d.astype(mstype.float32), -1)
-        return res.astype(dtype)
+        return tensor_operator_registry.get('tracev2')(self, offset, axis1, axis2, dtype)
 
     def take(self, indices, axis=None, mode='clip'):
         """
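`trace` now delegates everything, including `offset`, `axis1`, `axis2` and `dtype`, to a single `tracev2` registry call instead of the old dead-code path. A sketch of the signature, assuming the forwarded arguments behave as the docstring describes:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor

x = Tensor(np.eye(3), ms.float32)
print(x.trace())           # 3.0 (main diagonal)
print(x.trace(offset=1))   # 0.0 (first super-diagonal)
```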
@@ -3020,7 +3159,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         shape_indices = tuple(size_indices if i == axis else 1 for i in range(ndim))
         indices = indices.reshape(shape_indices)
         shape_indices = shape_ni + (indices.size,) + shape_nk
-        indices = tensor_operator_registry.get('broadcast_to')(shape_indices)(indices)
+        indices = tensor_operator_registry.get('broadcast_to')(indices, shape_indices)
 
         res = tensor_operator_registry.get('gather_d')(a, axis, indices)
         return res.reshape(shape_out)
@@ -3065,7 +3204,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         if isinstance(choices, Tensor):
             shape_choice = validator.infer_out_shape(self.shape, choices.shape[1:])
-            choices = tensor_operator_registry.get('broadcast_to')((choices.shape[0],) + shape_choice)(choices)
+            choices = tensor_operator_registry.get('broadcast_to')(choices, (choices.shape[0],) + shape_choice)
         else:
             # broadcasts choices to the same shape if choices is a sequence
             choicelist = []
@@ -3078,14 +3217,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         shape_choice = validator.infer_out_shape(self.shape, *shapes)
         tmp = []
         for choice in choicelist:
-            tmp.append(tensor_operator_registry.get('broadcast_to')(shape_choice)(choice))
+            tmp.append(tensor_operator_registry.get('broadcast_to')(choice, shape_choice))
         choices = tensor_operator_registry.get('stack')(tmp, 0)
 
         if self.ndim == 0 or choices.ndim == 0:
             raise ValueError(f"For 'Tensor.choose', the original tensor and the argument 'choices' cannot be scalars."
                              f" Their dimensions should all be > 0, but got the original tensor's dimension "
                              f"{self.ndim}, 'choices' dimension {choices.ndim}.")
-        a = tensor_operator_registry.get('broadcast_to')(shape_choice)(self)
+        a = tensor_operator_registry.get('broadcast_to')(self, shape_choice)
         dtype = choices.dtype
         # adjusts dtype for F.tensor_mul and F.gather_nd
         a = a.astype(mstype.int32)
@@ -3097,10 +3236,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         for i in range(ndim):
             dim_grid = Tensor(list(range(a.shape[i])), mstype.int32)
             dim_shape = validator.expanded_shape(ndim, a.shape[i], i)
-            dim_grid = tensor_operator_registry.get('broadcast_to')(a.shape)(dim_grid.reshape(dim_shape))
+            dim_grid = tensor_operator_registry.get('broadcast_to')(dim_grid.reshape(dim_shape), a.shape)
             grids.append(dim_grid)
         grid = tensor_operator_registry.get('stack')(grids, -1)
-        indices = tensor_operator_registry.get('concatenate')(-1)((a.reshape(a.shape + (1,)), grid))
+        indices = tensor_operator_registry.get('concatenate')((a.reshape(a.shape + (1,)), grid), -1)
         return tensor_operator_registry.get('gather_nd')(choices, indices).astype(dtype)
 
     def searchsorted(self, v, side='left', sorter=None):
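All four `broadcast_to` fixes in `take`/`choose` above follow the same functional convention: the tensor comes first, the target shape second. The public op mirrors this; a sketch:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor(np.array([[1.0], [2.0]]), ms.float32)   # shape (2, 1)
y = ops.broadcast_to(x, (2, 3))                    # tensor first, shape second
print(y.shape)                                     # (2, 3)
```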
@@ -3113,9 +3252,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
                 location found is given. If 'right', return the last such index. If there is
                 no suitable index, return either 0 or N (where N is the length of the tensor).
                 Default: ``left`` .
-            sorter (Union[int,
-                integer indices that sort the tensor into ascending order
-                the result of argsort. Default: ``None`` .
+            sorter (Union[int, list, tuple, Tensor]): optional tensor of
+                integer indices that sort the tensor into ascending order on the innermost dimension
+                and the type must be int64. They are typically the result of argsort. Default: ``None`` .
+                CPU and GPU can only use default values
 
         Returns:
             Tensor, array of insertion points with the same shape as `v`.
@@ -3136,37 +3276,26 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         if side not in ('left', 'right'):
             raise ValueError(f"For 'Tensor.searchsorted', the argument 'side' should be one of in "
                              f"['left', 'right'], but got {side}.")
-        a = self.astype(mstype.float32)
         if not isinstance(v, Tensor):
             v = tensor_operator_registry.get('make_tensor')(v)
-        shape = v.shape
         if sorter is not None:
-            if not isinstance(sorter, (int,
+            if not isinstance(sorter, (int, list, tuple, Tensor)):
                 raise TypeError("For Tensor.searchsorted, the type of the argument 'sorter' must be one of 'int', "
-                                "'
+                                "'list', 'tuple', 'Tensor', but got {}.".format(type(sorter)))
             if not isinstance(sorter, Tensor):
                 sorter = tensor_operator_registry.get('make_tensor')(sorter)
-            if sorter.
-                raise ValueError('sorter must be
-
-
-
-
-
-
-        sort_range = tuple(range(math.ceil(math.log2(tensor_operator_registry.get('shape_mul')(a.shape) + 1))))
-        for _ in sort_range:
-            mid = (i + j) // 2
-            mask = less_op(v, tensor_operator_registry.get('gather_nd')(a, mid.reshape(mid.shape + (1,))))
-            i = tensor_operator_registry.get('select')(mask, i, mid)
-            j = tensor_operator_registry.get('select')(mask, mid, j)
-        return j
+            if sorter.size != self.size:
+                raise ValueError('The size of sorter must be the same as the Tensor')
+
+        dtype = mstype.int32
+        right = (side == 'right')
+        search_sorted_ = tensor_operator_registry.get('searchsorted')(dtype, right)
+        return search_sorted_(self, v, sorter)
 
     def gather_nd(self, indices):
         r"""
         For details, please refer to :func:`mindspore.ops.gather_nd`.
         """
-        self._init_check()
         validator.check_value_type('indices', indices, (Tensor, Tensor_,), 'Tensor.gather_nd')
         return tensor_operator_registry.get('gather_nd')(self, indices)
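The rewritten body above replaces the hand-rolled binary search with the `searchsorted` primitive. Per the new docstring, `sorter` must be int64, match the tensor's size, and CPU/GPU only support the default `sorter=None`. A sketch (values illustrative):

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor

a = Tensor(np.array([1, 3, 5, 7]), ms.float32)
print(a.searchsorted(4))                 # 2
print(a.searchsorted(5, side='right'))   # 3
# sorter holds int64 indices that sort an unsorted tensor ascending:
b = Tensor(np.array([5, 1, 7, 3]), ms.float32)
idx = Tensor(np.array([1, 3, 0, 2]), ms.int64)
print(b.searchsorted(4, sorter=idx))     # 2
```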
@@ -3174,11 +3303,39 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.gather`.
         """
-        self._init_check()
         validator.check_is_int(axis, 'axis')
         validator.check_is_int(batch_dims, "batch_dims")
         return tensor_operator_registry.get('gather')(self, input_indices, axis, batch_dims)
 
+    def uniform(self, from_=0., to=1., generator=None):
+        r"""
+        Generates random numbers in the half-open interval [from\_, to).
+
+        Args:
+            from\_ (number): The lower bound of the interval.
+            to (number): The upper bound of the interval.
+            generator (Generator, optional): The random seed. Default: None.
+
+        Returns:
+            Tensor, with the same shape as tensor.
+
+        Raises:
+            TypeError: If `from_` is larger than `to`.
+
+        Supported Platforms:
+            ``Ascend``
+
+        Examples:
+            >>> import mindspore
+            >>> x = mindspore.ops.ones((4, 2))
+            >>> generator = mindspore.Generator()
+            >>> generator.manual_seed(100)
+            >>> output = x.uniform(1., 2., generator)
+            >>> print(output.shape)
+            (4, 2)
+        """
+        return tensor_operator_registry.get('uniform')(self, from_, to, generator)
+
     def var(self, axis=None, ddof=0, keepdims=False):
         """
         Compute the variance along the specified axis.
@@ -3202,13 +3359,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Returns:
             Variance tensor.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
         See also:
-            :func:`mindspore.Tensor.mean`: Reduce a dimension of a tensor by averaging all elements in the dimension.
+            - :func:`mindspore.Tensor.mean`: Reduce a dimension of a tensor by averaging all elements in the dimension.
+            - :func:`mindspore.Tensor.std`: Compute the standard deviation along the specified axis.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -3255,40 +3411,40 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Return sum of tensor elements over a given axis.
 
         Note:
-            Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and
-            `extobj` are not supported.
+            Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are not supported.
+            The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
 
         Args:
-            axis (Union[None, int, tuple(int), list(int)]): Axis or axes along which a sum is performed.
+            axis (Union[None, int, tuple(int), list(int), Tensor]): Axis or axes along which a sum is performed.
                 Default: ``None`` .
-                If None, sum all the elements of the input tensor.
-                If the axis is negative, it counts from the last to the first axis
-                If the axis is a tuple or list of ints, a sum is performed on all the axes specified in the tuple
-                or list instead of a single axis or all the axes as before.
+                If ``None`` , sum all the elements of the input tensor.
+                If the `axis` is negative, it counts from the last to the first `axis`.
+                If the `axis` is a tuple or list of ints, a sum is performed on all the axes specified in the tuple
+                or list instead of a single `axis` or all the axes as before.
             dtype (:class:`mindspore.dtype`, optional): defaults to ``None`` . Overrides the dtype of the
                 output Tensor.
             keepdims (bool): If this is set to ``True`` , the axes which are reduced are left in the result as
                 dimensions with size one. With this option, the result will broadcast correctly against the input
-                array. If the default value is passed, then keepdims will not be passed through to the sum method
+                array. If the default value is passed, then `keepdims` will not be passed through to the sum method
                 of sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
-                implement keepdims any exceptions will be raised. Default: ``False`` .
+                implement `keepdims` any exceptions will be raised. Default: ``False`` .
             initial (scalar): Starting value for the sum. Default: ``None`` .
 
         Returns:
-            Tensor. A tensor with the same shape as input, with the specified axis removed.
-            If the input tensor is a 0-d array, or if the axis is ``None`` , a scalar is returned.
+            Tensor. A tensor with the same shape as input, with the specified `axis` removed.
+            If the input tensor is a 0-d array, or if the `axis` is ``None`` , a scalar is returned.
 
         Raises:
-            TypeError: If input is not array_like, or `axis` is not int, tuple of ints
+            TypeError: If input is not array_like, or `axis` is not int, tuple of ints, list of ints or Tensor,
                 or `keepdims` is not integer, or `initial` is not scalar.
-            ValueError: If any axis is out of range or duplicate axes exist.
+            ValueError: If any `axis` is out of range or duplicate axes exist.
+
+        See also:
+            - :func:`mindspore.Tensor.cumsum`: Return the cumulative sum of the elements along a given `axis`.
 
         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
 
-        See also:
-            :func:`mindspore.Tensor.cumsum`: Return the cumulative sum of the elements along a given axis.
-
         Examples:
             >>> import numpy as np
             >>> from mindspore import Tensor
@@ -3299,13 +3455,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(input_x.sum(axis=1))
         [10. 35.]
         """
-        if initial is
-
-
-
-
-
-            res = res.astype(dtype)
+        if initial is None:
+            res = tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype)
+        else:
+            res = tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype) + initial
+        if dtype is not None and (dtype == mstype.bool_):
+            res = res.astype(mstype.bool_)
         return res
 
     def sum_to_size(self, *size):
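The rewritten `sum` body above shows that `initial` is implemented as a plain addition after the reduction, with a bool `dtype` re-applied at the end. A sketch:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor

x = Tensor(np.array([[1., 2., 3.], [4., 5., 6.]]), ms.float32)
print(x.sum())                     # 21.0
print(x.sum(axis=1))               # [ 6. 15.]
# initial is simply added to the reduced result:
print(x.sum(axis=1, initial=10))   # [16. 25.]
```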
@@ -3333,7 +3488,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output.shape)
         (1, 3, 1, 3)
         """
-        self._init_check()
         x = self
         if len(size) == 1 and isinstance(size[0], tuple):
             size = size[0]
@@ -3357,21 +3511,18 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.nansum`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nansum')(self, axis=axis, keepdims=keepdims, dtype=dtype)
 
     def nanmean(self, axis=None, keepdims=False, *, dtype=None):
         r"""
         For details, please refer to :func:`mindspore.ops.nanmean`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nanmean')(self, axis, keepdims, dtype=dtype)
 
     def nanmedian(self, axis=-1, keepdims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.nanmedian`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nanmedian')(self, axis, keepdims)
 
     def repeat(self, repeats, axis=None):
@@ -3391,13 +3542,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         ValueError: If the axis is out of range.
         TypeError: If arguments have types not specified above.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
         See also:
-            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.resize`: Changes shape and size of tensor in-place.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -3446,27 +3596,25 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         for sub, rep in zip(subs, repeats):
             if rep != 0:
                 repeated_subs.append(tensor_operator_registry.get('repeat_elements')(sub, rep, axis))
-        return tensor_operator_registry.get('concatenate')(axis)(repeated_subs)
+        return tensor_operator_registry.get('concatenate')(repeated_subs, axis)
 
+    @repeat_interleave_mint
     def repeat_interleave(self, repeats, dim=None):
         """
         For details, please refer to :func:`mindspore.ops.repeat_interleave`.
         """
-        self._init_check()
         return tensor_operator_registry.get('repeat_interleave')(self, repeats, dim)
 
     def bernoulli(self, p=0.5, seed=None):
         r"""
         For details, please refer to :func:`mindspore.ops.bernoulli`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bernoulli')(self, p, seed)
 
     def random_categorical(self, num_sample, seed=0, dtype=mstype.int64):
         r"""
         For details, please refer to :func:`mindspore.ops.random_categorical`.
         """
-        self._init_check()
         validator.check_is_int(num_sample, 'num_sample')
         validator.check_is_int(seed, 'seed')
         return tensor_operator_registry.get('random_categorical')(self, num_sample, seed, dtype)
@@ -3475,23 +3623,20 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.masked_select`.
         """
-        self._init_check()
         return tensor_operator_registry.get('masked_select')(self, mask)
 
     def gather_elements(self, dim, index):
         """
         For details, please refer to :func:`mindspore.ops.gather_elements`.
         """
-        self._init_check()
         validator.check_value_type('index', index, (Tensor, Tensor_,), 'Tensor.gather_elements')
         return tensor_operator_registry.get('gather_elements')(self, dim, index)
 
-    def nonzero(self):
+    def nonzero(self, as_tuple=False):
         """
         For details, please refer to :func:`mindspore.ops.nonzero`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('nonzero')(self)
+        return tensor_operator_registry.get('nonzero')(self, as_tuple)
 
     def svd(self, full_matrices=False, compute_uv=True):
         """
@@ -3508,42 +3653,36 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.hardshrink`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('hardshrink')(lambd)(self)
+        return tensor_operator_registry.get('hardshrink')(self, lambd)
 
     def heaviside(self, values):
         r"""
         For details, please refer to :func:`mindspore.ops.heaviside`.
         """
-        self._init_check()
         return tensor_operator_registry.get('heaviside')(self, values)
 
     def hypot(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.hypot`.
         """
-        self._init_check()
         return tensor_operator_registry.get('hypot')(self, other)
 
     def soft_shrink(self, lambd=0.5):
         r"""
         For details, please refer to :func:`mindspore.ops.soft_shrink`.
         """
-        self._init_check()
         return tensor_operator_registry.get('soft_shrink')(self, lambd)
 
     def matrix_determinant(self):
         r"""
         For details, please refer to :func:`mindspore.ops.matrix_determinant`.
         """
-        self._init_check()
         return tensor_operator_registry.get('matrix_determinant')(self)
 
     def log_matrix_determinant(self):
         r"""
         For details, please refer to :func:`mindspore.ops.log_matrix_determinant`.
         """
-        self._init_check()
        return tensor_operator_registry.get('log_matrix_determinant')(self)
 
     def to_coo(self):
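`nonzero` gains `as_tuple`, forwarded to `ops.nonzero`. Assuming it mirrors the usual semantics (one index tensor per dimension when `as_tuple=True`), a sketch:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor

x = Tensor(np.array([[1, 0], [0, 2]]), ms.int32)
print(x.nonzero())       # [[0 0]
                         #  [1 1]]  one (row, col) pair per nonzero
rows, cols = x.nonzero(as_tuple=True)
print(rows, cols)        # [0 1] [0 1]
```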
@@ -3577,7 +3716,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            [1 0]] [ 1. -5.] (2, 2)
 
         """
-        self._init_check()
         return tensor_operator_registry.get('dense_to_sparse_coo')(self)
 
     def to_csr(self):
@@ -3610,7 +3748,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            >>> print(output.indptr, output.indices, output.values, output.shape)
            [0 1 2] [0 0] [ 1. -5.] (2, 2)
         """
-        self._init_check()
         return tensor_operator_registry.get('dense_to_sparse_csr')(self)
 
     def tolist(self):
@@ -3633,42 +3770,36 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            >>> print(out2)
            1
         """
-        self._init_check()
         return self.asnumpy().tolist()
 
     def unbind(self, dim=0):
         r"""
         For details, please refer to :func:`mindspore.ops.unbind`.
         """
-
-        return tensor_operator_registry.get('unbind')(dim)(self)
+        return tensor_operator_registry.get('unbind')(self, dim)
 
     def unsorted_segment_min(self, segment_ids, num_segments):
         r"""
         For details, please refer to :func:`mindspore.ops.unsorted_segment_min`.
         """
-        self._init_check()
         return tensor_operator_registry.get('unsorted_segment_min')(self, segment_ids, num_segments)
 
     def unsorted_segment_max(self, segment_ids, num_segments):
         r"""
         For details, please refer to :func:`mindspore.ops.unsorted_segment_max`.
         """
-        self._init_check()
         return tensor_operator_registry.get('unsorted_segment_max')(self, segment_ids, num_segments)
 
     def unsorted_segment_prod(self, segment_ids, num_segments):
         r"""
         For details, please refer to :func:`mindspore.ops.unsorted_segment_prod`.
         """
-        self._init_check()
         return tensor_operator_registry.get('unsorted_segment_prod')(self, segment_ids, num_segments)
 
     def unique_consecutive(self, return_idx=False, return_counts=False, axis=None):
         """
         For details, please refer to :func:`mindspore.ops.unique_consecutive`.
         """
-        self._init_check()
         output, idx, counts = tensor_operator_registry.get("unique_consecutive")(return_idx, return_counts, axis)(self)
         if return_idx and return_counts:
             return output, idx, counts
@@ -3682,30 +3813,27 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.unique_with_pad`.
         """
-
-        return tensor_operator_registry.get("unique_with_pad")()(self, pad_num)
+        return tensor_operator_registry.get("unique_with_pad")(self, pad_num)
 
     def diag(self):
         r"""
         For details, please refer to :func:`mindspore.ops.diag`.
         """
-
-        return tensor_operator_registry.get('diag')()(self)
+        return tensor_operator_registry.get('diag')(self)
 
     def diagflat(self, offset=0):
         r"""
         For details, please refer to :func:`mindspore.ops.diagflat`.
         """
-        self._init_check()
         return tensor_operator_registry.get('diagflat')(self, offset)
 
     def xdivy(self, y):
         r"""
         For details, please refer to :func:`mindspore.ops.xdivy`.
         """
-
-        return tensor_operator_registry.get("xdivy")()(self, y)
+        return tensor_operator_registry.get("xdivy")(self, y)
 
+    @split_mint
     def split(self, split_size_or_sections, axis=0):
         """
         For details, please refer to :func:`mindspore.ops.split`.
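A pattern worth naming, since it repeats through the rest of this diff: in 2.2.14 `tensor_operator_registry` handed back primitive classes, so call sites instantiated and then applied them (`get('diag')()(self)`, `get('hardshrink')(lambd)(self)`), while 2.4.0 registers plain functions that take the tensor directly (`get('diag')(self)`). A self-contained sketch of the two calling conventions; the names below are illustrative stand-ins, not MindSpore internals:

    # 2.2.14 style: the registry stores a primitive class; construct, then call.
    class DiagPrimitive:
        def __call__(self, x):
            return [[v if i == j else 0 for j in range(len(x))] for i, v in enumerate(x)]

    # 2.4.0 style: the registry stores a plain function; call it directly.
    def diag_functional(x):
        return DiagPrimitive()(x)

    registry_old = {'diag': DiagPrimitive}
    registry_new = {'diag': diag_functional}

    x = [1, 2, 3]
    assert registry_old['diag']()(x) == registry_new['diag'](x)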
@@ -3716,7 +3844,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.tensor_split`.
         """
-        self._init_check()
         return tensor_operator_registry.get('tensor_split')(self, indices_or_sections, axis)
 
     def vsplit(self, indices_or_sections):
@@ -3724,28 +3851,25 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         For details, please refer to :func:`mindspore.ops.vsplit`.
         """
 
-        self._init_check()
         return tensor_operator_registry.get('vsplit')(self, indices_or_sections)
 
     def hsplit(self, indices_or_sections):
         """
         For details, please refer to :func:`mindspore.ops.hsplit`.
         """
-        self._init_check()
         return tensor_operator_registry.get('hsplit')(self, indices_or_sections)
 
     def dsplit(self, indices_or_sections):
         """
         For details, please refer to :func:`mindspore.ops.dsplit`.
         """
-        self._init_check()
         return tensor_operator_registry.get('dsplit')(self, indices_or_sections)
 
     def xlogy(self, y):
         r"""
         For details, please refer to :func:`mindspore.ops.xlogy`.
         """
-        return tensor_operator_registry.get("xlogy")()(self, y)
+        return tensor_operator_registry.get("xlogy")(self, y)
 
     def eigvals(self):
         r"""
@@ -3760,13 +3884,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.erf`.
         """
-        return tensor_operator_registry.get("erf")()(self)
+        return tensor_operator_registry.get("erf")(self)
 
     def erfc(self):
         r"""
         For details, please refer to :func:`mindspore.ops.erfc`.
         """
-        return tensor_operator_registry.get("erfc")()(self)
+        return tensor_operator_registry.get("erfc")(self)
 
     def tile(self, reps):
         r"""
@@ -3778,29 +3902,26 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.topk`.
         """
-        self._init_check()
         return tensor_operator_registry.get("topk")(self, k, dim, largest, sorted)
 
     def top_k(self, k, sorted=True):
         r"""
         `Tensor.top_k` is deprecated, please use `Tensor.topk` instead.
         """
-        self._init_check()
         validator.check_is_int(k, 'k')
         validator.check_bool(sorted, 'sorted')
-        return tensor_operator_registry.get("top_k")(sorted)(self, k)
+        return tensor_operator_registry.get("top_k")(self, k, sorted)
 
     def sigmoid(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sigmoid`.
         """
-        return tensor_operator_registry.get("sigmoid")()(self)
+        return tensor_operator_registry.get("sigmoid")(self)
 
     def median(self, axis=-1, keepdims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.median`.
         """
-        self._init_check()
         validator.check_axis_in_range(axis, self.ndim)
         return tensor_operator_registry.get('median')(False, axis, keepdims)(self)
@@ -3808,49 +3929,42 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.addmv`.
         """
-        self._init_check()
         return tensor_operator_registry.get('addmv')(self, mat, vec, beta=beta, alpha=alpha)
 
     def asinh(self):
         r"""
         For details, please refer to :func:`mindspore.ops.asinh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('asinh')(self)
 
     def arcsinh(self):
         r"""
         Alias for :func:`mindspore.Tensor.asinh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('arcsinh')(self)
 
     def atan(self):
         r"""
         For details, please refer to :func:`mindspore.ops.atan`.
         """
-        self._init_check()
         return tensor_operator_registry.get('atan')(self)
 
     def atanh(self):
         r"""
         For details, please refer to :func:`mindspore.ops.atanh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('atanh')(self)
 
     def arctanh(self):
         r"""
         Alias for :func:`mindspore.Tensor.atanh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('arctanh')(self)
 
     def bmm(self, mat2):
         r"""
         For details, please refer to :func:`mindspore.ops.bmm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bmm')(self, mat2)
 
     def to(self, dtype):
@@ -3880,8 +3994,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            >>> print(output.dtype)
            Int32
         """
-
-        return tensor_operator_registry.get('to')()(self, dtype)
+        return tensor_operator_registry.get('to')(self, dtype)
 
     def type(self, dtype=None):
         r"""
@@ -3907,7 +4020,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            [[1 2]
             [3 4]]
         """
-        self._init_check()
         if dtype is None:
             return str(self.dtype)
         return self.astype(dtype)
@@ -3934,7 +4046,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            >>> print(x.dtype)
            Int32
         """
-        self._init_check()
         return self.astype(other.dtype)
 
     def bool(self):
@@ -3957,8 +4068,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            >>> print(output.dtype)
            Bool
         """
-
-        return tensor_operator_registry.get('bool')()(self, mstype.bool_)
+        return tensor_operator_registry.get('bool')(self, mstype.bool_)
 
     def float(self):
         r"""
@@ -3979,8 +4089,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            >>> print(output.dtype)
            Float32
         """
-
-        return tensor_operator_registry.get('float')()(self, mstype.float32)
+        return tensor_operator_registry.get('float')(self, mstype.float32)
 
     def half(self):
         r"""
@@ -4001,8 +4110,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            >>> print(output.dtype)
            Float16
         """
-
-        return tensor_operator_registry.get('half')()(self, mstype.float16)
+        return tensor_operator_registry.get('half')(self, mstype.float16)
 
     def int(self):
         r"""
@@ -4023,8 +4131,28 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            >>> print(output.dtype)
            Int32
         """
-
-        return tensor_operator_registry.get('int')()(self, mstype.int32)
+        return tensor_operator_registry.get('int')(self, mstype.int32)
+
+    def byte(self):
+        r"""
+        Converts input tensor dtype to `uint8`.
+
+        Returns:
+            Tensor, converted to the `uint8` dtype.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
+            >>> input_x = Tensor(np.ones([2,2]), mindspore.float32)
+            >>> output = input_x.byte()
+            >>> print(output.dtype)
+            uint8
+        """
+        return tensor_operator_registry.get('byte')(self, mstype.uint8)
 
     def long(self):
         r"""
@@ -4045,8 +4173,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            >>> print(output.dtype)
            Int64
         """
-
-        return tensor_operator_registry.get('long')()(self, mstype.int64)
+        return tensor_operator_registry.get('long')(self, mstype.int64)
 
     def short(self):
         r"""
@@ -4068,22 +4195,19 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            >>> output
            Tensor(shape=[5], dtype=Int16, value= [1, 2, 3, 4, 5])
         """
-        self._init_check()
         return tensor_operator_registry.get('cast')(self, mstype.int16)
 
     def cholesky(self, upper=False):
         r"""
         For details, please refer to :func:`mindspore.ops.cholesky`.
         """
-
-        return tensor_operator_registry.get('cholesky')(upper=upper)(self)
+        return tensor_operator_registry.get('cholesky')(self, upper=upper)
 
     def cholesky_inverse(self, upper=False):
         r"""
         For details, please refer to :func:`mindspore.ops.cholesky_inverse`.
         """
-
-        return tensor_operator_registry.get('cholesky_inverse')(upper=upper)(self)
+        return tensor_operator_registry.get('cholesky_inverse')(self, upper=upper)
 
     def cholesky_solve(self, input2, upper=False):
         r"""
@@ -4092,63 +4216,54 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         .. warning::
             This is an experimental API that is subject to change or deletion.
         """
-        self._init_check()
         return tensor_operator_registry.get('cholesky_solve')(self, input2, upper)
 
     def conj(self):
         r"""
         For details, please refer to :func:`mindspore.ops.conj`.
         """
-        self._init_check()
         return tensor_operator_registry.get('conj')(self)
 
     def count_nonzero(self, axis=(), keep_dims=False, dtype=mstype.int32):
         r"""
         For details, please refer to :func:`mindspore.ops.count_nonzero`.
         """
-        self._init_check()
         return tensor_operator_registry.get('count_nonzero')(self, axis, keep_dims, dtype)
 
     def cross(self, other, dim=None):
         r"""
         For details, please refer to :func:`mindspore.ops.cross`.
         """
-        self._init_check()
         return tensor_operator_registry.get('cross')(self, other, dim)
 
     def erfinv(self):
         r"""
         For details, please refer to :func:`mindspore.ops.erfinv`.
         """
-        self._init_check()
         return tensor_operator_registry.get('erfinv')(self)
 
     def less_equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.less_equal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('less_equal')(self, other)
 
     def lcm(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.lcm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('lcm')(self, other)
 
     def ldexp(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.ldexp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('ldexp')(self, other)
 
     def fold(self, output_size, kernel_size, dilation=1, padding=0, stride=1):
         r"""
         For details, please refer to :func:`mindspore.ops.fold`.
         """
-        self._init_check()
         return tensor_operator_registry.get('fold')(self, output_size, kernel_size, dilation, padding, stride)
 
     def unfold(self, kernel_size, dilation=1, padding=0, stride=1):
@@ -4159,70 +4274,62 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            This is an experimental API that is subject to change or deletion.
 
         """
-        self._init_check()
         return tensor_operator_registry.get('unfold')(self, kernel_size, dilation, padding, stride)
 
     def expand(self, size):
         r"""
         For details, please refer to :func:`mindspore.ops.broadcast_to`.
         """
-
+        if isinstance(size, Tensor):
+            size = tensor_operator_registry.get('tensortotuple')()(size)
         return tensor_operator_registry.get('expand')(self, size)
 
     def cumprod(self, dim, dtype=None):
         r"""
         For details, please refer to :func:`mindspore.ops.cumprod`.
         """
-        self._init_check()
         return tensor_operator_registry.get('cumprod')(self, dim, dtype)
 
     def multiply(self, value):
         r"""
         For details, please refer to :func:`mindspore.ops.multiply`.
         """
-        self._init_check()
         return tensor_operator_registry.get('multiply')(self, value)
 
     def div(self, value, *, rounding_mode=None):
         r"""
         For details, please refer to :func:`mindspore.ops.div`.
         """
-        self._init_check()
         return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)
 
     def divide(self, value, *, rounding_mode=None):
         r"""
         Alias for :func:`mindspore.Tensor.div`.
         """
-        self._init_check()
         return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)
 
     def eq(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.eq`.
         """
-        self._init_check()
         return tensor_operator_registry.get('equal')(self, other)
 
     def equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.equal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('equal')(self, other)
 
     def expm1(self):
         r"""
         For details, please refer to :func:`mindspore.ops.expm1`.
         """
-        self._init_check()
         return tensor_operator_registry.get('expm1')(self)
 
     def index_add(self, dim, index, source, *, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.index_add`.
         """
-        self._init_check()
         check_is_number(alpha, (int, float))
         source = tensor_operator_registry.get('__mul__')(source, alpha)
         return tensor_operator_registry.get('index_add')(self, indices=index, y=source, axis=dim)
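The `expand` change above is the one place in this hunk that adds logic rather than removing it: a Tensor-valued `size` is first flattened into a tuple before the broadcast. A small sketch of the same normalize-then-dispatch idea in plain NumPy terms (`tensor_to_tuple` is a hypothetical stand-in for the registered `'tensortotuple'` operator):

    import numpy as np

    def tensor_to_tuple(size):
        # Stand-in: turn an array-like shape into a tuple of Python ints.
        return tuple(int(v) for v in np.asarray(size).reshape(-1))

    def expand(x, size):
        if isinstance(size, np.ndarray):   # mirrors `isinstance(size, Tensor)`
            size = tensor_to_tuple(size)
        return np.broadcast_to(x, size)    # mirrors the 'expand' registry call

    print(expand(np.array([[1], [2]]), np.array([2, 3])).shape)  # (2, 3)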
@@ -4231,42 +4338,37 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.greater`.
         """
-        self._init_check()
         return tensor_operator_registry.get('greater')(self, other)
 
     def greater_equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.greater_equal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('greater_equal')(self, other)
 
     def igamma(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.igamma`.
         """
-        self._init_check()
         return tensor_operator_registry.get('igamma')(self, other)
 
     def igammac(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.igammac`.
         """
-        self._init_check()
         return tensor_operator_registry.get('igammac')(self, other)
 
     def isinf(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isinf`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isinf')(self)
 
+    @isnan_mint
     def isnan(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isnan`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isnan')(self)
 
     def flip(self, dims):
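`@isnan_mint` above (and `@split_mint` earlier in this diff) are new decorators; judging by the names, they route the method to a `mindspore.mint` implementation under some condition, leaving the registry call below as the fallback. A hypothetical sketch of that dispatch pattern, not MindSpore's actual implementation:

    def mint_dispatch(mint_impl, use_mint):
        """Route a method to mint_impl whenever use_mint() says so."""
        def decorator(method):
            def wrapper(self, *args, **kwargs):
                if use_mint():
                    return mint_impl(self, *args, **kwargs)
                return method(self, *args, **kwargs)
            return wrapper
        return decorator

    # Toy demonstration with a list-backed stand-in for Tensor.
    fast_isnan = lambda t: [v != v for v in t.data]       # NaN != NaN is True
    isnan_mint = mint_dispatch(fast_isnan, use_mint=lambda: True)

    class Toy:
        def __init__(self, data):
            self.data = data

        @isnan_mint
        def isnan(self):
            raise NotImplementedError  # unreachable while use_mint() is true

    print(Toy([1.0, float('nan')]).isnan())  # [False, True]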
@@ -4320,14 +4422,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.le`.
         """
-        self._init_check()
         return tensor_operator_registry.get('le')(self, other)
 
     def less(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.less`.
         """
-        self._init_check()
         return tensor_operator_registry.get('less')(self, other)
 
     def lt(self, other):
@@ -4340,35 +4440,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_and`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logical_and')(self, other)
 
     def logical_not(self):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_not`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logical_not')(self)
 
     def logical_or(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_or`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logical_or')(self, other)
 
     def logical_xor(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_xor`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logical_xor')(self, other)
 
     def lstsq(self, A):
         r"""
         For details, please refer to :func:`mindspore.ops.lstsq`.
         """
-        self._init_check()
         return tensor_operator_registry.get('lstsq')(self, A)
 
     @property
@@ -4392,28 +4487,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.mvlgamma`.
         """
-        self._init_check()
         return tensor_operator_registry.get('mvlgamma')(self, p)
 
     def matmul(self, tensor2):
         r"""
         For details, please refer to :func:`mindspore.ops.matmul`.
         """
-        self._init_check()
         return tensor_operator_registry.get('matmul')(self, tensor2)
 
     def inner(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.inner`.
         """
-        self._init_check()
         return tensor_operator_registry.get('inner')(self, other)
 
     def multinomial(self, num_samples, replacement=True, seed=None):
         r"""
         For details, please refer to :func:`mindspore.ops.multinomial`.
         """
-        self._init_check()
         return tensor_operator_registry.get('multinomial')(self, num_samples, replacement, seed)
 
     def matrix_power(self, n):
@@ -4424,38 +4515,33 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            This is an experimental API that is subject to change or deletion.
 
         """
-        self._init_check()
         return tensor_operator_registry.get('matrix_power')(self, n)
 
     def maximum(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.maximum`.
         """
-        self._init_check()
         return tensor_operator_registry.get('maximum')(self, other)
 
     def mm(self, mat2):
         r"""
         For details, please refer to :func:`mindspore.ops.mm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('mm')(self, mat2)
 
     def msort(self):
         r"""
         For details, please refer to :func:`mindspore.ops.msort`.
         """
-        self._init_check()
         return tensor_operator_registry.get('msort')(self)
 
     def mul(self, value):
         r"""
         For details, please refer to :func:`mindspore.ops.mul`.
         """
-        self._init_check()
         return tensor_operator_registry.get('mul')(self, value)
 
-    def nan_to_num(self, nan=0.0, posinf=None, neginf=None):
+    def nan_to_num(self, nan=None, posinf=None, neginf=None):
         """
         For details, please refer to :func:`mindspore.ops.nan_to_num`.
         """
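`nan_to_num` changes its default from `nan=0.0` (the removed line is truncated in this listing; `0.0` matches the 2.2.14 documented default) to `nan=None`. Presumably `None` now means "apply the standard replacements" at the ops layer (zero for NaN, dtype extremes for the infinities), so explicit callers see no difference; a hedged usage sketch:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.array([1.0, float('nan'), float('inf')]), ms.float32)
    print(x.nan_to_num(nan=0.0, posinf=1e9, neginf=-1e9))  # same in both versions
    print(x.nan_to_num())  # 2.4.0 forwards nan=None; assumed standard fallback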
@@ -4465,31 +4551,29 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.neg`.
         """
-        self._init_check()
         return tensor_operator_registry.get('neg')(self)
 
     def ne(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.ne`.
         """
-        self._init_check()
         return tensor_operator_registry.get('ne')(self, other)
 
     def not_equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.not_equal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('not_equal')(self, other)
 
-    def new_zeros(self, size,
+    def new_zeros(self, size, dtype=None):
         r"""
         Return a tensor of `size` filled with zeros.
 
-
-
+        .. warning::
+            For argument `size`, Tensor type input will be deprecated in the future version.
 
-
+        Args:
+            size (Union[int, tuple, list, Tensor]): An int, list or tuple of integers defining the output shape.
             dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned tensor has
                 thesame dtype as `self`. Default: ``None``.
 
@@ -4497,7 +4581,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             Tensor, the shape and dtype is defined above and filled with zeros.
 
         Raises:
-            TypeError: If `size` is
+            TypeError: If `size` is neither an int nor an tuple/list/Tensor of int.
 
         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
@@ -4512,21 +4596,42 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            [[0. 0.]
            [0. 0.]]
         """
-
-        if isinstance(size, list):
-            size = tuple(size)
-        self._init_check()
-        _dtype = self.dtype if dtype is None else dtype
-        return tensor_operator_registry.get('zeros')(size, _dtype)
+        return tensor_operator_registry.get('zeros')(size, dtype)
 
-    def
+    def zero_(self):
+        r"""
+        Return a tensor filled with zeros.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+
+        Returns:
+            Return a tensor. Fill self tensor with zeros.
+
+        Supported Platforms:
+            ``Ascend``
+
+        Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
+            >>> x = Tensor(np.array([2, 2]))
+            >>> output = x.zero_()
+            >>> print(output)
+            [[0. 0.]
+             [0. 0.]]
+        """
+        return tensor_operator_registry.get('zero_')(self)
+
+    def new_ones(self, size, dtype=None):
         r"""
         Return a tensor of `size` filled with ones.
 
-
-
+        .. warning::
+            For argument `size`, Tensor type input will be deprecated in the future version.
 
-
+        Args:
+            size (Union[int, tuple, list, Tensor]): An int, list or tuple of integers defining the output shape.
             dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned
                 tensor has the same dtype as `self`. Default: ``None``.
 
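A behavioral subtlety in `new_zeros` and `new_ones`: 2.2.14 resolved `dtype=None` to `self.dtype` at the call site (`_dtype = self.dtype if dtype is None else dtype`), while 2.4.0 forwards `dtype` unchanged, so the `None` fallback must now live inside the registered `'zeros'`/`'ones'` functions. The old call-site logic, for comparison, in plain NumPy terms:

    import numpy as np

    def new_zeros_old_style(self_dtype, size, dtype=None):
        # 2.2.14: the Tensor method itself resolved the None fallback.
        _dtype = self_dtype if dtype is None else dtype
        return np.zeros(size, _dtype)

    print(new_zeros_old_style(np.float32, (2, 2)).dtype)            # float32
    print(new_zeros_old_style(np.float32, (2, 2), np.int64).dtype)  # int64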
@@ -4534,7 +4639,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            Tensor, the shape and dtype is defined above and filled with ones.
 
         Raises:
-            TypeError: If `size` is
+            TypeError: If `size` is neither an int nor an tuple/list/Tensor of int.
 
         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
@@ -4549,109 +4654,90 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            [[1. 1.]
            [1. 1.]]
         """
-
-        if isinstance(size, list):
-            size = tuple(size)
-        self._init_check()
-        _dtype = self.dtype if dtype is None else dtype
-        return tensor_operator_registry.get('ones')(size, _dtype)
+        return tensor_operator_registry.get('ones')(size, dtype)
 
     def sign(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sign`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sign')(self)
 
     def signbit(self):
         """
         For details, please refer to :func:`mindspore.ops.signbit`.
         """
-        self._init_check()
         return tensor_operator_registry.get('signbit')(self)
 
     def sgn(self):
         """
         For details, please refer to :func:`mindspore.ops.sgn`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sgn')(self)
 
     def sin(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sin`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sin')(self)
 
     def sinc(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sinc`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sinc')(self)
 
     def sinh(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sinh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sinh')(self)
 
     def sort(self, axis=-1, descending=False):
         r"""
         For details, please refer to :func:`mindspore.ops.sort`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sort')(self, axis=axis, descending=descending)
 
     def argsort(self, axis=-1, descending=False):
         """
         For details, please refer to :func:`mindspore.ops.argsort`.
         """
-        self._init_check()
         return tensor_operator_registry.get('argsort')(self, axis, descending)
 
     def trunc(self):
         r"""
         For details, please refer to :func:`mindspore.ops.trunc`.
         """
-        self._init_check()
         return tensor_operator_registry.get('trunc')(self)
 
     def where(self, condition, y):
         r"""
         For details, please refer to :func:`mindspore.ops.where`.
         """
-        self._init_check()
         return tensor_operator_registry.get('where')(condition, self, y)
 
     def imag(self):
         r"""
         For details, please refer to :func:`mindspore.ops.imag`.
         """
-        self._init_check()
         return tensor_operator_registry.get('imag')(self)
 
     def quantile(self, q, axis=None, keepdims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.quantile`.
         """
-        self._init_check()
         return tensor_operator_registry.get('quantile')(self, q, axis, keepdims)
 
     def nanquantile(self, q, axis=None, keepdims=False):
         """
         For details, please refer to :func:`mindspore.ops.nanquantile`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nanquantile')(self, q, axis, keepdims)
 
     def orgqr(self, input2):
         r"""
         For details, please refer to :func:`mindspore.ops.orgqr`.
         """
-        self._init_check()
         return tensor_operator_registry.get('orgqr')(self, input2)
 
     def lu_solve(self, LU_data, LU_pivots):
@@ -4661,7 +4747,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         .. warning::
            This is an experimental API that is subject to change or deletion.
         """
-        self._init_check()
         return tensor_operator_registry.get('lu_solve')(self, LU_data, LU_pivots)
 
 
@@ -4669,14 +4754,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.nextafter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nextafter')(self, other)
 
     def qr(self, some=True):
         r"""
         For details, please refer to :func:`mindspore.ops.qr`.
         """
-        self._init_check()
         validator.check_value_type('some', some, bool, 'Tensor.qr')
         return tensor_operator_registry.get('qr')(self, 'reduced' if some else 'complete')
 
@@ -4686,7 +4769,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         For details, please refer to :func:`mindspore.ops.ormqr`,
         Args `input2` and `input3` correspond to the args `tau` and `other` of :func:`mindspore.ops.ormqr`.
         """
-        self._init_check()
         return tensor_operator_registry.get('ormqr')(self, input2, input3, left, transpose)
 
 
@@ -4728,7 +4810,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            >>> print(output)
            [5. 6. 3. 7.]
         """
-        self._init_check()
         return tensor_operator_registry.get('masked_scatter')()(self, mask, x)
 
 
@@ -4780,12 +4861,47 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            [[1 5 3]
            [4 8 9]]
         """
-        self._init_check()
         validator.check_value_type('accumulate', accumulate, bool, 'Tensor.index_put')
         _index_put = tensor_operator_registry.get('index_put')(0 if accumulate is False else 1)
         return _index_put(self, values, indices)
 
 
+    def move_to(self, to, blocking=True):
+        r"""
+        Copy Tensor to target device synchronously or asynchronously, default synchronously. only support PyNative mode.
+
+        Args:
+            to (str): a string type value, one of ``"Ascend"``, ``"GPU"``, ``"CPU"``.
+            blocking (bool): a bool type value, using synchronous copy or asynchronous copy.
+                Default: ``True`` , synchronous copy.
+
+        Returns:
+            New Tensor, storged on target device which with the same type and shape as the "self Tensor".
+
+        Raises:
+            ValueError: If the type of `blocking` is not bool type.
+            ValueError: If the value of `to` is not one of ``"Ascend"``, ``"GPU"``, ``"CPU"``.
+            ValueError: If the run mode is not PyNative mode.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import mindspore as ms
+            >>> from mindspore import Tensor
+            >>> x = ms.Tensor([1, 2, 3], ms.int64)
+            >>> new_tensor = x.move_to("CPU")
+        """
+        if not isinstance(blocking, bool):
+            raise ValueError(f"The type of 'blocking' must be bool, but got {blocking}")
+        if to not in ("Ascend", "GPU", "CPU"):
+            raise ValueError(f"The value of 'to' must be one of ['Ascend', 'GPU', 'CPU'], but got {to}")
+        mode = context.get_context("mode")
+        if mode != context.PYNATIVE_MODE:
+            raise ValueError(f"The method of 'move_to' only supported in pynative mode, but got: {mode}.")
+        return Tensor(Tensor_.move_to(self, to, blocking), device="CPU" if to == "CPU" else None)
+
+
     def _offload(self):
         r"""
         Offload tensor parameter to host. Currently, only support for pynative mode.
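A minimal usage sketch for the new `move_to`; PyNative mode is mandatory, as the guard clauses above enforce, and `blocking=False` selects the asynchronous copy path:

    import mindspore as ms

    ms.set_context(mode=ms.PYNATIVE_MODE)
    x = ms.Tensor([1, 2, 3], ms.int64)
    on_cpu = x.move_to("CPU")                        # synchronous copy (default)
    on_cpu_async = x.move_to("CPU", blocking=False)  # asynchronous copy
    print(on_cpu)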
@@ -4799,7 +4915,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            >>> x = ms.Tensor([1, 2, 3], ms.int64)
            >>> x._offload()
         """
-        self._init_check()
         return Tensor_._offload(self)
 
 
@@ -4831,6 +4946,44 @@ def _vm_compare(*args):
     return Tensor(np.array(fn(y)))
 
 
+def _check_sequence_shape(input_data):
+    """Check the shape of tensor input with type of sequence."""
+    max_dims_reached = False
+    max_ndim = 64  # corresponding to NPY_MAXDIMS
+    out_shape = [0]*max_ndim
+
+    def check_shape_recursive(input_data, curr_ndim):
+        nonlocal max_dims_reached, max_ndim, out_shape
+        if curr_ndim > max_ndim:
+            return False
+        if not isinstance(input_data, (tuple, list)):
+            if max_dims_reached and curr_ndim != max_ndim:
+                max_ndim = curr_ndim
+                return False
+            max_dims_reached = True
+            max_ndim = curr_ndim
+            return True
+        if not max_dims_reached:
+            out_shape[curr_ndim] = len(input_data)
+        else:
+            if out_shape[curr_ndim] != len(input_data):
+                max_ndim = curr_ndim
+                return False
+        if not input_data:
+            # process empty list
+            if not check_shape_recursive(None, curr_ndim + 1):
+                return False
+        for data in input_data:
+            if not check_shape_recursive(data, curr_ndim + 1):
+                return False
+        return True
+
+    if not check_shape_recursive(input_data, 0):
+        raise ValueError(f"When initializing a tensor with a sequence, the sequence has an inhomogeneous shape "
+                         f"after {max_ndim} dimensions. The detected shape was {tuple(out_shape[:max_ndim])} "
+                         f"+ inhomogeneous part.")
+
+
 def _check_tensor_input(input_data=None, dtype=None, shape=None, init=None):
     """Check the tensor input."""
     if input_data is not None and shape is not None:
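`_check_sequence_shape` ports NumPy's inhomogeneous-shape rejection (note the 64-dimension cap matching `NPY_MAXDIMS`) to `Tensor` construction from nested sequences. What it guards against, in short:

    import mindspore as ms

    ms.Tensor([[1, 2], [3, 4]])       # homogeneous 2x2 sequence: accepted
    try:
        ms.Tensor([[1, 2], [3]])      # ragged rows: rejected with the
    except ValueError as err:         # "inhomogeneous shape" ValueError above
        print(err)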
@@ -4841,11 +4994,12 @@ def _check_tensor_input(input_data=None, dtype=None, shape=None, init=None):
         raise ValueError("init, dtype and shape must have values at the same time.")
 
     if input_data is not None:
-        if isinstance(input_data, np.ndarray) and input_data.ndim
-            raise ValueError("input_data can not contain zero dimension.")
-        if isinstance(input_data, (tuple, list)) and np.array(input_data).ndim > 1 \
-                and np.array(input_data).size == 0:
+        if isinstance(input_data, np.ndarray) and input_data.ndim >= 1 and input_data.size == 0:
             raise ValueError("input_data can not contain zero dimension.")
+        if isinstance(input_data, (tuple, list)):
+            _check_sequence_shape(input_data)
+            if np.array(input_data).ndim >= 1 and np.array(input_data).size == 0:
+                raise ValueError("input_data can not contain zero dimension.")
 
     if shape is not None and not (hasattr(init, "__enable_zero_dim__") and init.__enable_zero_dim__) and 0 in shape:
         raise ValueError("Shape can not contain zero value.")
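The ndarray branch also tightened from what appears to be `ndim > 1` (the removed condition is truncated above; the adjacent sequence branch used `ndim > 1`) to `ndim >= 1`, so even empty 1-D inputs are now rejected:

    import numpy as np
    import mindspore as ms

    ms.Tensor(np.zeros((2, 2)))       # fine
    try:
        ms.Tensor(np.array([]))       # ndim 1, size 0: caught by the new check
    except ValueError as err:
        print(err)                    # input_data can not contain zero dimension.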
@@ -4882,4 +5036,4 @@ def _check_astype_and_convert(dtype):
     return dtype
 
 
-tensor_operator_registry.register('vm_compare', _vm_compare)
+setattr(tensor_operator_registry, 'vm_compare', _vm_compare)
|