mindspore 2.2.11__cp39-cp39-win_amd64.whl → 2.3.0__cp39-cp39-win_amd64.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +7 -5
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +76 -18
- mindspore/_extends/builtin_operations.py +2 -1
- mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
- mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
- mindspore/_extends/parse/__init__.py +18 -14
- mindspore/_extends/parse/compile_config.py +258 -0
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +174 -62
- mindspore/_extends/parse/resources.py +45 -14
- mindspore/_extends/parse/standard_method.py +142 -240
- mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _extends/pijit/__init__.py} +6 -16
- mindspore/_extends/pijit/pijit_func_white_list.py +343 -0
- mindspore/_extends/remote/kernel_build_server.py +2 -0
- mindspore/_profiler.py +30 -0
- mindspore/amp.py +51 -24
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/boost/less_batch_normalization.py +9 -6
- mindspore/common/__init__.py +15 -4
- mindspore/common/_jit_fallback_utils.py +2 -3
- mindspore/common/_register_for_adapter.py +7 -0
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_register_for_tensor.py +8 -9
- mindspore/common/_stub_tensor.py +7 -1
- mindspore/common/_utils.py +5 -17
- mindspore/common/api.py +411 -106
- mindspore/common/auto_dynamic_shape.py +27 -14
- mindspore/common/dtype.py +17 -10
- mindspore/common/dump.py +6 -8
- mindspore/common/file_system.py +48 -0
- mindspore/common/generator.py +260 -0
- mindspore/common/hook_handle.py +51 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +34 -14
- mindspore/common/lazy_inline.py +72 -19
- mindspore/common/mindir_util.py +12 -2
- mindspore/common/mutable.py +79 -14
- mindspore/common/no_inline.py +54 -0
- mindspore/common/np_dtype.py +25 -0
- mindspore/common/parameter.py +30 -11
- mindspore/common/recompute.py +262 -0
- mindspore/common/seed.py +9 -9
- mindspore/common/sparse_tensor.py +272 -24
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +468 -496
- mindspore/communication/__init__.py +6 -11
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/comm_func.py +1140 -0
- mindspore/communication/management.py +118 -102
- mindspore/config/op_info.config +22 -54
- mindspore/context.py +378 -65
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +6 -6
- mindspore/dataset/audio/transforms.py +711 -158
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +2 -2
- mindspore/dataset/engine/datasets.py +163 -83
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +33 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +78 -59
- mindspore/dataset/engine/datasets_vision.py +77 -73
- mindspore/dataset/engine/offload.py +5 -7
- mindspore/dataset/engine/queue.py +56 -38
- mindspore/dataset/engine/validators.py +11 -5
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +408 -121
- mindspore/dataset/text/utils.py +9 -9
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/transforms.py +261 -76
- mindspore/dataset/utils/browse_dataset.py +9 -9
- mindspore/dataset/vision/__init__.py +8 -8
- mindspore/dataset/vision/c_transforms.py +10 -10
- mindspore/dataset/vision/py_transforms_util.py +3 -3
- mindspore/dataset/vision/transforms.py +2844 -549
- mindspore/dataset/vision/utils.py +161 -10
- mindspore/dataset/vision/validators.py +14 -2
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/optim/__init__.py +12 -2
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +35 -34
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +40 -16
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +71 -127
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +15 -8
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +28 -19
- mindspore/hal/__init__.py +40 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/memory.py +326 -0
- mindspore/hal/stream.py +339 -0
- mindspore/include/api/data_type.h +2 -2
- mindspore/include/api/dual_abi_helper.h +16 -3
- mindspore/include/api/model.h +4 -3
- mindspore/include/api/status.h +14 -0
- mindspore/include/c_api/model_c.h +173 -0
- mindspore/include/c_api/ms/base/types.h +1 -0
- mindspore/include/c_api/types_c.h +19 -0
- mindspore/include/dataset/execute.h +1 -3
- mindspore/include/dataset/vision.h +54 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +2 -2
- mindspore/mindrecord/__init__.py +5 -1
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +25 -0
- mindspore/mindrecord/filewriter.py +76 -58
- mindspore/mindrecord/mindpage.py +40 -6
- mindspore/mindrecord/shardutils.py +3 -2
- mindspore/mindrecord/shardwriter.py +7 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +53 -66
- mindspore/mindrecord/tools/cifar10_to_mr.py +48 -63
- mindspore/mindrecord/tools/csv_to_mr.py +7 -17
- mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +11 -21
- mindspore/mindrecord/tools/tfrecord_to_mr.py +2 -10
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/mint/__init__.py +1137 -0
- mindspore/{rewrite/ast_transformers → mint/linalg}/__init__.py +9 -4
- mindspore/mint/nn/__init__.py +512 -0
- mindspore/mint/nn/functional.py +573 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +185 -0
- mindspore/multiprocessing/__init__.py +72 -0
- mindspore/nn/__init__.py +1 -0
- mindspore/nn/cell.py +213 -257
- mindspore/nn/dynamic_lr.py +2 -2
- mindspore/nn/extend/__init__.py +29 -0
- mindspore/nn/extend/basic.py +140 -0
- mindspore/nn/extend/embedding.py +143 -0
- mindspore/{rewrite/ast_creator_register.py → nn/extend/layer/__init__.py} +9 -19
- mindspore/nn/extend/layer/normalization.py +109 -0
- mindspore/nn/extend/pooling.py +117 -0
- mindspore/nn/layer/activation.py +84 -94
- mindspore/nn/layer/basic.py +177 -82
- mindspore/nn/layer/channel_shuffle.py +3 -16
- mindspore/nn/layer/container.py +3 -3
- mindspore/nn/layer/conv.py +75 -66
- mindspore/nn/layer/embedding.py +103 -45
- mindspore/nn/layer/embedding_service.py +531 -0
- mindspore/nn/layer/embedding_service_layer.py +393 -0
- mindspore/nn/layer/image.py +4 -7
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +52 -66
- mindspore/nn/layer/padding.py +30 -39
- mindspore/nn/layer/pooling.py +18 -9
- mindspore/nn/layer/rnn_cells.py +6 -16
- mindspore/nn/layer/rnns.py +6 -5
- mindspore/nn/layer/thor_layer.py +1 -2
- mindspore/nn/layer/timedistributed.py +1 -1
- mindspore/nn/layer/transformer.py +52 -50
- mindspore/nn/learning_rate_schedule.py +6 -5
- mindspore/nn/loss/loss.py +63 -84
- mindspore/nn/optim/ada_grad.py +6 -4
- mindspore/nn/optim/adadelta.py +3 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +102 -181
- mindspore/nn/optim/adamax.py +4 -2
- mindspore/nn/optim/adasum.py +3 -3
- mindspore/nn/optim/asgd.py +4 -2
- mindspore/nn/optim/ftrl.py +31 -61
- mindspore/nn/optim/lamb.py +5 -3
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +6 -4
- mindspore/nn/optim/momentum.py +13 -25
- mindspore/nn/optim/optimizer.py +6 -3
- mindspore/nn/optim/proximal_ada_grad.py +4 -2
- mindspore/nn/optim/rmsprop.py +9 -3
- mindspore/nn/optim/rprop.py +4 -2
- mindspore/nn/optim/sgd.py +7 -4
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
- mindspore/nn/probability/distribution/beta.py +2 -2
- mindspore/nn/probability/distribution/categorical.py +4 -6
- mindspore/nn/probability/distribution/cauchy.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +2 -2
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +13 -1
- mindspore/nn/wrap/__init__.py +2 -1
- mindspore/nn/wrap/cell_wrapper.py +58 -13
- mindspore/nn/wrap/grad_reducer.py +148 -8
- mindspore/nn/wrap/loss_scale.py +32 -9
- mindspore/numpy/__init__.py +2 -0
- mindspore/numpy/array_creations.py +2 -0
- mindspore/numpy/array_ops.py +6 -6
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/fft.py +431 -0
- mindspore/numpy/math_ops.py +61 -67
- mindspore/numpy/utils.py +3 -0
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +8 -4
- mindspore/ops/_grad_experimental/grad_array_ops.py +4 -160
- mindspore/ops/_grad_experimental/grad_comm_ops.py +93 -36
- mindspore/ops/_grad_experimental/grad_inner_ops.py +8 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +92 -287
- mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
- mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
- mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/__init__.py +0 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +1 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
- mindspore/ops/_op_impl/{cpu/concat.py → aicpu/generate_eod_mask.py} +16 -17
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
- mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -3
- mindspore/ops/_op_impl/cpu/adam.py +2 -2
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
- mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
- mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
- mindspore/ops/_vmap/vmap_array_ops.py +164 -101
- mindspore/ops/_vmap/vmap_base.py +8 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
- mindspore/ops/_vmap/vmap_image_ops.py +70 -13
- mindspore/ops/_vmap/vmap_math_ops.py +130 -58
- mindspore/ops/_vmap/vmap_nn_ops.py +249 -115
- mindspore/ops/_vmap/vmap_other_ops.py +1 -1
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +231 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +250 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
- mindspore/ops/auto_generate/gen_extend_func.py +980 -0
- mindspore/ops/auto_generate/gen_ops_def.py +6443 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +13167 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +429 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +121 -23
- mindspore/ops/composite/math_ops.py +10 -49
- mindspore/ops/composite/multitype_ops/_compile_utils.py +191 -618
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +25 -134
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
- mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
- mindspore/ops/deprecated.py +14 -3
- mindspore/ops/extend/__init__.py +53 -0
- mindspore/ops/extend/array_func.py +218 -0
- mindspore/ops/extend/math_func.py +76 -0
- mindspore/ops/extend/nn_func.py +308 -0
- mindspore/ops/function/__init__.py +31 -11
- mindspore/ops/function/array_func.py +848 -1736
- mindspore/ops/function/clip_func.py +19 -31
- mindspore/ops/function/debug_func.py +2 -5
- mindspore/ops/function/fft_func.py +31 -0
- mindspore/ops/function/grad/grad_func.py +27 -20
- mindspore/ops/function/image_func.py +27 -21
- mindspore/ops/function/linalg_func.py +30 -53
- mindspore/ops/function/math_func.py +916 -2791
- mindspore/ops/function/nn_func.py +1445 -889
- mindspore/ops/function/other_func.py +6 -7
- mindspore/ops/function/parameter_func.py +6 -92
- mindspore/ops/function/random_func.py +254 -108
- mindspore/ops/function/reshard_func.py +102 -0
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/function/sparse_unary_func.py +11 -18
- mindspore/ops/function/spectral_func.py +1 -1
- mindspore/ops/function/vmap_func.py +15 -14
- mindspore/ops/functional.py +342 -343
- mindspore/ops/op_info_register.py +16 -43
- mindspore/ops/operations/__init__.py +32 -23
- mindspore/ops/operations/_embedding_cache_ops.py +1 -1
- mindspore/ops/operations/_grad_ops.py +21 -853
- mindspore/ops/operations/_infer_ops.py +19 -0
- mindspore/ops/operations/_inner_ops.py +155 -511
- mindspore/ops/operations/_quant_ops.py +4 -4
- mindspore/ops/operations/_rl_inner_ops.py +3 -3
- mindspore/ops/operations/_scalar_ops.py +5 -480
- mindspore/ops/operations/_sequence_ops.py +6 -36
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +112 -2698
- mindspore/ops/operations/comm_ops.py +801 -118
- mindspore/ops/operations/custom_ops.py +62 -121
- mindspore/ops/operations/debug_ops.py +105 -36
- mindspore/ops/operations/image_ops.py +3 -219
- mindspore/ops/operations/inner_ops.py +54 -40
- mindspore/ops/operations/linalg_ops.py +1 -49
- mindspore/ops/operations/manually_defined/__init__.py +24 -0
- mindspore/ops/operations/manually_defined/_inner.py +61 -0
- mindspore/ops/operations/manually_defined/ops_def.py +2016 -0
- mindspore/ops/operations/math_ops.py +621 -4654
- mindspore/ops/operations/nn_ops.py +316 -2226
- mindspore/ops/operations/other_ops.py +53 -45
- mindspore/ops/operations/random_ops.py +4 -51
- mindspore/ops/operations/reshard_ops.py +53 -0
- mindspore/ops/operations/sparse_ops.py +8 -8
- mindspore/ops/primitive.py +204 -103
- mindspore/ops/silent_check.py +162 -0
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +250 -0
- mindspore/ops_generate/arg_handler.py +197 -0
- mindspore/ops_generate/gen_aclnn_implement.py +263 -0
- mindspore/ops_generate/gen_ops.py +1084 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
- mindspore/ops_generate/gen_pyboost_func.py +968 -0
- mindspore/ops_generate/gen_utils.py +209 -0
- mindspore/ops_generate/op_proto.py +138 -0
- mindspore/ops_generate/pyboost_utils.py +354 -0
- mindspore/ops_generate/template.py +239 -0
- mindspore/parallel/__init__.py +7 -4
- mindspore/parallel/_auto_parallel_context.py +155 -6
- mindspore/parallel/_cell_wrapper.py +16 -9
- mindspore/parallel/_cost_model_context.py +1 -1
- mindspore/parallel/_dp_allreduce_fusion.py +159 -159
- mindspore/parallel/_parallel_serialization.py +62 -14
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +18 -9
- mindspore/parallel/_transformer/__init__.py +1 -1
- mindspore/parallel/_transformer/layers.py +1 -1
- mindspore/parallel/_transformer/loss.py +1 -1
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/op_parallel_config.py +1 -1
- mindspore/parallel/_transformer/transformer.py +10 -10
- mindspore/parallel/_utils.py +161 -6
- mindspore/parallel/algo_parameter_config.py +6 -8
- mindspore/parallel/checkpoint_transform.py +369 -64
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +344 -0
- mindspore/parallel/cluster/process_entity/_utils.py +126 -0
- mindspore/parallel/cluster/run.py +136 -0
- mindspore/parallel/mpi/__init__.py +1 -1
- mindspore/parallel/mpi/_mpi_config.py +1 -1
- mindspore/parallel/parameter_broadcast.py +152 -0
- mindspore/parallel/shard.py +128 -17
- mindspore/profiler/__init__.py +3 -2
- mindspore/profiler/common/process_pool.py +41 -0
- mindspore/profiler/common/singleton.py +28 -0
- mindspore/profiler/common/util.py +125 -0
- mindspore/profiler/envprofiling.py +2 -2
- mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +53 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +159 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +161 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +131 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +85 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +57 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +116 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +68 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
- mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
- mindspore/profiler/parser/ascend_flops_generator.py +27 -5
- mindspore/profiler/parser/ascend_fpbp_generator.py +8 -2
- mindspore/profiler/parser/ascend_hccl_generator.py +31 -280
- mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
- mindspore/profiler/parser/ascend_memory_generator.py +185 -0
- mindspore/profiler/parser/ascend_msprof_exporter.py +151 -126
- mindspore/profiler/parser/ascend_msprof_generator.py +75 -274
- mindspore/profiler/parser/ascend_op_generator.py +94 -36
- mindspore/profiler/parser/ascend_timeline_generator.py +297 -131
- mindspore/profiler/parser/base_timeline_generator.py +17 -3
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -1
- mindspore/profiler/parser/framework_parser.py +11 -4
- mindspore/profiler/parser/integrator.py +3 -1
- mindspore/profiler/parser/memory_usage_parser.py +8 -2
- mindspore/profiler/parser/minddata_analyzer.py +8 -2
- mindspore/profiler/parser/minddata_parser.py +73 -4
- mindspore/profiler/parser/msadvisor_analyzer.py +5 -3
- mindspore/profiler/parser/msadvisor_parser.py +10 -4
- mindspore/profiler/parser/profiler_info.py +16 -1
- mindspore/profiler/profiling.py +522 -195
- mindspore/rewrite/__init__.py +2 -13
- mindspore/rewrite/api/node.py +123 -37
- mindspore/rewrite/api/pattern_engine.py +2 -3
- mindspore/rewrite/api/scoped_value.py +16 -15
- mindspore/rewrite/api/symbol_tree.py +46 -30
- mindspore/rewrite/ast_helpers/__init__.py +3 -6
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
- mindspore/rewrite/common/__init__.py +1 -2
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
- mindspore/rewrite/{namer.py → common/namer.py} +63 -18
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/node/__init__.py +5 -5
- mindspore/rewrite/node/call_function.py +23 -7
- mindspore/rewrite/node/cell_container.py +7 -3
- mindspore/rewrite/node/control_flow.py +53 -28
- mindspore/rewrite/node/node.py +212 -196
- mindspore/rewrite/node/node_manager.py +51 -22
- mindspore/rewrite/node/node_topological_manager.py +3 -23
- mindspore/rewrite/parsers/__init__.py +12 -0
- mindspore/rewrite/parsers/arguments_parser.py +8 -9
- mindspore/rewrite/parsers/assign_parser.py +637 -413
- mindspore/rewrite/parsers/attribute_parser.py +3 -4
- mindspore/rewrite/parsers/class_def_parser.py +115 -148
- mindspore/rewrite/parsers/constant_parser.py +5 -5
- mindspore/rewrite/parsers/container_parser.py +4 -6
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +31 -98
- mindspore/rewrite/parsers/function_def_parser.py +13 -5
- mindspore/rewrite/parsers/if_parser.py +28 -10
- mindspore/rewrite/parsers/module_parser.py +8 -182
- mindspore/rewrite/parsers/parser.py +1 -5
- mindspore/rewrite/parsers/parser_register.py +1 -1
- mindspore/rewrite/parsers/return_parser.py +5 -10
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
- mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
- mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
- mindspore/run_check/_check_version.py +6 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +9 -19
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +6 -5
- mindspore/train/_utils.py +178 -4
- mindspore/train/amp.py +167 -245
- mindspore/train/anf_ir_pb2.py +14 -2
- mindspore/train/callback/__init__.py +5 -2
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +151 -37
- mindspore/train/callback/_cluster_monitor.py +201 -0
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_flops_collector.py +238 -0
- mindspore/train/callback/_landscape.py +16 -11
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_mindio_ttp.py +443 -0
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +13 -14
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/data_sink.py +6 -5
- mindspore/train/dataset_helper.py +66 -21
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/accuracy.py +7 -7
- mindspore/train/metrics/confusion_matrix.py +8 -6
- mindspore/train/metrics/cosine_similarity.py +6 -4
- mindspore/train/metrics/error.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/perplexity.py +2 -1
- mindspore/train/metrics/topk.py +2 -2
- mindspore/train/mind_ir_pb2.py +89 -15
- mindspore/train/model.py +298 -56
- mindspore/train/serialization.py +501 -221
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/_writer_pool.py +1 -1
- mindspore/train/summary/summary_record.py +56 -34
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/METADATA +3 -3
- mindspore-2.3.0.dist-info/RECORD +1400 -0
- {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/entry_points.txt +1 -0
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
- mindspore/gen_ops.py +0 -273
- mindspore/nn/layer/flash_attention.py +0 -189
- mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
- mindspore/ops/_op_impl/tbe/__init__.py +0 -47
- mindspore/ops/_op_impl/tbe/abs.py +0 -38
- mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/acos.py +0 -37
- mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/acosh.py +0 -37
- mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
- mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
- mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
- mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
- mindspore/ops/_op_impl/tbe/add.py +0 -42
- mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/add_n.py +0 -39
- mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
- mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
- mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
- mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
- mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
- mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
- mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
- mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/asin.py +0 -37
- mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/asinh.py +0 -37
- mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/assign.py +0 -79
- mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
- mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
- mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/atan.py +0 -37
- mindspore/ops/_op_impl/tbe/atan2.py +0 -38
- mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/atanh.py +0 -37
- mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
- mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
- mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
- mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
- mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
- mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
- mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
- mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
- mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
- mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
- mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cast.py +0 -55
- mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/cdist.py +0 -38
- mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/ceil.py +0 -37
- mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/celu.py +0 -39
- mindspore/ops/_op_impl/tbe/centralization.py +0 -39
- mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
- mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/concat.py +0 -40
- mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
- mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
- mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
- mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
- mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
- mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/cos.py +0 -37
- mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/cosh.py +0 -37
- mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
- mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cummin.py +0 -41
- mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
- mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
- mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
- mindspore/ops/_op_impl/tbe/diag.py +0 -38
- mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
- mindspore/ops/_op_impl/tbe/dilation.py +0 -40
- mindspore/ops/_op_impl/tbe/div.py +0 -41
- mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
- mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
- mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
- mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
- mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
- mindspore/ops/_op_impl/tbe/elu.py +0 -38
- mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/equal.py +0 -42
- mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/erf.py +0 -37
- mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfc.py +0 -37
- mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
- mindspore/ops/_op_impl/tbe/exp.py +0 -40
- mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
- mindspore/ops/_op_impl/tbe/expm1.py +0 -37
- mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
- mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/fill.py +0 -56
- mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/flatten.py +0 -48
- mindspore/ops/_op_impl/tbe/floor.py +0 -37
- mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
- mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
- mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
- mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
- mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
- mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
- mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/ger.py +0 -43
- mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/greater.py +0 -43
- mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
- mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
- mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
- mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
- mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
- mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
- mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
- mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/im2col.py +0 -42
- mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
- mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
- mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/inv.py +0 -38
- mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/invert.py +0 -37
- mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/iou.py +0 -38
- mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/is_close.py +0 -40
- mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
- mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
- mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
- mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
- mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
- mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
- mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/lerp.py +0 -38
- mindspore/ops/_op_impl/tbe/less.py +0 -41
- mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/log.py +0 -40
- mindspore/ops/_op_impl/tbe/log1p.py +0 -37
- mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
- mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
- mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
- mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
- mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/matmul.py +0 -53
- mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
- mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
- mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
- mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum.py +0 -39
- mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
- mindspore/ops/_op_impl/tbe/minimum.py +0 -40
- mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mish.py +0 -37
- mindspore/ops/_op_impl/tbe/mod.py +0 -41
- mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/mul.py +0 -37
- mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
- mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
- mindspore/ops/_op_impl/tbe/neg.py +0 -39
- mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
- mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
- mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
- mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
- mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
- mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/pack.py +0 -58
- mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
- mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
- mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/pdist.py +0 -36
- mindspore/ops/_op_impl/tbe/pooling.py +0 -46
- mindspore/ops/_op_impl/tbe/population_count.py +0 -38
- mindspore/ops/_op_impl/tbe/pow.py +0 -41
- mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/prelu.py +0 -37
- mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/range.py +0 -39
- mindspore/ops/_op_impl/tbe/real_div.py +0 -38
- mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
- mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
- mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
- mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
- mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6.py +0 -38
- mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/renorm.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
- mindspore/ops/_op_impl/tbe/rint.py +0 -37
- mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roll.py +0 -42
- mindspore/ops/_op_impl/tbe/round.py +0 -38
- mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
- mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
- mindspore/ops/_op_impl/tbe/select.py +0 -38
- mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/selu.py +0 -39
- mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sgd.py +0 -62
- mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sign.py +0 -38
- mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/sin.py +0 -37
- mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sinh.py +0 -37
- mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/slice.py +0 -58
- mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
- mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax.py +0 -37
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
- mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/softplus.py +0 -37
- mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softsign.py +0 -37
- mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sort.py +0 -38
- mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/split_d.py +0 -38
- mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/split_v.py +0 -39
- mindspore/ops/_op_impl/tbe/splitv.py +0 -39
- mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/square.py +0 -38
- mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
- mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
- mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
- mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
- mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
- mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
- mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
- mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
- mindspore/ops/_op_impl/tbe/sub.py +0 -39
- mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tan.py +0 -38
- mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh.py +0 -37
- mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
- mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
- mindspore/ops/_op_impl/tbe/tile.py +0 -37
- mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
- mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
- mindspore/ops/_op_impl/tbe/transpose.py +0 -60
- mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
- mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
- mindspore/ops/_op_impl/tbe/trunc.py +0 -39
- mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/unpack.py +0 -38
- mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
- mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
- mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
- mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
- mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
- mindspore/ops/_tracefunc.py +0 -241
- mindspore/ops/arg_dtype_cast.py +0 -54
- mindspore/rewrite/api/tree_node_helper.py +0 -60
- mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
- mindspore/rewrite/namespace.py +0 -53
- mindspore-2.2.11.dist-info/RECORD +0 -1920
- {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/WHEEL +0 -0
- {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/top_level.txt +0 -0
mindspore/common/tensor.py
CHANGED
@@ -27,13 +27,15 @@ from mindspore.common.seed import get_seed
 from mindspore import context
 from mindspore import log as logger
 from mindspore.common import dtype as mstype
+from mindspore.common.hook_handle import _TensorHookHandle
 
 from mindspore.common._utils import get_slice_num
 from mindspore.common._register_for_tensor import tensor_operator_registry
 from mindspore._c_expression import Tensor as Tensor_
 from mindspore import _checkparam as validator
-from mindspore._checkparam import check_is_number, is_stub_tensor
+from mindspore._checkparam import check_is_number, is_stub_tensor, check_hook_fn
 from mindspore._check_jit_forbidden_api import jit_forbidden_register
+from mindspore.common.symbol import Symbol
 
 np_types = (np.int8, np.int16, np.int32, np.int64,
             np.uint8, np.uint16, np.uint32, np.uint64, np.float16,
@@ -48,7 +50,8 @@ def _check_input_data_type(input_data):
     valid_dtypes = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64,
                     np.float16, np.float32, np.float64, np.bool_, np.str_, np.complex64, np.complex128)
     if isinstance(input_data, np.ndarray) and input_data.dtype not in valid_dtypes and \
-            input_data.dtype.kind != 'U' and input_data.dtype.kind != 'S':
+            input_data.dtype.kind != 'U' and input_data.dtype.kind != 'S' and \
+            input_data.dtype.kind != 'T':  # Support dtype np.str_ and npy_bfloat16
         new_line = '\n'
         for index, x in np.ndenumerate(input_data):
             if np.array(x).dtype not in valid_dtypes:
@@ -82,11 +85,11 @@ def tensor(input_data=None, dtype=None, shape=None, init=None, internal=False, c
     based on the `dtype` argument.
 
     Please refer to `Creating and Using Tensor
-    <https://www.mindspore.cn/docs/en/
+    <https://www.mindspore.cn/docs/en/master/note/static_graph_syntax_support.html#mindspore-user-defined-data-types>`_ .
 
     The difference between it and the Tensor class is that it adds
     `Annotation
-    <https://www.mindspore.cn/docs/en/
+    <https://www.mindspore.cn/docs/en/master/design/dynamic_graph_and_static_graph.html?#annotation-type>`_
     which can prevent the generation of AnyType compared to the Tensor class.
 
     The arguments and return values are the same as the Tensor class. Also see: :class:`mindspore.Tensor`.
@@ -114,22 +117,25 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     Tensor is a data structure that stores an n-dimensional array.
 
     Note:
-        If
+        If `init` interface is used to initialize `Tensor`, the `Tensor.init_data` API needs to be called to load the
         actual data to `Tensor`.
 
+    Warning:
+        To convert dtype of a `Tensor`, it is recommended to use `Tensor.astype()` rather than
+        `Tensor(sourceTensor, dtype=newDtype)`.
+
     Args:
         input_data (Union[Tensor, float, int, bool, tuple, list, numpy.ndarray]): The data to be stored. It can be
             another Tensor, Python number or NumPy ndarray. Default: ``None`` .
         dtype (:class:`mindspore.dtype`): Used to indicate the data type of the output Tensor. The argument should
             be defined in `mindspore.dtype`. If it is ``None`` , the data type of the output Tensor will be the same
             as the `input_data`. Default: ``None`` .
-        shape (Union[tuple, list, int]): Used to indicate the shape of the output Tensor.
-
-
-
-            must be set. Default: ``None`` .
+        shape (Union[tuple, list, int, :class:`mindspore.Symbol`]): Used to indicate the shape of the output Tensor.
+            If `input_data` is available, `shape` doesn't need to be set. If ``None`` or `Symbol` exists in `shape` ,
+            a tensor of dynamic shape is created, `input_data` doesn't need to be set; if only integers exist in
+            `shape`, a tensor of static shape is created, `input_data` or `init` must be set. Default: ``None`` .
         init (Initializer): The information of init data.
-
+            `init` is used for delayed initialization in parallel mode, when using init, `dtype` and `shape` must be
             set. Default: ``None`` .
         internal (bool): Whether it is created by the framework.
             ``'True'`` means that the tensor is created by framework.
@@ -142,9 +148,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Tensor.
 
     Note:
-        The default value None of `input_data` works as a placeholder,
+        The default value ``None`` of `input_data` works as a placeholder,
+        it does not mean that we can create a NoneType
         Tensor.
-        Tensor with shape contains 0 is not fully tested and supported.
+        Tensor with `shape` contains 0 is not fully tested and supported.
 
     Examples:
         >>> import numpy as np
@@ -200,6 +207,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def __init__(self, input_data=None, dtype=None, shape=None, init=None, internal=False, const_arg=False):
         self.init_finished = False
+        if isinstance(input_data, (Tensor, Tensor_)) and dtype is not None:
+            logger.info("It is suggested to use 'Tensor.astype()' to convert the dtype of a Tensor.")
+            _cast = tensor_operator_registry.get("cast")
+            input_data = _cast(input_data, dtype)
+
         if is_stub_tensor(input_data):
             input_data = input_data.stub_sync()
 
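Note on the constructor hunk above: passing `dtype` together with an existing Tensor now casts eagerly through the registered cast op and logs a hint, while `Tensor.astype()` remains the recommended conversion path. A minimal sketch of the two routes (values illustrative):

    import numpy as np
    import mindspore as ms

    src = ms.Tensor(np.array([1.0, 2.0]), ms.float32)
    a = src.astype(ms.float16)            # recommended explicit conversion
    b = ms.Tensor(src, dtype=ms.float16)  # now routed through 'cast', with an info log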
@@ -218,8 +230,16 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         if isinstance(input_data, np_types):
             input_data = np.array(input_data)
 
-        if
-
+        if shape is not None:
+            if isinstance(shape, numbers.Number):
+                shape = (shape,)
+            elif isinstance(shape, Symbol):
+                self.symbolic_shape = [shape]
+                shape = (None,)
+            elif isinstance(shape, (list, tuple)) and any(isinstance(s, Symbol) for s in shape):
+                self.symbolic_shape = [item.to_dict() if isinstance(item, Symbol) else item for item in shape]
+                shape_without_symbol = (None if isinstance(item, Symbol) else item for item in shape)
+                shape = list(shape_without_symbol) if isinstance(shape, list) else tuple(shape_without_symbol)
 
         _check_tensor_input(input_data, dtype, shape, init)
 
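The branch above is what makes `mindspore.Symbol` usable as a shape entry: Symbol positions are recorded in `symbolic_shape` and replaced by `None`, yielding a dynamic-shape placeholder tensor. A rough sketch, assuming `Symbol`'s default constructor:

    import mindspore as ms
    from mindspore import Symbol

    s = Symbol()  # a named unknown dimension (constructor options assumed)
    dyn = ms.Tensor(shape=(s, 3), dtype=ms.float32)
    # dyn.symbolic_shape records the Symbol; the stored shape becomes (None, 3)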
@@ -258,6 +278,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self.slice_num_of_persistent_data_ = None
         self.slice_shape_of_persistent_data_ = None
 
+        # the auto gradient information
+        self._grad = None
+        self._grad_fn = None
+        self._requires_grad = False
+        self._retain_grad = False
+
     @classmethod
     def __subclasshook__(cls, sub):
         """
@@ -295,19 +321,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def __eq__(self, other):
         if not isinstance(other, (int, float, Tensor)):
             return False
-        # bool type is not supported for `Equal` operator in backend.
-        if self.dtype == mstype.bool_ or (isinstance(other, Tensor) and other.dtype == mstype.bool_):
-            if isinstance(other, Tensor):
-                return Tensor(np.array(self.asnumpy() == other.asnumpy()))
-            return Tensor(np.array(self.asnumpy() == other))
         return tensor_operator_registry.get('__eq__')(self, other)
 
     def __ne__(self, other):
         if not isinstance(other, (int, float, Tensor)):
             return True
-        # bool type is not supported for `NotEqual` operator in backend.
-        if self.dtype == mstype.bool_ or (isinstance(other, Tensor) and other.dtype == mstype.bool_):
-            return Tensor(np.array(self.asnumpy() != other.asnumpy()))
         return tensor_operator_registry.get('__ne__')(self, other)
 
     def __hash__(self):
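With the numpy fallback removed above, bool-dtype comparison is delegated to the backend operators like any other dtype. A quick sketch of the behavior this assumes:

    import numpy as np
    import mindspore as ms

    a = ms.Tensor(np.array([True, False]))
    b = ms.Tensor(np.array([True, True]))
    print(a == b)  # backend Equal now handles bool tensors directly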
@@ -322,7 +340,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         return out
 
     def __round__(self):
-        out = tensor_operator_registry.get('round')(
+        out = tensor_operator_registry.get('round')(self)
         return out
 
     def __bool__(self):
@@ -360,7 +378,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         return self
 
     def __abs__(self):
-        self._init_check()
         return tensor_operator_registry.get('abs')(self)
 
     def __add__(self, other):
@@ -488,8 +505,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def __str__(self):
         if self.dtype == mstype.type_none:
             return "Unknown Tensor type!"
-        if self.dtype == mstype.bfloat16:
-            return str(self.float().asnumpy())
         return str(self.asnumpy())
 
     def __getstate__(self):
@@ -509,6 +524,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return self._shape
 
+    @shape.setter
+    def shape(self, shape_value):
+        r"""
+        Set the shape value.
+        """
+        self._shape = shape_value
+
     @property
     def dtype(self):
         """Return the dtype of the tensor (:class:`mindspore.dtype`)."""
@@ -544,6 +566,83 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return len(self._shape)
 
+    @property
+    def grad(self):
+        r"""
+        Get the gradient value.
+        """
+        return self._grad
+
+    @grad.setter
+    def grad(self, grad):
+        r"""
+        Set the gradient value.
+        """
+        self._grad = grad
+
+    @property
+    def grad_fn(self):
+        r"""
+        The function for backward.
+        """
+        return self._grad_fn
+
+    @grad_fn.setter
+    def grad_fn(self, grad_fn):
+        r"""
+        Set the function for backward.
+        """
+        self._grad_fn = grad_fn
+
+    @property
+    def is_leaf(self):
+        r"""
+        Whether the stub tensor is leaf.
+        They will be a leaf if they have requires_grad and requires_grad is False,
+        Or they were created by user.
+        """
+        return self._requires_grad is False or self._grad_fn is None
+
+    @property
+    def requires_grad(self):
+        r"""
+        Whether the stub tensor need requires grad.
+        """
+        return self._requires_grad
+
+    @requires_grad.setter
+    def requires_grad(self, requires_grad):
+        r"""
+        Mark the stub tensor whether need requires gradient.
+        """
+        self._requires_grad = requires_grad
+
+    def retain_grad(self):
+        r"""
+        Enable the stub tensor which is not non-leaf to have the grad during backward().
+        """
+        if not self._requires_grad:
+            RuntimeError("can't retain_grad on Tensor that has requires_grad = False.")
+        self._retain_grad = self._grad_fn is not None
+
+    @property
+    def retains_grad(self):
+        r"""
+        Is True if the stub tensor is non-leaf and its grad is enabled to be populated during backward().
+        """
+        return self._retain_grad
+
+    def backward(self, grad=None):
+        r"""
+        Calculate the gradient.
+        """
+        if grad is None:
+            grad = Tensor(np.ones(self.shape), self.dtype)
+        if self._grad_fn is not None:
+            self._grad_fn.apply(grad)
+        elif self._requires_grad:
+            self._grad = grad
+
     @property
     def H(self):
         """
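The block above adds a PyTorch-style autograd surface (`grad`, `grad_fn`, `requires_grad`, `retain_grad`, `backward`); note that `retain_grad` as written constructs a RuntimeError without raising it. A sketch of the intended flow, under the assumption that the runtime populates `grad_fn` on non-leaf results in PyNative mode:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.ones((2, 2)), ms.float32)
    x.requires_grad = True  # mark a leaf tensor for gradient tracking
    x.backward()            # grad defaults to ones(x.shape) when omitted
    print(x.grad)           # a leaf with requires_grad stores the seeded gradient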
@@ -644,6 +743,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1 3]
          [2 4]]
         """
+        if self.ndim <= 1:
+            return self
         return self.transpose()
 
     @staticmethod
@@ -710,28 +811,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.arccosh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('acosh')(self)
 
     def arcsin(self):
         r"""
         For details, please refer to :func:`mindspore.ops.arcsin`.
         """
-        self._init_check()
         return tensor_operator_registry.get('asin')(self)
 
     def arctan(self):
         r"""
         For details, please refer to :func:`mindspore.ops.arctan`.
         """
-        self._init_check()
         return tensor_operator_registry.get('atan')(self)
 
     def arctan2(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.arctan2`.
         """
-        self._init_check()
         return tensor_operator_registry.get('atan2')(self, other)
 
     def cauchy(self, median=0.0, sigma=1.0):
@@ -766,7 +863,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[8.79836142e-01, 9.37541723e-01]])
 
         """
-        self._init_check()
         out = tensor_operator_registry.get('cauchy')(list(self.shape), median, sigma)()
         return out.astype(self.dtype)
 
@@ -804,7 +900,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1.2788825 2.3305743]
          [14.944194 0.16303174]]
         """
-        self._init_check()
         return tensor_operator_registry.get('log_normal')(mean, std)(self)
 
     @jit_forbidden_register
@@ -837,29 +932,23 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.bincount`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bincount')(self, weights, minlength)
 
     def chunk(self, chunks, axis=0):
         r"""
         For details, please refer to :func:`mindspore.ops.chunk`.
         """
-        self._init_check()
         return tensor_operator_registry.get('chunk')(self, chunks, axis)
 
     def item(self, index=None):
         """
         Get the item at the specified index of the tensor.
 
-        Note:
-            Tensor.item returns a Tensor scalar instead of a Python scalar. And if the tensor is a Tensor scalar,
-            Tensor.item will return the numpy.ndarray.
-
         Args:
             index (Union[None, int, tuple(int)]): The index in Tensor. Default: ``None``.
 
         Returns:
-            A
+            A scalar, type is defined by the dtype of the Tensor.
 
         Raises:
             ValueError: If the length of the `index` is not equal to self.ndim.
@@ -877,7 +966,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(x.item())
         1.2
         """
-
+
+        if index is not None:
+            output = self.asnumpy().item(index)
+        else:
+            output = self.asnumpy().item()
         return output
 
     def itemset(self, *args):
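`item` now simply delegates to `numpy.ndarray.item`, so it returns a Python scalar instead of the old Tensor/ndarray result. For example:

    import numpy as np
    import mindspore as ms

    t = ms.Tensor(np.array([[1.2, 2.5]], dtype=np.float32))
    v = t.item((0, 1))                     # Python float 2.5
    w = ms.Tensor(1.2, ms.float32).item()  # index can be omitted for a scalar tensor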
@@ -936,7 +1029,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(x.get_bytes())
         b'\x01\x00\x02\x00\x03\x00'
         """
-        self._init_check()
         return Tensor_.get_bytes(self)
 
     def asnumpy(self):
@@ -958,9 +1050,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(y)
         [11. 2.]
         """
-        self.
-
-            raise TypeError(f"For asnumpy, the type of tensor cannot be BFloat16, but got {self.dtype}.")
+        if self.has_init:
+            self.init_data()
         return Tensor_.asnumpy(self)
 
     def numpy(self):
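`asnumpy` now materializes delayed-init tensors automatically instead of rejecting them, and the bfloat16 TypeError guard is gone. A minimal sketch using a standard initializer:

    import mindspore as ms
    from mindspore.common.initializer import One

    t = ms.Tensor(shape=(2, 2), dtype=ms.float32, init=One())
    arr = t.asnumpy()  # init_data() runs implicitly because has_init is set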
@@ -1004,21 +1095,18 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.slice_scatter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('slice_scatter')(self, src, axis, start, end, step)
 
     def select_scatter(self, src, axis, index):
         """
         For details, please refer to :func:`mindspore.ops.select_scatter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('select_scatter')(self, src, axis, index)
 
     def histc(self, bins=100, min=0., max=0.):
         """
         For details, please refer to :func:`mindspore.ops.histc`.
         """
-        self._init_check()
         validator.check_value_type('min', min, (int, float,), 'Tensor.histc')
         validator.check_value_type('max', max, (int, float,), 'Tensor.histc')
         return tensor_operator_registry.get('histc')(self, bins, float(min), float(max))
@@ -1027,7 +1115,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.geqrf`.
         """
-        self._init_check()
         return tensor_operator_registry.get('geqrf')(self)
 
     def slice_shape_of_persistent_data(self):
@@ -1069,14 +1156,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> from mindspore import Tensor, ops
         >>> x = Tensor([[1, 2, 3], [4, 5, 6]], dtype=ms.float32)
         >>> y = ops.transpose(x, (1, 0))
-        >>> y.contiguous()
-        >>>
-
-        [[1. 2. 3.]
-         [4. 5. 6.]]
+        >>> z = y.contiguous()
+        >>> print(z.is_contiguous())
+        True
         """
-
-        return self
+        return tensor_operator_registry.get('contiguous')(self)
 
     def is_contiguous(self):
         """
@@ -1096,6 +1180,95 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return Tensor_.is_contiguous(self)
 
+    def stride(self, dim=None):
+        """
+        The stride to jump from one element to the next in the input dim.
+        When no parameters are passed in, a list of stride for all dimensions is returned.
+
+        Args:
+            dim (int): The dim of stride from one element to the next.
+
+        Returns:
+            Int, the stride of tensor.
+
+        Raises:
+            TypeError: `dim` is not an int.
+
+        Examples:
+            >>> import mindspore as ms
+            >>> x = ms.Tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], dtype=ms.float32)
+            >>> x.stride()
+            [5, 1]
+        """
+        stride = Tensor_.stride(self)
+        if dim is None:
+            return stride
+        return stride[dim]
+
+    def storage_offset(self):
+        """
+        Tensor's offset in the underlying storage in terms of the number of storage elements.
+
+        Returns:
+            int, tensor's offset in the underlying storage in terms of number of storage elements.
+
+        Examples:
+            >>> import mindspore as ms
+            >>> x = ms.Tensor([1, 2, 3, 4, 5], dtype=ms.float32)
+            >>> ret = x.storage_offset()
+            >>> print(ret)
+            0
+        """
+        return Tensor_.storage_offset(self)
+
+    def register_hook(self, hook_fn):
+        """
+        Registers a backward hook for tensor.
+
+        Note:
+            - The `register_backward_hook(hook_fn)` does not work in graph mode or functions decorated with 'jit'.
+            - The 'hook_fn' must be defined as the following code. `grad` is the gradient passed to the tensor,
+              which may be modified by returning a new output gradient.
+            - The 'hook_fn' should have the following signature:
+              hook_fn(grad) -> New output gradient, but can not return None or not set return value.
+
+        Args:
+            hook_fn (function): Python function. Tensor backward hook function.
+
+        Returns:
+            A handle corresponding to the `hook_fn` . The handle can be used to remove the added `hook_fn` by calling
+            `handle.remove()` .
+
+        Raises:
+            TypeError: If the `hook_fn` is not a function of python.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import mindspore as ms
+            >>> from mindspore import Tensor
+            >>> ms.set_context(mode=ms.PYNATIVE_MODE)
+            >>> def hook_fn(grad):
+            ...     return grad * 2
+            ...
+            >>> def hook_test(x, y):
+            ...     z = x * y
+            ...     z.register_hook(hook_fn)
+            ...     z = z * y
+            ...     return z
+            ...
+            >>> ms_grad = ms.grad(hook_test, grad_position=(0,1))
+            >>> output = ms_grad(Tensor(1, ms.float32), Tensor(2, ms.float32))
+            >>> print(output)
+            (Tensor(shape=[], dtype=Float32, value=8), Tensor(shape=[], dtype=Float32, value=6))
+        """
+        if not check_hook_fn("register_hook", hook_fn):
+            return _TensorHookHandle()
+        handle = _TensorHookHandle()
+        handle.id = Tensor_.register_hook(self, hook_fn)
+        return handle
+
     def flush_from_cache(self):
         """
         Flush cache data to host if tensor is cache enable.
@@ -1108,35 +1281,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(y)
         None
         """
-        self._init_check()
         Tensor_._flush_from_cache(self)
 
     def addcdiv(self, tensor1, tensor2, value=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addcdiv`.
         """
-
-        return tensor_operator_registry.get('addcdiv')()(self, tensor1, tensor2, value)
+        return tensor_operator_registry.get('addcdiv')(self, tensor1, tensor2, value)
 
     def addcmul(self, tensor1, tensor2, value=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addcmul`.
         """
-
-        return tensor_operator_registry.get('addcmul')()(self, tensor1, tensor2, value)
+        return tensor_operator_registry.get('addcmul')(self, tensor1, tensor2, value)
 
     def add(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.add`.
         """
-
-        return tensor_operator_registry.get('add')()(self, other)
+        return tensor_operator_registry.get('add')(self, other)
 
     def subtract(self, other, *, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.subtract`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sub')(self, alpha * other)
 
     def true_divide(self, value):
@@ -1144,7 +1312,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Alias for Tensor.div() with :math:`rounding\_mode=None`.
         For details, please refer to :func:`mindspore.ops.div`.
         """
-        self._init_check()
         return tensor_operator_registry.get('div')(self, value, rounding_mode=None)
 
     def triu(self, diagonal=0):
@@ -1155,7 +1322,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         This is an experimental API that is subject to change or deletion.
 
         """
-        self._init_check()
         validator.check_value_type('diagonal', diagonal, [int], 'triu')
         return tensor_operator_registry.get('triu')(self, diagonal)
 
@@ -1163,65 +1329,56 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.addbmm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('addbmm')(self, batch1, batch2, beta=beta, alpha=alpha)
 
     def addmm(self, mat1, mat2, *, beta=1, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addmm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('addmm')(self, mat1, mat2, beta=beta, alpha=alpha)
 
     def addr(self, vec1, vec2, beta=1, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addr`.
         """
-        self._init_check()
         return tensor_operator_registry.get('addr')(self, vec1, vec2, beta=beta, alpha=alpha)
 
     def adjoint(self):
         r"""
         For details, please refer to :func:`mindspore.ops.adjoint`.
         """
-        self._init_check()
         return tensor_operator_registry.get('adjoint')(self)
 
     def all(self, axis=None, keep_dims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.all`.
         """
-        self._init_check()
         return tensor_operator_registry.get('all')(self, axis, keep_dims)
 
     def angle(self):
         r"""
         For details, please refer to :func:`mindspore.ops.angle`.
         """
-        self._init_check()
         return tensor_operator_registry.get('angle')(self)
 
     def any(self, axis=None, keep_dims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.any`.
         """
-        self._init_check()
         if axis is None:
             axis = ()
-        return tensor_operator_registry.get('any')(
+        return tensor_operator_registry.get('any')(self, axis, keep_dims)
 
     def atan2(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.atan2`.
         """
-        self._init_check()
         return tensor_operator_registry.get('atan2')(self, other)
 
     def baddbmm(self, batch1, batch2, beta=1, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.baddbmm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('baddbmm')(self, batch1, batch2, beta=beta, alpha=alpha)
 
     def view(self, *shape):
@@ -1245,7 +1402,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          [3. 2.]
          [3. 4.]]
         """
-        self._init_check()
         if not shape:
             raise ValueError("The shape variable should not be empty")
         if isinstance(shape[0], tuple):
@@ -1279,7 +1435,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output)
         [1. 2. 3. 2. 3. 4.]
         """
-        self._init_check()
         if not isinstance(other, (Tensor, Tensor_)):
             raise TypeError(f"For view_as, the input other must be a Tensor, but got {type(other)}")
         return self.view(other.shape)
@@ -1288,42 +1443,36 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.t`.
         """
-        self._init_check()
         return tensor_operator_registry.get("t")(self)
 
     def bitwise_and(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_and`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bitwise_and')(self, other)
 
     def bitwise_or(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_or`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bitwise_or')(self, other)
 
     def bitwise_xor(self, other):
         """
        For details, please refer to :func:`mindspore.ops.bitwise_xor`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bitwise_xor')(self, other)
 
     def bitwise_left_shift(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_left_shift`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bitwise_left_shift')(self, other)
 
     def bitwise_right_shift(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_right_shift`.
         """
-        self._init_check()
         _cast = tensor_operator_registry.get('cast')
         other = _cast(other, self.dtype)
         return tensor_operator_registry.get('bitwise_right_shift')(self, other)
@@ -1332,50 +1481,43 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.scatter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('scatter')(self, axis, index, src)
 
     def scatter_mul(self, indices, updates):
         """
         For details, please refer to :func:`mindspore.ops.scatter_mul`.
         """
-        self._init_check()
         return tensor_operator_registry.get('tensor_scatter_mul')(self, indices, updates)
 
     def scatter_div(self, indices, updates):
         """
         For details, please refer to :func:`mindspore.ops.scatter_div`.
         """
-        self._init_check()
         return tensor_operator_registry.get('tensor_scatter_div')(self, indices, updates)
 
     def ger(self, vec2):
         """
         For details, please refer to :func:`mindspore.ops.ger`.
         """
-        self._init_check()
         return tensor_operator_registry.get('ger')(self, vec2)
 
     def gt(self, x):
         """
         For details, please refer to :func:`mindspore.ops.gt`.
         """
-
-        return tensor_operator_registry.get('gt')()(self, x)
+        return tensor_operator_registry.get('gt')(self, x)
 
     def ge(self, x):
         """
         For details, please refer to :func:`mindspore.ops.ge`.
         """
-
-        return tensor_operator_registry.get('ge')()(self, x)
+        return tensor_operator_registry.get('ge')(self, x)
 
     def broadcast_to(self, shape):
         """
         For details, please refer to :func:`mindspore.ops.broadcast_to`.
         """
-
-        return tensor_operator_registry.get('broadcast_to')(shape)(self)
+        return tensor_operator_registry.get('broadcast_to')(self, shape)
 
     def expand_as(self, x):
         """
@@ -1399,84 +1541,72 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1. 2. 3.]
          [1. 2. 3.]]
         """
-
-        return tensor_operator_registry.get('broadcast_to')(x.shape)(self)
+        return tensor_operator_registry.get('broadcast_to')(self, x.shape)
 
     def exp(self):
         """
         For details, please refer to :func:`mindspore.ops.exp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('exp')(self)
 
     def real(self):
         r"""
         For details, please refer to :func:`mindspore.ops.real`.
         """
-        self._init_check()
         return tensor_operator_registry.get('real')(self)
 
     def rsqrt(self):
         r"""
         For details, please refer to :func:`mindspore.ops.rsqrt`.
         """
-        self._init_check()
         return tensor_operator_registry.get('rsqrt')(self)
 
     def reciprocal(self):
         r"""
         For details, please refer to :func:`mindspore.ops.reciprocal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('reciprocal')(self)
 
     def sqrt(self):
         """
         For details, please refer to :func:`mindspore.ops.sqrt`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sqrt')(self)
 
     def square(self):
         """
         For details, please refer to :func:`mindspore.ops.square`.
         """
-        self._init_check()
         return tensor_operator_registry.get('square')(self)
 
     def sub(self, y):
         r"""
         For details, please refer to :func:`mindspore.ops.sub`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sub')(self, y)
 
     def tan(self):
         """
         For details, please refer to :func:`mindspore.ops.tan`.
         """
-
-        return tensor_operator_registry.get('tan')()(self)
+        return tensor_operator_registry.get('tan')(self)
 
     def tanh(self):
         r"""
         For details, please refer to :func:`mindspore.ops.tanh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('tanh')(self)
 
     def cosh(self):
         r"""
         For details, please refer to :func:`mindspore.ops.cosh`.
         """
-
-        return tensor_operator_registry.get('cosh')()(self)
+        return tensor_operator_registry.get('cosh')(self)
 
     def acos(self):
         r"""
         For details, please refer to :func:`mindspore.ops.acos`.
         """
-        self._init_check()
         return tensor_operator_registry.get('acos')(self)
 
     def arccos(self):
@@ -1489,35 +1619,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.cos`.
         """
-        self._init_check()
         return tensor_operator_registry.get('cos')(self)
 
     def cov(self, *, correction=1, fweights=None, aweights=None):
         r"""
         For details, please refer to :func:`mindspore.ops.cov`.
         """
-        self._init_check()
         return tensor_operator_registry.get('cov')(self, correction=correction, fweights=fweights, aweights=aweights)
 
     def acosh(self):
         """
         For details, please refer to :func:`mindspore.ops.acosh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('acosh')(self)
 
     def asin(self):
         r"""
         For details, please refer to :func:`mindspore.ops.asin`.
         """
-        self._init_check()
         return tensor_operator_registry.get('asin')(self)
 
     def abs(self):
         """
         For details, please refer to :func:`mindspore.ops.abs`.
         """
-        self._init_check()
         return tensor_operator_registry.get('abs')(self)
 
     def absolute(self):
@@ -1530,14 +1655,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.ceil`.
         """
-
-        return tensor_operator_registry.get('ceil')()(self)
+        return tensor_operator_registry.get('ceil')(self)
 
     def floor(self):
         """
         For details, please refer to :func:`mindspore.ops.floor`.
         """
-        self._init_check()
         return tensor_operator_registry.get('floor')(self)
 
     def floor_divide(self, other):
@@ -1547,21 +1670,18 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         .. warning::
             This is an experimental API that is subject to change or deletion.
         """
-        self._init_check()
         return tensor_operator_registry.get('floor_divide')(self, other)
 
     def lerp(self, end, weight):
         """
         For details, please refer to :func:`mindspore.ops.lerp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('lerp')(self, end, weight)
 
     def negative(self):
         r"""
         For details, please refer to :func:`mindspore.ops.negative`.
         """
-        self._init_check()
         return tensor_operator_registry.get("negative")(self)
 
     # pylint: disable=redefined-builtin
@@ -1569,14 +1689,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.norm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('norm')(self, ord, dim, keepdim, dtype=dtype)
 
     def renorm(self, p, axis, maxnorm):
         """
         For details, please refer to :func:`mindspore.ops.renorm`.
         """
-        self._init_check()
         return tensor_operator_registry.get("renorm")(self, p, axis, maxnorm)
 
     def approximate_equal(self, other, tolerance=1e-5):
@@ -1586,7 +1704,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         validator.check_isinstance("x", self, Tensor)
         validator.check_isinstance("y", other, Tensor)
         validator.check_isinstance("tolerance", tolerance, float)
-        self._init_check()
         input_x = self.copy() if self.dtype == mstype.float32 else self.astype(mstype.float16)
         input_y = other.copy() if other.dtype == mstype.float32 else other.astype(mstype.float16)
         return tensor_operator_registry.get('__lt__')(tensor_operator_registry.get('abs')(
@@ -1597,14 +1714,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.log1p`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log1p')(self)
 
     def logit(self, eps=None):
         r"""
         For details, please refer to :func:`mindspore.ops.logit`.
         """
-        self._init_check()
         if eps is None:
             eps = -1.0
         validator.check_value_type('eps', eps, (float,), 'Tensor.logit')
@@ -1614,14 +1729,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.logaddexp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logaddexp')(self, other)
 
     def logaddexp2(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.logaddexp2`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logaddexp2')(self, other)
 
     def logcumsumexp(self, axis):
@@ -1631,149 +1744,128 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         .. warning::
             This is an experimental API that is subject to change or deletion.
         """
-        self._init_check()
         return tensor_operator_registry.get('logcumsumexp')(self, axis)
 
     def logsumexp(self, axis, keepdims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.logsumexp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logsumexp')(self, axis, keepdims)
 
     def logdet(self):
         r"""
         For details, please refer to :func:`mindspore.ops.logdet`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logdet')(self)
 
     def i0(self):
         r"""
         For details, please refer to :func:`mindspore.ops.i0`.
         """
-        self._init_check()
         return tensor_operator_registry.get('i0')(self)
 
     def isclose(self, x2, rtol=1e-05, atol=1e-08, equal_nan=False):
         """
         For details, please refer to :func:`mindspore.ops.isclose`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isclose')(self, x2, rtol, atol, equal_nan)
 
     def isneginf(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isneginf`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isneginf')(self)
 
     def isposinf(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isposinf`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isposinf')(self)
 
     def isreal(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isreal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isreal')(self)
 
     def isfinite(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isfinite`.
         """
-
-        return tensor_operator_registry.get('isfinite')()(self)
+        return tensor_operator_registry.get('isfinite')(self)
 
     def is_complex(self):
         r"""
         For details, please refer to :func:`mindspore.ops.is_complex`.
         """
-        self._init_check()
         return tensor_operator_registry.get('is_complex')(self)
 
     def inv(self):
         r"""
         For details, please refer to :func:`mindspore.ops.inv`.
         """
-        self._init_check()
         return tensor_operator_registry.get('inv')(self)
 
     def inverse(self):
         r"""
         For details, please refer to :func:`mindspore.ops.inverse`.
         """
-        self._init_check()
         return tensor_operator_registry.get('inverse')(self)
 
     def invert(self):
         r"""
         For details, please refer to :func:`mindspore.ops.invert`.
         """
-        self._init_check()
         return tensor_operator_registry.get('invert')(self)
 
     def pow(self, exponent):
         r"""
         For details, please refer to :func:`mindspore.ops.pow`.
         """
-
-        return tensor_operator_registry.get('pow')()(self, exponent)
+        return tensor_operator_registry.get('pow')(self, exponent)
 
     def log(self):
         """
         For details, please refer to :func:`mindspore.ops.log`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log')(self)
 
     def log10(self):
         r"""
         For details, please refer to :func:`mindspore.ops.log10`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log10')(self)
 
     def log2(self):
         r"""
         For details, please refer to :func:`mindspore.ops.log2`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log2')(self)
 
     def mean(self, axis=None, keep_dims=False):
         """
         For details, please refer to :func:`mindspore.ops.mean`.
         """
-        self._init_check()
         return tensor_operator_registry.get('mean')(self, axis, keep_dims)
 
     def amin(self, axis=None, keepdims=False, *, initial=None, where=None):
         """
         For details, please refer to :func:`mindspore.ops.amin`.
         """
-        self._init_check()
         if axis is None:
             axis = ()
         return tensor_operator_registry.get('amin')(self, axis, keepdims, initial=initial, where=where)
 
     def reverse(self, axis):
         """
-        For details, please refer to :func:`mindspore.ops.
+        For details, please refer to :func:`mindspore.ops.flip`.
         """
-
-        return tensor_operator_registry.get('reverse')(axis)(self)
+        return tensor_operator_registry.get('flip')(self, axis)
 
     def amax(self, axis=None, keepdims=False, *, initial=None, where=None):
         """
         For details, please refer to :func:`mindspore.ops.amax`.
         """
-        self._init_check()
         if axis is None:
             axis = ()
         return tensor_operator_registry.get('amax')(self, axis, keepdims, initial=initial, where=where)
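`Tensor.reverse` is now a thin wrapper over `ops.flip` with the natural `(self, axis)` argument order, replacing the old instantiate-then-call pattern. For example:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.array([[1, 2], [3, 4]]))
    y = x.reverse(axis=[0])  # rows reversed via ops.flip: [[3, 4], [1, 2]]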
@@ -1782,28 +1874,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.aminmax`.
         """
-        self._init_check()
         return tensor_operator_registry.get('aminmax')(self, axis=axis, keepdims=keepdims)
 
     def reverse_sequence(self, seq_lengths, seq_dim=0, batch_dim=0):
         """
         For details, please refer to :func:`mindspore.ops.reverse_sequence`.
         """
-
-        return tensor_operator_registry.get("reverse_sequence")(seq_dim, batch_dim)(self, seq_lengths)
+        return tensor_operator_registry.get("reverse_sequence")(self, seq_lengths, seq_dim, batch_dim)
 
-    def prod(self, axis=None, keep_dims=False):
+    def prod(self, axis=None, keep_dims=False, dtype=None):
         """
         For details, please refer to :func:`mindspore.ops.prod`.
         """
-
-        return tensor_operator_registry.get('prod')(self, axis, keep_dims)
+        return tensor_operator_registry.get('prod')(self, axis, keep_dims, dtype)
 
     def select(self, condition, y):
         r"""
         For details, please refer to :func:`mindspore.ops.select`.
         """
-        self._init_check()
         if not isinstance(condition, Tensor):
             raise TypeError(f"For 'Tensor.select', the argument 'condition' should be Tensor,"
                             f" but got {type(condition)}.")
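`prod` gains a `dtype` parameter that is forwarded straight to `ops.prod`, which is useful for accumulating in a wider type. For example:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.array([100, 200, 300], dtype=np.int32))
    p = x.prod(dtype=ms.int64)  # accumulate the product in int64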
@@ -1818,7 +1906,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
1818
1906
|
f" then the tensor type should be float32 but got {self.dtype}")
|
|
1819
1907
|
input_y = y
|
|
1820
1908
|
if isinstance(y, (int, float)):
|
|
1821
|
-
input_y = tensor_operator_registry.get('zeros_like')(
|
|
1909
|
+
input_y = tensor_operator_registry.get('zeros_like')(self) + y
|
|
1822
1910
|
if isinstance(y, int):
|
|
1823
1911
|
input_y = tensor_operator_registry.get('cast')(input_y, mstype.int32)
|
|
1824
1912
|
else:
|
|
@@ -1829,22 +1917,46 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
1829
1917
|
r"""
|
|
1830
1918
|
For details, please refer to :func:`mindspore.ops.transpose`.
|
|
1831
1919
|
"""
|
|
1832
|
-
self._init_check()
|
|
1833
1920
|
perm = validator.check_transpose_axis(axes, self.ndim)
|
|
1834
|
-
return tensor_operator_registry.get('transpose')(
|
|
1921
|
+
return tensor_operator_registry.get('transpose')(self, perm)
|
|
1835
1922
|
|
|
1836
1923
|
def col2im(self, output_size, kernel_size, dilation, padding_value, stride):
|
|
1837
1924
|
"""
|
|
1838
1925
|
For details, please refer to :func:`mindspore.ops.col2im`.
|
|
1839
1926
|
"""
|
|
1840
|
-
self._init_check()
|
|
1841
1927
|
return tensor_operator_registry.get('col2im')(self, output_size, kernel_size, dilation, padding_value, stride)
|
|
1842
1928
|
|
|
1843
1929
|
def reshape(self, *shape):
|
|
1930
|
+
r"""
|
|
1931
|
+
Rearranges the input Tensor based on the given `shape` .
|
|
1932
|
+
|
|
1933
|
+
The `shape` can only have one -1 at most, in which case it's inferred from the remaining dimensions and
|
|
1934
|
+
the number of elements in the input.
|
|
1935
|
+
|
|
1936
|
+
Args:
|
|
1937
|
+
shape (Union[int, tuple[int], list[int]]): If `shape` is a tuple or list, its elements should be
|
|
1938
|
+
integers, and only constant value is allowed. i.e., :math:`(y_1, y_2, ..., y_S)`.
|
|
1939
|
+
|
|
1940
|
+
Returns:
|
|
1941
|
+
Tensor, If the given `shape` does not contain -1, the `shape` of tensor is :math:`(y_1, y_2, ..., y_S)`.
|
|
1942
|
+
If the k-th position in the given `shape` is -1, the `shape` of tensor is :math:`(y_1, ..., y_{k-1},
|
|
1943
|
+
\frac{\prod_{i=1}^{R}x_{i}}{y_1\times ...\times y_{k-1}\times y_{k+1}\times...\times y_S} , y_{k+1},
|
|
1944
|
+
..., y_S)`, in where the shape of input tensor is :math:`(x_1, x_2, ..., x_R)`.
|
|
1945
|
+
|
|
1946
|
+
Supported Platforms:
|
|
1947
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1948
|
+
|
|
1949
|
+
Examples:
|
|
1950
|
+
>>> import mindspore
|
|
1951
|
+
>>> import numpy as np
|
|
1952
|
+
>>> from mindspore import Tensor, ops
|
|
1953
|
+
>>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
|
|
1954
|
+
>>> output = input.reshape(3, 2)
|
|
1955
|
+
>>> print(output)
|
|
1956
|
+
[[-0.1 0.3]
|
|
1957
|
+
[ 3.6 0.4]
|
|
1958
|
+
[ 0.5 -3.2]]
|
|
1844
1959
|
"""
|
|
1845
|
-
For details, please refer to :func:`mindspore.ops.reshape`.
|
|
1846
|
-
"""
|
|
1847
|
-
self._init_check()
|
|
1848
1960
|
new_shape = validator.check_reshape_shp(shape)
|
|
1849
1961
|
return tensor_operator_registry.get('reshape')(self, new_shape)
|
|
1850
1962
|
|
|
@@ -1873,7 +1985,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             [ 3.6  0.4]
             [ 0.5 -3.2]]
         """
-        self._init_check()
         return tensor_operator_registry.get('reshape')(self, other.shape)
 
     def ravel(self):
@@ -1883,13 +1994,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Returns:
             Tensor, a 1-D tensor, containing the same elements of the input.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
         See also:
-            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.flatten`: Return a copy of the tensor collapsed into one dimension.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -1899,7 +2009,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(output.shape)
             (24,)
         """
-        self._init_check()
         reshape_op = tensor_operator_registry.get('reshape')
         return reshape_op(self, (-1,))
 
@@ -1907,77 +2016,66 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.round`.
         """
-
-        return tensor_operator_registry.get('round')()(self)
+        return tensor_operator_registry.get('round')(self)
 
     def roll(self, shifts, dims):
         """
         For details, please refer to :func:`mindspore.ops.roll`.
         """
-        self._init_check()
         return tensor_operator_registry.get('roll')(shifts, dims)(self)
 
     def rot90(self, k, dims):
         r"""
         For details, please refer to :func:`mindspore.ops.rot90`.
         """
-        self._init_check()
         return tensor_operator_registry.get('rot90')(self, k, dims)
 
     def deg2rad(self):
         r"""
         For details, please refer to :func:`mindspore.ops.deg2rad`.
         """
-        self._init_check()
         return tensor_operator_registry.get('deg2rad')(self)
 
     def dot(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.dot`.
         """
-        self._init_check()
         return tensor_operator_registry.get('dot')(self, other)
 
     def outer(self, vec2):
         r"""
         For details, please refer to :func:`mindspore.ops.outer`.
         """
-        self._init_check()
         return tensor_operator_registry.get('outer')(self, vec2)
 
     def rad2deg(self):
         r"""
         For details, please refer to :func:`mindspore.ops.rad2deg`.
         """
-        self._init_check()
         return tensor_operator_registry.get('rad2deg')(self)
 
     def copysign(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.copysign`.
         """
-        self._init_check()
         return tensor_operator_registry.get('copysign')(self, other)
 
     def nelement(self):
         r"""
         Alias for :func:`mindspore.Tensor.numel`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nelement')(self)
 
     def numel(self):
         r"""
         For details, please refer to :func:`mindspore.ops.numel`.
         """
-        self._init_check()
         return tensor_operator_registry.get('numel')(self)
 
     def permute(self, *axis):
         """
         For details, please refer to :func:`mindspore.ops.permute`.
         """
-        self._init_check()
         perm = validator.check_transpose_axis(axis, self.ndim)
         return tensor_operator_registry.get('permute')(self, perm)
 
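A pattern worth noting runs through this hunk and the ones that follow: besides dropping the `self._init_check()` guard, call sites change from `get('round')()(self)` to `get('round')(self)`, i.e. the registry entry is now invoked directly instead of being instantiated first. A hypothetical registry (not MindSpore's actual implementation) illustrating the two conventions:

```python
# Hypothetical registry sketch; names are illustrative only.
class RoundOp:
    """Stand-in for an operator class that had to be instantiated first."""
    def __call__(self, x):
        return round(x)

old_registry = {'round': RoundOp}             # 2.2 style: stores a factory
new_registry = {'round': lambda x: round(x)}  # 2.3 style: stores a function

assert old_registry['round']()(2.6) == 3      # two-step call: get(...)()(x)
assert new_registry['round'](2.6) == 3        # direct call:  get(...)(x)
```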
@@ -1985,98 +2083,84 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.positive`.
         """
-        self._init_check()
         return tensor_operator_registry.get("positive")(self)
 
     def remainder(self, divisor):
         r"""
         For details, please refer to :func:`mindspore.ops.remainder`.
         """
-        self._init_check()
         return tensor_operator_registry.get('remainder')(self, divisor)
 
     def flatten(self, order='C', *, start_dim=0, end_dim=-1):
         r"""
         For details, please refer to :func:`mindspore.ops.flatten`.
         """
-        self._init_check()
         return tensor_operator_registry.get('flatten')(self, order, start_dim=start_dim, end_dim=end_dim)
 
     def float_power(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.float_power`.
         """
-        self._init_check()
         return tensor_operator_registry.get('float_power')(self, other)
 
     def fmax(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.fmax`.
         """
-        self._init_check()
         return tensor_operator_registry.get('fmax')(self, other)
 
     def fmin(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.fmin`.
         """
-        self._init_check()
         return tensor_operator_registry.get('fmin')(self, other)
 
     def fmod(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.fmod`.
         """
-        self._init_check()
         return tensor_operator_registry.get('fmod')(self, other)
 
     def narrow(self, axis, start, length):
         """
         For details, please refer to :func:`mindspore.ops.narrow`.
         """
-        self._init_check()
         return tensor_operator_registry.get('narrow')(self, axis, start, length)
 
     def swapaxes(self, axis0, axis1):
         """
         For details, please refer to :func:`mindspore.ops.swapaxes`.
         """
-        self._init_check()
         return tensor_operator_registry.get('swapaxes')(self, axis0, axis1)
 
     def swapdims(self, dim0, dim1):
         """
         For details, please refer to :func:`mindspore.ops.swapdims`.
         """
-        self._init_check()
         return tensor_operator_registry.get('swapdims')(self, dim0, dim1)
 
     def squeeze(self, axis=None):
         """
         For details, please refer to :func:`mindspore.ops.squeeze`.
         """
-        self._init_check()
         return tensor_operator_registry.get('squeeze')(self, axis)
 
     def slogdet(self):
         """
         For details, please refer to :func:`mindspore.ops.slogdet`.
         """
-        self._init_check()
         return tensor_operator_registry.get('slogdet')(self)
 
     def tril(self, diagonal=0):
         """
         For details, please refer to :func:`mindspore.ops.tril`.
         """
-        self._init_check()
         return tensor_operator_registry.get('tril')(self, diagonal)
 
     def unsqueeze(self, dim):
         """
         For details, please refer to :func:`mindspore.ops.unsqueeze`.
         """
-        self._init_check()
         validator.check_is_int(dim, 'dim')
         validator.check_int_range(dim, -self.ndim - 1, self.ndim + 1, validator.INC_LEFT, 'dim')
         return tensor_operator_registry.get('unsqueeze')(self, dim)
@@ -2085,7 +2169,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.expand_dims`.
         """
-        self._init_check()
         validator.check_is_int(axis, 'axis')
         validator.check_int_range(axis, -self.ndim - 1, self.ndim + 1, validator.INC_LEFT, 'axis')
         return tensor_operator_registry.get('expand_dims')(self, axis)
@@ -2118,7 +2201,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            >>> print(x.dtype)
            Int32
        """
-        self._init_check()
        dtype = _check_astype_and_convert(dtype)
        if not copy and dtype == self.dtype:
            return self
@@ -2128,7 +2210,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.argmax`.
         """
-        self._init_check()
         out = tensor_operator_registry.get('argmax')(self, axis, keepdims)
         return out
 
@@ -2136,7 +2217,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.argmin`.
         """
-        self._init_check()
         out = tensor_operator_registry.get('argmin')(self, axis, keepdims)
         return out
 
@@ -2187,7 +2267,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         if self.shape == ():
             return (self, Tensor(0))
-        self._init_check()
         return tensor_operator_registry.get('argmax_with_value')(self, axis, keep_dims)
 
     def argmin_with_value(self, axis=0, keep_dims=False):
@@ -2235,7 +2314,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         if self.shape == ():
             return (self, Tensor(0))
-        self._init_check()
         return tensor_operator_registry.get('argmin_with_value')(self, axis, keep_dims)
 
     def cumsum(self, axis=None, dtype=None):
@@ -2277,15 +2355,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.index_select`.
         """
-        self._init_check()
         return tensor_operator_registry.get('index_select')(self, axis, index)
 
     def inplace_update(self, v, indices):
         """
         For details, please refer to :func:`mindspore.ops.inplace_update`.
         """
-
-        return tensor_operator_registry.get('inplace_update')()(self, indices, v)
+        return tensor_operator_registry.get('inplace_update')(self, v, indices)
 
     def copy(self):
         """
@@ -2359,15 +2435,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Raises:
             TypeError: If arguments have types not specified above.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
         See also:
-            :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
-
-            :func:`mindspore.Tensor.
+            - :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
+            - :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
+            - :func:`mindspore.Tensor.min`: Return the minimum of a tensor or minimum along an axis.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -2382,7 +2456,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(indices)
             [1 1]
         """
-        self._init_check()
         if isinstance(axis, (list, tuple)):
             reduce_ = tensor_operator_registry.get("reduce")
             reduce_max = tensor_operator_registry.get("reduce_max")
@@ -2430,15 +2503,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Raises:
             TypeError: If arguments have types not specified above.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
         See also:
-            :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
-
-            :func:`mindspore.Tensor.
+            - :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
+            - :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
+            - :func:`mindspore.Tensor.max`: Return the maximum of a tensor or maximum along an axis.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -2462,12 +2533,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(indices)
             [0 0]
         """
-        self._init_check()
         if isinstance(axis, (list, tuple)):
             reduce_ = tensor_operator_registry.get("reduce")
             reduce_min = tensor_operator_registry.get("reduce_min")
             minimum = tensor_operator_registry.get("minimum")
-            return reduce_(self, reduce_min(keepdims), cmp_fn=minimum
+            return reduce_(self, reduce_min(keepdims), cmp_fn=minimum, axis=axis, keepdims=keepdims,
                            initial=initial, where=where)
         values, indices = tensor_operator_registry.get("min")(self, axis, keepdims, initial=initial, where=where)
         if not return_indices:
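The completed `reduce_` call above restores the tuple/list-axis path of `Tensor.min`, forwarding `axis` and `keepdims` explicitly. A usage sketch of that path (values only, no indices):

```python
import mindspore
import numpy as np
from mindspore import Tensor

x = Tensor(np.arange(12).reshape(3, 4).astype(np.float32))

# A tuple or list axis takes the reduce_ path patched above.
print(x.min(axis=(0, 1)))  # 0.0
print(x.max(axis=[0, 1]))  # 11.0
```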
@@ -2478,7 +2548,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.scatter_add`.
         """
-        self._init_check()
         return tensor_operator_registry.get("tensor_scatter_add")(self, indices, updates)
 
     def scatter_sub(self, indices, updates):
@@ -2491,7 +2560,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
         The last axis of `indices` is the depth of each index vectors. For each index vector,
         there must be a corresponding value in `updates`. The shape of `updates` should be
-        equal to the shape of `self[indices]`. For more details, see
+        equal to the shape of `self[indices]`. For more details, see Examples.
 
         Note:
             On GPU, if some values of the `indices` are out of bound, instead of raising an index error,
@@ -2526,28 +2595,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             [[-3.3000002  0.3        3.6      ]
              [ 0.4        0.5       -3.2      ]]
         """
-        self._init_check()
         return tensor_operator_registry.get('tensor_scatter_sub')(self, indices, updates)
 
     def scatter_min(self, indices, updates):
         """
         For details, please refer to :func:`mindspore.ops.scatter_min`.
         """
-
-        return tensor_operator_registry.get('tensor_scatter_min')()(self, indices, updates)
+        return tensor_operator_registry.get('tensor_scatter_min')(self, indices, updates)
 
     def scatter_max(self, indices, updates):
         """
         For details, please refer to :func:`mindspore.ops.scatter_max`.
         """
-
-
+        return tensor_operator_registry.get('tensor_scatter_max')(self, indices, updates)
+
+    def softmax(self, axis, dtype=None):
+        """
+        For details, please refer to :func:`mindspore.ops.softmax`.
+        """
+        return tensor_operator_registry.get('softmax')(self, axis, dtype=dtype)
 
     def fill(self, value):
         """
         `Tensor.fill` is deprecated, please use `ops.fill` instead.
         """
-        self._init_check()
         if value is None:
             if self.dtype not in (mstype.float16, mstype.float32, mstype.float64):
                 raise TypeError("For 'Tensor.fill', if the argument 'value' is None, the type of the original "
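Besides the scatter fixes, this hunk adds a `Tensor.softmax(axis, dtype=None)` method that delegates to :func:`mindspore.ops.softmax`. A usage sketch:

```python
import mindspore
import numpy as np
from mindspore import Tensor

logits = Tensor(np.array([[1.0, 2.0, 3.0]]), mindspore.float32)

# Method form of ops.softmax: entries along `axis` sum to ~1.
probs = logits.softmax(axis=-1)
print(probs.sum(axis=-1))  # [1.]
```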
@@ -2560,7 +2631,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         `Tensor.fills` is deprecated, please use `ops.fill` instead.
         """
-        self._init_check()
         return tensor_operator_registry.get('fills')(self, value)
 
     def fill_diagonal(self, fill_value, wrap=False):
@@ -2602,14 +2672,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             [5. 1. 1.]
             [1. 5. 1.]]
         """
-        self._init_check()
         return tensor_operator_registry.get('fill_diagonal')(fill_value, wrap)(self)
 
     def masked_fill(self, mask, value):
         """
         For details, please refer to :func:`mindspore.ops.masked_fill`.
         """
-        self._init_check()
         if isinstance(value, (float, int)):
             value = tensor_operator_registry.get("scalar_to_tensor")(value, self.dtype)
         if not isinstance(mask, Tensor):
@@ -2665,13 +2733,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.minimum`.
         """
-        return tensor_operator_registry.get('minimum')(
+        return tensor_operator_registry.get('minimum')(self, other)
 
     def clamp(self, min=None, max=None):
         r"""
         For details, please refer to :func:`mindspore.ops.clamp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('clamp')(self, min, max)
 
     def clip(self, min=None, max=None):
@@ -2680,10 +2747,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return self.clamp(min, max)
 
-    def _init_check(self):
-        if self.has_init:
-            self.init_data()
-
     def init_data(self, slice_index=None, shape=None, opt_shard_group=None):
         """
         Get the tensor format data of this Tensor.
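The deletion of `_init_check` above is what drives the many one-line removals in the surrounding hunks: every method previously materialized lazily initialized tensors before dispatching. For reference, the removed guard (reconstructed from the deleted lines, nothing more) was:

```python
# The pattern 2.3 removes: every Tensor method called this first.
def _init_check(self):
    if self.has_init:      # tensor still holds an initializer, not data
        self.init_data()   # materialize the data in place
```

How 2.3 guarantees materialization instead is not visible in this diff.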
@@ -2700,7 +2763,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             opt_shard_group(str): Optimizer shard group which is used in auto or semi auto parallel mode
                 to get one shard of a parameter's slice. For more information about optimizer parallel, please refer to:
                 `Optimizer Parallel
-                <https://www.mindspore.cn/tutorials/experts/en/
+                <https://www.mindspore.cn/tutorials/experts/en/master/parallel/optimizer_parallel.html>`_.
                 Default: ``None``.
 
         Returns:
@@ -2778,12 +2841,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             if slice_num_of_persistent_data > 1:
                 self.assign_value(Tensor_.persistent_data_from_numpy(data, slice_num_of_persistent_data))
             else:
-
-                # The dtype of data is np.float32 when mstype is bfloat16,
-                # so we create tensor_ by init func instead of asnumpy
-                self.assign_value(Tensor_(data, self.dtype))
-            else:
-                self.assign_value(Tensor_.from_numpy(data))
+                self.assign_value(Tensor_.from_numpy(data))
         return self
 
     def resize(self, *new_shape):
@@ -2805,13 +2863,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Returns:
             Tensor.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
         See also:
-            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.repeat`: Repeat elements of a tensor.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -2838,7 +2895,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         diff_size = new_size - cur_size
         if diff_size > 0:
             pad_val = tensor_operator_registry.get('fill')(self.dtype, (diff_size,), 0)
-            res = tensor_operator_registry.get('concatenate')(
+            res = tensor_operator_registry.get('concatenate')((flattened, pad_val), 0)
         else:
             res = flattened[:new_size]
         return res.reshape(new_shape)
@@ -2847,70 +2904,60 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.det`.
         """
-        self._init_check()
         return tensor_operator_registry.get('det')(self)
 
     def diff(self, n=1, axis=-1, prepend=None, append=None):
         r"""
         For details, please refer to :func:`mindspore.ops.diff`.
         """
-        self._init_check()
         return tensor_operator_registry.get('diff')(self, n, axis, prepend, append)
 
     def frac(self):
         r"""
         For details, please refer to :func:`mindspore.ops.frac`.
         """
-        self._init_check()
         return tensor_operator_registry.get('frac')(self)
 
     def argwhere(self):
         r"""
         For details, please refer to :func:`mindspore.ops.argwhere`.
         """
-        self._init_check()
         return tensor_operator_registry.get('argwhere')(self)
 
     def moveaxis(self, source, destination):
         r"""
         For details, please refer to :func:`mindspore.ops.moveaxis`.
         """
-        self._init_check()
         return tensor_operator_registry.get('moveaxis')(self, source, destination)
 
     def movedim(self, source, destination):
         r"""
         For details, please refer to :func:`mindspore.ops.movedim`.
         """
-        self._init_check()
         return tensor_operator_registry.get('movedim')(self, source, destination)
 
     def digamma(self):
         r"""
         For details, please refer to :func:`mindspore.ops.digamma`.
         """
-        self._init_check()
         return tensor_operator_registry.get('digamma')(self)
 
     def lgamma(self):
         r"""
         For details, please refer to :func:`mindspore.ops.lgamma`.
         """
-        self._init_check()
         return tensor_operator_registry.get('lgamma')(self)
 
     def diagonal(self, offset=0, axis1=0, axis2=1):
         """
         For details, please refer to :func:`mindspore.ops.diagonal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('diagonal')(self, offset, axis1, axis2)
 
     def diagonal_scatter(self, src, offset=0, dim1=0, dim2=1):
         r"""
         For details, please refer to :func:`mindspore.ops.diagonal_scatter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('diagonal_scatter')(self, src, offset, dim1, dim2)
 
     def trace(self, offset=0, axis1=0, axis2=1, dtype=None):
@@ -2935,12 +2982,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Raises:
             ValueError: If the input tensor has less than two dimensions.
 
+        See also:
+            - :func:`mindspore.Tensor.diagonal`: Return specified diagonals.
+
         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
 
-        See also:
-            :func:`mindspore.Tensor.diagonal`: Return specified diagonals.
-
         Examples:
             >>> import numpy as np
             >>> from mindspore import Tensor
@@ -2949,7 +2996,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             3.0
         """
         if offset == 0 and axis1 == 0 and axis2 == 1 and dtype is None:
-            self._init_check()
             return tensor_operator_registry.get('trace')(self)
         d = self.diagonal(offset, axis1=axis1, axis2=axis2)
         shape = d.shape
@@ -3022,7 +3068,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         shape_indices = tuple(size_indices if i == axis else 1 for i in range(ndim))
         indices = indices.reshape(shape_indices)
         shape_indices = shape_ni + (indices.size,) + shape_nk
-        indices = tensor_operator_registry.get('broadcast_to')(shape_indices)
+        indices = tensor_operator_registry.get('broadcast_to')(indices, shape_indices)
 
         res = tensor_operator_registry.get('gather_d')(a, axis, indices)
         return res.reshape(shape_out)
@@ -3067,7 +3113,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         if isinstance(choices, Tensor):
             shape_choice = validator.infer_out_shape(self.shape, choices.shape[1:])
-            choices = tensor_operator_registry.get('broadcast_to')((choices.shape[0],) + shape_choice)
+            choices = tensor_operator_registry.get('broadcast_to')(choices, (choices.shape[0],) + shape_choice)
         else:
             # broadcasts choices to the same shape if choices is a sequence
             choicelist = []
@@ -3080,14 +3126,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             shape_choice = validator.infer_out_shape(self.shape, *shapes)
             tmp = []
             for choice in choicelist:
-                tmp.append(tensor_operator_registry.get('broadcast_to')(shape_choice)
+                tmp.append(tensor_operator_registry.get('broadcast_to')(choice, shape_choice))
             choices = tensor_operator_registry.get('stack')(tmp, 0)
 
         if self.ndim == 0 or choices.ndim == 0:
             raise ValueError(f"For 'Tensor.choose', the original tensor and the argument 'choices' cannot be scalars."
                              f" Their dimensions should all be > 0, but got the original tensor's dimension "
                              f"{self.ndim}, 'choices' dimension {choices.ndim}.")
-        a = tensor_operator_registry.get('broadcast_to')(shape_choice)
+        a = tensor_operator_registry.get('broadcast_to')(self, shape_choice)
         dtype = choices.dtype
         # adjusts dtype for F.tensor_mul and F.gather_nd
         a = a.astype(mstype.int32)
@@ -3099,10 +3145,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         for i in range(ndim):
             dim_grid = Tensor(list(range(a.shape[i])), mstype.int32)
             dim_shape = validator.expanded_shape(ndim, a.shape[i], i)
-            dim_grid = tensor_operator_registry.get('broadcast_to')(
+            dim_grid = tensor_operator_registry.get('broadcast_to')(dim_grid.reshape(dim_shape), a.shape)
             grids.append(dim_grid)
         grid = tensor_operator_registry.get('stack')(grids, -1)
-        indices = tensor_operator_registry.get('concatenate')(
+        indices = tensor_operator_registry.get('concatenate')((a.reshape(a.shape + (1,)), grid), -1)
         return tensor_operator_registry.get('gather_nd')(choices, indices).astype(dtype)
 
     def searchsorted(self, v, side='left', sorter=None):
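The `broadcast_to` fixes above supply the input tensor that 2.2 omitted; the operator takes the tensor first and the target shape second. A sketch with the public `ops.broadcast_to` (assumed equivalent to the registry entry):

```python
import mindspore
import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.array([1, 2, 3]), mindspore.int32)

# Tensor first, target shape second, as in the corrected call sites.
y = ops.broadcast_to(x, (2, 3))
print(y.shape)  # (2, 3)
```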
@@ -3115,9 +3161,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
                 location found is given. If 'right', return the last such index. If there is
                 no suitable index, return either 0 or N (where N is the length of the tensor).
                 Default: ``left`` .
-            sorter (Union[int,
-                integer indices that sort the tensor into ascending order
-                the result of argsort. Default: ``None`` .
+            sorter (Union[int, list, tuple, Tensor]): optional tensor of
+                integer indices that sort the tensor into ascending order on the innermost dimension
+                and the type must be int64. They are typically the result of argsort. Default: ``None`` .
 
         Returns:
             Tensor, array of insertion points with the same shape as `v`.
@@ -3138,37 +3184,26 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         if side not in ('left', 'right'):
             raise ValueError(f"For 'Tensor.searchsorted', the argument 'side' should be one of in "
                              f"['left', 'right'], but got {side}.")
-        a = self.astype(mstype.float32)
         if not isinstance(v, Tensor):
             v = tensor_operator_registry.get('make_tensor')(v)
-        shape = v.shape
         if sorter is not None:
-            if not isinstance(sorter, (int,
+            if not isinstance(sorter, (int, list, tuple, Tensor)):
                 raise TypeError("For Tensor.searchsorted, the type of the argument 'sorter' must be one of 'int', "
-                                "'
+                                "'list', 'tuple', 'Tensor', but got {}.".format(type(sorter)))
             if not isinstance(sorter, Tensor):
                 sorter = tensor_operator_registry.get('make_tensor')(sorter)
-            if sorter.
-                raise ValueError('sorter must be
-
-
-
-
-
-
-            sort_range = tuple(range(math.ceil(math.log2(tensor_operator_registry.get('shape_mul')(a.shape) + 1))))
-            for _ in sort_range:
-                mid = (i - -j) // 2
-                mask = less_op(v, tensor_operator_registry.get('gather_nd')(a, mid.reshape(mid.shape + (1,))))
-                i = tensor_operator_registry.get('select')(mask, i, mid)
-                j = tensor_operator_registry.get('select')(mask, mid, j)
-        return j
+            if sorter.size != self.size:
+                raise ValueError('The size of sorter must be the same as the Tensor')
+
+        dtype = mstype.int32
+        right = (side == 'right')
+        search_sorted_ = tensor_operator_registry.get('searchsorted')(dtype, right)
+        return search_sorted_(self, v, sorter)
 
     def gather_nd(self, indices):
         r"""
         For details, please refer to :func:`mindspore.ops.gather_nd`.
         """
-        self._init_check()
         validator.check_value_type('indices', indices, (Tensor, Tensor_,), 'Tensor.gather_nd')
         return tensor_operator_registry.get('gather_nd')(self, indices)
 
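The rewrite replaces the removed hand-rolled binary search with the `searchsorted` operator fetched from the registry, widens the accepted `sorter` types to `int`, `list`, `tuple`, and `Tensor`, and validates that `sorter` matches the tensor's size. A usage sketch:

```python
import mindspore
import numpy as np
from mindspore import Tensor

a = Tensor(np.array([1, 3, 5, 7]), mindspore.float32)
v = Tensor(np.array([2, 5, 9]), mindspore.float32)

# 'left' yields the first valid insertion point, 'right' the last.
print(a.searchsorted(v))                # [1 2 4]
print(a.searchsorted(v, side='right'))  # [1 3 4]
```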
@@ -3176,11 +3211,39 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.gather`.
         """
-        self._init_check()
         validator.check_is_int(axis, 'axis')
         validator.check_is_int(batch_dims, "batch_dims")
         return tensor_operator_registry.get('gather')(self, input_indices, axis, batch_dims)
 
+    def uniform(self, from_=0., to=1., generator=None):
+        r"""
+        Generates random numbers in the half-open interval [from_, to).
+
+        Args:
+            from_ (number): The lower bound of the interval.
+            to (number): The upper bound of the interval.
+            generator (Generator, optional): The random seed. Default: None.
+
+        Returns:
+            Tensor, with the same shape as tensor.
+
+        Raises:
+            TypeError: If `from_` is larger than `to`.
+
+        Supported Platforms:
+            ``Ascend``
+
+        Examples:
+            >>> import mindspore
+            >>> x = mindspore.ops.ones((4, 2))
+            >>> generator = mindspore.Generator()
+            >>> generator.manual_seed(100)
+            >>> output = x.uniform(1., 2., generator)
+            >>> print(output.shape)
+            (4, 2)
+        """
+        return tensor_operator_registry.get('uniform')(self, from_, to, generator)
+
     def var(self, axis=None, ddof=0, keepdims=False):
         """
         Compute the variance along the specified axis.
@@ -3204,13 +3267,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Returns:
             Variance tensor.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
         See also:
-            :func:`mindspore.Tensor.mean`: Reduce a dimension of a tensor by averaging all elements in the dimension.
+            - :func:`mindspore.Tensor.mean`: Reduce a dimension of a tensor by averaging all elements in the dimension.
+            - :func:`mindspore.Tensor.std`: Compute the standard deviation along the specified axis.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -3257,40 +3319,40 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Return sum of tensor elements over a given axis.
 
         Note:
-            Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and
-            `
+            Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are not supported.
+            The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
 
         Args:
-            axis (Union[None, int, tuple(int), list(int)]): Axis or axes along which a sum is performed.
+            axis (Union[None, int, tuple(int), list(int), Tensor]): Axis or axes along which a sum is performed.
                 Default: ``None`` .
-                If None, sum all the elements of the input tensor.
-                If the axis is negative, it counts from the last to the first axis
-                If the axis is a tuple or list of ints, a sum is performed on all the axes specified in the tuple
-                or list instead of a single axis or all the axes as before.
+                If ``None`` , sum all the elements of the input tensor.
+                If the `axis` is negative, it counts from the last to the first `axis`.
+                If the `axis` is a tuple or list of ints, a sum is performed on all the axes specified in the tuple
+                or list instead of a single `axis` or all the axes as before.
             dtype (:class:`mindspore.dtype`, optional): defaults to ``None`` . Overrides the dtype of the
                 output Tensor.
             keepdims (bool): If this is set to ``True`` , the axes which are reduced are left in the result as
                 dimensions with size one. With this option, the result will broadcast correctly against the input
-                array. If the default value is passed, then keepdims will not be passed through to the sum method
+                array. If the default value is passed, then `keepdims` will not be passed through to the sum method
                 of sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
-                implement keepdims any exceptions will be raised. Default: ``False`` .
+                implement `keepdims` any exceptions will be raised. Default: ``False`` .
             initial (scalar): Starting value for the sum. Default: ``None`` .
 
         Returns:
-            Tensor. A tensor with the same shape as input, with the specified axis removed.
-            If the input tensor is a 0-d array, or if the axis is ``None`` , a scalar is returned.
+            Tensor. A tensor with the same shape as input, with the specified `axis` removed.
+            If the input tensor is a 0-d array, or if the `axis` is ``None`` , a scalar is returned.
 
         Raises:
-            TypeError: If input is not array_like, or `axis` is not int, tuple of ints
+            TypeError: If input is not array_like, or `axis` is not int, tuple of ints, list of ints or Tensor,
                 or `keepdims` is not integer, or `initial` is not scalar.
-            ValueError: If any axis is out of range or duplicate axes exist.
+            ValueError: If any `axis` is out of range or duplicate axes exist.
+
+        See also:
+            - :func:`mindspore.Tensor.cumsum`: Return the cumulative sum of the elements along a given `axis`.
 
         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
 
-        See also:
-            :func:`mindspore.Tensor.cumsum`: Return the cumulative sum of the elements along a given axis.
-
         Examples:
             >>> import numpy as np
             >>> from mindspore import Tensor
@@ -3301,13 +3363,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(input_x.sum(axis=1))
             [10. 35.]
         """
-        if initial is
-
-
-
-
-
-            res = res.astype(dtype)
+        if initial is None:
+            res = tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype)
+        else:
+            res = tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype) + initial
+        if dtype is not None and (dtype == mstype.bool_):
+            res = res.astype(mstype.bool_)
         return res
 
     def sum_to_size(self, *size):
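The reconstructed `sum` body makes the `initial` handling explicit: the reduction runs first, `initial` is added on top, and a trailing cast restores `mstype.bool_` when that dtype was requested. A sketch of the resulting semantics:

```python
import mindspore
import numpy as np
from mindspore import Tensor

x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)

# `initial` is added after the reduction.
print(x.sum())                   # 10.0
print(x.sum(initial=5))          # 15.0
print(x.sum(axis=1, initial=1))  # [4. 8.]
```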
@@ -3335,7 +3396,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(output.shape)
             (1, 3, 1, 3)
         """
-        self._init_check()
         x = self
         if len(size) == 1 and isinstance(size[0], tuple):
             size = size[0]
@@ -3359,21 +3419,18 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.nansum`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nansum')(self, axis=axis, keepdims=keepdims, dtype=dtype)
 
     def nanmean(self, axis=None, keepdims=False, *, dtype=None):
         r"""
         For details, please refer to :func:`mindspore.ops.nanmean`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nanmean')(self, axis, keepdims, dtype=dtype)
 
     def nanmedian(self, axis=-1, keepdims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.nanmedian`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nanmedian')(self, axis, keepdims)
 
     def repeat(self, repeats, axis=None):
@@ -3393,13 +3450,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             ValueError: If the axis is out of range.
             TypeError: If arguments have types not specified above.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
         See also:
-            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.resize`: Changes shape and size of tensor in-place.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -3448,27 +3504,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         for sub, rep in zip(subs, repeats):
             if rep != 0:
                 repeated_subs.append(tensor_operator_registry.get('repeat_elements')(sub, rep, axis))
-        return tensor_operator_registry.get('concatenate')(axis)
+        return tensor_operator_registry.get('concatenate')(repeated_subs, axis)
 
     def repeat_interleave(self, repeats, dim=None):
         """
         For details, please refer to :func:`mindspore.ops.repeat_interleave`.
         """
-        self._init_check()
         return tensor_operator_registry.get('repeat_interleave')(self, repeats, dim)
 
     def bernoulli(self, p=0.5, seed=None):
         r"""
         For details, please refer to :func:`mindspore.ops.bernoulli`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bernoulli')(self, p, seed)
 
     def random_categorical(self, num_sample, seed=0, dtype=mstype.int64):
         r"""
         For details, please refer to :func:`mindspore.ops.random_categorical`.
         """
-        self._init_check()
         validator.check_is_int(num_sample, 'num_sample')
         validator.check_is_int(seed, 'seed')
         return tensor_operator_registry.get('random_categorical')(self, num_sample, seed, dtype)
@@ -3477,23 +3530,20 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.masked_select`.
         """
-        self._init_check()
         return tensor_operator_registry.get('masked_select')(self, mask)
 
     def gather_elements(self, dim, index):
         """
         For details, please refer to :func:`mindspore.ops.gather_elements`.
        """
-        self._init_check()
         validator.check_value_type('index', index, (Tensor, Tensor_,), 'Tensor.gather_elements')
         return tensor_operator_registry.get('gather_elements')(self, dim, index)
 
-    def nonzero(self):
+    def nonzero(self, as_tuple=False):
         """
         For details, please refer to :func:`mindspore.ops.nonzero`.
         """
-
-        return tensor_operator_registry.get('nonzero')(self)
+        return tensor_operator_registry.get('nonzero')(self, as_tuple)
 
     def svd(self, full_matrices=False, compute_uv=True):
         """
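`nonzero` gains an `as_tuple` flag that is forwarded to :func:`mindspore.ops.nonzero`: the default `False` keeps the old single index matrix, while `True` (per the updated ops function) returns one 1-D index tensor per dimension. A sketch:

```python
import mindspore
import numpy as np
from mindspore import Tensor

x = Tensor(np.array([[1, 0], [0, 3]]), mindspore.int32)

# Default: one (N, ndim) matrix of nonzero coordinates.
print(x.nonzero())  # [[0 0]
                    #  [1 1]]

# as_tuple=True: one index tensor per dimension.
rows, cols = x.nonzero(as_tuple=True)
print(rows, cols)   # [0 1] [0 1]
```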
@@ -3510,42 +3560,36 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.hardshrink`.
         """
-
-        return tensor_operator_registry.get('hardshrink')(lambd)(self)
+        return tensor_operator_registry.get('hardshrink')(self, lambd)
 
     def heaviside(self, values):
         r"""
         For details, please refer to :func:`mindspore.ops.heaviside`.
         """
-        self._init_check()
         return tensor_operator_registry.get('heaviside')(self, values)
 
     def hypot(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.hypot`.
         """
-        self._init_check()
         return tensor_operator_registry.get('hypot')(self, other)
 
     def soft_shrink(self, lambd=0.5):
         r"""
         For details, please refer to :func:`mindspore.ops.soft_shrink`.
         """
-        self._init_check()
         return tensor_operator_registry.get('soft_shrink')(self, lambd)
 
     def matrix_determinant(self):
         r"""
         For details, please refer to :func:`mindspore.ops.matrix_determinant`.
         """
-        self._init_check()
         return tensor_operator_registry.get('matrix_determinant')(self)
 
     def log_matrix_determinant(self):
         r"""
         For details, please refer to :func:`mindspore.ops.log_matrix_determinant`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log_matrix_determinant')(self)
 
     def to_coo(self):
@@ -3579,7 +3623,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             [1 0]] [ 1. -5.] (2, 2)
 
         """
-        self._init_check()
         return tensor_operator_registry.get('dense_to_sparse_coo')(self)
 
     def to_csr(self):
@@ -3612,7 +3655,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(output.indptr, output.indices, output.values, output.shape)
             [0 1 2] [0 0] [ 1. -5.] (2, 2)
         """
-        self._init_check()
         return tensor_operator_registry.get('dense_to_sparse_csr')(self)
 
     def tolist(self):
@@ -3635,42 +3677,36 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(out2)
             1
         """
-        self._init_check()
         return self.asnumpy().tolist()
 
     def unbind(self, dim=0):
         r"""
         For details, please refer to :func:`mindspore.ops.unbind`.
         """
-
-        return tensor_operator_registry.get('unbind')(dim)(self)
+        return tensor_operator_registry.get('unbind')(self, dim)
 
     def unsorted_segment_min(self, segment_ids, num_segments):
         r"""
         For details, please refer to :func:`mindspore.ops.unsorted_segment_min`.
         """
-        self._init_check()
         return tensor_operator_registry.get('unsorted_segment_min')(self, segment_ids, num_segments)
 
     def unsorted_segment_max(self, segment_ids, num_segments):
         r"""
         For details, please refer to :func:`mindspore.ops.unsorted_segment_max`.
         """
-        self._init_check()
         return tensor_operator_registry.get('unsorted_segment_max')(self, segment_ids, num_segments)
 
     def unsorted_segment_prod(self, segment_ids, num_segments):
         r"""
         For details, please refer to :func:`mindspore.ops.unsorted_segment_prod`.
         """
-        self._init_check()
         return tensor_operator_registry.get('unsorted_segment_prod')(self, segment_ids, num_segments)
 
     def unique_consecutive(self, return_idx=False, return_counts=False, axis=None):
         """
         For details, please refer to :func:`mindspore.ops.unique_consecutive`.
         """
-        self._init_check()
         output, idx, counts = tensor_operator_registry.get("unique_consecutive")(return_idx, return_counts, axis)(self)
         if return_idx and return_counts:
             return output, idx, counts
@@ -3684,29 +3720,25 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.unique_with_pad`.
         """
-
-        return tensor_operator_registry.get("unique_with_pad")()(self, pad_num)
+        return tensor_operator_registry.get("unique_with_pad")(self, pad_num)
 
     def diag(self):
         r"""
         For details, please refer to :func:`mindspore.ops.diag`.
         """
-
-        return tensor_operator_registry.get('diag')()(self)
+        return tensor_operator_registry.get('diag')(self)
 
     def diagflat(self, offset=0):
         r"""
         For details, please refer to :func:`mindspore.ops.diagflat`.
         """
-        self._init_check()
         return tensor_operator_registry.get('diagflat')(self, offset)
 
     def xdivy(self, y):
         r"""
         For details, please refer to :func:`mindspore.ops.xdivy`.
         """
-
-        return tensor_operator_registry.get("xdivy")()(self, y)
+        return tensor_operator_registry.get("xdivy")(self, y)
 
     def split(self, split_size_or_sections, axis=0):
         """
@@ -3718,7 +3750,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.tensor_split`.
         """
-        self._init_check()
         return tensor_operator_registry.get('tensor_split')(self, indices_or_sections, axis)
 
     def vsplit(self, indices_or_sections):
@@ -3726,28 +3757,25 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         For details, please refer to :func:`mindspore.ops.vsplit`.
         """
 
-        self._init_check()
         return tensor_operator_registry.get('vsplit')(self, indices_or_sections)
 
     def hsplit(self, indices_or_sections):
         """
         For details, please refer to :func:`mindspore.ops.hsplit`.
         """
-        self._init_check()
         return tensor_operator_registry.get('hsplit')(self, indices_or_sections)
 
     def dsplit(self, indices_or_sections):
         """
         For details, please refer to :func:`mindspore.ops.dsplit`.
         """
-        self._init_check()
         return tensor_operator_registry.get('dsplit')(self, indices_or_sections)
 
     def xlogy(self, y):
         r"""
         For details, please refer to :func:`mindspore.ops.xlogy`.
         """
-        return tensor_operator_registry.get("xlogy")(
+        return tensor_operator_registry.get("xlogy")(self, y)
 
     def eigvals(self):
         r"""
@@ -3762,13 +3790,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.erf`.
         """
-        return tensor_operator_registry.get("erf")(
+        return tensor_operator_registry.get("erf")(self)
 
     def erfc(self):
         r"""
         For details, please refer to :func:`mindspore.ops.erfc`.
         """
-        return tensor_operator_registry.get("erfc")(
+        return tensor_operator_registry.get("erfc")(self)
 
     def tile(self, reps):
         r"""
@@ -3780,29 +3808,26 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.topk`.
         """
-        self._init_check()
         return tensor_operator_registry.get("topk")(self, k, dim, largest, sorted)
 
     def top_k(self, k, sorted=True):
         r"""
         `Tensor.top_k` is deprecated, please use `Tensor.topk` instead.
         """
-        self._init_check()
         validator.check_is_int(k, 'k')
         validator.check_bool(sorted, 'sorted')
-        return tensor_operator_registry.get("top_k")(
+        return tensor_operator_registry.get("top_k")(self, k, sorted)
 
     def sigmoid(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sigmoid`.
         """
-        return tensor_operator_registry.get("sigmoid")(
+        return tensor_operator_registry.get("sigmoid")(self)
 
     def median(self, axis=-1, keepdims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.median`.
         """
-        self._init_check()
         validator.check_axis_in_range(axis, self.ndim)
         return tensor_operator_registry.get('median')(False, axis, keepdims)(self)
 
@@ -3810,49 +3835,42 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.addmv`.
         """
-        self._init_check()
         return tensor_operator_registry.get('addmv')(self, mat, vec, beta=beta, alpha=alpha)
 
     def asinh(self):
         r"""
         For details, please refer to :func:`mindspore.ops.asinh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('asinh')(self)
 
     def arcsinh(self):
         r"""
         Alias for :func:`mindspore.Tensor.asinh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('arcsinh')(self)
 
     def atan(self):
         r"""
         For details, please refer to :func:`mindspore.ops.atan`.
         """
-        self._init_check()
         return tensor_operator_registry.get('atan')(self)
 
     def atanh(self):
         r"""
         For details, please refer to :func:`mindspore.ops.atanh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('atanh')(self)
 
     def arctanh(self):
         r"""
         Alias for :func:`mindspore.Tensor.atanh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('arctanh')(self)
 
     def bmm(self, mat2):
         r"""
         For details, please refer to :func:`mindspore.ops.bmm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bmm')(self, mat2)
 
     def to(self, dtype):
@@ -3882,8 +3900,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(output.dtype)
             Int32
         """
-
-        return tensor_operator_registry.get('to')()(self, dtype)
+        return tensor_operator_registry.get('to')(self, dtype)
 
     def type(self, dtype=None):
         r"""
@@ -3909,7 +3926,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             [[1 2]
             [3 4]]
         """
-        self._init_check()
         if dtype is None:
             return str(self.dtype)
         return self.astype(dtype)
@@ -3936,7 +3952,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
3936
3952
|
>>> print(x.dtype)
|
|
3937
3953
|
Int32
|
|
3938
3954
|
"""
|
|
3939
|
-
self._init_check()
|
|
3940
3955
|
return self.astype(other.dtype)
|
|
3941
3956
|
|
|
3942
3957
|
def bool(self):
|
|
@@ -3959,8 +3974,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
3959
3974
|
>>> print(output.dtype)
|
|
3960
3975
|
Bool
|
|
3961
3976
|
"""
|
|
3962
|
-
|
|
3963
|
-
return tensor_operator_registry.get('bool')()(self, mstype.bool_)
|
|
3977
|
+
return tensor_operator_registry.get('bool')(self, mstype.bool_)
|
|
3964
3978
|
|
|
3965
3979
|
def float(self):
|
|
3966
3980
|
r"""
|
|
@@ -3981,8 +3995,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
3981
3995
|
>>> print(output.dtype)
|
|
3982
3996
|
Float32
|
|
3983
3997
|
"""
|
|
3984
|
-
|
|
3985
|
-
return tensor_operator_registry.get('float')()(self, mstype.float32)
|
|
3998
|
+
return tensor_operator_registry.get('float')(self, mstype.float32)
|
|
3986
3999
|
|
|
3987
4000
|
def half(self):
|
|
3988
4001
|
r"""
|
|
@@ -4003,8 +4016,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
4003
4016
|
>>> print(output.dtype)
|
|
4004
4017
|
Float16
|
|
4005
4018
|
"""
|
|
4006
|
-
|
|
4007
|
-
return tensor_operator_registry.get('half')()(self, mstype.float16)
|
|
4019
|
+
return tensor_operator_registry.get('half')(self, mstype.float16)
|
|
4008
4020
|
|
|
4009
4021
|
def int(self):
|
|
4010
4022
|
r"""
|
|
@@ -4025,8 +4037,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
4025
4037
|
>>> print(output.dtype)
|
|
4026
4038
|
Int32
|
|
4027
4039
|
"""
|
|
4028
|
-
|
|
4029
|
-
return tensor_operator_registry.get('int')()(self, mstype.int32)
|
|
4040
|
+
return tensor_operator_registry.get('int')(self, mstype.int32)
|
|
4030
4041
|
|
|
4031
4042
|
def long(self):
|
|
4032
4043
|
r"""
|
|
@@ -4047,8 +4058,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
4047
4058
|
>>> print(output.dtype)
|
|
4048
4059
|
Int64
|
|
4049
4060
|
"""
|
|
4050
|
-
|
|
4051
|
-
return tensor_operator_registry.get('long')()(self, mstype.int64)
|
|
4061
|
+
return tensor_operator_registry.get('long')(self, mstype.int64)
|
|
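Note: the repeated `get('x')()(...)` → `get('x')(...)` rewrites in the hunks above suggest these registry entries changed from operator classes (instantiated first, then called) to plain callables. A toy illustration of the two calling conventions; the names below are illustrative, not MindSpore internals:

    registry = {}

    class CastOp:                          # 2.2.x style: the registry held a class
        def __call__(self, x, dtype):
            return f"cast {x} to {dtype}"

    registry['to'] = CastOp
    print(registry['to']()("x", "int32"))  # old: instantiate, then call

    registry['to'] = lambda x, dtype: f"cast {x} to {dtype}"  # 2.3.0 style: plain callable
    print(registry['to']("x", "int32"))    # new: call directly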

     def short(self):
         r"""
@@ -4070,22 +4080,19 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> output
         Tensor(shape=[5], dtype=Int16, value= [1, 2, 3, 4, 5])
         """
-        self._init_check()
         return tensor_operator_registry.get('cast')(self, mstype.int16)

     def cholesky(self, upper=False):
         r"""
         For details, please refer to :func:`mindspore.ops.cholesky`.
         """
-
-        return tensor_operator_registry.get('cholesky')(upper=upper)(self)
+        return tensor_operator_registry.get('cholesky')(self, upper=upper)

     def cholesky_inverse(self, upper=False):
         r"""
         For details, please refer to :func:`mindspore.ops.cholesky_inverse`.
         """
-
-        return tensor_operator_registry.get('cholesky_inverse')(upper=upper)(self)
+        return tensor_operator_registry.get('cholesky_inverse')(self, upper=upper)

     def cholesky_solve(self, input2, upper=False):
         r"""
@@ -4094,63 +4101,54 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         .. warning::
             This is an experimental API that is subject to change or deletion.
         """
-        self._init_check()
         return tensor_operator_registry.get('cholesky_solve')(self, input2, upper)

     def conj(self):
         r"""
         For details, please refer to :func:`mindspore.ops.conj`.
         """
-        self._init_check()
         return tensor_operator_registry.get('conj')(self)

     def count_nonzero(self, axis=(), keep_dims=False, dtype=mstype.int32):
         r"""
         For details, please refer to :func:`mindspore.ops.count_nonzero`.
         """
-        self._init_check()
         return tensor_operator_registry.get('count_nonzero')(self, axis, keep_dims, dtype)

     def cross(self, other, dim=None):
         r"""
         For details, please refer to :func:`mindspore.ops.cross`.
         """
-        self._init_check()
         return tensor_operator_registry.get('cross')(self, other, dim)

     def erfinv(self):
         r"""
         For details, please refer to :func:`mindspore.ops.erfinv`.
         """
-        self._init_check()
         return tensor_operator_registry.get('erfinv')(self)

     def less_equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.less_equal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('less_equal')(self, other)

     def lcm(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.lcm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('lcm')(self, other)

     def ldexp(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.ldexp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('ldexp')(self, other)

     def fold(self, output_size, kernel_size, dilation=1, padding=0, stride=1):
         r"""
         For details, please refer to :func:`mindspore.ops.fold`.
         """
-        self._init_check()
         return tensor_operator_registry.get('fold')(self, output_size, kernel_size, dilation, padding, stride)

     def unfold(self, kernel_size, dilation=1, padding=0, stride=1):
@@ -4161,70 +4159,62 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             This is an experimental API that is subject to change or deletion.

         """
-        self._init_check()
         return tensor_operator_registry.get('unfold')(self, kernel_size, dilation, padding, stride)

     def expand(self, size):
         r"""
         For details, please refer to :func:`mindspore.ops.broadcast_to`.
         """
-
+        if isinstance(size, Tensor):
+            size = tensor_operator_registry.get('tensortotuple')()(size)
         return tensor_operator_registry.get('expand')(self, size)
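Note: the new branch in `expand` normalizes a Tensor-typed `size` to a tuple before dispatch, so int-sequence and Tensor inputs should take the same path. A hedged usage sketch, assuming the `broadcast_to` semantics referenced in the docstring:

    import mindspore as ms

    x = ms.Tensor([[1.0], [2.0]], ms.float32)            # shape (2, 1)
    print(x.expand((2, 3)).shape)                        # tuple size
    print(x.expand(ms.Tensor([2, 3], ms.int64)).shape)   # Tensor size, converted internally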

     def cumprod(self, dim, dtype=None):
         r"""
         For details, please refer to :func:`mindspore.ops.cumprod`.
         """
-        self._init_check()
         return tensor_operator_registry.get('cumprod')(self, dim, dtype)

     def multiply(self, value):
         r"""
         For details, please refer to :func:`mindspore.ops.multiply`.
         """
-        self._init_check()
         return tensor_operator_registry.get('multiply')(self, value)

     def div(self, value, *, rounding_mode=None):
         r"""
         For details, please refer to :func:`mindspore.ops.div`.
         """
-        self._init_check()
         return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)

     def divide(self, value, *, rounding_mode=None):
         r"""
         Alias for :func:`mindspore.Tensor.div`.
         """
-        self._init_check()
         return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)

     def eq(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.eq`.
         """
-        self._init_check()
         return tensor_operator_registry.get('equal')(self, other)

     def equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.equal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('equal')(self, other)

     def expm1(self):
         r"""
         For details, please refer to :func:`mindspore.ops.expm1`.
         """
-        self._init_check()
         return tensor_operator_registry.get('expm1')(self)

     def index_add(self, dim, index, source, *, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.index_add`.
         """
-        self._init_check()
         check_is_number(alpha, (int, float))
         source = tensor_operator_registry.get('__mul__')(source, alpha)
         return tensor_operator_registry.get('index_add')(self, indices=index, y=source, axis=dim)
@@ -4233,42 +4223,36 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.greater`.
         """
-        self._init_check()
         return tensor_operator_registry.get('greater')(self, other)

     def greater_equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.greater_equal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('greater_equal')(self, other)

     def igamma(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.igamma`.
         """
-        self._init_check()
         return tensor_operator_registry.get('igamma')(self, other)

     def igammac(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.igammac`.
         """
-        self._init_check()
         return tensor_operator_registry.get('igammac')(self, other)

     def isinf(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isinf`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isinf')(self)

     def isnan(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isnan`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isnan')(self)

     def flip(self, dims):
@@ -4322,14 +4306,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.le`.
         """
-        self._init_check()
         return tensor_operator_registry.get('le')(self, other)

     def less(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.less`.
         """
-        self._init_check()
         return tensor_operator_registry.get('less')(self, other)

     def lt(self, other):
@@ -4342,35 +4324,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_and`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logical_and')(self, other)

     def logical_not(self):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_not`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logical_not')(self)

     def logical_or(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_or`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logical_or')(self, other)

     def logical_xor(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_xor`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logical_xor')(self, other)

     def lstsq(self, A):
         r"""
         For details, please refer to :func:`mindspore.ops.lstsq`.
         """
-        self._init_check()
         return tensor_operator_registry.get('lstsq')(self, A)

     @property
@@ -4394,28 +4371,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.mvlgamma`.
         """
-        self._init_check()
         return tensor_operator_registry.get('mvlgamma')(self, p)

     def matmul(self, tensor2):
         r"""
         For details, please refer to :func:`mindspore.ops.matmul`.
         """
-        self._init_check()
         return tensor_operator_registry.get('matmul')(self, tensor2)

     def inner(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.inner`.
         """
-        self._init_check()
         return tensor_operator_registry.get('inner')(self, other)

     def multinomial(self, num_samples, replacement=True, seed=None):
         r"""
         For details, please refer to :func:`mindspore.ops.multinomial`.
         """
-        self._init_check()
         return tensor_operator_registry.get('multinomial')(self, num_samples, replacement, seed)

     def matrix_power(self, n):
@@ -4426,35 +4399,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             This is an experimental API that is subject to change or deletion.

         """
-        self._init_check()
         return tensor_operator_registry.get('matrix_power')(self, n)

     def maximum(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.maximum`.
         """
-        self._init_check()
         return tensor_operator_registry.get('maximum')(self, other)

     def mm(self, mat2):
         r"""
         For details, please refer to :func:`mindspore.ops.mm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('mm')(self, mat2)

     def msort(self):
         r"""
         For details, please refer to :func:`mindspore.ops.msort`.
         """
-        self._init_check()
         return tensor_operator_registry.get('msort')(self)

     def mul(self, value):
         r"""
         For details, please refer to :func:`mindspore.ops.mul`.
         """
-        self._init_check()
         return tensor_operator_registry.get('mul')(self, value)

     def nan_to_num(self, nan=0.0, posinf=None, neginf=None):
@@ -4467,31 +4435,29 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.neg`.
         """
-        self._init_check()
         return tensor_operator_registry.get('neg')(self)

     def ne(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.ne`.
         """
-        self._init_check()
         return tensor_operator_registry.get('ne')(self, other)

     def not_equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.not_equal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('not_equal')(self, other)

-    def new_zeros(self, size,
+    def new_zeros(self, size, dtype=None):
         r"""
         Return a tensor of `size` filled with zeros.

-
-
+        .. warning::
+            For argument `size`, Tensor type input will be deprecated in the future version.

-
+        Args:
+            size (Union[int, tuple, list, Tensor]): An int, list or tuple of integers defining the output shape.
             dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned tensor has
                 the same dtype as `self`. Default: ``None``.

@@ -4499,7 +4465,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             Tensor, the shape and dtype is defined above and filled with zeros.

         Raises:
-            TypeError: If `size` is
+            TypeError: If `size` is neither an int nor a tuple/list/Tensor of int.

         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
@@ -4514,21 +4480,17 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[0. 0.]
          [0. 0.]]
         """
-
-        if isinstance(size, list):
-            size = tuple(size)
-        self._init_check()
-        _dtype = self.dtype if dtype is None else dtype
-        return tensor_operator_registry.get('zeros')(size, _dtype)
+        return tensor_operator_registry.get('zeros')(size, dtype)

-    def new_ones(self, size,
+    def new_ones(self, size, dtype=None):
         r"""
         Return a tensor of `size` filled with ones.

-
-
+        .. warning::
+            For argument `size`, Tensor type input will be deprecated in the future version.

-
+        Args:
+            size (Union[int, tuple, list, Tensor]): An int, list or tuple of integers defining the output shape.
             dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned
                 tensor has the same dtype as `self`. Default: ``None``.

@@ -4536,7 +4498,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             Tensor, the shape and dtype is defined above and filled with ones.

         Raises:
-            TypeError: If `size` is
+            TypeError: If `size` is neither an int nor a tuple/list/Tensor of int.

         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
@@ -4551,109 +4513,90 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1. 1.]
          [1. 1.]]
         """
-
-        if isinstance(size, list):
-            size = tuple(size)
-        self._init_check()
-        _dtype = self.dtype if dtype is None else dtype
-        return tensor_operator_registry.get('ones')(size, _dtype)
+        return tensor_operator_registry.get('ones')(size, dtype)
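Note: `new_zeros` and `new_ones` now take `dtype` as an explicit keyword and delegate list/tuple/Tensor handling to the registered `zeros`/`ones` ops. A usage sketch based only on the docstrings above (a `None` dtype falls back to `self`'s dtype):

    import mindspore as ms

    x = ms.Tensor([[1, 2], [3, 4]], ms.float32)
    z = x.new_zeros((2, 2))                   # inherits float32 from x
    o = x.new_ones((2, 2), dtype=ms.int32)    # explicit dtype
    print(z.dtype, o.dtype)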

     def sign(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sign`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sign')(self)

     def signbit(self):
         """
         For details, please refer to :func:`mindspore.ops.signbit`.
         """
-        self._init_check()
         return tensor_operator_registry.get('signbit')(self)

     def sgn(self):
         """
         For details, please refer to :func:`mindspore.ops.sgn`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sgn')(self)

     def sin(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sin`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sin')(self)

     def sinc(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sinc`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sinc')(self)

     def sinh(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sinh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sinh')(self)

     def sort(self, axis=-1, descending=False):
         r"""
         For details, please refer to :func:`mindspore.ops.sort`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sort')(self, axis=axis, descending=descending)

     def argsort(self, axis=-1, descending=False):
         """
         For details, please refer to :func:`mindspore.ops.argsort`.
         """
-        self._init_check()
         return tensor_operator_registry.get('argsort')(self, axis, descending)

     def trunc(self):
         r"""
         For details, please refer to :func:`mindspore.ops.trunc`.
         """
-        self._init_check()
         return tensor_operator_registry.get('trunc')(self)

     def where(self, condition, y):
         r"""
         For details, please refer to :func:`mindspore.ops.where`.
         """
-        self._init_check()
         return tensor_operator_registry.get('where')(condition, self, y)

     def imag(self):
         r"""
         For details, please refer to :func:`mindspore.ops.imag`.
         """
-        self._init_check()
         return tensor_operator_registry.get('imag')(self)

     def quantile(self, q, axis=None, keepdims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.quantile`.
         """
-        self._init_check()
         return tensor_operator_registry.get('quantile')(self, q, axis, keepdims)

     def nanquantile(self, q, axis=None, keepdims=False):
         """
         For details, please refer to :func:`mindspore.ops.nanquantile`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nanquantile')(self, q, axis, keepdims)

     def orgqr(self, input2):
         r"""
         For details, please refer to :func:`mindspore.ops.orgqr`.
         """
-        self._init_check()
         return tensor_operator_registry.get('orgqr')(self, input2)

     def lu_solve(self, LU_data, LU_pivots):
@@ -4663,7 +4606,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         .. warning::
             This is an experimental API that is subject to change or deletion.
         """
-        self._init_check()
         return tensor_operator_registry.get('lu_solve')(self, LU_data, LU_pivots)


@@ -4671,14 +4613,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.nextafter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nextafter')(self, other)

     def qr(self, some=True):
         r"""
         For details, please refer to :func:`mindspore.ops.qr`.
         """
-        self._init_check()
         validator.check_value_type('some', some, bool, 'Tensor.qr')
         return tensor_operator_registry.get('qr')(self, 'reduced' if some else 'complete')

@@ -4688,7 +4628,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         For details, please refer to :func:`mindspore.ops.ormqr`,
         Args `input2` and `input3` correspond to the args `tau` and `other` of :func:`mindspore.ops.ormqr`.
         """
-        self._init_check()
         return tensor_operator_registry.get('ormqr')(self, input2, input3, left, transpose)


@@ -4730,7 +4669,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output)
         [5. 6. 3. 7.]
         """
-        self._init_check()
         return tensor_operator_registry.get('masked_scatter')()(self, mask, x)


@@ -4782,12 +4720,47 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1 5 3]
          [4 8 9]]
         """
-        self._init_check()
         validator.check_value_type('accumulate', accumulate, bool, 'Tensor.index_put')
         _index_put = tensor_operator_registry.get('index_put')(0 if accumulate is False else 1)
         return _index_put(self, values, indices)


+    def move_to(self, to, blocking=True):
+        r"""
+        Copy the Tensor to the target device, synchronously or asynchronously (synchronously by default).
+        Only supported in PyNative mode.
+
+        Args:
+            to (str): a string type value, one of ``"Ascend"``, ``"GPU"``, ``"CPU"``.
+            blocking (bool): a bool type value, using synchronous copy or asynchronous copy.
+                Default: ``True``, synchronous copy.
+
+        Returns:
+            New Tensor, stored on the target device, with the same type and shape as `self`.
+
+        Raises:
+            ValueError: If the type of `blocking` is not bool type.
+            ValueError: If the value of `to` is not one of ``"Ascend"``, ``"GPU"``, ``"CPU"``.
+            ValueError: If the run mode is not PyNative mode.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import mindspore as ms
+            >>> from mindspore import Tensor
+            >>> x = ms.Tensor([1, 2, 3], ms.int64)
+            >>> new_tensor = x.move_to("CPU")
+        """
+        if not isinstance(blocking, bool):
+            raise ValueError(f"The type of 'blocking' must be bool, but got {blocking}")
+        if to not in ("Ascend", "GPU", "CPU"):
+            raise ValueError(f"The value of 'to' must be one of ['Ascend', 'GPU', 'CPU'], but got {to}")
+        mode = context.get_context("mode")
+        if mode != context.PYNATIVE_MODE:
+            raise ValueError(f"The method of 'move_to' only supported in pynative mode, but got: {mode}.")
+        return Tensor(Tensor_.move_to(self, to, blocking))
+
+
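Note: `move_to` is new in 2.3.0. A usage sketch based only on the docstring above; that the asynchronous path returns before the copy completes is an assumption, not verified here:

    import mindspore as ms

    ms.set_context(mode=ms.PYNATIVE_MODE)          # move_to raises outside PyNative mode
    x = ms.Tensor([1, 2, 3], ms.int64)
    on_cpu = x.move_to("CPU")                      # blocking copy (default)
    async_cpu = x.move_to("CPU", blocking=False)   # asynchronous copy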
     def _offload(self):
         r"""
         Offload tensor parameter to host. Currently, only support for pynative mode.
@@ -4801,7 +4774,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> x = ms.Tensor([1, 2, 3], ms.int64)
         >>> x._offload()
         """
-        self._init_check()
         return Tensor_._offload(self)


@@ -4843,9 +4815,9 @@ def _check_tensor_input(input_data=None, dtype=None, shape=None, init=None):
         raise ValueError("init, dtype and shape must have values at the same time.")

     if input_data is not None:
-        if isinstance(input_data, np.ndarray) and input_data.ndim
+        if isinstance(input_data, np.ndarray) and input_data.ndim >= 1 and input_data.size == 0:
             raise ValueError("input_data can not contain zero dimension.")
-        if isinstance(input_data, (tuple, list)) and np.array(input_data).ndim
+        if isinstance(input_data, (tuple, list)) and np.array(input_data).ndim >= 1 \
                 and np.array(input_data).size == 0:
             raise ValueError("input_data can not contain zero dimension.")

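Note: the completed conditions in the `+` lines above reject any input whose array form has a zero-length dimension while still admitting 0-d scalars. A standalone sketch of the same predicate (the helper name is hypothetical, not MindSpore's):

    import numpy as np

    def has_zero_size_dim(data) -> bool:
        # Mirrors the check: at least one axis, but no elements at all.
        arr = data if isinstance(data, np.ndarray) else np.array(data)
        return arr.ndim >= 1 and arr.size == 0

    print(has_zero_size_dim([]))             # True: shape (0,)
    print(has_zero_size_dim([[], []]))       # True: shape (2, 0)
    print(has_zero_size_dim([1, 2, 3]))      # False: non-empty data
    print(has_zero_size_dim(np.float32(5)))  # False: 0-d scalar passes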
@@ -4884,4 +4856,4 @@ def _check_astype_and_convert(dtype):
     return dtype


-tensor_operator_registry
+setattr(tensor_operator_registry, 'vm_compare', _vm_compare)