mindspore-2.2.14-cp39-cp39-win_amd64.whl → mindspore-2.3.0-cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
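A file-level listing like the one below can be reproduced locally from the two wheels. Here is a minimal sketch, assuming both .whl files have been downloaded; the file names are illustrative, and the exact +/- counting may differ from the registry's own diff engine:

```python
# Sketch: reproduce a file-level wheel diff like the listing below.
import difflib
import hashlib
import zipfile

def load(whl):
    """Map archive member -> list of text lines, or a content hash for binaries."""
    out = {}
    with zipfile.ZipFile(whl) as zf:
        for name in zf.namelist():
            data = zf.read(name)
            try:
                out[name] = data.decode("utf-8").splitlines()
            except UnicodeDecodeError:
                out[name] = hashlib.sha256(data).hexdigest()  # binary member (.dll, .pyd)
    return out

old = load("mindspore-2.2.14-cp39-cp39-win_amd64.whl")
new = load("mindspore-2.3.0-cp39-cp39-win_amd64.whl")

for name in sorted(set(old) | set(new)):
    a, b = old.get(name, []), new.get(name, [])
    if isinstance(a, str) or isinstance(b, str):  # binary: no line counts, report +0 -0
        if a != b:
            print(f"{name} +0 -0")
        continue
    diff = list(difflib.unified_diff(a, b, lineterm=""))
    added = sum(1 for ln in diff if ln.startswith("+") and not ln.startswith("+++"))
    removed = sum(1 for ln in diff if ln.startswith("-") and not ln.startswith("---"))
    if added or removed:
        print(f"{name} +{added} -{removed}")
```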
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +6 -5
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +76 -18
- mindspore/_extends/builtin_operations.py +2 -1
- mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
- mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
- mindspore/_extends/parse/__init__.py +18 -14
- mindspore/_extends/parse/compile_config.py +258 -0
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +174 -62
- mindspore/_extends/parse/resources.py +45 -14
- mindspore/_extends/parse/standard_method.py +142 -240
- mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _extends/pijit/__init__.py} +6 -16
- mindspore/_extends/pijit/pijit_func_white_list.py +343 -0
- mindspore/_extends/remote/kernel_build_server.py +2 -0
- mindspore/_profiler.py +30 -0
- mindspore/amp.py +51 -24
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/boost/less_batch_normalization.py +9 -6
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +15 -4
- mindspore/common/_jit_fallback_utils.py +2 -3
- mindspore/common/_register_for_adapter.py +7 -0
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_register_for_tensor.py +8 -9
- mindspore/common/_stub_tensor.py +7 -1
- mindspore/common/_utils.py +5 -17
- mindspore/common/api.py +411 -106
- mindspore/common/auto_dynamic_shape.py +27 -14
- mindspore/common/dtype.py +17 -10
- mindspore/common/dump.py +6 -8
- mindspore/common/file_system.py +48 -0
- mindspore/common/generator.py +260 -0
- mindspore/common/hook_handle.py +51 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +34 -14
- mindspore/common/lazy_inline.py +72 -19
- mindspore/common/mindir_util.py +12 -2
- mindspore/common/mutable.py +79 -14
- mindspore/common/no_inline.py +54 -0
- mindspore/common/np_dtype.py +25 -0
- mindspore/common/parameter.py +30 -11
- mindspore/common/recompute.py +262 -0
- mindspore/common/seed.py +9 -9
- mindspore/common/sparse_tensor.py +272 -24
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +468 -494
- mindspore/communication/__init__.py +6 -11
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/comm_func.py +1140 -0
- mindspore/communication/management.py +115 -102
- mindspore/config/op_info.config +22 -54
- mindspore/context.py +346 -63
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +6 -6
- mindspore/dataset/audio/transforms.py +711 -158
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +2 -2
- mindspore/dataset/engine/datasets.py +140 -83
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +33 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +78 -59
- mindspore/dataset/engine/datasets_vision.py +77 -73
- mindspore/dataset/engine/offload.py +5 -7
- mindspore/dataset/engine/queue.py +56 -38
- mindspore/dataset/engine/validators.py +11 -5
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +408 -121
- mindspore/dataset/text/utils.py +9 -9
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/transforms.py +261 -76
- mindspore/dataset/utils/browse_dataset.py +9 -9
- mindspore/dataset/vision/__init__.py +8 -8
- mindspore/dataset/vision/c_transforms.py +10 -10
- mindspore/dataset/vision/py_transforms_util.py +1 -1
- mindspore/dataset/vision/transforms.py +2844 -549
- mindspore/dataset/vision/utils.py +161 -10
- mindspore/dataset/vision/validators.py +14 -2
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/optim/__init__.py +12 -2
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +35 -34
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +40 -16
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +66 -121
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +15 -8
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +28 -19
- mindspore/hal/__init__.py +40 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/memory.py +326 -0
- mindspore/hal/stream.py +339 -0
- mindspore/include/api/data_type.h +2 -2
- mindspore/include/api/dual_abi_helper.h +16 -3
- mindspore/include/api/model.h +4 -3
- mindspore/include/api/status.h +14 -0
- mindspore/include/c_api/model_c.h +173 -0
- mindspore/include/c_api/ms/base/types.h +1 -0
- mindspore/include/c_api/types_c.h +19 -0
- mindspore/include/dataset/execute.h +1 -3
- mindspore/include/dataset/vision.h +54 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +2 -2
- mindspore/mindrecord/__init__.py +5 -1
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +25 -0
- mindspore/mindrecord/filewriter.py +76 -58
- mindspore/mindrecord/mindpage.py +40 -6
- mindspore/mindrecord/shardutils.py +3 -2
- mindspore/mindrecord/shardwriter.py +7 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
- mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
- mindspore/mindrecord/tools/csv_to_mr.py +4 -9
- mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/mint/__init__.py +1137 -0
- mindspore/{rewrite/ast_transformers → mint/linalg}/__init__.py +9 -4
- mindspore/mint/nn/__init__.py +512 -0
- mindspore/mint/nn/functional.py +573 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +185 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/multiprocessing/__init__.py +72 -0
- mindspore/nn/__init__.py +1 -0
- mindspore/nn/cell.py +213 -257
- mindspore/nn/dynamic_lr.py +2 -2
- mindspore/nn/extend/__init__.py +29 -0
- mindspore/nn/extend/basic.py +140 -0
- mindspore/nn/extend/embedding.py +143 -0
- mindspore/{rewrite/ast_creator_register.py → nn/extend/layer/__init__.py} +9 -19
- mindspore/nn/extend/layer/normalization.py +109 -0
- mindspore/nn/extend/pooling.py +117 -0
- mindspore/nn/layer/activation.py +83 -93
- mindspore/nn/layer/basic.py +177 -82
- mindspore/nn/layer/channel_shuffle.py +3 -16
- mindspore/nn/layer/container.py +3 -3
- mindspore/nn/layer/conv.py +75 -66
- mindspore/nn/layer/embedding.py +101 -43
- mindspore/nn/layer/embedding_service.py +531 -0
- mindspore/nn/layer/embedding_service_layer.py +393 -0
- mindspore/nn/layer/image.py +4 -7
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +52 -66
- mindspore/nn/layer/padding.py +30 -39
- mindspore/nn/layer/pooling.py +18 -9
- mindspore/nn/layer/rnn_cells.py +6 -16
- mindspore/nn/layer/rnns.py +6 -5
- mindspore/nn/layer/thor_layer.py +1 -2
- mindspore/nn/layer/timedistributed.py +1 -1
- mindspore/nn/layer/transformer.py +52 -50
- mindspore/nn/learning_rate_schedule.py +6 -5
- mindspore/nn/loss/loss.py +62 -83
- mindspore/nn/optim/ada_grad.py +4 -2
- mindspore/nn/optim/adadelta.py +3 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +102 -181
- mindspore/nn/optim/adamax.py +4 -2
- mindspore/nn/optim/adasum.py +3 -3
- mindspore/nn/optim/asgd.py +4 -2
- mindspore/nn/optim/ftrl.py +31 -61
- mindspore/nn/optim/lamb.py +5 -3
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +6 -4
- mindspore/nn/optim/momentum.py +13 -25
- mindspore/nn/optim/optimizer.py +6 -3
- mindspore/nn/optim/proximal_ada_grad.py +4 -2
- mindspore/nn/optim/rmsprop.py +9 -3
- mindspore/nn/optim/rprop.py +4 -2
- mindspore/nn/optim/sgd.py +5 -3
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
- mindspore/nn/probability/distribution/beta.py +2 -2
- mindspore/nn/probability/distribution/categorical.py +4 -6
- mindspore/nn/probability/distribution/cauchy.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +2 -2
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +13 -1
- mindspore/nn/wrap/__init__.py +2 -1
- mindspore/nn/wrap/cell_wrapper.py +58 -13
- mindspore/nn/wrap/grad_reducer.py +148 -8
- mindspore/nn/wrap/loss_scale.py +32 -9
- mindspore/numpy/__init__.py +2 -0
- mindspore/numpy/array_creations.py +2 -0
- mindspore/numpy/array_ops.py +6 -6
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/fft.py +431 -0
- mindspore/numpy/math_ops.py +62 -68
- mindspore/numpy/utils.py +3 -0
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +6 -5
- mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
- mindspore/ops/_grad_experimental/grad_comm_ops.py +89 -34
- mindspore/ops/_grad_experimental/grad_math_ops.py +68 -283
- mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
- mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
- mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/__init__.py +0 -1
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
- mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
- mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -3
- mindspore/ops/_op_impl/cpu/adam.py +2 -2
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
- mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
- mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
- mindspore/ops/_vmap/vmap_array_ops.py +164 -101
- mindspore/ops/_vmap/vmap_base.py +8 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
- mindspore/ops/_vmap/vmap_image_ops.py +70 -13
- mindspore/ops/_vmap/vmap_math_ops.py +130 -58
- mindspore/ops/_vmap/vmap_nn_ops.py +249 -115
- mindspore/ops/_vmap/vmap_other_ops.py +1 -1
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +231 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +250 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
- mindspore/ops/auto_generate/gen_extend_func.py +980 -0
- mindspore/ops/auto_generate/gen_ops_def.py +6443 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +13167 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +429 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +121 -23
- mindspore/ops/composite/math_ops.py +10 -49
- mindspore/ops/composite/multitype_ops/_compile_utils.py +191 -618
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +25 -134
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
- mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
- mindspore/ops/deprecated.py +14 -3
- mindspore/ops/extend/__init__.py +53 -0
- mindspore/ops/extend/array_func.py +218 -0
- mindspore/ops/extend/math_func.py +76 -0
- mindspore/ops/extend/nn_func.py +308 -0
- mindspore/ops/function/__init__.py +31 -11
- mindspore/ops/function/array_func.py +846 -1735
- mindspore/ops/function/clip_func.py +19 -31
- mindspore/ops/function/debug_func.py +1 -4
- mindspore/ops/function/fft_func.py +31 -0
- mindspore/ops/function/grad/grad_func.py +27 -20
- mindspore/ops/function/image_func.py +27 -21
- mindspore/ops/function/linalg_func.py +35 -68
- mindspore/ops/function/math_func.py +913 -2791
- mindspore/ops/function/nn_func.py +1439 -885
- mindspore/ops/function/other_func.py +6 -7
- mindspore/ops/function/parameter_func.py +5 -93
- mindspore/ops/function/random_func.py +254 -108
- mindspore/ops/function/reshard_func.py +102 -0
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/function/sparse_unary_func.py +9 -16
- mindspore/ops/function/spectral_func.py +1 -1
- mindspore/ops/function/vmap_func.py +14 -14
- mindspore/ops/functional.py +342 -343
- mindspore/ops/op_info_register.py +16 -43
- mindspore/ops/operations/__init__.py +32 -23
- mindspore/ops/operations/_grad_ops.py +21 -853
- mindspore/ops/operations/_infer_ops.py +19 -0
- mindspore/ops/operations/_inner_ops.py +107 -518
- mindspore/ops/operations/_rl_inner_ops.py +2 -2
- mindspore/ops/operations/_scalar_ops.py +5 -480
- mindspore/ops/operations/_sequence_ops.py +6 -36
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +108 -2705
- mindspore/ops/operations/comm_ops.py +801 -118
- mindspore/ops/operations/custom_ops.py +61 -120
- mindspore/ops/operations/debug_ops.py +104 -35
- mindspore/ops/operations/image_ops.py +1 -217
- mindspore/ops/operations/inner_ops.py +5 -40
- mindspore/ops/operations/linalg_ops.py +1 -49
- mindspore/ops/operations/manually_defined/__init__.py +24 -0
- mindspore/ops/operations/manually_defined/_inner.py +61 -0
- mindspore/ops/operations/manually_defined/ops_def.py +2016 -0
- mindspore/ops/operations/math_ops.py +572 -4667
- mindspore/ops/operations/nn_ops.py +248 -2162
- mindspore/ops/operations/other_ops.py +53 -45
- mindspore/ops/operations/random_ops.py +4 -53
- mindspore/ops/operations/reshard_ops.py +53 -0
- mindspore/ops/operations/sparse_ops.py +4 -4
- mindspore/ops/primitive.py +204 -103
- mindspore/ops/silent_check.py +5 -5
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +250 -0
- mindspore/ops_generate/arg_handler.py +197 -0
- mindspore/ops_generate/gen_aclnn_implement.py +263 -0
- mindspore/ops_generate/gen_ops.py +1084 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
- mindspore/ops_generate/gen_pyboost_func.py +968 -0
- mindspore/ops_generate/gen_utils.py +209 -0
- mindspore/ops_generate/op_proto.py +138 -0
- mindspore/ops_generate/pyboost_utils.py +354 -0
- mindspore/ops_generate/template.py +239 -0
- mindspore/parallel/__init__.py +6 -4
- mindspore/parallel/_auto_parallel_context.py +73 -3
- mindspore/parallel/_cell_wrapper.py +16 -9
- mindspore/parallel/_cost_model_context.py +1 -1
- mindspore/parallel/_dp_allreduce_fusion.py +159 -159
- mindspore/parallel/_parallel_serialization.py +29 -13
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +18 -11
- mindspore/parallel/_transformer/__init__.py +1 -1
- mindspore/parallel/_transformer/layers.py +1 -1
- mindspore/parallel/_transformer/loss.py +1 -1
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/op_parallel_config.py +1 -1
- mindspore/parallel/_transformer/transformer.py +2 -2
- mindspore/parallel/_utils.py +161 -6
- mindspore/parallel/algo_parameter_config.py +6 -8
- mindspore/parallel/checkpoint_transform.py +191 -32
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +344 -0
- mindspore/parallel/cluster/process_entity/_utils.py +126 -0
- mindspore/parallel/cluster/run.py +136 -0
- mindspore/parallel/mpi/__init__.py +1 -1
- mindspore/parallel/mpi/_mpi_config.py +1 -1
- mindspore/parallel/parameter_broadcast.py +152 -0
- mindspore/parallel/shard.py +128 -17
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +3 -2
- mindspore/profiler/common/process_pool.py +41 -0
- mindspore/profiler/common/singleton.py +28 -0
- mindspore/profiler/common/util.py +125 -0
- mindspore/profiler/envprofiling.py +2 -2
- mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +53 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +159 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +161 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +131 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +85 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +57 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +116 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +68 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
- mindspore/profiler/parser/ascend_flops_generator.py +20 -4
- mindspore/profiler/parser/ascend_hccl_generator.py +29 -278
- mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
- mindspore/profiler/parser/ascend_memory_generator.py +185 -0
- mindspore/profiler/parser/ascend_msprof_exporter.py +147 -146
- mindspore/profiler/parser/ascend_msprof_generator.py +73 -283
- mindspore/profiler/parser/ascend_op_generator.py +92 -42
- mindspore/profiler/parser/ascend_timeline_generator.py +296 -133
- mindspore/profiler/parser/base_timeline_generator.py +6 -0
- mindspore/profiler/parser/framework_parser.py +3 -2
- mindspore/profiler/parser/integrator.py +3 -1
- mindspore/profiler/parser/minddata_parser.py +72 -3
- mindspore/profiler/parser/msadvisor_analyzer.py +1 -1
- mindspore/profiler/parser/msadvisor_parser.py +1 -1
- mindspore/profiler/parser/profiler_info.py +16 -1
- mindspore/profiler/profiling.py +445 -190
- mindspore/rewrite/__init__.py +2 -13
- mindspore/rewrite/api/node.py +122 -36
- mindspore/rewrite/api/pattern_engine.py +2 -3
- mindspore/rewrite/api/scoped_value.py +16 -15
- mindspore/rewrite/api/symbol_tree.py +45 -29
- mindspore/rewrite/ast_helpers/__init__.py +3 -6
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
- mindspore/rewrite/common/__init__.py +1 -2
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
- mindspore/rewrite/{namer.py → common/namer.py} +63 -18
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/node/__init__.py +5 -5
- mindspore/rewrite/node/call_function.py +23 -7
- mindspore/rewrite/node/cell_container.py +7 -3
- mindspore/rewrite/node/control_flow.py +53 -28
- mindspore/rewrite/node/node.py +212 -196
- mindspore/rewrite/node/node_manager.py +51 -22
- mindspore/rewrite/node/node_topological_manager.py +3 -23
- mindspore/rewrite/parsers/__init__.py +12 -0
- mindspore/rewrite/parsers/arguments_parser.py +8 -9
- mindspore/rewrite/parsers/assign_parser.py +637 -413
- mindspore/rewrite/parsers/attribute_parser.py +3 -4
- mindspore/rewrite/parsers/class_def_parser.py +115 -148
- mindspore/rewrite/parsers/constant_parser.py +5 -5
- mindspore/rewrite/parsers/container_parser.py +4 -6
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +31 -98
- mindspore/rewrite/parsers/function_def_parser.py +13 -5
- mindspore/rewrite/parsers/if_parser.py +28 -10
- mindspore/rewrite/parsers/module_parser.py +8 -182
- mindspore/rewrite/parsers/parser.py +1 -5
- mindspore/rewrite/parsers/parser_register.py +1 -1
- mindspore/rewrite/parsers/return_parser.py +5 -10
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
- mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
- mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
- mindspore/run_check/_check_version.py +6 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +9 -19
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +6 -5
- mindspore/train/_utils.py +178 -4
- mindspore/train/amp.py +167 -245
- mindspore/train/anf_ir_pb2.py +14 -2
- mindspore/train/callback/__init__.py +5 -2
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +143 -29
- mindspore/train/callback/_cluster_monitor.py +201 -0
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_flops_collector.py +238 -0
- mindspore/train/callback/_landscape.py +15 -9
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_mindio_ttp.py +443 -0
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +7 -7
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/data_sink.py +6 -5
- mindspore/train/dataset_helper.py +60 -21
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/accuracy.py +7 -7
- mindspore/train/metrics/confusion_matrix.py +8 -6
- mindspore/train/metrics/cosine_similarity.py +6 -4
- mindspore/train/metrics/error.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/perplexity.py +2 -1
- mindspore/train/metrics/topk.py +2 -2
- mindspore/train/mind_ir_pb2.py +89 -15
- mindspore/train/model.py +290 -60
- mindspore/train/serialization.py +495 -220
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +51 -28
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.2.14.dist-info → mindspore-2.3.0.dist-info}/METADATA +3 -3
- mindspore-2.3.0.dist-info/RECORD +1400 -0
- {mindspore-2.2.14.dist-info → mindspore-2.3.0.dist-info}/entry_points.txt +1 -0
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
- mindspore/gen_ops.py +0 -273
- mindspore/nn/layer/flash_attention.py +0 -189
- mindspore/ops/_op_impl/cpu/concat.py +0 -39
- mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
- mindspore/ops/_op_impl/tbe/__init__.py +0 -47
- mindspore/ops/_op_impl/tbe/abs.py +0 -38
- mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/acos.py +0 -37
- mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/acosh.py +0 -37
- mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
- mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
- mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
- mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
- mindspore/ops/_op_impl/tbe/add.py +0 -42
- mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/add_n.py +0 -39
- mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
- mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
- mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
- mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
- mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
- mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
- mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
- mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/asin.py +0 -37
- mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/asinh.py +0 -37
- mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/assign.py +0 -79
- mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
- mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
- mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/atan.py +0 -37
- mindspore/ops/_op_impl/tbe/atan2.py +0 -38
- mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/atanh.py +0 -37
- mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
- mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
- mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
- mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
- mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
- mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
- mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
- mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
- mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
- mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
- mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cast.py +0 -55
- mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/cdist.py +0 -38
- mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/ceil.py +0 -37
- mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/celu.py +0 -39
- mindspore/ops/_op_impl/tbe/centralization.py +0 -39
- mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
- mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/concat.py +0 -40
- mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
- mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
- mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
- mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
- mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
- mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/cos.py +0 -37
- mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/cosh.py +0 -37
- mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
- mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cummin.py +0 -41
- mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
- mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
- mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
- mindspore/ops/_op_impl/tbe/diag.py +0 -38
- mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
- mindspore/ops/_op_impl/tbe/dilation.py +0 -40
- mindspore/ops/_op_impl/tbe/div.py +0 -41
- mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
- mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
- mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
- mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
- mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
- mindspore/ops/_op_impl/tbe/elu.py +0 -38
- mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/equal.py +0 -42
- mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/erf.py +0 -37
- mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfc.py +0 -37
- mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
- mindspore/ops/_op_impl/tbe/exp.py +0 -40
- mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
- mindspore/ops/_op_impl/tbe/expm1.py +0 -37
- mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
- mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/fill.py +0 -56
- mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/flatten.py +0 -48
- mindspore/ops/_op_impl/tbe/floor.py +0 -37
- mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
- mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
- mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
- mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
- mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
- mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
- mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/ger.py +0 -43
- mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/greater.py +0 -43
- mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
- mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
- mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
- mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
- mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
- mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
- mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
- mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/im2col.py +0 -42
- mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
- mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
- mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/inv.py +0 -38
- mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/invert.py +0 -37
- mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/iou.py +0 -38
- mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/is_close.py +0 -40
- mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
- mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
- mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
- mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
- mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
- mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
- mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/lerp.py +0 -38
- mindspore/ops/_op_impl/tbe/less.py +0 -41
- mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/log.py +0 -40
- mindspore/ops/_op_impl/tbe/log1p.py +0 -37
- mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
- mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
- mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
- mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
- mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/matmul.py +0 -53
- mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
- mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
- mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
- mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum.py +0 -39
- mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
- mindspore/ops/_op_impl/tbe/minimum.py +0 -40
- mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mish.py +0 -37
- mindspore/ops/_op_impl/tbe/mod.py +0 -41
- mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/mul.py +0 -37
- mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
- mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
- mindspore/ops/_op_impl/tbe/neg.py +0 -39
- mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
- mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
- mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
- mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
- mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
- mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/pack.py +0 -58
- mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
- mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
- mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/pdist.py +0 -36
- mindspore/ops/_op_impl/tbe/pooling.py +0 -46
- mindspore/ops/_op_impl/tbe/population_count.py +0 -38
- mindspore/ops/_op_impl/tbe/pow.py +0 -41
- mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/prelu.py +0 -37
- mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/range.py +0 -39
- mindspore/ops/_op_impl/tbe/real_div.py +0 -38
- mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
- mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
- mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
- mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
- mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6.py +0 -38
- mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/renorm.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
- mindspore/ops/_op_impl/tbe/rint.py +0 -37
- mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roll.py +0 -42
- mindspore/ops/_op_impl/tbe/round.py +0 -38
- mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
- mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
- mindspore/ops/_op_impl/tbe/select.py +0 -38
- mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/selu.py +0 -39
- mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sgd.py +0 -62
- mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sign.py +0 -38
- mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/sin.py +0 -37
- mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sinh.py +0 -37
- mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/slice.py +0 -58
- mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
- mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax.py +0 -37
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
- mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/softplus.py +0 -37
- mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softsign.py +0 -37
- mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sort.py +0 -38
- mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/split_d.py +0 -38
- mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/split_v.py +0 -39
- mindspore/ops/_op_impl/tbe/splitv.py +0 -39
- mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/square.py +0 -38
- mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
- mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
- mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
- mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
- mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
- mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
- mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
- mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
- mindspore/ops/_op_impl/tbe/sub.py +0 -39
- mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tan.py +0 -38
- mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh.py +0 -37
- mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
- mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
- mindspore/ops/_op_impl/tbe/tile.py +0 -37
- mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
- mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
- mindspore/ops/_op_impl/tbe/transpose.py +0 -60
- mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
- mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
- mindspore/ops/_op_impl/tbe/trunc.py +0 -39
- mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/unpack.py +0 -38
- mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
- mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
- mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
- mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
- mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
- mindspore/ops/_tracefunc.py +0 -241
- mindspore/ops/arg_dtype_cast.py +0 -54
- mindspore/rewrite/api/tree_node_helper.py +0 -60
- mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
- mindspore/rewrite/namespace.py +0 -53
- mindspore-2.2.14.dist-info/RECORD +0 -1924
- {mindspore-2.2.14.dist-info → mindspore-2.3.0.dist-info}/WHEEL +0 -0
- {mindspore-2.2.14.dist-info → mindspore-2.3.0.dist-info}/top_level.txt +0 -0
mindspore/common/tensor.py
CHANGED
@@ -27,13 +27,15 @@ from mindspore.common.seed import get_seed
 from mindspore import context
 from mindspore import log as logger
 from mindspore.common import dtype as mstype
+from mindspore.common.hook_handle import _TensorHookHandle
 
 from mindspore.common._utils import get_slice_num
 from mindspore.common._register_for_tensor import tensor_operator_registry
 from mindspore._c_expression import Tensor as Tensor_
 from mindspore import _checkparam as validator
-from mindspore._checkparam import check_is_number, is_stub_tensor
+from mindspore._checkparam import check_is_number, is_stub_tensor, check_hook_fn
 from mindspore._check_jit_forbidden_api import jit_forbidden_register
+from mindspore.common.symbol import Symbol
 
 np_types = (np.int8, np.int16, np.int32, np.int64,
             np.uint8, np.uint16, np.uint32, np.uint64, np.float16,
@@ -48,7 +50,8 @@ def _check_input_data_type(input_data):
     valid_dtypes = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64,
                     np.float16, np.float32, np.float64, np.bool_, np.str_, np.complex64, np.complex128)
     if isinstance(input_data, np.ndarray) and input_data.dtype not in valid_dtypes and \
-            input_data.dtype.kind != 'U' and input_data.dtype.kind != 'S':
+            input_data.dtype.kind != 'U' and input_data.dtype.kind != 'S' and \
+            input_data.dtype.kind != 'T':  # Support dtype np.str_ and npy_bfloat16
         new_line = '\n'
         for index, x in np.ndenumerate(input_data):
             if np.array(x).dtype not in valid_dtypes:
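Read as a predicate, the widened gate above only falls back to the slow element-wise check when an ndarray's dtype is neither in the allow-list nor of kind 'U' (unicode), 'S' (bytes), or 'T' (the npy_bfloat16 extension named in the new comment). A minimal standalone sketch of that logic; VALID_DTYPES and needs_elementwise_check are illustrative names, not package API:

import numpy as np

# Sketch of the dtype gate in _check_input_data_type (assumption: the
# kind letters mean 'U' unicode, 'S' bytes, 'T' npy_bfloat16 as per the
# comment in the hunk above).
VALID_DTYPES = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16,
                np.uint32, np.uint64, np.float16, np.float32, np.float64,
                np.bool_, np.str_, np.complex64, np.complex128)

def needs_elementwise_check(arr):
    return arr.dtype not in VALID_DTYPES and arr.dtype.kind not in ('U', 'S', 'T')

print(needs_elementwise_check(np.array([1.0, 2.0])))                       # False: float64 allowed
print(needs_elementwise_check(np.array(['a', 'b'])))                       # False: kind 'U'
print(needs_elementwise_check(np.array(['2021-01-01'], 'datetime64[D]')))  # True: kind 'M'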
@@ -82,11 +85,11 @@ def tensor(input_data=None, dtype=None, shape=None, init=None, internal=False, c
     based on the `dtype` argument.
 
     Please refer to `Creating and Using Tensor
-    <https://www.mindspore.cn/docs/en/
+    <https://www.mindspore.cn/docs/en/master/note/static_graph_syntax_support.html#mindspore-user-defined-data-types>`_ .
 
     The difference between it and the Tensor class is that it adds
     `Annotation
-    <https://www.mindspore.cn/docs/en/
+    <https://www.mindspore.cn/docs/en/master/design/dynamic_graph_and_static_graph.html?#annotation-type>`_
     which can prevent the generation of AnyType compared to the Tensor class.
 
     The arguments and return values are the same as the Tensor class. Also see: :class:`mindspore.Tensor`.
@@ -114,22 +117,25 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     Tensor is a data structure that stores an n-dimensional array.
 
     Note:
-        If
+        If `init` interface is used to initialize `Tensor`, the `Tensor.init_data` API needs to be called to load the
         actual data to `Tensor`.
 
+    Warning:
+        To convert dtype of a `Tensor`, it is recommended to use `Tensor.astype()` rather than
+        `Tensor(sourceTensor, dtype=newDtype)`.
+
     Args:
         input_data (Union[Tensor, float, int, bool, tuple, list, numpy.ndarray]): The data to be stored. It can be
             another Tensor, Python number or NumPy ndarray. Default: ``None`` .
         dtype (:class:`mindspore.dtype`): Used to indicate the data type of the output Tensor. The argument should
             be defined in `mindspore.dtype`. If it is ``None`` , the data type of the output Tensor will be the same
             as the `input_data`. Default: ``None`` .
-        shape (Union[tuple, list, int]): Used to indicate the shape of the output Tensor.
-
-
-
-            must be set. Default: ``None`` .
+        shape (Union[tuple, list, int, :class:`mindspore.Symbol`]): Used to indicate the shape of the output Tensor.
+            If `input_data` is available, `shape` doesn't need to be set. If ``None`` or `Symbol` exists in `shape` ,
+            a tensor of dynamic shape is created, `input_data` doesn't need to be set; if only integers exist in
+            `shape`, a tensor of static shape is created, `input_data` or `init` must be set. Default: ``None`` .
         init (Initializer): The information of init data.
-
+            `init` is used for delayed initialization in parallel mode, when using init, `dtype` and `shape` must be
             set. Default: ``None`` .
         internal (bool): Whether it is created by the framework.
             ``'True'`` means that the tensor is created by framework.
@@ -142,9 +148,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             Tensor.
 
     Note:
-        The default value None of `input_data` works as a placeholder,
+        The default value ``None`` of `input_data` works as a placeholder,
+        it does not mean that we can create a NoneType
         Tensor.
-        Tensor with shape contains 0 is not fully tested and supported.
+        Tensor with `shape` contains 0 is not fully tested and supported.
 
     Examples:
         >>> import numpy as np
@@ -200,6 +207,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def __init__(self, input_data=None, dtype=None, shape=None, init=None, internal=False, const_arg=False):
         self.init_finished = False
+        if isinstance(input_data, (Tensor, Tensor_)) and dtype is not None:
+            logger.info("It is suggested to use 'Tensor.astype()' to convert the dtype of a Tensor.")
+            _cast = tensor_operator_registry.get("cast")
+            input_data = _cast(input_data, dtype)
+
         if is_stub_tensor(input_data):
             input_data = input_data.stub_sync()
 
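This hunk implements the Warning added to the class docstring: constructing a Tensor from an existing Tensor plus a dtype now logs an INFO hint and casts internally, while Tensor.astype() remains the documented route. A minimal usage sketch:

import numpy as np
import mindspore as ms

x = ms.Tensor(np.array([1.0, 2.0], np.float32))
y = x.astype(ms.float16)            # recommended conversion
z = ms.Tensor(x, dtype=ms.float16)  # still works in 2.3.0, but logs a hint and casts
print(y.dtype, z.dtype)             # Float16 Float16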
@@ -218,8 +230,16 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         if isinstance(input_data, np_types):
             input_data = np.array(input_data)
 
-        if isinstance(shape, numbers.Number):
-            shape = (shape,)
+        if shape is not None:
+            if isinstance(shape, numbers.Number):
+                shape = (shape,)
+            elif isinstance(shape, Symbol):
+                self.symbolic_shape = [shape]
+                shape = (None,)
+            elif isinstance(shape, (list, tuple)) and any(isinstance(s, Symbol) for s in shape):
+                self.symbolic_shape = [item.to_dict() if isinstance(item, Symbol) else item for item in shape]
+                shape_without_symbol = (None if isinstance(item, Symbol) else item for item in shape)
+                shape = list(shape_without_symbol) if isinstance(shape, list) else tuple(shape_without_symbol)
 
         _check_tensor_input(input_data, dtype, shape, init)
 
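These branches give Tensor its symbolic-shape support: a Symbol (or None) entry makes the corresponding dimension dynamic and is recorded in symbolic_shape. A hedged sketch of the intended use, assuming Symbol is re-exported at the package top level as in the 2.3 docs:

import mindspore as ms
from mindspore import Symbol, Tensor

# A Symbol or None entry turns that dimension dynamic; such placeholder
# tensors carry no data and are typically fed to Cell.set_inputs() for
# dynamic-shape compilation. The constraint kwargs (divisor etc.) follow
# the mindspore.Symbol API added in this release.
s = Symbol(divisor=8)
dyn = Tensor(shape=[None, s], dtype=ms.float32)
print(dyn.shape)  # dynamic dims are reported as -1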
@@ -258,6 +278,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self.slice_num_of_persistent_data_ = None
         self.slice_shape_of_persistent_data_ = None
 
+        # the auto gradient information
+        self._grad = None
+        self._grad_fn = None
+        self._requires_grad = False
+        self._retain_grad = False
+
     @classmethod
     def __subclasshook__(cls, sub):
         """
@@ -295,19 +321,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def __eq__(self, other):
         if not isinstance(other, (int, float, Tensor)):
             return False
-        # bool type is not supported for `Equal` operator in backend.
-        if self.dtype == mstype.bool_ or (isinstance(other, Tensor) and other.dtype == mstype.bool_):
-            if isinstance(other, Tensor):
-                return Tensor(np.array(self.asnumpy() == other.asnumpy()))
-            return Tensor(np.array(self.asnumpy() == other))
         return tensor_operator_registry.get('__eq__')(self, other)
 
     def __ne__(self, other):
         if not isinstance(other, (int, float, Tensor)):
             return True
-        # bool type is not supported for `NotEqual` operator in backend.
-        if self.dtype == mstype.bool_ or (isinstance(other, Tensor) and other.dtype == mstype.bool_):
-            return Tensor(np.array(self.asnumpy() != other.asnumpy()))
         return tensor_operator_registry.get('__ne__')(self, other)
 
     def __hash__(self):
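With the NumPy fallback gone, equality on bool tensors now dispatches to the registered backend op like every other dtype. A quick check:

import mindspore as ms

a = ms.Tensor([True, False])
b = ms.Tensor([True, True])
print(a == b)  # [ True False], computed by the backend Equal op even for bool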
@@ -322,7 +340,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         return out
 
     def __round__(self):
-        out = tensor_operator_registry.get('round')()(self)
+        out = tensor_operator_registry.get('round')(self)
         return out
 
     def __bool__(self):
@@ -360,7 +378,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         return self
 
     def __abs__(self):
-        self._init_check()
         return tensor_operator_registry.get('abs')(self)
 
     def __add__(self, other):
@@ -488,8 +505,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def __str__(self):
         if self.dtype == mstype.type_none:
             return "Unknown Tensor type!"
-        if self.dtype == mstype.bfloat16:
-            return str(self.float().asnumpy())
         return str(self.asnumpy())
 
     def __getstate__(self):
@@ -509,6 +524,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return self._shape
 
+    @shape.setter
+    def shape(self, shape_value):
+        r"""
+        Set the shape value.
+        """
+        self._shape = shape_value
+
     @property
     def dtype(self):
         """Return the dtype of the tensor (:class:`mindspore.dtype`)."""
@@ -544,6 +566,83 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return len(self._shape)
 
+    @property
+    def grad(self):
+        r"""
+        Get the gradient value.
+        """
+        return self._grad
+
+    @grad.setter
+    def grad(self, grad):
+        r"""
+        Set the gradient value.
+        """
+        self._grad = grad
+
+    @property
+    def grad_fn(self):
+        r"""
+        The function for backward.
+        """
+        return self._grad_fn
+
+    @grad_fn.setter
+    def grad_fn(self, grad_fn):
+        r"""
+        Set the function for backward.
+        """
+        self._grad_fn = grad_fn
+
+    @property
+    def is_leaf(self):
+        r"""
+        Whether the stub tensor is leaf.
+        They will be a leaf if they have requires_grad and requires_grad is False,
+        Or they were created by user.
+        """
+        return self._requires_grad is False or self._grad_fn is None
+
+    @property
+    def requires_grad(self):
+        r"""
+        Whether the stub tensor need requires grad.
+        """
+        return self._requires_grad
+
+    @requires_grad.setter
+    def requires_grad(self, requires_grad):
+        r"""
+        Mark the stub tensor whether need requires gradient.
+        """
+        self._requires_grad = requires_grad
+
+    def retain_grad(self):
+        r"""
+        Enable the stub tensor which is not non-leaf to have the grad during backward().
+        """
+        if not self._requires_grad:
+            RuntimeError("can't retain_grad on Tensor that has requires_grad = False.")
+        self._retain_grad = self._grad_fn is not None
+
+    @property
+    def retains_grad(self):
+        r"""
+        Is True if the stub tensor is non-leaf and its grad is enabled to be populated during backward().
+        """
+        return self._retain_grad
+
+    def backward(self, grad=None):
+        r"""
+        Calculate the gradient.
+        """
+        if grad is None:
+            grad = Tensor(np.ones(self.shape), self.dtype)
+        if self._grad_fn is not None:
+            self._grad_fn.apply(grad)
+        elif self._requires_grad:
+            self._grad = grad
+
     @property
     def H(self):
         """
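Together with the fields initialized in the previous hunk, this adds a PyTorch-style autograd surface to Tensor. Note that retain_grad constructs a RuntimeError without raising it in this release, so that guard is effectively a no-op. A hedged sketch of the leaf-tensor path, which is pure Python here; how grad_fn gets populated is up to the PyNative runtime:

import numpy as np
import mindspore as ms

x = ms.Tensor(np.ones((2, 2), np.float32))
x.requires_grad = True
print(x.is_leaf)   # True: user-created, no grad_fn yet
x.backward()       # default gradient is ones_like(x), per the code above
print(x.grad)      # [[1. 1.]
                   #  [1. 1.]]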
@@ -644,6 +743,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1 3]
          [2 4]]
         """
+        if self.ndim <= 1:
+            return self
         return self.transpose()
 
     @staticmethod
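The T property now returns 0-D and 1-D tensors unchanged instead of dispatching a transpose, matching NumPy semantics for 1-D arrays:

import mindspore as ms

v = ms.Tensor([1, 2, 3])
print(v.T.shape)  # (3,): 1-D input is returned unchanged in 2.3.0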
@@ -710,28 +811,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.arccosh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('acosh')(self)
 
     def arcsin(self):
         r"""
         For details, please refer to :func:`mindspore.ops.arcsin`.
         """
-        self._init_check()
         return tensor_operator_registry.get('asin')(self)
 
     def arctan(self):
         r"""
         For details, please refer to :func:`mindspore.ops.arctan`.
         """
-        self._init_check()
         return tensor_operator_registry.get('atan')(self)
 
     def arctan2(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.arctan2`.
         """
-        self._init_check()
         return tensor_operator_registry.get('atan2')(self, other)
 
     def cauchy(self, median=0.0, sigma=1.0):
@@ -766,7 +863,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[8.79836142e-01, 9.37541723e-01]])
 
         """
-        self._init_check()
         out = tensor_operator_registry.get('cauchy')(list(self.shape), median, sigma)()
         return out.astype(self.dtype)
 
@@ -804,7 +900,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1.2788825 2.3305743]
         [14.944194 0.16303174]]
         """
-        self._init_check()
         return tensor_operator_registry.get('log_normal')(mean, std)(self)
 
     @jit_forbidden_register
@@ -837,29 +932,23 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.bincount`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bincount')(self, weights, minlength)
 
     def chunk(self, chunks, axis=0):
         r"""
         For details, please refer to :func:`mindspore.ops.chunk`.
         """
-        self._init_check()
         return tensor_operator_registry.get('chunk')(self, chunks, axis)
 
     def item(self, index=None):
         """
         Get the item at the specified index of the tensor.
 
-        Note:
-            Tensor.item returns a Tensor scalar instead of a Python scalar. And if the tensor is a Tensor scalar,
-            Tensor.item will return the numpy.ndarray.
-
         Args:
             index (Union[None, int, tuple(int)]): The index in Tensor. Default: ``None``.
 
         Returns:
-            A
+            A scalar, type is defined by the dtype of the Tensor.
 
         Raises:
             ValueError: If the length of the `index` is not equal to self.ndim.
@@ -877,7 +966,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(x.item())
         1.2
         """
-
+
+        if index is not None:
+            output = self.asnumpy().item(index)
+        else:
+            output = self.asnumpy().item()
         return output
 
     def itemset(self, *args):
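item() is now a thin wrapper over numpy.ndarray.item, so it returns a Python scalar (as the rewritten Returns section says) rather than the Tensor/ndarray results described in the deleted Note:

import mindspore as ms

t = ms.Tensor([[1, 2, 3], [4, 5, 6]])
v = t.item((1, 2))
print(v, type(v))  # 6 <class 'int'>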
@@ -936,7 +1029,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(x.get_bytes())
         b'\x01\x00\x02\x00\x03\x00'
         """
-        self._init_check()
         return Tensor_.get_bytes(self)
 
     def asnumpy(self):
@@ -958,7 +1050,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(y)
         [11. 2.]
         """
-        self._init_check()
+        if self.has_init:
+            self.init_data()
         return Tensor_.asnumpy(self)
 
     def numpy(self):
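The lazy-initialization check moves into asnumpy() itself (via has_init/init_data), which is why the standalone self._init_check() guard disappears from dozens of methods throughout this diff. A minimal sketch of the path it covers:

import mindspore as ms
from mindspore.common.initializer import One

t = ms.Tensor(shape=(2, 3), dtype=ms.float32, init=One())  # holds no data yet
arr = t.asnumpy()  # 2.3.0: init_data() is triggered here automatically
print(arr)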
@@ -1002,21 +1095,18 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.slice_scatter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('slice_scatter')(self, src, axis, start, end, step)
 
     def select_scatter(self, src, axis, index):
         """
         For details, please refer to :func:`mindspore.ops.select_scatter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('select_scatter')(self, src, axis, index)
 
     def histc(self, bins=100, min=0., max=0.):
         """
         For details, please refer to :func:`mindspore.ops.histc`.
         """
-        self._init_check()
         validator.check_value_type('min', min, (int, float,), 'Tensor.histc')
         validator.check_value_type('max', max, (int, float,), 'Tensor.histc')
         return tensor_operator_registry.get('histc')(self, bins, float(min), float(max))
@@ -1025,7 +1115,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.geqrf`.
         """
-        self._init_check()
         return tensor_operator_registry.get('geqrf')(self)
 
     def slice_shape_of_persistent_data(self):
@@ -1067,14 +1156,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> from mindspore import Tensor, ops
         >>> x = Tensor([[1, 2, 3], [4, 5, 6]], dtype=ms.float32)
         >>> y = ops.transpose(x, (1, 0))
-        >>> y.contiguous()
-        >>>
-
-        [[1. 2. 3.]
-        [4. 5. 6.]]
+        >>> z = y.contiguous()
+        >>> print(z.is_contiguous())
+        True
         """
-
-        return self
+        return tensor_operator_registry.get('contiguous')(self)
 
     def is_contiguous(self):
         """
@@ -1094,6 +1180,95 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return Tensor_.is_contiguous(self)
 
+    def stride(self, dim=None):
+        """
+        The stride to jump from one element to the next in the input dim.
+        When no parameters are passed in, a list of stride for all dimensions is returned.
+
+        Args:
+            dim (int): The dim of stride from one element to the next.
+
+        Returns:
+            Int, the stride of tensor.
+
+        Raises:
+            TypeError: `dim` is not an int.
+
+        Examples:
+            >>> import mindspore as ms
+            >>> x = ms.Tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], dtype=ms.float32)
+            >>> x.stride()
+            [5, 1]
+        """
+        stride = Tensor_.stride(self)
+        if dim is None:
+            return stride
+        return stride[dim]
+
+    def storage_offset(self):
+        """
+        Tensor's offset in the underlying storage in terms of the number of storage elements.
+
+        Returns:
+            int, tensor's offset in the underlying storage in terms of number of storage elements.
+
+        Examples:
+            >>> import mindspore as ms
+            >>> x = ms.Tensor([1, 2, 3, 4, 5], dtype=ms.float32)
+            >>> ret = x.storage_offset()
+            >>> print(ret)
+            0
+        """
+        return Tensor_.storage_offset(self)
+
+    def register_hook(self, hook_fn):
+        """
+        Registers a backward hook for tensor.
+
+        Note:
+            - The `register_backward_hook(hook_fn)` does not work in graph mode or functions decorated with 'jit'.
+            - The 'hook_fn' must be defined as the following code. `grad` is the gradient passed to the tensor,
+              which may be modified by returning a new output gradient.
+            - The 'hook_fn' should have the following signature:
+              hook_fn(grad) -> New output gradient, but can not return None or not set return value.
+
+        Args:
+            hook_fn (function): Python function. Tensor backward hook function.
+
+        Returns:
+            A handle corresponding to the `hook_fn` . The handle can be used to remove the added `hook_fn` by calling
+            `handle.remove()` .
+
+        Raises:
+            TypeError: If the `hook_fn` is not a function of python.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import mindspore as ms
+            >>> from mindspore import Tensor
+            >>> ms.set_context(mode=ms.PYNATIVE_MODE)
+            >>> def hook_fn(grad):
+            ...     return grad * 2
+            ...
+            >>> def hook_test(x, y):
+            ...     z = x * y
+            ...     z.register_hook(hook_fn)
+            ...     z = z * y
+            ...     return z
+            ...
+            >>> ms_grad = ms.grad(hook_test, grad_position=(0,1))
+            >>> output = ms_grad(Tensor(1, ms.float32), Tensor(2, ms.float32))
+            >>> print(output)
+            (Tensor(shape=[], dtype=Float32, value=8), Tensor(shape=[], dtype=Float32, value=6))
+        """
+        if not check_hook_fn("register_hook", hook_fn):
+            return _TensorHookHandle()
+        handle = _TensorHookHandle()
+        handle.id = Tensor_.register_hook(self, hook_fn)
+        return handle
+
     def flush_from_cache(self):
         """
         Flush cache data to host if tensor is cache enable.
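For a contiguous tensor, the reported strides are the row-major element strides, stride[i] = prod(shape[i+1:]). A quick cross-check against the docstring example above:

import mindspore as ms

x = ms.Tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], dtype=ms.float32)
print(x.stride())                # [5, 1]: row-major element strides
print(x.stride(0), x.stride(1))  # 5 1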
@@ -1106,35 +1281,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(y)
         None
         """
-        self._init_check()
         Tensor_._flush_from_cache(self)
 
     def addcdiv(self, tensor1, tensor2, value=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addcdiv`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('addcdiv')()(self, tensor1, tensor2, value)
+        return tensor_operator_registry.get('addcdiv')(self, tensor1, tensor2, value)
 
     def addcmul(self, tensor1, tensor2, value=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addcmul`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('addcmul')()(self, tensor1, tensor2, value)
+        return tensor_operator_registry.get('addcmul')(self, tensor1, tensor2, value)
 
     def add(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.add`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('add')()(self, other)
+        return tensor_operator_registry.get('add')(self, other)
 
     def subtract(self, other, *, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.subtract`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sub')(self, alpha * other)
 
     def true_divide(self, value):
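The pattern in this hunk repeats across the file: registry entries used to be Primitive factories that were instantiated and then applied, get('add')()(self, other), and are now functional ops called directly, get('add')(self, other). The registry itself is internal, but the public ops API exposes the same two styles:

import mindspore as ms
from mindspore import ops

x = ms.Tensor([1.0, 2.0])
y = ms.Tensor([3.0, 4.0])
old_style = ops.Add()(x, y)  # Primitive class: instantiate, then apply
new_style = ops.add(x, y)    # functional op: apply directly
print(old_style, new_style)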
@@ -1142,7 +1312,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Alias for Tensor.div() with :math:`rounding\_mode=None`.
         For details, please refer to :func:`mindspore.ops.div`.
         """
-        self._init_check()
         return tensor_operator_registry.get('div')(self, value, rounding_mode=None)
 
     def triu(self, diagonal=0):
@@ -1153,7 +1322,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         This is an experimental API that is subject to change or deletion.
 
         """
-        self._init_check()
         validator.check_value_type('diagonal', diagonal, [int], 'triu')
         return tensor_operator_registry.get('triu')(self, diagonal)
 
@@ -1161,65 +1329,56 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.addbmm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('addbmm')(self, batch1, batch2, beta=beta, alpha=alpha)
 
     def addmm(self, mat1, mat2, *, beta=1, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addmm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('addmm')(self, mat1, mat2, beta=beta, alpha=alpha)
 
     def addr(self, vec1, vec2, beta=1, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addr`.
         """
-        self._init_check()
         return tensor_operator_registry.get('addr')(self, vec1, vec2, beta=beta, alpha=alpha)
 
     def adjoint(self):
         r"""
         For details, please refer to :func:`mindspore.ops.adjoint`.
         """
-        self._init_check()
         return tensor_operator_registry.get('adjoint')(self)
 
     def all(self, axis=None, keep_dims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.all`.
         """
-        self._init_check()
         return tensor_operator_registry.get('all')(self, axis, keep_dims)
 
     def angle(self):
         r"""
         For details, please refer to :func:`mindspore.ops.angle`.
         """
-        self._init_check()
         return tensor_operator_registry.get('angle')(self)
 
     def any(self, axis=None, keep_dims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.any`.
         """
-        self._init_check()
         if axis is None:
             axis = ()
-        return tensor_operator_registry.get('any')(
+        return tensor_operator_registry.get('any')(self, axis, keep_dims)
 
     def atan2(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.atan2`.
         """
-        self._init_check()
         return tensor_operator_registry.get('atan2')(self, other)
 
     def baddbmm(self, batch1, batch2, beta=1, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.baddbmm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('baddbmm')(self, batch1, batch2, beta=beta, alpha=alpha)
 
     def view(self, *shape):
@@ -1243,7 +1402,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [3. 2.]
         [3. 4.]]
         """
-        self._init_check()
         if not shape:
             raise ValueError("The shape variable should not be empty")
         if isinstance(shape[0], tuple):
@@ -1277,7 +1435,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output)
         [1. 2. 3. 2. 3. 4.]
         """
-        self._init_check()
         if not isinstance(other, (Tensor, Tensor_)):
             raise TypeError(f"For view_as, the input other must be a Tensor, but got {type(other)}")
         return self.view(other.shape)
@@ -1286,42 +1443,36 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.t`.
         """
-        self._init_check()
         return tensor_operator_registry.get("t")(self)
 
     def bitwise_and(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_and`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bitwise_and')(self, other)
 
     def bitwise_or(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_or`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bitwise_or')(self, other)
 
     def bitwise_xor(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_xor`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bitwise_xor')(self, other)
 
     def bitwise_left_shift(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_left_shift`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bitwise_left_shift')(self, other)
 
     def bitwise_right_shift(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_right_shift`.
         """
-        self._init_check()
         _cast = tensor_operator_registry.get('cast')
         other = _cast(other, self.dtype)
         return tensor_operator_registry.get('bitwise_right_shift')(self, other)
@@ -1330,50 +1481,43 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.scatter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('scatter')(self, axis, index, src)
 
     def scatter_mul(self, indices, updates):
         """
         For details, please refer to :func:`mindspore.ops.scatter_mul`.
         """
-        self._init_check()
         return tensor_operator_registry.get('tensor_scatter_mul')(self, indices, updates)
 
     def scatter_div(self, indices, updates):
         """
         For details, please refer to :func:`mindspore.ops.scatter_div`.
         """
-        self._init_check()
         return tensor_operator_registry.get('tensor_scatter_div')(self, indices, updates)
 
     def ger(self, vec2):
         """
         For details, please refer to :func:`mindspore.ops.ger`.
         """
-        self._init_check()
         return tensor_operator_registry.get('ger')(self, vec2)
 
     def gt(self, x):
         """
         For details, please refer to :func:`mindspore.ops.gt`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('gt')()(self, x)
+        return tensor_operator_registry.get('gt')(self, x)
 
     def ge(self, x):
         """
         For details, please refer to :func:`mindspore.ops.ge`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('ge')()(self, x)
+        return tensor_operator_registry.get('ge')(self, x)
 
     def broadcast_to(self, shape):
         """
         For details, please refer to :func:`mindspore.ops.broadcast_to`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('broadcast_to')(shape)(self)
+        return tensor_operator_registry.get('broadcast_to')(self, shape)
 
     def expand_as(self, x):
         """
@@ -1397,84 +1541,72 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1. 2. 3.]
         [1. 2. 3.]]
         """
-        self._init_check()
-        return tensor_operator_registry.get('broadcast_to')(x.shape)(self)
+        return tensor_operator_registry.get('broadcast_to')(self, x.shape)
 
     def exp(self):
         """
         For details, please refer to :func:`mindspore.ops.exp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('exp')(self)
 
     def real(self):
         r"""
         For details, please refer to :func:`mindspore.ops.real`.
         """
-        self._init_check()
         return tensor_operator_registry.get('real')(self)
 
     def rsqrt(self):
         r"""
         For details, please refer to :func:`mindspore.ops.rsqrt`.
         """
-        self._init_check()
         return tensor_operator_registry.get('rsqrt')(self)
 
     def reciprocal(self):
         r"""
         For details, please refer to :func:`mindspore.ops.reciprocal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('reciprocal')(self)
 
     def sqrt(self):
         """
         For details, please refer to :func:`mindspore.ops.sqrt`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sqrt')(self)
 
     def square(self):
         """
         For details, please refer to :func:`mindspore.ops.square`.
         """
-        self._init_check()
         return tensor_operator_registry.get('square')(self)
 
     def sub(self, y):
         r"""
         For details, please refer to :func:`mindspore.ops.sub`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sub')(self, y)
 
     def tan(self):
         """
         For details, please refer to :func:`mindspore.ops.tan`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('tan')()(self)
+        return tensor_operator_registry.get('tan')(self)
 
     def tanh(self):
         r"""
         For details, please refer to :func:`mindspore.ops.tanh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('tanh')(self)
 
     def cosh(self):
         r"""
         For details, please refer to :func:`mindspore.ops.cosh`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('cosh')()(self)
+        return tensor_operator_registry.get('cosh')(self)
 
     def acos(self):
         r"""
         For details, please refer to :func:`mindspore.ops.acos`.
         """
-        self._init_check()
         return tensor_operator_registry.get('acos')(self)
 
     def arccos(self):
@@ -1487,35 +1619,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.cos`.
         """
-        self._init_check()
         return tensor_operator_registry.get('cos')(self)
 
     def cov(self, *, correction=1, fweights=None, aweights=None):
         r"""
         For details, please refer to :func:`mindspore.ops.cov`.
         """
-        self._init_check()
         return tensor_operator_registry.get('cov')(self, correction=correction, fweights=fweights, aweights=aweights)
 
     def acosh(self):
         """
         For details, please refer to :func:`mindspore.ops.acosh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('acosh')(self)
 
     def asin(self):
         r"""
         For details, please refer to :func:`mindspore.ops.asin`.
         """
-        self._init_check()
         return tensor_operator_registry.get('asin')(self)
 
     def abs(self):
         """
         For details, please refer to :func:`mindspore.ops.abs`.
         """
-        self._init_check()
         return tensor_operator_registry.get('abs')(self)
 
     def absolute(self):
@@ -1528,14 +1655,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.ceil`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('ceil')()(self)
+        return tensor_operator_registry.get('ceil')(self)
 
     def floor(self):
         """
         For details, please refer to :func:`mindspore.ops.floor`.
         """
-        self._init_check()
         return tensor_operator_registry.get('floor')(self)
 
     def floor_divide(self, other):
@@ -1545,21 +1670,18 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         .. warning::
             This is an experimental API that is subject to change or deletion.
         """
-        self._init_check()
         return tensor_operator_registry.get('floor_divide')(self, other)
 
     def lerp(self, end, weight):
         """
         For details, please refer to :func:`mindspore.ops.lerp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('lerp')(self, end, weight)
 
     def negative(self):
         r"""
         For details, please refer to :func:`mindspore.ops.negative`.
         """
-        self._init_check()
         return tensor_operator_registry.get("negative")(self)
 
     # pylint: disable=redefined-builtin
@@ -1567,14 +1689,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.norm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('norm')(self, ord, dim, keepdim, dtype=dtype)
 
     def renorm(self, p, axis, maxnorm):
         """
         For details, please refer to :func:`mindspore.ops.renorm`.
         """
-        self._init_check()
         return tensor_operator_registry.get("renorm")(self, p, axis, maxnorm)
 
     def approximate_equal(self, other, tolerance=1e-5):
@@ -1584,7 +1704,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         validator.check_isinstance("x", self, Tensor)
         validator.check_isinstance("y", other, Tensor)
         validator.check_isinstance("tolerance", tolerance, float)
-        self._init_check()
         input_x = self.copy() if self.dtype == mstype.float32 else self.astype(mstype.float16)
         input_y = other.copy() if other.dtype == mstype.float32 else other.astype(mstype.float16)
         return tensor_operator_registry.get('__lt__')(tensor_operator_registry.get('abs')(
@@ -1595,14 +1714,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.log1p`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log1p')(self)
 
     def logit(self, eps=None):
         r"""
         For details, please refer to :func:`mindspore.ops.logit`.
         """
-        self._init_check()
         if eps is None:
             eps = -1.0
         validator.check_value_type('eps', eps, (float,), 'Tensor.logit')
@@ -1612,14 +1729,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.logaddexp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logaddexp')(self, other)
 
     def logaddexp2(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.logaddexp2`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logaddexp2')(self, other)
 
     def logcumsumexp(self, axis):
@@ -1629,149 +1744,128 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         .. warning::
             This is an experimental API that is subject to change or deletion.
         """
-        self._init_check()
         return tensor_operator_registry.get('logcumsumexp')(self, axis)
 
     def logsumexp(self, axis, keepdims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.logsumexp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logsumexp')(self, axis, keepdims)
 
     def logdet(self):
         r"""
         For details, please refer to :func:`mindspore.ops.logdet`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logdet')(self)
 
     def i0(self):
         r"""
         For details, please refer to :func:`mindspore.ops.i0`.
         """
-        self._init_check()
         return tensor_operator_registry.get('i0')(self)
 
     def isclose(self, x2, rtol=1e-05, atol=1e-08, equal_nan=False):
         """
         For details, please refer to :func:`mindspore.ops.isclose`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isclose')(self, x2, rtol, atol, equal_nan)
 
     def isneginf(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isneginf`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isneginf')(self)
 
     def isposinf(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isposinf`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isposinf')(self)
 
     def isreal(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isreal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isreal')(self)
 
     def isfinite(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isfinite`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('isfinite')()(self)
+        return tensor_operator_registry.get('isfinite')(self)
 
     def is_complex(self):
         r"""
         For details, please refer to :func:`mindspore.ops.is_complex`.
         """
-        self._init_check()
         return tensor_operator_registry.get('is_complex')(self)
 
     def inv(self):
         r"""
         For details, please refer to :func:`mindspore.ops.inv`.
         """
-        self._init_check()
         return tensor_operator_registry.get('inv')(self)
 
     def inverse(self):
         r"""
         For details, please refer to :func:`mindspore.ops.inverse`.
         """
-        self._init_check()
         return tensor_operator_registry.get('inverse')(self)
 
     def invert(self):
         r"""
         For details, please refer to :func:`mindspore.ops.invert`.
         """
-        self._init_check()
         return tensor_operator_registry.get('invert')(self)
 
     def pow(self, exponent):
         r"""
         For details, please refer to :func:`mindspore.ops.pow`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('pow')()(self, exponent)
+        return tensor_operator_registry.get('pow')(self, exponent)
 
     def log(self):
         """
         For details, please refer to :func:`mindspore.ops.log`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log')(self)
 
     def log10(self):
         r"""
         For details, please refer to :func:`mindspore.ops.log10`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log10')(self)
 
     def log2(self):
         r"""
         For details, please refer to :func:`mindspore.ops.log2`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log2')(self)
 
     def mean(self, axis=None, keep_dims=False):
         """
         For details, please refer to :func:`mindspore.ops.mean`.
         """
-        self._init_check()
         return tensor_operator_registry.get('mean')(self, axis, keep_dims)
 
     def amin(self, axis=None, keepdims=False, *, initial=None, where=None):
         """
         For details, please refer to :func:`mindspore.ops.amin`.
         """
-        self._init_check()
         if axis is None:
             axis = ()
         return tensor_operator_registry.get('amin')(self, axis, keepdims, initial=initial, where=where)
 
     def reverse(self, axis):
         """
-        For details, please refer to :func:`mindspore.ops.
+        For details, please refer to :func:`mindspore.ops.flip`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('reverse')(axis)(self)
+        return tensor_operator_registry.get('flip')(self, axis)
 
     def amax(self, axis=None, keepdims=False, *, initial=None, where=None):
         """
         For details, please refer to :func:`mindspore.ops.amax`.
         """
-        self._init_check()
         if axis is None:
             axis = ()
         return tensor_operator_registry.get('amax')(self, axis, keepdims, initial=initial, where=where)
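Tensor.reverse is now just an alias of ops.flip, and its docstring reference changes accordingly:

import mindspore as ms

x = ms.Tensor([[1, 2], [3, 4]])
print(x.reverse(axis=(0,)))  # [[3 4]
                             #  [1 2]], same as ops.flip(x, (0,))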
@@ -1780,28 +1874,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.aminmax`.
         """
-        self._init_check()
         return tensor_operator_registry.get('aminmax')(self, axis=axis, keepdims=keepdims)
 
     def reverse_sequence(self, seq_lengths, seq_dim=0, batch_dim=0):
         """
         For details, please refer to :func:`mindspore.ops.reverse_sequence`.
         """
-        self._init_check()
-        return tensor_operator_registry.get("reverse_sequence")(seq_dim, batch_dim)(self, seq_lengths)
+        return tensor_operator_registry.get("reverse_sequence")(self, seq_lengths, seq_dim, batch_dim)
 
-    def prod(self, axis=None, keep_dims=False):
+    def prod(self, axis=None, keep_dims=False, dtype=None):
         """
         For details, please refer to :func:`mindspore.ops.prod`.
         """
-        self._init_check()
-        return tensor_operator_registry.get('prod')(self, axis, keep_dims)
+        return tensor_operator_registry.get('prod')(self, axis, keep_dims, dtype)
 
     def select(self, condition, y):
         r"""
         For details, please refer to :func:`mindspore.ops.select`.
         """
-        self._init_check()
         if not isinstance(condition, Tensor):
             raise TypeError(f"For 'Tensor.select', the argument 'condition' should be Tensor,"
                             f" but got {type(condition)}.")
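prod gains an output dtype parameter, forwarded to ops.prod:

import mindspore as ms

x = ms.Tensor([1, 2, 3, 4], ms.int32)
p = x.prod(dtype=ms.float32)  # dtype is new in 2.3.0
print(p, p.dtype)             # 24.0 Float32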
@@ -1816,7 +1906,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
1816
1906
|
f" then the tensor type should be float32 but got {self.dtype}")
|
|
1817
1907
|
input_y = y
|
|
1818
1908
|
if isinstance(y, (int, float)):
|
|
1819
|
-
input_y = tensor_operator_registry.get('zeros_like')(
|
|
1909
|
+
input_y = tensor_operator_registry.get('zeros_like')(self) + y
|
|
1820
1910
|
if isinstance(y, int):
|
|
1821
1911
|
input_y = tensor_operator_registry.get('cast')(input_y, mstype.int32)
|
|
1822
1912
|
else:
|
|
@@ -1827,22 +1917,46 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
1827
1917
|
r"""
|
|
1828
1918
|
For details, please refer to :func:`mindspore.ops.transpose`.
|
|
1829
1919
|
"""
|
|
1830
|
-
self._init_check()
|
|
1831
1920
|
perm = validator.check_transpose_axis(axes, self.ndim)
|
|
1832
|
-
return tensor_operator_registry.get('transpose')(
|
|
1921
|
+
return tensor_operator_registry.get('transpose')(self, perm)
|
|
1833
1922
|
|
|
1834
1923
|
def col2im(self, output_size, kernel_size, dilation, padding_value, stride):
|
|
1835
1924
|
"""
|
|
1836
1925
|
For details, please refer to :func:`mindspore.ops.col2im`.
|
|
1837
1926
|
"""
|
|
1838
|
-
self._init_check()
|
|
1839
1927
|
return tensor_operator_registry.get('col2im')(self, output_size, kernel_size, dilation, padding_value, stride)
|
|
1840
1928
|
|
|
1841
1929
|
def reshape(self, *shape):
|
|
1930
|
+
r"""
|
|
1931
|
+
Rearranges the input Tensor based on the given `shape` .
|
|
1932
|
+
|
|
1933
|
+
The `shape` can only have one -1 at most, in which case it's inferred from the remaining dimensions and
|
|
1934
|
+
the number of elements in the input.
|
|
1935
|
+
|
|
1936
|
+
Args:
|
|
1937
|
+
shape (Union[int, tuple[int], list[int]]): If `shape` is a tuple or list, its elements should be
|
|
1938
|
+
integers, and only constant value is allowed. i.e., :math:`(y_1, y_2, ..., y_S)`.
|
|
1939
|
+
|
|
1940
|
+
Returns:
|
|
1941
|
+
Tensor, If the given `shape` does not contain -1, the `shape` of tensor is :math:`(y_1, y_2, ..., y_S)`.
|
|
1942
|
+
If the k-th position in the given `shape` is -1, the `shape` of tensor is :math:`(y_1, ..., y_{k-1},
|
|
1943
|
+
\frac{\prod_{i=1}^{R}x_{i}}{y_1\times ...\times y_{k-1}\times y_{k+1}\times...\times y_S} , y_{k+1},
|
|
1944
|
+
..., y_S)`, in where the shape of input tensor is :math:`(x_1, x_2, ..., x_R)`.
|
|
1945
|
+
|
|
1946
|
+
Supported Platforms:
|
|
1947
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1948
|
+
|
|
1949
|
+
Examples:
|
|
1950
|
+
>>> import mindspore
|
|
1951
|
+
>>> import numpy as np
|
|
1952
|
+
>>> from mindspore import Tensor, ops
|
|
1953
|
+
>>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
|
|
1954
|
+
>>> output = input.reshape(3, 2)
|
|
1955
|
+
>>> print(output)
|
|
1956
|
+
[[-0.1 0.3]
|
|
1957
|
+
[ 3.6 0.4]
|
|
1958
|
+
[ 0.5 -3.2]]
|
|
1842
1959
|
"""
|
|
1843
|
-
For details, please refer to :func:`mindspore.ops.reshape`.
|
|
1844
|
-
"""
|
|
1845
|
-
self._init_check()
|
|
1846
1960
|
new_shape = validator.check_reshape_shp(shape)
|
|
1847
1961
|
return tensor_operator_registry.get('reshape')(self, new_shape)
|
|
1848
1962
|
|
|
@@ -1871,7 +1985,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [ 3.6  0.4]
         [ 0.5 -3.2]]
        """
-        self._init_check()
        return tensor_operator_registry.get('reshape')(self, other.shape)

    def ravel(self):
@@ -1881,13 +1994,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        Returns:
            Tensor, a 1-D tensor, containing the same elements of the input.

-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
        See also:
-            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.flatten`: Return a copy of the tensor collapsed into one dimension.

-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
@@ -1897,7 +2009,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        >>> print(output.shape)
        (24,)
        """
-        self._init_check()
        reshape_op = tensor_operator_registry.get('reshape')
        return reshape_op(self, (-1,))

@@ -1905,77 +2016,66 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        For details, please refer to :func:`mindspore.ops.round`.
        """
-
-        return tensor_operator_registry.get('round')()(self)
+        return tensor_operator_registry.get('round')(self)

    def roll(self, shifts, dims):
        """
        For details, please refer to :func:`mindspore.ops.roll`.
        """
-        self._init_check()
        return tensor_operator_registry.get('roll')(shifts, dims)(self)

    def rot90(self, k, dims):
        r"""
        For details, please refer to :func:`mindspore.ops.rot90`.
        """
-        self._init_check()
        return tensor_operator_registry.get('rot90')(self, k, dims)

    def deg2rad(self):
        r"""
        For details, please refer to :func:`mindspore.ops.deg2rad`.
        """
-        self._init_check()
        return tensor_operator_registry.get('deg2rad')(self)

    def dot(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.dot`.
        """
-        self._init_check()
        return tensor_operator_registry.get('dot')(self, other)

    def outer(self, vec2):
        r"""
        For details, please refer to :func:`mindspore.ops.outer`.
        """
-        self._init_check()
        return tensor_operator_registry.get('outer')(self, vec2)

    def rad2deg(self):
        r"""
        For details, please refer to :func:`mindspore.ops.rad2deg`.
        """
-        self._init_check()
        return tensor_operator_registry.get('rad2deg')(self)

    def copysign(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.copysign`.
        """
-        self._init_check()
        return tensor_operator_registry.get('copysign')(self, other)

    def nelement(self):
        r"""
        Alias for :func:`mindspore.Tensor.numel`.
        """
-        self._init_check()
        return tensor_operator_registry.get('nelement')(self)

    def numel(self):
        r"""
        For details, please refer to :func:`mindspore.ops.numel`.
        """
-        self._init_check()
        return tensor_operator_registry.get('numel')(self)

    def permute(self, *axis):
        """
        For details, please refer to :func:`mindspore.ops.permute`.
        """
-        self._init_check()
        perm = validator.check_transpose_axis(axis, self.ndim)
        return tensor_operator_registry.get('permute')(self, perm)

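Most one-line changes in this region follow one pattern: 2.2.x registry entries were factories that had to be instantiated before use (`get('round')()(self)`), while 2.3.0 stores callables invoked directly with the tensor first (`get('round')(self)`). A toy sketch of the two conventions; the registry below is illustrative, not the real `tensor_operator_registry`:

```python
# 2.2.x style: the registry holds a factory; calling it yields the operator.
legacy_registry = {'round': lambda: round}
# 2.3.0 style: the registry holds the callable itself.
functional_registry = {'round': round}

x = 3.7
assert legacy_registry['round']()(x) == functional_registry['round'](x) == 4
```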
@@ -1983,98 +2083,84 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        For details, please refer to :func:`mindspore.ops.positive`.
        """
-        self._init_check()
        return tensor_operator_registry.get("positive")(self)

    def remainder(self, divisor):
        r"""
        For details, please refer to :func:`mindspore.ops.remainder`.
        """
-        self._init_check()
        return tensor_operator_registry.get('remainder')(self, divisor)

    def flatten(self, order='C', *, start_dim=0, end_dim=-1):
        r"""
        For details, please refer to :func:`mindspore.ops.flatten`.
        """
-        self._init_check()
        return tensor_operator_registry.get('flatten')(self, order, start_dim=start_dim, end_dim=end_dim)

    def float_power(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.float_power`.
        """
-        self._init_check()
        return tensor_operator_registry.get('float_power')(self, other)

    def fmax(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.fmax`.
        """
-        self._init_check()
        return tensor_operator_registry.get('fmax')(self, other)

    def fmin(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.fmin`.
        """
-        self._init_check()
        return tensor_operator_registry.get('fmin')(self, other)

    def fmod(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.fmod`.
        """
-        self._init_check()
        return tensor_operator_registry.get('fmod')(self, other)

    def narrow(self, axis, start, length):
        """
        For details, please refer to :func:`mindspore.ops.narrow`.
        """
-        self._init_check()
        return tensor_operator_registry.get('narrow')(self, axis, start, length)

    def swapaxes(self, axis0, axis1):
        """
        For details, please refer to :func:`mindspore.ops.swapaxes`.
        """
-        self._init_check()
        return tensor_operator_registry.get('swapaxes')(self, axis0, axis1)

    def swapdims(self, dim0, dim1):
        """
        For details, please refer to :func:`mindspore.ops.swapdims`.
        """
-        self._init_check()
        return tensor_operator_registry.get('swapdims')(self, dim0, dim1)

    def squeeze(self, axis=None):
        """
        For details, please refer to :func:`mindspore.ops.squeeze`.
        """
-        self._init_check()
        return tensor_operator_registry.get('squeeze')(self, axis)

    def slogdet(self):
        """
        For details, please refer to :func:`mindspore.ops.slogdet`.
        """
-        self._init_check()
        return tensor_operator_registry.get('slogdet')(self)

    def tril(self, diagonal=0):
        """
        For details, please refer to :func:`mindspore.ops.tril`.
        """
-        self._init_check()
        return tensor_operator_registry.get('tril')(self, diagonal)

    def unsqueeze(self, dim):
        """
        For details, please refer to :func:`mindspore.ops.unsqueeze`.
        """
-        self._init_check()
        validator.check_is_int(dim, 'dim')
        validator.check_int_range(dim, -self.ndim - 1, self.ndim + 1, validator.INC_LEFT, 'dim')
        return tensor_operator_registry.get('unsqueeze')(self, dim)
@@ -2083,7 +2169,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        For details, please refer to :func:`mindspore.ops.expand_dims`.
        """
-        self._init_check()
        validator.check_is_int(axis, 'axis')
        validator.check_int_range(axis, -self.ndim - 1, self.ndim + 1, validator.INC_LEFT, 'axis')
        return tensor_operator_registry.get('expand_dims')(self, axis)
@@ -2116,7 +2201,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        >>> print(x.dtype)
        Int32
        """
-        self._init_check()
        dtype = _check_astype_and_convert(dtype)
        if not copy and dtype == self.dtype:
            return self
@@ -2126,7 +2210,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        For details, please refer to :func:`mindspore.ops.argmax`.
        """
-        self._init_check()
        out = tensor_operator_registry.get('argmax')(self, axis, keepdims)
        return out

@@ -2134,7 +2217,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        For details, please refer to :func:`mindspore.ops.argmin`.
        """
-        self._init_check()
        out = tensor_operator_registry.get('argmin')(self, axis, keepdims)
        return out

@@ -2185,7 +2267,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        if self.shape == ():
            return (self, Tensor(0))
-        self._init_check()
        return tensor_operator_registry.get('argmax_with_value')(self, axis, keep_dims)

    def argmin_with_value(self, axis=0, keep_dims=False):
@@ -2233,7 +2314,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        if self.shape == ():
            return (self, Tensor(0))
-        self._init_check()
        return tensor_operator_registry.get('argmin_with_value')(self, axis, keep_dims)

    def cumsum(self, axis=None, dtype=None):
@@ -2275,15 +2355,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        For details, please refer to :func:`mindspore.ops.index_select`.
        """
-        self._init_check()
        return tensor_operator_registry.get('index_select')(self, axis, index)

    def inplace_update(self, v, indices):
        """
        For details, please refer to :func:`mindspore.ops.inplace_update`.
        """
-
-        return tensor_operator_registry.get('inplace_update')()(self, indices, v)
+        return tensor_operator_registry.get('inplace_update')(self, v, indices)

    def copy(self):
        """
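The `inplace_update` fix is easy to miss: besides dropping the factory call, the argument order now matches the method signature (`v` before `indices`). A short hedged sketch, assuming `ops.inplace_update` semantics:

```python
import mindspore as ms
import numpy as np
from mindspore import Tensor

x = Tensor(np.array([[1., 2.], [3., 4.], [5., 6.]]), ms.float32)
v = Tensor(np.array([[0.5, 1.0]]), ms.float32)
# Row 0 of x is replaced by v; note v comes before indices in 2.3.0.
print(x.inplace_update(v, (0,)))
```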
@@ -2357,15 +2435,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        Raises:
            TypeError: If arguments have types not specified above.

-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
        See also:
-            :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
-
-            :func:`mindspore.Tensor.
+            - :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
+            - :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
+            - :func:`mindspore.Tensor.min`: Return the minimum of a tensor or minimum along an axis.

-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
@@ -2380,7 +2456,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        >>> print(indices)
        [1 1]
        """
-        self._init_check()
        if isinstance(axis, (list, tuple)):
            reduce_ = tensor_operator_registry.get("reduce")
            reduce_max = tensor_operator_registry.get("reduce_max")
@@ -2428,15 +2503,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        Raises:
            TypeError: If arguments have types not specified above.

-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
        See also:
-            :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
-
-            :func:`mindspore.Tensor.
+            - :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
+            - :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
+            - :func:`mindspore.Tensor.max`: Return the maximum of a tensor or maximum along an axis.

-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
@@ -2460,12 +2533,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        >>> print(indices)
        [0 0]
        """
-        self._init_check()
        if isinstance(axis, (list, tuple)):
            reduce_ = tensor_operator_registry.get("reduce")
            reduce_min = tensor_operator_registry.get("reduce_min")
            minimum = tensor_operator_registry.get("minimum")
-            return reduce_(self, reduce_min(keepdims), cmp_fn=minimum
+            return reduce_(self, reduce_min(keepdims), cmp_fn=minimum, axis=axis, keepdims=keepdims,
                           initial=initial, where=where)
        values, indices = tensor_operator_registry.get("min")(self, axis, keepdims, initial=initial, where=where)
        if not return_indices:
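The completed `reduce_` call means a tuple `axis` now reaches the generic reduction path with `axis` and `keepdims` forwarded. A minimal sketch (hypothetical values):

```python
import mindspore as ms
import numpy as np
from mindspore import Tensor

x = Tensor(np.arange(6).reshape(2, 3), ms.float32)
# Tuple axes are handled by the reduce_ helper shown in the hunk above.
print(x.min(axis=(0, 1)))  # 0.0
```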
@@ -2476,7 +2548,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        For details, please refer to :func:`mindspore.ops.scatter_add`.
        """
-        self._init_check()
        return tensor_operator_registry.get("tensor_scatter_add")(self, indices, updates)

    def scatter_sub(self, indices, updates):
@@ -2489,7 +2560,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

        The last axis of `indices` is the depth of each index vector. For each index vector,
        there must be a corresponding value in `updates`. The shape of `updates` should be
-        equal to the shape of `self[indices]`. For more details, see
+        equal to the shape of `self[indices]`. For more details, see Examples.

        Note:
            On GPU, if some values of the `indices` are out of bound, instead of raising an index error,
@@ -2524,28 +2595,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        [[-3.3000002  0.3        3.6      ]
         [ 0.4        0.5       -3.2      ]]
        """
-        self._init_check()
        return tensor_operator_registry.get('tensor_scatter_sub')(self, indices, updates)

    def scatter_min(self, indices, updates):
        """
        For details, please refer to :func:`mindspore.ops.scatter_min`.
        """
-
-        return tensor_operator_registry.get('tensor_scatter_min')()(self, indices, updates)
+        return tensor_operator_registry.get('tensor_scatter_min')(self, indices, updates)

    def scatter_max(self, indices, updates):
        """
        For details, please refer to :func:`mindspore.ops.scatter_max`.
        """
-
-
+        return tensor_operator_registry.get('tensor_scatter_max')(self, indices, updates)
+
+    def softmax(self, axis, dtype=None):
+        """
+        For details, please refer to :func:`mindspore.ops.softmax`.
+        """
+        return tensor_operator_registry.get('softmax')(self, axis, dtype=dtype)

    def fill(self, value):
        """
        `Tensor.fill` is deprecated, please use `ops.fill` instead.
        """
-        self._init_check()
        if value is None:
            if self.dtype not in (mstype.float16, mstype.float32, mstype.float64):
                raise TypeError("For 'Tensor.fill', if the argument 'value' is None, the type of the original "
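`softmax` is a new convenience wrapper around the functional op. A short usage sketch, assuming `ops.softmax` semantics:

```python
import mindspore as ms
import numpy as np
from mindspore import Tensor

x = Tensor(np.array([1.0, 2.0, 3.0]), ms.float32)
# Equivalent to ops.softmax(x, axis=-1); dtype, when given, casts the result.
print(x.softmax(axis=-1))  # approximately [0.09 0.245 0.665]
```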
@@ -2558,7 +2631,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        `Tensor.fills` is deprecated, please use `ops.fill` instead.
        """
-        self._init_check()
        return tensor_operator_registry.get('fills')(self, value)

    def fill_diagonal(self, fill_value, wrap=False):
@@ -2600,14 +2672,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [5. 1. 1.]
         [1. 5. 1.]]
        """
-        self._init_check()
        return tensor_operator_registry.get('fill_diagonal')(fill_value, wrap)(self)

    def masked_fill(self, mask, value):
        """
        For details, please refer to :func:`mindspore.ops.masked_fill`.
        """
-        self._init_check()
        if isinstance(value, (float, int)):
            value = tensor_operator_registry.get("scalar_to_tensor")(value, self.dtype)
        if not isinstance(mask, Tensor):
@@ -2663,13 +2733,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        r"""
        For details, please refer to :func:`mindspore.ops.minimum`.
        """
-        return tensor_operator_registry.get('minimum')(
+        return tensor_operator_registry.get('minimum')(self, other)

    def clamp(self, min=None, max=None):
        r"""
        For details, please refer to :func:`mindspore.ops.clamp`.
        """
-        self._init_check()
        return tensor_operator_registry.get('clamp')(self, min, max)

    def clip(self, min=None, max=None):
@@ -2678,10 +2747,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        return self.clamp(min, max)

-    def _init_check(self):
-        if self.has_init:
-            self.init_data()
-
    def init_data(self, slice_index=None, shape=None, opt_shard_group=None):
        """
        Get the tensor format data of this Tensor.
@@ -2698,7 +2763,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            opt_shard_group(str): Optimizer shard group which is used in auto or semi auto parallel mode
                to get one shard of a parameter's slice. For more information about optimizer parallel, please refer to:
                `Optimizer Parallel
-                <https://www.mindspore.cn/tutorials/experts/en/
+                <https://www.mindspore.cn/tutorials/experts/en/master/parallel/optimizer_parallel.html>`_.
                Default: ``None``.

        Returns:
@@ -2776,12 +2841,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
                if slice_num_of_persistent_data > 1:
                    self.assign_value(Tensor_.persistent_data_from_numpy(data, slice_num_of_persistent_data))
                else:
-
-                    # The dtype of data is np.float32 when mstype is bfloat16,
-                    # so we create tensor_ by init func instead of asnumpy
-                    self.assign_value(Tensor_(data, self.dtype))
-            else:
-                self.assign_value(Tensor_.from_numpy(data))
+                    self.assign_value(Tensor_.from_numpy(data))
        return self

    def resize(self, *new_shape):
@@ -2803,13 +2863,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        Returns:
            Tensor.

-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
        See also:
-            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.repeat`: Repeat elements of a tensor.

-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
@@ -2836,7 +2895,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        diff_size = new_size - cur_size
        if diff_size > 0:
            pad_val = tensor_operator_registry.get('fill')(self.dtype, (diff_size,), 0)
-            res = tensor_operator_registry.get('concatenate')(
+            res = tensor_operator_registry.get('concatenate')((flattened, pad_val), 0)
        else:
            res = flattened[:new_size]
        return res.reshape(new_shape)
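With the `concatenate` call completed, growing a tensor via `resize` pads the flattened data with zeros before reshaping. A hedged sketch with hypothetical values:

```python
import mindspore as ms
import numpy as np
from mindspore import Tensor

x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), ms.float32)
# 9 > 6 elements, so the flattened data is padded with three zeros.
print(x.resize(3, 3))
# [[1. 2. 3.]
#  [4. 5. 6.]
#  [0. 0. 0.]]
```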
@@ -2845,70 +2904,60 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        r"""
        For details, please refer to :func:`mindspore.ops.det`.
        """
-        self._init_check()
        return tensor_operator_registry.get('det')(self)

    def diff(self, n=1, axis=-1, prepend=None, append=None):
        r"""
        For details, please refer to :func:`mindspore.ops.diff`.
        """
-        self._init_check()
        return tensor_operator_registry.get('diff')(self, n, axis, prepend, append)

    def frac(self):
        r"""
        For details, please refer to :func:`mindspore.ops.frac`.
        """
-        self._init_check()
        return tensor_operator_registry.get('frac')(self)

    def argwhere(self):
        r"""
        For details, please refer to :func:`mindspore.ops.argwhere`.
        """
-        self._init_check()
        return tensor_operator_registry.get('argwhere')(self)

    def moveaxis(self, source, destination):
        r"""
        For details, please refer to :func:`mindspore.ops.moveaxis`.
        """
-        self._init_check()
        return tensor_operator_registry.get('moveaxis')(self, source, destination)

    def movedim(self, source, destination):
        r"""
        For details, please refer to :func:`mindspore.ops.movedim`.
        """
-        self._init_check()
        return tensor_operator_registry.get('movedim')(self, source, destination)

    def digamma(self):
        r"""
        For details, please refer to :func:`mindspore.ops.digamma`.
        """
-        self._init_check()
        return tensor_operator_registry.get('digamma')(self)

    def lgamma(self):
        r"""
        For details, please refer to :func:`mindspore.ops.lgamma`.
        """
-        self._init_check()
        return tensor_operator_registry.get('lgamma')(self)

    def diagonal(self, offset=0, axis1=0, axis2=1):
        """
        For details, please refer to :func:`mindspore.ops.diagonal`.
        """
-        self._init_check()
        return tensor_operator_registry.get('diagonal')(self, offset, axis1, axis2)

    def diagonal_scatter(self, src, offset=0, dim1=0, dim2=1):
        r"""
        For details, please refer to :func:`mindspore.ops.diagonal_scatter`.
        """
-        self._init_check()
        return tensor_operator_registry.get('diagonal_scatter')(self, src, offset, dim1, dim2)

    def trace(self, offset=0, axis1=0, axis2=1, dtype=None):
@@ -2933,12 +2982,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        Raises:
            ValueError: If the input tensor has less than two dimensions.

+        See also:
+            - :func:`mindspore.Tensor.diagonal`: Return specified diagonals.
+
        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

-        See also:
-            :func:`mindspore.Tensor.diagonal`: Return specified diagonals.
-
        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
@@ -2947,7 +2996,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        3.0
        """
        if offset == 0 and axis1 == 0 and axis2 == 1 and dtype is None:
-            self._init_check()
            return tensor_operator_registry.get('trace')(self)
        d = self.diagonal(offset, axis1=axis1, axis2=axis2)
        shape = d.shape
@@ -3020,7 +3068,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        shape_indices = tuple(size_indices if i == axis else 1 for i in range(ndim))
        indices = indices.reshape(shape_indices)
        shape_indices = shape_ni + (indices.size,) + shape_nk
-        indices = tensor_operator_registry.get('broadcast_to')(shape_indices)
+        indices = tensor_operator_registry.get('broadcast_to')(indices, shape_indices)

        res = tensor_operator_registry.get('gather_d')(a, axis, indices)
        return res.reshape(shape_out)
@@ -3065,7 +3113,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        if isinstance(choices, Tensor):
            shape_choice = validator.infer_out_shape(self.shape, choices.shape[1:])
-            choices = tensor_operator_registry.get('broadcast_to')((choices.shape[0],) + shape_choice)
+            choices = tensor_operator_registry.get('broadcast_to')(choices, (choices.shape[0],) + shape_choice)
        else:
            # broadcasts choices to the same shape if choices is a sequence
            choicelist = []
@@ -3078,14 +3126,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            shape_choice = validator.infer_out_shape(self.shape, *shapes)
            tmp = []
            for choice in choicelist:
-                tmp.append(tensor_operator_registry.get('broadcast_to')(shape_choice)
+                tmp.append(tensor_operator_registry.get('broadcast_to')(choice, shape_choice))
            choices = tensor_operator_registry.get('stack')(tmp, 0)

        if self.ndim == 0 or choices.ndim == 0:
            raise ValueError(f"For 'Tensor.choose', the original tensor and the argument 'choices' cannot be scalars."
                             f" Their dimensions should all be > 0, but got the original tensor's dimension "
                             f"{self.ndim}, 'choices' dimension {choices.ndim}.")
-        a = tensor_operator_registry.get('broadcast_to')(shape_choice)
+        a = tensor_operator_registry.get('broadcast_to')(self, shape_choice)
        dtype = choices.dtype
        # adjusts dtype for F.tensor_mul and F.gather_nd
        a = a.astype(mstype.int32)
@@ -3097,10 +3145,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        for i in range(ndim):
            dim_grid = Tensor(list(range(a.shape[i])), mstype.int32)
            dim_shape = validator.expanded_shape(ndim, a.shape[i], i)
-            dim_grid = tensor_operator_registry.get('broadcast_to')(
+            dim_grid = tensor_operator_registry.get('broadcast_to')(dim_grid.reshape(dim_shape), a.shape)
            grids.append(dim_grid)
        grid = tensor_operator_registry.get('stack')(grids, -1)
-        indices = tensor_operator_registry.get('concatenate')(
+        indices = tensor_operator_registry.get('concatenate')((a.reshape(a.shape + (1,)), grid), -1)
        return tensor_operator_registry.get('gather_nd')(choices, indices).astype(dtype)

    def searchsorted(self, v, side='left', sorter=None):
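The `take` and `choose` fixes above all supply the missing input tensor to `broadcast_to`, matching the functional signature (input first, target shape second). A minimal sketch of that call shape:

```python
import mindspore as ms
import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.array([1, 2, 3]), ms.float32)
# Functional style used by the fixed call sites.
print(ops.broadcast_to(x, (2, 3)).shape)  # (2, 3)
```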
@@ -3113,9 +3161,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
                location found is given. If 'right', return the last such index. If there is
                no suitable index, return either 0 or N (where N is the length of the tensor).
                Default: ``left`` .
-            sorter (Union[int,
-                integer indices that sort the tensor into ascending order
-                the result of argsort. Default: ``None`` .
+            sorter (Union[int, list, tuple, Tensor]): optional tensor of
+                integer indices that sort the tensor into ascending order on the innermost dimension
+                and the type must be int64. They are typically the result of argsort. Default: ``None`` .

        Returns:
            Tensor, array of insertion points with the same shape as `v`.
@@ -3136,37 +3184,26 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        if side not in ('left', 'right'):
            raise ValueError(f"For 'Tensor.searchsorted', the argument 'side' should be one of in "
                             f"['left', 'right'], but got {side}.")
-        a = self.astype(mstype.float32)
        if not isinstance(v, Tensor):
            v = tensor_operator_registry.get('make_tensor')(v)
-        shape = v.shape
        if sorter is not None:
-            if not isinstance(sorter, (int,
+            if not isinstance(sorter, (int, list, tuple, Tensor)):
                raise TypeError("For Tensor.searchsorted, the type of the argument 'sorter' must be one of 'int', "
-                                "'
+                                "'list', 'tuple', 'Tensor', but got {}.".format(type(sorter)))
            if not isinstance(sorter, Tensor):
                sorter = tensor_operator_registry.get('make_tensor')(sorter)
-            if sorter.
-                raise ValueError('sorter must be
-
-
-
-
-
-
-        sort_range = tuple(range(math.ceil(math.log2(tensor_operator_registry.get('shape_mul')(a.shape) + 1))))
-        for _ in sort_range:
-            mid = (i - -j) // 2
-            mask = less_op(v, tensor_operator_registry.get('gather_nd')(a, mid.reshape(mid.shape + (1,))))
-            i = tensor_operator_registry.get('select')(mask, i, mid)
-            j = tensor_operator_registry.get('select')(mask, mid, j)
-        return j
+            if sorter.size != self.size:
+                raise ValueError('The size of sorter must be the same as the Tensor')
+
+        dtype = mstype.int32
+        right = (side == 'right')
+        search_sorted_ = tensor_operator_registry.get('searchsorted')(dtype, right)
+        return search_sorted_(self, v, sorter)

    def gather_nd(self, indices):
        r"""
        For details, please refer to :func:`mindspore.ops.gather_nd`.
        """
-        self._init_check()
        validator.check_value_type('indices', indices, (Tensor, Tensor_,), 'Tensor.gather_nd')
        return tensor_operator_registry.get('gather_nd')(self, indices)

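The rewritten `searchsorted` delegates to the registered `searchsorted` operator instead of the old hand-rolled binary search. A short usage sketch (hypothetical values):

```python
import mindspore as ms
import numpy as np
from mindspore import Tensor

x = Tensor(np.array([1, 2, 3, 4, 5]), ms.float32)
# With side='left', 3 would be inserted at index 2 to keep x sorted.
print(x.searchsorted(3))  # 2
```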
@@ -3174,11 +3211,39 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        r"""
        For details, please refer to :func:`mindspore.ops.gather`.
        """
-        self._init_check()
        validator.check_is_int(axis, 'axis')
        validator.check_is_int(batch_dims, "batch_dims")
        return tensor_operator_registry.get('gather')(self, input_indices, axis, batch_dims)

+    def uniform(self, from_=0., to=1., generator=None):
+        r"""
+        Generates random numbers in the half-open interval [from_, to).
+
+        Args:
+            from_ (number): The lower bound of the interval.
+            to (number): The upper bound of the interval.
+            generator (Generator, optional): The random seed. Default: None.
+
+        Returns:
+            Tensor, with the same shape as tensor.
+
+        Raises:
+            TypeError: If `from_` is larger than `to`.
+
+        Supported Platforms:
+            ``Ascend``
+
+        Examples:
+            >>> import mindspore
+            >>> x = mindspore.ops.ones((4, 2))
+            >>> generator = mindspore.Generator()
+            >>> generator.manual_seed(100)
+            >>> output = x.uniform(1., 2., generator)
+            >>> print(output.shape)
+            (4, 2)
+        """
+        return tensor_operator_registry.get('uniform')(self, from_, to, generator)
+
    def var(self, axis=None, ddof=0, keepdims=False):
        """
        Compute the variance along the specified axis.
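`uniform` pairs with the new `mindspore/common/generator.py` module shipped in this release. A hedged reproducibility sketch; per the docstring it assumes an Ascend backend:

```python
import mindspore

x = mindspore.ops.ones((2, 3))
g = mindspore.Generator()
g.manual_seed(7)
a = x.uniform(0.0, 1.0, g)  # draws from [0.0, 1.0)
g.manual_seed(7)
b = x.uniform(0.0, 1.0, g)  # reseeding reproduces the same draw
print((a == b).all())
```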
@@ -3202,13 +3267,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        Returns:
            Variance tensor.

-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
        See also:
-            :func:`mindspore.Tensor.mean`: Reduce a dimension of a tensor by averaging all elements in the dimension.
+            - :func:`mindspore.Tensor.mean`: Reduce a dimension of a tensor by averaging all elements in the dimension.
+            - :func:`mindspore.Tensor.std`: Compute the standard deviation along the specified axis.

-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
@@ -3255,40 +3319,40 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        Return sum of tensor elements over a given axis.

        Note:
-            Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and
-            `
+            Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are not supported.
+            The `axis` with tensor type is only used for compatibility with older versions and is not recommended.

        Args:
-            axis (Union[None, int, tuple(int), list(int)]): Axis or axes along which a sum is performed.
+            axis (Union[None, int, tuple(int), list(int), Tensor]): Axis or axes along which a sum is performed.
                Default: ``None`` .
-                If None, sum all the elements of the input tensor.
-                If the axis is negative, it counts from the last to the first axis
-                If the axis is a tuple or list of ints, a sum is performed on all the axes specified in the tuple
-                or list instead of a single axis or all the axes as before.
+                If ``None`` , sum all the elements of the input tensor.
+                If the `axis` is negative, it counts from the last to the first `axis`.
+                If the `axis` is a tuple or list of ints, a sum is performed on all the axes specified in the tuple
+                or list instead of a single `axis` or all the axes as before.
            dtype (:class:`mindspore.dtype`, optional): defaults to ``None`` . Overrides the dtype of the
                output Tensor.
            keepdims (bool): If this is set to ``True`` , the axes which are reduced are left in the result as
                dimensions with size one. With this option, the result will broadcast correctly against the input
-                array. If the default value is passed, then keepdims will not be passed through to the sum method
+                array. If the default value is passed, then `keepdims` will not be passed through to the sum method
                of sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
-                implement keepdims any exceptions will be raised. Default: ``False`` .
+                implement `keepdims` any exceptions will be raised. Default: ``False`` .
            initial (scalar): Starting value for the sum. Default: ``None`` .

        Returns:
-            Tensor. A tensor with the same shape as input, with the specified axis removed.
-            If the input tensor is a 0-d array, or if the axis is ``None`` , a scalar is returned.
+            Tensor. A tensor with the same shape as input, with the specified `axis` removed.
+            If the input tensor is a 0-d array, or if the `axis` is ``None`` , a scalar is returned.

        Raises:
-            TypeError: If input is not array_like, or `axis` is not int, tuple of ints
+            TypeError: If input is not array_like, or `axis` is not int, tuple of ints, list of ints or Tensor,
                or `keepdims` is not integer, or `initial` is not scalar.
-            ValueError: If any axis is out of range or duplicate axes exist.
+            ValueError: If any `axis` is out of range or duplicate axes exist.
+
+        See also:
+            - :func:`mindspore.Tensor.cumsum`: Return the cumulative sum of the elements along a given `axis`.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

-        See also:
-            :func:`mindspore.Tensor.cumsum`: Return the cumulative sum of the elements along a given axis.
-
        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
@@ -3299,13 +3363,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        >>> print(input_x.sum(axis=1))
        [10. 35.]
        """
-        if initial is
-
-
-
-
-
-            res = res.astype(dtype)
+        if initial is None:
+            res = tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype)
+        else:
+            res = tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype) + initial
+        if dtype is not None and (dtype == mstype.bool_):
+            res = res.astype(mstype.bool_)
        return res

    def sum_to_size(self, *size):
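The rebuilt `sum` body simply adds `initial` on top of the functional reduction. A minimal sketch (hypothetical values):

```python
import mindspore as ms
import numpy as np
from mindspore import Tensor

x = Tensor(np.array([[1., 2., 3.], [4., 5., 6.]]), ms.float32)
# initial is added after the functional sum, per the new body above.
print(x.sum(axis=1, initial=10))  # [16. 25.]
```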
@@ -3333,7 +3396,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        >>> print(output.shape)
        (1, 3, 1, 3)
        """
-        self._init_check()
        x = self
        if len(size) == 1 and isinstance(size[0], tuple):
            size = size[0]
@@ -3357,21 +3419,18 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        For details, please refer to :func:`mindspore.ops.nansum`.
        """
-        self._init_check()
        return tensor_operator_registry.get('nansum')(self, axis=axis, keepdims=keepdims, dtype=dtype)

    def nanmean(self, axis=None, keepdims=False, *, dtype=None):
        r"""
        For details, please refer to :func:`mindspore.ops.nanmean`.
        """
-        self._init_check()
        return tensor_operator_registry.get('nanmean')(self, axis, keepdims, dtype=dtype)

    def nanmedian(self, axis=-1, keepdims=False):
        r"""
        For details, please refer to :func:`mindspore.ops.nanmedian`.
        """
-        self._init_check()
        return tensor_operator_registry.get('nanmedian')(self, axis, keepdims)

    def repeat(self, repeats, axis=None):
@@ -3391,13 +3450,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        ValueError: If the axis is out of range.
        TypeError: If arguments have types not specified above.

-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
        See also:
-            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.resize`: Changes shape and size of tensor in-place.

-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
@@ -3446,27 +3504,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        for sub, rep in zip(subs, repeats):
            if rep != 0:
                repeated_subs.append(tensor_operator_registry.get('repeat_elements')(sub, rep, axis))
-        return tensor_operator_registry.get('concatenate')(axis)
+        return tensor_operator_registry.get('concatenate')(repeated_subs, axis)

    def repeat_interleave(self, repeats, dim=None):
        """
        For details, please refer to :func:`mindspore.ops.repeat_interleave`.
        """
-        self._init_check()
        return tensor_operator_registry.get('repeat_interleave')(self, repeats, dim)

    def bernoulli(self, p=0.5, seed=None):
        r"""
        For details, please refer to :func:`mindspore.ops.bernoulli`.
        """
-        self._init_check()
        return tensor_operator_registry.get('bernoulli')(self, p, seed)

    def random_categorical(self, num_sample, seed=0, dtype=mstype.int64):
        r"""
        For details, please refer to :func:`mindspore.ops.random_categorical`.
        """
-        self._init_check()
        validator.check_is_int(num_sample, 'num_sample')
        validator.check_is_int(seed, 'seed')
        return tensor_operator_registry.get('random_categorical')(self, num_sample, seed, dtype)
@@ -3475,23 +3530,20 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        For details, please refer to :func:`mindspore.ops.masked_select`.
        """
-        self._init_check()
        return tensor_operator_registry.get('masked_select')(self, mask)

    def gather_elements(self, dim, index):
        """
        For details, please refer to :func:`mindspore.ops.gather_elements`.
        """
-        self._init_check()
        validator.check_value_type('index', index, (Tensor, Tensor_,), 'Tensor.gather_elements')
        return tensor_operator_registry.get('gather_elements')(self, dim, index)

-    def nonzero(self):
+    def nonzero(self, as_tuple=False):
        """
        For details, please refer to :func:`mindspore.ops.nonzero`.
        """
-
-        return tensor_operator_registry.get('nonzero')(self)
+        return tensor_operator_registry.get('nonzero')(self, as_tuple)

    def svd(self, full_matrices=False, compute_uv=True):
        """
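`nonzero` now forwards an `as_tuple` flag. A hedged sketch, assuming the flag mirrors the functional op's behavior:

```python
import mindspore as ms
import numpy as np
from mindspore import Tensor

x = Tensor(np.array([[1, 0], [0, 2]]), ms.float32)
print(x.nonzero())               # (N, ndim) index tensor: [[0 0], [1 1]]
print(x.nonzero(as_tuple=True))  # one 1-D index tensor per dimension
```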
@@ -3508,42 +3560,36 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        r"""
        For details, please refer to :func:`mindspore.ops.hardshrink`.
        """
-
-        return tensor_operator_registry.get('hardshrink')(lambd)(self)
+        return tensor_operator_registry.get('hardshrink')(self, lambd)

    def heaviside(self, values):
        r"""
        For details, please refer to :func:`mindspore.ops.heaviside`.
        """
-        self._init_check()
        return tensor_operator_registry.get('heaviside')(self, values)

    def hypot(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.hypot`.
        """
-        self._init_check()
        return tensor_operator_registry.get('hypot')(self, other)

    def soft_shrink(self, lambd=0.5):
        r"""
        For details, please refer to :func:`mindspore.ops.soft_shrink`.
        """
-        self._init_check()
        return tensor_operator_registry.get('soft_shrink')(self, lambd)

    def matrix_determinant(self):
        r"""
        For details, please refer to :func:`mindspore.ops.matrix_determinant`.
        """
-        self._init_check()
        return tensor_operator_registry.get('matrix_determinant')(self)

    def log_matrix_determinant(self):
        r"""
        For details, please refer to :func:`mindspore.ops.log_matrix_determinant`.
        """
-        self._init_check()
        return tensor_operator_registry.get('log_matrix_determinant')(self)

    def to_coo(self):
@@ -3577,7 +3623,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        [1 0]] [ 1. -5.] (2, 2)

        """
-        self._init_check()
        return tensor_operator_registry.get('dense_to_sparse_coo')(self)

    def to_csr(self):
@@ -3610,7 +3655,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        >>> print(output.indptr, output.indices, output.values, output.shape)
        [0 1 2] [0 0] [ 1. -5.] (2, 2)
        """
-        self._init_check()
        return tensor_operator_registry.get('dense_to_sparse_csr')(self)

    def tolist(self):
@@ -3633,42 +3677,36 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        >>> print(out2)
        1
        """
-        self._init_check()
        return self.asnumpy().tolist()

    def unbind(self, dim=0):
        r"""
        For details, please refer to :func:`mindspore.ops.unbind`.
        """
-
-        return tensor_operator_registry.get('unbind')(dim)(self)
+        return tensor_operator_registry.get('unbind')(self, dim)

    def unsorted_segment_min(self, segment_ids, num_segments):
        r"""
        For details, please refer to :func:`mindspore.ops.unsorted_segment_min`.
        """
-        self._init_check()
        return tensor_operator_registry.get('unsorted_segment_min')(self, segment_ids, num_segments)

    def unsorted_segment_max(self, segment_ids, num_segments):
        r"""
        For details, please refer to :func:`mindspore.ops.unsorted_segment_max`.
        """
-        self._init_check()
        return tensor_operator_registry.get('unsorted_segment_max')(self, segment_ids, num_segments)

    def unsorted_segment_prod(self, segment_ids, num_segments):
        r"""
        For details, please refer to :func:`mindspore.ops.unsorted_segment_prod`.
        """
-        self._init_check()
        return tensor_operator_registry.get('unsorted_segment_prod')(self, segment_ids, num_segments)

    def unique_consecutive(self, return_idx=False, return_counts=False, axis=None):
        """
        For details, please refer to :func:`mindspore.ops.unique_consecutive`.
        """
-        self._init_check()
        output, idx, counts = tensor_operator_registry.get("unique_consecutive")(return_idx, return_counts, axis)(self)
        if return_idx and return_counts:
            return output, idx, counts
@@ -3682,29 +3720,25 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        For details, please refer to :func:`mindspore.ops.unique_with_pad`.
        """
-
-        return tensor_operator_registry.get("unique_with_pad")()(self, pad_num)
+        return tensor_operator_registry.get("unique_with_pad")(self, pad_num)

    def diag(self):
        r"""
        For details, please refer to :func:`mindspore.ops.diag`.
        """
-
-        return tensor_operator_registry.get('diag')()(self)
+        return tensor_operator_registry.get('diag')(self)

    def diagflat(self, offset=0):
        r"""
        For details, please refer to :func:`mindspore.ops.diagflat`.
        """
-        self._init_check()
        return tensor_operator_registry.get('diagflat')(self, offset)

    def xdivy(self, y):
        r"""
        For details, please refer to :func:`mindspore.ops.xdivy`.
        """
-
-        return tensor_operator_registry.get("xdivy")()(self, y)
+        return tensor_operator_registry.get("xdivy")(self, y)

    def split(self, split_size_or_sections, axis=0):
        """
@@ -3716,7 +3750,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        """
        For details, please refer to :func:`mindspore.ops.tensor_split`.
        """
-        self._init_check()
        return tensor_operator_registry.get('tensor_split')(self, indices_or_sections, axis)

    def vsplit(self, indices_or_sections):
@@ -3724,28 +3757,25 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        For details, please refer to :func:`mindspore.ops.vsplit`.
        """

-        self._init_check()
        return tensor_operator_registry.get('vsplit')(self, indices_or_sections)

    def hsplit(self, indices_or_sections):
        """
        For details, please refer to :func:`mindspore.ops.hsplit`.
        """
-        self._init_check()
        return tensor_operator_registry.get('hsplit')(self, indices_or_sections)

    def dsplit(self, indices_or_sections):
        """
        For details, please refer to :func:`mindspore.ops.dsplit`.
        """
-        self._init_check()
        return tensor_operator_registry.get('dsplit')(self, indices_or_sections)

    def xlogy(self, y):
        r"""
        For details, please refer to :func:`mindspore.ops.xlogy`.
        """
-        return tensor_operator_registry.get("xlogy")(
+        return tensor_operator_registry.get("xlogy")(self, y)

    def eigvals(self):
        r"""
@@ -3760,13 +3790,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        r"""
        For details, please refer to :func:`mindspore.ops.erf`.
        """
-        return tensor_operator_registry.get("erf")(
+        return tensor_operator_registry.get("erf")(self)

    def erfc(self):
        r"""
        For details, please refer to :func:`mindspore.ops.erfc`.
        """
-        return tensor_operator_registry.get("erfc")(
+        return tensor_operator_registry.get("erfc")(self)

    def tile(self, reps):
        r"""
@@ -3778,29 +3808,26 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
        r"""
        For details, please refer to :func:`mindspore.ops.topk`.
        """
-        self._init_check()
        return tensor_operator_registry.get("topk")(self, k, dim, largest, sorted)

    def top_k(self, k, sorted=True):
        r"""
        `Tensor.top_k` is deprecated, please use `Tensor.topk` instead.
        """
-        self._init_check()
        validator.check_is_int(k, 'k')
        validator.check_bool(sorted, 'sorted')
-        return tensor_operator_registry.get("top_k")(
+        return tensor_operator_registry.get("top_k")(self, k, sorted)

    def sigmoid(self):
        r"""
        For details, please refer to :func:`mindspore.ops.sigmoid`.
        """
-        return tensor_operator_registry.get("sigmoid")(
+        return tensor_operator_registry.get("sigmoid")(self)

    def median(self, axis=-1, keepdims=False):
        r"""
        For details, please refer to :func:`mindspore.ops.median`.
        """
-        self._init_check()
        validator.check_axis_in_range(axis, self.ndim)
        return tensor_operator_registry.get('median')(False, axis, keepdims)(self)

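`topk` keeps its signature; only the eager `_init_check` disappears. A short usage sketch (hypothetical values):

```python
import mindspore as ms
import numpy as np
from mindspore import Tensor

x = Tensor(np.array([1.0, 5.0, 3.0, 2.0]), ms.float32)
values, indices = x.topk(2)
print(values)   # [5. 3.]
print(indices)  # [1 2]
```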
@@ -3808,49 +3835,42 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
3808
3835
|
r"""
|
|
3809
3836
|
For details, please refer to :func:`mindspore.ops.addmv`.
|
|
3810
3837
|
"""
|
|
3811
|
-
self._init_check()
|
|
3812
3838
|
return tensor_operator_registry.get('addmv')(self, mat, vec, beta=beta, alpha=alpha)
|
|
3813
3839
|
|
|
3814
3840
|
def asinh(self):
|
|
3815
3841
|
r"""
|
|
3816
3842
|
For details, please refer to :func:`mindspore.ops.asinh`.
|
|
3817
3843
|
"""
|
|
3818
|
-
self._init_check()
|
|
3819
3844
|
return tensor_operator_registry.get('asinh')(self)
|
|
3820
3845
|
|
|
3821
3846
|
def arcsinh(self):
|
|
3822
3847
|
r"""
|
|
3823
3848
|
Alias for :func:`mindspore.Tensor.asinh`.
|
|
3824
3849
|
"""
|
|
3825
|
-
self._init_check()
|
|
3826
3850
|
return tensor_operator_registry.get('arcsinh')(self)
|
|
3827
3851
|
|
|
3828
3852
|
def atan(self):
|
|
3829
3853
|
r"""
|
|
3830
3854
|
For details, please refer to :func:`mindspore.ops.atan`.
|
|
3831
3855
|
"""
|
|
3832
|
-
self._init_check()
|
|
3833
3856
|
return tensor_operator_registry.get('atan')(self)
|
|
3834
3857
|
|
|
3835
3858
|
def atanh(self):
|
|
3836
3859
|
r"""
|
|
3837
3860
|
For details, please refer to :func:`mindspore.ops.atanh`.
|
|
3838
3861
|
"""
|
|
3839
|
-
self._init_check()
|
|
3840
3862
|
return tensor_operator_registry.get('atanh')(self)
|
|
3841
3863
|
|
|
3842
3864
|
def arctanh(self):
|
|
3843
3865
|
r"""
|
|
3844
3866
|
Alias for :func:`mindspore.Tensor.atanh`.
|
|
3845
3867
|
"""
|
|
3846
|
-
self._init_check()
|
|
3847
3868
|
return tensor_operator_registry.get('arctanh')(self)
|
|
3848
3869
|
|
|
3849
3870
|
def bmm(self, mat2):
|
|
3850
3871
|
r"""
|
|
3851
3872
|
For details, please refer to :func:`mindspore.ops.bmm`.
|
|
3852
3873
|
"""
|
|
3853
|
-
self._init_check()
|
|
3854
3874
|
return tensor_operator_registry.get('bmm')(self, mat2)
|
|
3855
3875
|
|
|
3856
3876
|
def to(self, dtype):
|
|
@@ -3880,8 +3900,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output.dtype)
         Int32
         """
-
-        return tensor_operator_registry.get('to')()(self, dtype)
+        return tensor_operator_registry.get('to')(self, dtype)
 
     def type(self, dtype=None):
         r"""
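The `to` registry entry now stores a plain callable rather than an operator factory, so the double call `get('to')()(self, dtype)` collapses to a single call. A usage sketch, assuming MindSpore 2.3; the expected output follows the docstring above:

    import mindspore as ms
    from mindspore import Tensor

    x = Tensor([1.0, 2.0], ms.float32)
    output = x.to(ms.int32)   # cast through the simplified registry path
    print(output.dtype)       # Int32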
@@ -3907,7 +3926,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1 2]
          [3 4]]
         """
-        self._init_check()
         if dtype is None:
             return str(self.dtype)
         return self.astype(dtype)
@@ -3934,7 +3952,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(x.dtype)
         Int32
         """
-        self._init_check()
         return self.astype(other.dtype)
 
     def bool(self):
@@ -3957,8 +3974,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output.dtype)
         Bool
         """
-
-        return tensor_operator_registry.get('bool')()(self, mstype.bool_)
+        return tensor_operator_registry.get('bool')(self, mstype.bool_)
 
     def float(self):
         r"""
@@ -3979,8 +3995,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output.dtype)
         Float32
         """
-
-        return tensor_operator_registry.get('float')()(self, mstype.float32)
+        return tensor_operator_registry.get('float')(self, mstype.float32)
 
     def half(self):
         r"""
@@ -4001,8 +4016,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output.dtype)
         Float16
         """
-
-        return tensor_operator_registry.get('half')()(self, mstype.float16)
+        return tensor_operator_registry.get('half')(self, mstype.float16)
 
     def int(self):
         r"""
@@ -4023,8 +4037,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output.dtype)
         Int32
         """
-
-        return tensor_operator_registry.get('int')()(self, mstype.int32)
+        return tensor_operator_registry.get('int')(self, mstype.int32)
 
     def long(self):
         r"""
@@ -4045,8 +4058,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output.dtype)
         Int64
         """
-
-        return tensor_operator_registry.get('long')()(self, mstype.int64)
+        return tensor_operator_registry.get('long')(self, mstype.int64)
 
     def short(self):
         r"""
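All of the dtype shortcut methods above (`bool`, `float`, `half`, `int`, `long`) follow the same registry simplification: each now forwards `self` plus a fixed `mstype` target in one call. A usage sketch, assuming MindSpore 2.3:

    import mindspore as ms
    from mindspore import Tensor

    x = Tensor([1, 2, 3], ms.int32)
    print(x.float().dtype)   # Float32
    print(x.half().dtype)    # Float16
    print(x.long().dtype)    # Int64
    print(x.bool().dtype)    # Bool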
@@ -4068,22 +4080,19 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> output
         Tensor(shape=[5], dtype=Int16, value= [1, 2, 3, 4, 5])
         """
-        self._init_check()
         return tensor_operator_registry.get('cast')(self, mstype.int16)
 
     def cholesky(self, upper=False):
         r"""
         For details, please refer to :func:`mindspore.ops.cholesky`.
         """
-
-        return tensor_operator_registry.get('cholesky')(upper=upper)(self)
+        return tensor_operator_registry.get('cholesky')(self, upper=upper)
 
     def cholesky_inverse(self, upper=False):
         r"""
         For details, please refer to :func:`mindspore.ops.cholesky_inverse`.
         """
-
-        return tensor_operator_registry.get('cholesky_inverse')(upper=upper)(self)
+        return tensor_operator_registry.get('cholesky_inverse')(self, upper=upper)
 
     def cholesky_solve(self, input2, upper=False):
         r"""
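`cholesky` and `cholesky_inverse` switch from the factory pattern `get(...)(upper=upper)(self)` to a direct functional call with `self` first. A small sketch, assuming MindSpore 2.3 and a symmetric positive-definite input as :func:`mindspore.ops.cholesky` requires:

    import mindspore as ms
    from mindspore import Tensor

    a = Tensor([[4.0, 2.0], [2.0, 3.0]], ms.float32)  # symmetric positive-definite
    l = a.cholesky()   # lower-triangular factor; a == l @ l.T
    print(l.shape)     # (2, 2)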
@@ -4092,63 +4101,54 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         .. warning::
             This is an experimental API that is subject to change or deletion.
         """
-        self._init_check()
         return tensor_operator_registry.get('cholesky_solve')(self, input2, upper)
 
     def conj(self):
         r"""
         For details, please refer to :func:`mindspore.ops.conj`.
         """
-        self._init_check()
         return tensor_operator_registry.get('conj')(self)
 
     def count_nonzero(self, axis=(), keep_dims=False, dtype=mstype.int32):
         r"""
         For details, please refer to :func:`mindspore.ops.count_nonzero`.
         """
-        self._init_check()
         return tensor_operator_registry.get('count_nonzero')(self, axis, keep_dims, dtype)
 
     def cross(self, other, dim=None):
         r"""
         For details, please refer to :func:`mindspore.ops.cross`.
         """
-        self._init_check()
         return tensor_operator_registry.get('cross')(self, other, dim)
 
     def erfinv(self):
         r"""
         For details, please refer to :func:`mindspore.ops.erfinv`.
         """
-        self._init_check()
         return tensor_operator_registry.get('erfinv')(self)
 
     def less_equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.less_equal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('less_equal')(self, other)
 
     def lcm(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.lcm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('lcm')(self, other)
 
     def ldexp(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.ldexp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('ldexp')(self, other)
 
     def fold(self, output_size, kernel_size, dilation=1, padding=0, stride=1):
         r"""
         For details, please refer to :func:`mindspore.ops.fold`.
         """
-        self._init_check()
         return tensor_operator_registry.get('fold')(self, output_size, kernel_size, dilation, padding, stride)
 
     def unfold(self, kernel_size, dilation=1, padding=0, stride=1):
@@ -4159,70 +4159,62 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            This is an experimental API that is subject to change or deletion.
 
         """
-        self._init_check()
         return tensor_operator_registry.get('unfold')(self, kernel_size, dilation, padding, stride)
 
     def expand(self, size):
         r"""
         For details, please refer to :func:`mindspore.ops.broadcast_to`.
         """
-
+        if isinstance(size, Tensor):
+            size = tensor_operator_registry.get('tensortotuple')()(size)
         return tensor_operator_registry.get('expand')(self, size)
 
     def cumprod(self, dim, dtype=None):
         r"""
         For details, please refer to :func:`mindspore.ops.cumprod`.
         """
-        self._init_check()
         return tensor_operator_registry.get('cumprod')(self, dim, dtype)
 
     def multiply(self, value):
         r"""
         For details, please refer to :func:`mindspore.ops.multiply`.
         """
-        self._init_check()
         return tensor_operator_registry.get('multiply')(self, value)
 
     def div(self, value, *, rounding_mode=None):
         r"""
         For details, please refer to :func:`mindspore.ops.div`.
         """
-        self._init_check()
         return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)
 
     def divide(self, value, *, rounding_mode=None):
         r"""
         Alias for :func:`mindspore.Tensor.div`.
         """
-        self._init_check()
         return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)
 
     def eq(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.eq`.
         """
-        self._init_check()
         return tensor_operator_registry.get('equal')(self, other)
 
     def equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.equal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('equal')(self, other)
 
     def expm1(self):
         r"""
         For details, please refer to :func:`mindspore.ops.expm1`.
         """
-        self._init_check()
         return tensor_operator_registry.get('expm1')(self)
 
     def index_add(self, dim, index, source, *, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.index_add`.
         """
-        self._init_check()
         check_is_number(alpha, (int, float))
         source = tensor_operator_registry.get('__mul__')(source, alpha)
         return tensor_operator_registry.get('index_add')(self, indices=index, y=source, axis=dim)
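`expand` gains an explicit conversion step: a Tensor-valued `size` is turned into a tuple through the registry's `tensortotuple` helper before the broadcast. A usage sketch, assuming MindSpore 2.3 (Tensor-valued sizes are still accepted in this release):

    import mindspore as ms
    from mindspore import Tensor

    x = Tensor([[1.0], [2.0]], ms.float32)   # shape (2, 1)
    size = Tensor([2, 3], ms.int32)          # converted to a tuple internally
    y = x.expand(size)                       # broadcast to shape (2, 3)
    print(y.shape)                           # (2, 3)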
@@ -4231,42 +4223,36 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.greater`.
         """
-        self._init_check()
         return tensor_operator_registry.get('greater')(self, other)
 
     def greater_equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.greater_equal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('greater_equal')(self, other)
 
     def igamma(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.igamma`.
         """
-        self._init_check()
         return tensor_operator_registry.get('igamma')(self, other)
 
     def igammac(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.igammac`.
         """
-        self._init_check()
         return tensor_operator_registry.get('igammac')(self, other)
 
     def isinf(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isinf`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isinf')(self)
 
     def isnan(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isnan`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isnan')(self)
 
     def flip(self, dims):
@@ -4320,14 +4306,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.le`.
         """
-        self._init_check()
         return tensor_operator_registry.get('le')(self, other)
 
     def less(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.less`.
         """
-        self._init_check()
         return tensor_operator_registry.get('less')(self, other)
 
     def lt(self, other):
@@ -4340,35 +4324,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_and`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logical_and')(self, other)
 
     def logical_not(self):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_not`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logical_not')(self)
 
     def logical_or(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_or`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logical_or')(self, other)
 
     def logical_xor(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_xor`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logical_xor')(self, other)
 
     def lstsq(self, A):
         r"""
         For details, please refer to :func:`mindspore.ops.lstsq`.
         """
-        self._init_check()
         return tensor_operator_registry.get('lstsq')(self, A)
 
     @property
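The element-wise logical methods above are unchanged apart from the dropped guard. A usage sketch, assuming MindSpore 2.3:

    import mindspore as ms
    from mindspore import Tensor

    a = Tensor([True, False, True])
    b = Tensor([True, True, False])
    print(a.logical_and(b))   # [ True False False]
    print(a.logical_or(b))    # [ True  True  True]
    print(a.logical_xor(b))   # [False  True  True]
    print(a.logical_not())    # [False  True False]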
@@ -4392,28 +4371,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.mvlgamma`.
         """
-        self._init_check()
         return tensor_operator_registry.get('mvlgamma')(self, p)
 
     def matmul(self, tensor2):
         r"""
         For details, please refer to :func:`mindspore.ops.matmul`.
         """
-        self._init_check()
         return tensor_operator_registry.get('matmul')(self, tensor2)
 
     def inner(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.inner`.
         """
-        self._init_check()
         return tensor_operator_registry.get('inner')(self, other)
 
     def multinomial(self, num_samples, replacement=True, seed=None):
         r"""
         For details, please refer to :func:`mindspore.ops.multinomial`.
         """
-        self._init_check()
         return tensor_operator_registry.get('multinomial')(self, num_samples, replacement, seed)
 
     def matrix_power(self, n):
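`matmul`, `inner`, and their neighbors likewise become thin wrappers over the functional ops. A usage sketch, assuming MindSpore 2.3:

    import mindspore as ms
    from mindspore import Tensor

    a = Tensor([[1.0, 2.0], [3.0, 4.0]], ms.float32)
    b = Tensor([[5.0], [6.0]], ms.float32)
    print(a.matmul(b))   # [[17.]
                         #  [39.]]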
@@ -4424,35 +4399,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
            This is an experimental API that is subject to change or deletion.
 
         """
-        self._init_check()
         return tensor_operator_registry.get('matrix_power')(self, n)
 
     def maximum(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.maximum`.
         """
-        self._init_check()
         return tensor_operator_registry.get('maximum')(self, other)
 
     def mm(self, mat2):
         r"""
         For details, please refer to :func:`mindspore.ops.mm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('mm')(self, mat2)
 
     def msort(self):
         r"""
         For details, please refer to :func:`mindspore.ops.msort`.
         """
-        self._init_check()
         return tensor_operator_registry.get('msort')(self)
 
     def mul(self, value):
         r"""
         For details, please refer to :func:`mindspore.ops.mul`.
         """
-        self._init_check()
         return tensor_operator_registry.get('mul')(self, value)
 
     def nan_to_num(self, nan=0.0, posinf=None, neginf=None):
@@ -4465,31 +4435,29 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.neg`.
         """
-        self._init_check()
         return tensor_operator_registry.get('neg')(self)
 
     def ne(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.ne`.
         """
-        self._init_check()
         return tensor_operator_registry.get('ne')(self, other)
 
     def not_equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.not_equal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('not_equal')(self, other)
 
-    def new_zeros(self, size,
+    def new_zeros(self, size, dtype=None):
         r"""
         Return a tensor of `size` filled with zeros.
 
-
-
+        .. warning::
+            For argument `size`, Tensor type input will be deprecated in the future version.
 
-
+        Args:
+            size (Union[int, tuple, list, Tensor]): An int, list or tuple of integers defining the output shape.
             dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned tensor has
                 the same dtype as `self`. Default: ``None``.
 
@@ -4497,7 +4465,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             Tensor, with the shape and dtype defined above, filled with zeros.
 
         Raises:
-            TypeError: If `size` is
+            TypeError: If `size` is neither an int nor a tuple/list/Tensor of int.
 
         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
@@ -4512,21 +4480,17 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[0. 0.]
          [0. 0.]]
         """
-
-        if isinstance(size, list):
-            size = tuple(size)
-        self._init_check()
-        _dtype = self.dtype if dtype is None else dtype
-        return tensor_operator_registry.get('zeros')(size, _dtype)
+        return tensor_operator_registry.get('zeros')(size, dtype)
 
-    def new_ones(self, size,
+    def new_ones(self, size, dtype=None):
         r"""
         Return a tensor of `size` filled with ones.
 
-
-
+        .. warning::
+            For argument `size`, Tensor type input will be deprecated in the future version.
 
-
+        Args:
+            size (Union[int, tuple, list, Tensor]): An int, list or tuple of integers defining the output shape.
             dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned
                 tensor has the same dtype as `self`. Default: ``None``.
 
@@ -4534,7 +4498,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             Tensor, with the shape and dtype defined above, filled with ones.
 
         Raises:
-            TypeError: If `size` is
+            TypeError: If `size` is neither an int nor a tuple/list/Tensor of int.
 
         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
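`new_zeros` and `new_ones` now take an explicit `dtype=None` keyword and (per the next hunk) delegate `size` normalization and dtype defaulting to the functional `zeros`/`ones` instead of handling them locally. A usage sketch, assuming MindSpore 2.3 and the docstring's example output:

    import mindspore as ms
    from mindspore import Tensor

    x = Tensor([[1, 2], [3, 4]], ms.int32)
    z = x.new_zeros((2, 2), dtype=ms.float32)
    print(z)
    # [[0. 0.]
    #  [0. 0.]]
    o = x.new_ones((2, 2))   # dtype=None follows the default described above
    print(o.shape)           # (2, 2)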
@@ -4549,109 +4513,90 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1. 1.]
          [1. 1.]]
         """
-
-        if isinstance(size, list):
-            size = tuple(size)
-        self._init_check()
-        _dtype = self.dtype if dtype is None else dtype
-        return tensor_operator_registry.get('ones')(size, _dtype)
+        return tensor_operator_registry.get('ones')(size, dtype)
 
     def sign(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sign`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sign')(self)
 
     def signbit(self):
         """
         For details, please refer to :func:`mindspore.ops.signbit`.
         """
-        self._init_check()
         return tensor_operator_registry.get('signbit')(self)
 
     def sgn(self):
         """
         For details, please refer to :func:`mindspore.ops.sgn`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sgn')(self)
 
     def sin(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sin`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sin')(self)
 
     def sinc(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sinc`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sinc')(self)
 
     def sinh(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sinh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sinh')(self)
 
     def sort(self, axis=-1, descending=False):
         r"""
         For details, please refer to :func:`mindspore.ops.sort`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sort')(self, axis=axis, descending=descending)
 
     def argsort(self, axis=-1, descending=False):
         """
         For details, please refer to :func:`mindspore.ops.argsort`.
         """
-        self._init_check()
         return tensor_operator_registry.get('argsort')(self, axis, descending)
 
     def trunc(self):
         r"""
         For details, please refer to :func:`mindspore.ops.trunc`.
         """
-        self._init_check()
         return tensor_operator_registry.get('trunc')(self)
 
     def where(self, condition, y):
         r"""
         For details, please refer to :func:`mindspore.ops.where`.
         """
-        self._init_check()
         return tensor_operator_registry.get('where')(condition, self, y)
 
     def imag(self):
         r"""
         For details, please refer to :func:`mindspore.ops.imag`.
         """
-        self._init_check()
         return tensor_operator_registry.get('imag')(self)
 
     def quantile(self, q, axis=None, keepdims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.quantile`.
         """
-        self._init_check()
         return tensor_operator_registry.get('quantile')(self, q, axis, keepdims)
 
     def nanquantile(self, q, axis=None, keepdims=False):
         """
        For details, please refer to :func:`mindspore.ops.nanquantile`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nanquantile')(self, q, axis, keepdims)
 
     def orgqr(self, input2):
         r"""
         For details, please refer to :func:`mindspore.ops.orgqr`.
         """
-        self._init_check()
         return tensor_operator_registry.get('orgqr')(self, input2)
 
     def lu_solve(self, LU_data, LU_pivots):
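`sort` and `argsort` keep their signatures; only the guard goes away. A usage sketch, assuming MindSpore 2.3, where `sort` returns a `(values, indices)` pair as in :func:`mindspore.ops.sort`:

    import mindspore as ms
    from mindspore import Tensor

    x = Tensor([3.0, 1.0, 2.0], ms.float32)
    values, indices = x.sort()           # ascending along the last axis
    print(values)                        # [1. 2. 3.]
    print(x.argsort(descending=True))    # [0 2 1]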
@@ -4661,7 +4606,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         .. warning::
             This is an experimental API that is subject to change or deletion.
         """
-        self._init_check()
         return tensor_operator_registry.get('lu_solve')(self, LU_data, LU_pivots)
 
 
@@ -4669,14 +4613,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.nextafter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nextafter')(self, other)
 
     def qr(self, some=True):
         r"""
         For details, please refer to :func:`mindspore.ops.qr`.
         """
-        self._init_check()
         validator.check_value_type('some', some, bool, 'Tensor.qr')
         return tensor_operator_registry.get('qr')(self, 'reduced' if some else 'complete')
 
@@ -4686,7 +4628,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         For details, please refer to :func:`mindspore.ops.ormqr`.
         Args `input2` and `input3` correspond to the args `tau` and `other` of :func:`mindspore.ops.ormqr`.
         """
-        self._init_check()
         return tensor_operator_registry.get('ormqr')(self, input2, input3, left, transpose)
 
 
@@ -4728,7 +4669,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output)
         [5. 6. 3. 7.]
         """
-        self._init_check()
         return tensor_operator_registry.get('masked_scatter')()(self, mask, x)
 
 
@@ -4780,12 +4720,47 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1 5 3]
          [4 8 9]]
         """
-        self._init_check()
         validator.check_value_type('accumulate', accumulate, bool, 'Tensor.index_put')
         _index_put = tensor_operator_registry.get('index_put')(0 if accumulate is False else 1)
         return _index_put(self, values, indices)
 
 
+    def move_to(self, to, blocking=True):
+        r"""
+        Copy the Tensor to the target device, synchronously or asynchronously (synchronous by default).
+        Only supported in PyNative mode.
+
+        Args:
+            to (str): a string type value, one of ``"Ascend"``, ``"GPU"``, ``"CPU"``.
+            blocking (bool): whether to use a synchronous (``True``) or asynchronous (``False``) copy.
+                Default: ``True``.
+
+        Returns:
+            New Tensor, stored on the target device, with the same type and shape as `self`.
+
+        Raises:
+            ValueError: If the type of `blocking` is not bool.
+            ValueError: If the value of `to` is not one of ``"Ascend"``, ``"GPU"``, ``"CPU"``.
+            ValueError: If the run mode is not PyNative mode.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import mindspore as ms
+            >>> from mindspore import Tensor
+            >>> x = ms.Tensor([1, 2, 3], ms.int64)
+            >>> new_tensor = x.move_to("CPU")
+        """
+        if not isinstance(blocking, bool):
+            raise ValueError(f"The type of 'blocking' must be bool, but got {blocking}")
+        if to not in ("Ascend", "GPU", "CPU"):
+            raise ValueError(f"The value of 'to' must be one of ['Ascend', 'GPU', 'CPU'], but got {to}")
+        mode = context.get_context("mode")
+        if mode != context.PYNATIVE_MODE:
+            raise ValueError(f"The method 'move_to' is only supported in PyNative mode, but got: {mode}.")
+        return Tensor(Tensor_.move_to(self, to, blocking))
+
+
     def _offload(self):
         r"""
         Offload tensor parameter to host. Currently only supported in PyNative mode.
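`move_to` is the one genuinely new method in this hunk: it validates its arguments in Python and then defers to the C++ `Tensor_.move_to` binding. A usage sketch, assuming MindSpore 2.3 on a host where the target device is available; per the code above it must run in PyNative mode:

    import mindspore as ms
    from mindspore import Tensor

    ms.set_context(mode=ms.PYNATIVE_MODE)
    x = Tensor([1, 2, 3], ms.int64)
    y = x.move_to("CPU")                   # synchronous copy (blocking=True)
    z = x.move_to("CPU", blocking=False)   # asynchronous copy
    print(y.shape, y.dtype)                # (3,) Int64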
@@ -4799,7 +4774,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> x = ms.Tensor([1, 2, 3], ms.int64)
         >>> x._offload()
         """
-        self._init_check()
         return Tensor_._offload(self)
 
 
@@ -4841,9 +4815,9 @@ def _check_tensor_input(input_data=None, dtype=None, shape=None, init=None):
         raise ValueError("init, dtype and shape must have values at the same time.")
 
     if input_data is not None:
-        if isinstance(input_data, np.ndarray) and input_data.ndim
+        if isinstance(input_data, np.ndarray) and input_data.ndim >= 1 and input_data.size == 0:
             raise ValueError("input_data can not contain zero dimension.")
-        if isinstance(input_data, (tuple, list)) and np.array(input_data).ndim
+        if isinstance(input_data, (tuple, list)) and np.array(input_data).ndim >= 1 \
                 and np.array(input_data).size == 0:
             raise ValueError("input_data can not contain zero dimension.")
 
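The `_check_tensor_input` hunk adjusts the empty-input check (the removed condition is truncated in this rendering): inputs with at least one dimension and zero total elements are rejected. A sketch of the 2.3 behavior, assuming a standard install:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor

    Tensor(np.ones((2, 3)))        # accepted: non-empty array
    try:
        Tensor(np.ones((2, 0)))    # ndim >= 1 and size == 0 -> rejected
    except ValueError as e:
        print(e)                   # input_data can not contain zero dimension.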
@@ -4882,4 +4856,4 @@ def _check_astype_and_convert(dtype):
     return dtype
 
 
-tensor_operator_registry
+setattr(tensor_operator_registry, 'vm_compare', _vm_compare)
|