mindspore 2.2.14-cp38-cp38-manylinux1_x86_64.whl → 2.3.0rc2-cp38-cp38-manylinux1_x86_64.whl
This diff compares the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -4
- mindspore/_akg/akg/composite/build_module.py +155 -11
- mindspore/_akg/akg/config/repository.json +38 -0
- mindspore/_akg/akg/ms/info_version_adapt.py +29 -0
- mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -1
- mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +2 -1
- mindspore/_akg/akg/utils/composite_op_helper.py +4 -2
- mindspore/_akg/akg/utils/dump_ascend_meta.py +2 -2
- mindspore/_akg/akg/utils/gen_random.py +14 -8
- mindspore/_akg/akg/utils/op_dsl.py +11 -0
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +18 -8
- mindspore/_c_dataengine.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +78 -0
- mindspore/_extends/builtin_operations.py +2 -1
- mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
- mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
- mindspore/_extends/parse/__init__.py +18 -14
- mindspore/_extends/parse/compile_config.py +229 -0
- mindspore/_extends/parse/parser.py +155 -59
- mindspore/_extends/parse/resources.py +40 -7
- mindspore/_extends/parse/standard_method.py +127 -206
- mindspore/_extends/remote/kernel_build_server.py +2 -0
- mindspore/_mindspore_offline_debug.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _profiler.py} +13 -16
- mindspore/amp.py +24 -18
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +7 -3
- mindspore/common/_jit_fallback_utils.py +2 -3
- mindspore/common/_register_for_adapter.py +7 -0
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_stub_tensor.py +7 -1
- mindspore/common/_utils.py +5 -17
- mindspore/common/api.py +145 -50
- mindspore/common/auto_dynamic_shape.py +27 -14
- mindspore/common/dtype.py +9 -6
- mindspore/common/dump.py +5 -4
- mindspore/common/hook_handle.py +51 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +33 -13
- mindspore/common/lazy_inline.py +58 -17
- mindspore/common/mindir_util.py +12 -2
- mindspore/common/mutable.py +79 -14
- mindspore/common/parameter.py +24 -4
- mindspore/common/recompute.py +247 -0
- mindspore/common/seed.py +9 -9
- mindspore/common/sparse_tensor.py +251 -18
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +391 -465
- mindspore/communication/__init__.py +3 -3
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/management.py +53 -38
- mindspore/config/op_info.config +22 -54
- mindspore/context.py +176 -55
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +6 -6
- mindspore/dataset/audio/transforms.py +711 -158
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +2 -2
- mindspore/dataset/engine/datasets.py +72 -38
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +33 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +7 -7
- mindspore/dataset/engine/datasets_vision.py +75 -71
- mindspore/dataset/engine/offload.py +5 -7
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +408 -121
- mindspore/dataset/text/utils.py +9 -9
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/transforms.py +261 -76
- mindspore/dataset/utils/browse_dataset.py +9 -9
- mindspore/dataset/vision/__init__.py +3 -3
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/transforms.py +2264 -514
- mindspore/dataset/vision/utils.py +40 -9
- mindspore/dataset/vision/validators.py +7 -1
- mindspore/experimental/optim/__init__.py +12 -2
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +35 -34
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +40 -16
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +66 -121
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +15 -8
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +28 -19
- mindspore/hal/__init__.py +34 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/stream.py +339 -0
- mindspore/include/api/data_type.h +2 -2
- mindspore/include/api/dual_abi_helper.h +16 -3
- mindspore/include/api/model.h +1 -3
- mindspore/include/api/status.h +14 -0
- mindspore/include/c_api/model_c.h +173 -0
- mindspore/include/c_api/ms/base/types.h +1 -0
- mindspore/include/c_api/types_c.h +19 -0
- mindspore/include/dataset/execute.h +1 -3
- mindspore/include/mindapi/base/format.h +125 -23
- mindspore/include/mindapi/base/types.h +12 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +2044 -154
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +2044 -33
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/build_tbe_kernel.py +529 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/compiler.py +56 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/custom.py +1109 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/get_file_path.py +36 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/tbe_topi.py +556 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6318 -1760
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_add_custom.h +49 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +52 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +232 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +232 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.cpp +192 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.cpp +274 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +39 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/{libmindspore_ascend.so.1 → libmindspore_ascend.so.2} +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/log.py +2 -2
- mindspore/mindrecord/__init__.py +5 -1
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +25 -0
- mindspore/mindrecord/filewriter.py +74 -56
- mindspore/mindrecord/mindpage.py +40 -6
- mindspore/mindrecord/shardutils.py +3 -2
- mindspore/mindrecord/shardwriter.py +7 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
- mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
- mindspore/mindrecord/tools/csv_to_mr.py +4 -9
- mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
- mindspore/mint/__init__.py +457 -0
- mindspore/mint/nn/__init__.py +430 -0
- mindspore/mint/nn/functional.py +424 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +186 -0
- mindspore/multiprocessing/__init__.py +72 -0
- mindspore/nn/__init__.py +3 -0
- mindspore/nn/cell.py +131 -174
- mindspore/nn/dynamic_lr.py +2 -2
- mindspore/nn/extend/__init__.py +29 -0
- mindspore/nn/extend/basic.py +140 -0
- mindspore/nn/extend/embedding.py +143 -0
- mindspore/{rewrite/ast_creator_register.py → nn/extend/layer/__init__.py} +9 -19
- mindspore/nn/extend/layer/normalization.py +107 -0
- mindspore/nn/extend/pooling.py +117 -0
- mindspore/nn/generator.py +297 -0
- mindspore/nn/layer/activation.py +79 -90
- mindspore/nn/layer/basic.py +113 -81
- mindspore/nn/layer/channel_shuffle.py +3 -16
- mindspore/nn/layer/container.py +3 -3
- mindspore/nn/layer/conv.py +71 -71
- mindspore/nn/layer/embedding.py +105 -44
- mindspore/nn/layer/image.py +4 -7
- mindspore/nn/layer/normalization.py +52 -66
- mindspore/nn/layer/padding.py +30 -39
- mindspore/nn/layer/pooling.py +13 -9
- mindspore/nn/layer/rnn_cells.py +5 -15
- mindspore/nn/layer/rnns.py +6 -5
- mindspore/nn/layer/thor_layer.py +1 -2
- mindspore/nn/layer/timedistributed.py +1 -1
- mindspore/nn/layer/transformer.py +52 -50
- mindspore/nn/learning_rate_schedule.py +6 -5
- mindspore/nn/loss/loss.py +43 -64
- mindspore/nn/optim/ada_grad.py +4 -2
- mindspore/nn/optim/adadelta.py +3 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +102 -181
- mindspore/nn/optim/adamax.py +4 -2
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +4 -2
- mindspore/nn/optim/ftrl.py +31 -61
- mindspore/nn/optim/lamb.py +5 -3
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +6 -4
- mindspore/nn/optim/momentum.py +13 -25
- mindspore/nn/optim/optimizer.py +6 -3
- mindspore/nn/optim/proximal_ada_grad.py +4 -2
- mindspore/nn/optim/rmsprop.py +9 -3
- mindspore/nn/optim/rprop.py +4 -2
- mindspore/nn/optim/sgd.py +6 -5
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
- mindspore/nn/probability/distribution/beta.py +2 -2
- mindspore/nn/probability/distribution/categorical.py +4 -6
- mindspore/nn/probability/distribution/cauchy.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/poisson.py +2 -2
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +13 -1
- mindspore/nn/wrap/__init__.py +2 -1
- mindspore/nn/wrap/cell_wrapper.py +33 -12
- mindspore/nn/wrap/grad_reducer.py +148 -8
- mindspore/nn/wrap/loss_scale.py +7 -7
- mindspore/numpy/__init__.py +2 -0
- mindspore/numpy/array_creations.py +2 -0
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/fft.py +431 -0
- mindspore/numpy/math_ops.py +54 -60
- mindspore/numpy/utils.py +3 -0
- mindspore/ops/__init__.py +5 -4
- mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
- mindspore/ops/_grad_experimental/grad_comm_ops.py +14 -18
- mindspore/ops/_grad_experimental/grad_math_ops.py +68 -283
- mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
- mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
- mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/__init__.py +0 -1
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
- mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
- mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -3
- mindspore/ops/_op_impl/cpu/adam.py +2 -2
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
- mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
- mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
- mindspore/ops/_vmap/vmap_array_ops.py +137 -101
- mindspore/ops/_vmap/vmap_base.py +8 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
- mindspore/ops/_vmap/vmap_image_ops.py +70 -13
- mindspore/ops/_vmap/vmap_math_ops.py +101 -57
- mindspore/ops/_vmap/vmap_nn_ops.py +230 -97
- mindspore/ops/_vmap/vmap_other_ops.py +1 -1
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +205 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +257 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +171 -0
- mindspore/ops/auto_generate/gen_extend_func.py +404 -0
- mindspore/ops/auto_generate/gen_ops_def.py +5653 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +11623 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +359 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +118 -17
- mindspore/ops/composite/math_ops.py +9 -48
- mindspore/ops/composite/multitype_ops/_compile_utils.py +168 -602
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +24 -133
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
- mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
- mindspore/ops/deprecated.py +14 -3
- mindspore/ops/extend/__init__.py +54 -0
- mindspore/ops/extend/array_func.py +259 -0
- mindspore/ops/extend/math_func.py +76 -0
- mindspore/ops/extend/nn_func.py +384 -0
- mindspore/ops/function/__init__.py +37 -12
- mindspore/ops/function/array_func.py +702 -1867
- mindspore/ops/function/clip_func.py +19 -31
- mindspore/ops/function/debug_func.py +1 -4
- mindspore/ops/function/fft_func.py +31 -0
- mindspore/ops/function/grad/grad_func.py +24 -17
- mindspore/ops/function/image_func.py +27 -21
- mindspore/ops/function/linalg_func.py +35 -68
- mindspore/ops/function/math_func.py +639 -2531
- mindspore/ops/function/nn_func.py +1274 -832
- mindspore/ops/function/other_func.py +4 -5
- mindspore/ops/function/parameter_func.py +5 -93
- mindspore/ops/function/random_func.py +84 -71
- mindspore/ops/function/sparse_unary_func.py +9 -16
- mindspore/ops/function/spectral_func.py +1 -1
- mindspore/ops/function/vmap_func.py +14 -14
- mindspore/ops/functional.py +57 -63
- mindspore/ops/op_info_register.py +16 -43
- mindspore/ops/operations/__init__.py +19 -20
- mindspore/ops/operations/_grad_ops.py +20 -828
- mindspore/ops/operations/_inner_ops.py +180 -288
- mindspore/ops/operations/_scalar_ops.py +5 -480
- mindspore/ops/operations/_sequence_ops.py +6 -36
- mindspore/ops/operations/array_ops.py +83 -2697
- mindspore/ops/operations/comm_ops.py +38 -46
- mindspore/ops/operations/custom_ops.py +14 -96
- mindspore/ops/operations/debug_ops.py +100 -31
- mindspore/ops/operations/image_ops.py +1 -217
- mindspore/ops/operations/inner_ops.py +3 -38
- mindspore/ops/operations/linalg_ops.py +1 -49
- mindspore/{rewrite/ast_transformers → ops/operations/manually_defined}/__init__.py +11 -4
- mindspore/ops/operations/manually_defined/_inner.py +61 -0
- mindspore/ops/operations/manually_defined/ops_def.py +1716 -0
- mindspore/ops/operations/math_ops.py +581 -4629
- mindspore/ops/operations/nn_ops.py +260 -1941
- mindspore/ops/operations/other_ops.py +50 -42
- mindspore/ops/operations/random_ops.py +3 -52
- mindspore/ops/operations/sparse_ops.py +3 -3
- mindspore/ops/primitive.py +196 -96
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +257 -0
- mindspore/ops_generate/arg_handler.py +171 -0
- mindspore/ops_generate/gen_aclnn_implement.py +266 -0
- mindspore/ops_generate/gen_ops.py +1062 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
- mindspore/ops_generate/gen_pyboost_func.py +939 -0
- mindspore/ops_generate/gen_utils.py +188 -0
- mindspore/ops_generate/op_proto.py +138 -0
- mindspore/ops_generate/pyboost_utils.py +349 -0
- mindspore/ops_generate/template.py +238 -0
- mindspore/parallel/__init__.py +6 -4
- mindspore/parallel/_auto_parallel_context.py +52 -2
- mindspore/parallel/_cell_wrapper.py +16 -9
- mindspore/parallel/_cost_model_context.py +1 -1
- mindspore/parallel/_dp_allreduce_fusion.py +159 -159
- mindspore/parallel/_parallel_serialization.py +29 -13
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +19 -7
- mindspore/parallel/_transformer/__init__.py +1 -1
- mindspore/parallel/_transformer/layers.py +1 -1
- mindspore/parallel/_transformer/loss.py +1 -1
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/op_parallel_config.py +1 -1
- mindspore/parallel/_transformer/transformer.py +1 -1
- mindspore/parallel/_utils.py +147 -6
- mindspore/parallel/algo_parameter_config.py +6 -6
- mindspore/parallel/checkpoint_transform.py +180 -24
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +345 -0
- mindspore/parallel/cluster/process_entity/_utils.py +116 -0
- mindspore/parallel/cluster/run.py +139 -0
- mindspore/parallel/mpi/__init__.py +1 -1
- mindspore/parallel/mpi/_mpi_config.py +1 -1
- mindspore/parallel/parameter_broadcast.py +152 -0
- mindspore/parallel/shard.py +99 -2
- mindspore/profiler/common/util.py +20 -0
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +66 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +77 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +146 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +109 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +80 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +52 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +116 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +59 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
- mindspore/profiler/parser/ascend_flops_generator.py +20 -4
- mindspore/profiler/parser/ascend_hccl_generator.py +25 -277
- mindspore/profiler/parser/ascend_msprof_exporter.py +112 -132
- mindspore/profiler/parser/ascend_msprof_generator.py +73 -283
- mindspore/profiler/parser/ascend_op_generator.py +92 -42
- mindspore/profiler/parser/ascend_timeline_generator.py +294 -133
- mindspore/profiler/parser/base_timeline_generator.py +6 -0
- mindspore/profiler/parser/framework_parser.py +3 -2
- mindspore/profiler/parser/integrator.py +3 -1
- mindspore/profiler/parser/msadvisor_analyzer.py +1 -1
- mindspore/profiler/parser/msadvisor_parser.py +1 -1
- mindspore/profiler/parser/profiler_info.py +16 -1
- mindspore/profiler/profiling.py +305 -167
- mindspore/rewrite/__init__.py +2 -13
- mindspore/rewrite/api/node.py +121 -35
- mindspore/rewrite/api/pattern_engine.py +2 -3
- mindspore/rewrite/api/scoped_value.py +16 -15
- mindspore/rewrite/api/symbol_tree.py +45 -29
- mindspore/rewrite/ast_helpers/__init__.py +3 -6
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
- mindspore/rewrite/common/__init__.py +1 -2
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
- mindspore/rewrite/{namer.py → common/namer.py} +63 -18
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/node/__init__.py +5 -5
- mindspore/rewrite/node/call_function.py +23 -7
- mindspore/rewrite/node/cell_container.py +7 -3
- mindspore/rewrite/node/control_flow.py +53 -28
- mindspore/rewrite/node/node.py +212 -196
- mindspore/rewrite/node/node_manager.py +51 -22
- mindspore/rewrite/node/node_topological_manager.py +3 -23
- mindspore/rewrite/parsers/__init__.py +12 -0
- mindspore/rewrite/parsers/arguments_parser.py +8 -9
- mindspore/rewrite/parsers/assign_parser.py +635 -413
- mindspore/rewrite/parsers/attribute_parser.py +3 -4
- mindspore/rewrite/parsers/class_def_parser.py +107 -144
- mindspore/rewrite/parsers/constant_parser.py +5 -5
- mindspore/rewrite/parsers/container_parser.py +4 -6
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +31 -98
- mindspore/rewrite/parsers/function_def_parser.py +13 -5
- mindspore/rewrite/parsers/if_parser.py +28 -10
- mindspore/rewrite/parsers/module_parser.py +8 -182
- mindspore/rewrite/parsers/parser.py +1 -5
- mindspore/rewrite/parsers/parser_register.py +1 -1
- mindspore/rewrite/parsers/return_parser.py +5 -10
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
- mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
- mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
- mindspore/run_check/_check_version.py +6 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +9 -19
- mindspore/scipy/__init__.py +2 -1
- mindspore/scipy/fft.py +133 -0
- mindspore/scipy/linalg.py +140 -55
- mindspore/scipy/ops.py +15 -71
- mindspore/scipy/ops_grad.py +5 -34
- mindspore/scipy/optimize/line_search.py +2 -2
- mindspore/scipy/optimize/minimize.py +1 -1
- mindspore/train/__init__.py +3 -2
- mindspore/train/_utils.py +178 -4
- mindspore/train/amp.py +167 -245
- mindspore/train/anf_ir_pb2.py +8 -2
- mindspore/train/callback/_backup_and_restore.py +4 -4
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +39 -13
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_landscape.py +14 -8
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +7 -7
- mindspore/train/callback/_time_monitor.py +2 -2
- mindspore/train/data_sink.py +1 -1
- mindspore/train/dataset_helper.py +18 -4
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/accuracy.py +7 -7
- mindspore/train/metrics/confusion_matrix.py +8 -6
- mindspore/train/metrics/cosine_similarity.py +6 -4
- mindspore/train/metrics/error.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/perplexity.py +2 -1
- mindspore/train/metrics/topk.py +2 -2
- mindspore/train/mind_ir_pb2.py +89 -15
- mindspore/train/model.py +24 -22
- mindspore/train/serialization.py +257 -133
- mindspore/train/summary/summary_record.py +51 -28
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/version.py +1 -1
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +534 -1066
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +1 -0
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
- mindspore/config/super_bar_config.json +0 -544
- mindspore/gen_ops.py +0 -273
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/nn/layer/flash_attention.py +0 -189
- mindspore/ops/_op_impl/cpu/concat.py +0 -39
- mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
- mindspore/ops/_op_impl/tbe/__init__.py +0 -47
- mindspore/ops/_op_impl/tbe/abs.py +0 -38
- mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/acos.py +0 -37
- mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/acosh.py +0 -37
- mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
- mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
- mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
- mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
- mindspore/ops/_op_impl/tbe/add.py +0 -42
- mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/add_n.py +0 -39
- mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
- mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
- mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
- mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
- mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
- mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
- mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
- mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/asin.py +0 -37
- mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/asinh.py +0 -37
- mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/assign.py +0 -79
- mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
- mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
- mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/atan.py +0 -37
- mindspore/ops/_op_impl/tbe/atan2.py +0 -38
- mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/atanh.py +0 -37
- mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
- mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
- mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
- mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
- mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
- mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
- mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
- mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
- mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
- mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
- mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cast.py +0 -55
- mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/cdist.py +0 -38
- mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/ceil.py +0 -37
- mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/celu.py +0 -39
- mindspore/ops/_op_impl/tbe/centralization.py +0 -39
- mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
- mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/concat.py +0 -40
- mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
- mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
- mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
- mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
- mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
- mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/cos.py +0 -37
- mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/cosh.py +0 -37
- mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
- mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cummin.py +0 -41
- mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
- mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
- mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
- mindspore/ops/_op_impl/tbe/diag.py +0 -38
- mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
- mindspore/ops/_op_impl/tbe/dilation.py +0 -40
- mindspore/ops/_op_impl/tbe/div.py +0 -41
- mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
- mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
- mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
- mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
- mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
- mindspore/ops/_op_impl/tbe/elu.py +0 -38
- mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/equal.py +0 -42
- mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/erf.py +0 -37
- mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfc.py +0 -37
- mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
- mindspore/ops/_op_impl/tbe/exp.py +0 -40
- mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
- mindspore/ops/_op_impl/tbe/expm1.py +0 -37
- mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
- mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/fill.py +0 -56
- mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/flatten.py +0 -48
- mindspore/ops/_op_impl/tbe/floor.py +0 -37
- mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
- mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
- mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
- mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
- mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
- mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
- mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/ger.py +0 -43
- mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/greater.py +0 -43
- mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
- mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
- mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
- mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
- mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
- mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
- mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
- mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/im2col.py +0 -42
- mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
- mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
- mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/inv.py +0 -38
- mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/invert.py +0 -37
- mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/iou.py +0 -38
- mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/is_close.py +0 -40
- mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
- mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
- mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
- mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
- mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
- mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
- mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/lerp.py +0 -38
- mindspore/ops/_op_impl/tbe/less.py +0 -41
- mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/log.py +0 -40
- mindspore/ops/_op_impl/tbe/log1p.py +0 -37
- mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
- mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
- mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
- mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
- mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/matmul.py +0 -53
- mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
- mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
- mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
- mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum.py +0 -39
- mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
- mindspore/ops/_op_impl/tbe/minimum.py +0 -40
- mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mish.py +0 -37
- mindspore/ops/_op_impl/tbe/mod.py +0 -41
- mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/mul.py +0 -37
- mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
- mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
- mindspore/ops/_op_impl/tbe/neg.py +0 -39
- mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
- mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
- mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
- mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
- mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
- mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/pack.py +0 -58
- mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
- mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
- mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/pdist.py +0 -36
- mindspore/ops/_op_impl/tbe/pooling.py +0 -46
- mindspore/ops/_op_impl/tbe/population_count.py +0 -38
- mindspore/ops/_op_impl/tbe/pow.py +0 -41
- mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/prelu.py +0 -37
- mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/range.py +0 -39
- mindspore/ops/_op_impl/tbe/real_div.py +0 -38
- mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
- mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
- mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
- mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
- mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6.py +0 -38
- mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/renorm.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
- mindspore/ops/_op_impl/tbe/rint.py +0 -37
- mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roll.py +0 -42
- mindspore/ops/_op_impl/tbe/round.py +0 -38
- mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
- mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
- mindspore/ops/_op_impl/tbe/select.py +0 -38
- mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/selu.py +0 -39
- mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sgd.py +0 -62
- mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sign.py +0 -38
- mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/sin.py +0 -37
- mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sinh.py +0 -37
- mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/slice.py +0 -58
- mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
- mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax.py +0 -37
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
- mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/softplus.py +0 -37
- mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softsign.py +0 -37
- mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sort.py +0 -38
- mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/split_d.py +0 -38
- mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/split_v.py +0 -39
- mindspore/ops/_op_impl/tbe/splitv.py +0 -39
- mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/square.py +0 -38
- mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
- mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
- mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
- mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
- mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
- mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
- mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
- mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
- mindspore/ops/_op_impl/tbe/sub.py +0 -39
- mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tan.py +0 -38
- mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh.py +0 -37
- mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
- mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
- mindspore/ops/_op_impl/tbe/tile.py +0 -37
- mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
- mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
- mindspore/ops/_op_impl/tbe/transpose.py +0 -60
- mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
- mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
- mindspore/ops/_op_impl/tbe/trunc.py +0 -39
- mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/unpack.py +0 -38
- mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
- mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
- mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
- mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
- mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
- mindspore/ops/_tracefunc.py +0 -241
- mindspore/ops/arg_dtype_cast.py +0 -54
- mindspore/rewrite/api/tree_node_helper.py +0 -60
- mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
- mindspore/rewrite/namespace.py +0 -53
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
mindspore/common/tensor.py
CHANGED
@@ -27,13 +27,15 @@ from mindspore.common.seed import get_seed
 from mindspore import context
 from mindspore import log as logger
 from mindspore.common import dtype as mstype
+from mindspore.common.hook_handle import _TensorHookHandle
 
 from mindspore.common._utils import get_slice_num
 from mindspore.common._register_for_tensor import tensor_operator_registry
 from mindspore._c_expression import Tensor as Tensor_
 from mindspore import _checkparam as validator
-from mindspore._checkparam import check_is_number, is_stub_tensor
+from mindspore._checkparam import check_is_number, is_stub_tensor, check_hook_fn
 from mindspore._check_jit_forbidden_api import jit_forbidden_register
+from mindspore.common.symbol import Symbol
 
 np_types = (np.int8, np.int16, np.int32, np.int64,
             np.uint8, np.uint16, np.uint32, np.uint64, np.float16,
@@ -82,11 +84,11 @@ def tensor(input_data=None, dtype=None, shape=None, init=None, internal=False, c
     based on the `dtype` argument.
 
     Please refer to `Creating and Using Tensor
-    <https://www.mindspore.cn/docs/en/
+    <https://www.mindspore.cn/docs/en/master/note/static_graph_syntax_support.html#mindspore-user-defined-data-types>`_ .
 
     The difference between it and the Tensor class is that it adds
     `Annotation
-    <https://www.mindspore.cn/docs/en/
+    <https://www.mindspore.cn/docs/en/master/design/dynamic_graph_and_static_graph.html?#annotation-type>`_
     which can prevent the generation of AnyType compared to the Tensor class.
 
     The arguments and return values are the same as the Tensor class. Also see: :class:`mindspore.Tensor`.
@@ -114,22 +116,25 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     Tensor is a data structure that stores an n-dimensional array.
 
     Note:
-        If
+        If `init` interface is used to initialize `Tensor`, the `Tensor.init_data` API needs to be called to load the
        actual data to `Tensor`.
 
+    Warning:
+        To convert dtype of a `Tensor`, it is recommended to use `Tensor.astype()` rather than
+        `Tensor(sourceTensor, dtype=newDtype)`.
+
     Args:
         input_data (Union[Tensor, float, int, bool, tuple, list, numpy.ndarray]): The data to be stored. It can be
             another Tensor, Python number or NumPy ndarray. Default: ``None`` .
         dtype (:class:`mindspore.dtype`): Used to indicate the data type of the output Tensor. The argument should
             be defined in `mindspore.dtype`. If it is ``None`` , the data type of the output Tensor will be the same
             as the `input_data`. Default: ``None`` .
-        shape (Union[tuple, list, int]): Used to indicate the shape of the output Tensor.
-
-
-
-            must be set. Default: ``None`` .
+        shape (Union[tuple, list, int, :class:`mindspore.Symbol`]): Used to indicate the shape of the output Tensor.
+            If `input_data` is available, `shape` doesn't need to be set. If ``None`` or `Symbol` exists in `shape` ,
+            a tensor of dynamic shape is created, `input_data` doesn't need to be set; if only integers exist in
+            `shape`, a tensor of static shape is created, `input_data` or `init` must be set. Default: ``None`` .
         init (Initializer): The information of init data.
-
+            `init` is used for delayed initialization in parallel mode, when using init, `dtype` and `shape` must be
             set. Default: ``None`` .
         internal (bool): Whether it is created by the framework.
             ``'True'`` means that the tensor is created by framework.
@@ -142,9 +147,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Tensor.
 
     Note:
-        The default value None of `input_data` works as a placeholder,
+        The default value ``None`` of `input_data` works as a placeholder,
+        it does not mean that we can create a NoneType
         Tensor.
-        Tensor with shape contains 0 is not fully tested and supported.
+        Tensor with `shape` contains 0 is not fully tested and supported.
 
     Examples:
         >>> import numpy as np
@@ -200,6 +206,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def __init__(self, input_data=None, dtype=None, shape=None, init=None, internal=False, const_arg=False):
         self.init_finished = False
+        if isinstance(input_data, (Tensor, Tensor_)) and dtype is not None:
+            logger.info("It is suggested to use 'Tensor.astype()' to convert the dtype of a Tensor.")
+            _cast = tensor_operator_registry.get("cast")
+            input_data = _cast(input_data, dtype)
+
         if is_stub_tensor(input_data):
             input_data = input_data.stub_sync()
 
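The hunk above changes the constructor itself: passing an existing Tensor together with a `dtype` now logs a hint and casts through the operator registry, while the new docstring Warning steers users toward `Tensor.astype()`. A minimal sketch of the two paths, assuming the 2.3.0rc2 semantics shown in this diff:

    import numpy as np
    import mindspore as ms

    t = ms.Tensor(np.arange(4), ms.int32)
    u = t.astype(ms.float32)            # recommended dtype conversion
    v = ms.Tensor(t, dtype=ms.float32)  # still works, but now logs an info hint and casts internally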
@@ -218,8 +229,16 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         if isinstance(input_data, np_types):
             input_data = np.array(input_data)
 
-        if
-
+        if shape is not None:
+            if isinstance(shape, numbers.Number):
+                shape = (shape,)
+            elif isinstance(shape, Symbol):
+                self.symbolic_shape = [shape]
+                shape = (None,)
+            elif isinstance(shape, (list, tuple)) and any(isinstance(s, Symbol) for s in shape):
+                self.symbolic_shape = [item.to_dict() if isinstance(item, Symbol) else item for item in shape]
+                shape_without_symbol = (None if isinstance(item, Symbol) else item for item in shape)
+                shape = list(shape_without_symbol) if isinstance(shape, list) else tuple(shape_without_symbol)
 
         _check_tensor_input(input_data, dtype, shape, init)
 
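The new `shape` branch above is what enables symbolic dynamic shapes: `Symbol` entries are recorded in `symbolic_shape` and replaced by `None` placeholders before the usual shape check runs. A minimal sketch, assuming `Symbol` is exported at the package top level with a default constructor (as the new `mindspore/common/symbol.py` in this release suggests):

    import mindspore as ms
    from mindspore import Symbol

    s = Symbol()                                     # one symbolic dimension (default constraints)
    dyn = ms.Tensor(shape=(s, 3), dtype=ms.float32)  # Symbol becomes a None placeholder,
                                                     # recorded in dyn.symbolic_shape per the hunk above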
@@ -258,6 +277,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self.slice_num_of_persistent_data_ = None
         self.slice_shape_of_persistent_data_ = None
 
+        # the auto gradient information
+        self._grad = None
+        self._grad_fn = None
+        self._requires_grad = False
+        self._retain_grad = False
+
     @classmethod
     def __subclasshook__(cls, sub):
         """
@@ -295,19 +320,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def __eq__(self, other):
         if not isinstance(other, (int, float, Tensor)):
             return False
-        # bool type is not supported for `Equal` operator in backend.
-        if self.dtype == mstype.bool_ or (isinstance(other, Tensor) and other.dtype == mstype.bool_):
-            if isinstance(other, Tensor):
-                return Tensor(np.array(self.asnumpy() == other.asnumpy()))
-            return Tensor(np.array(self.asnumpy() == other))
         return tensor_operator_registry.get('__eq__')(self, other)
 
     def __ne__(self, other):
         if not isinstance(other, (int, float, Tensor)):
             return True
-        # bool type is not supported for `NotEqual` operator in backend.
-        if self.dtype == mstype.bool_ or (isinstance(other, Tensor) and other.dtype == mstype.bool_):
-            return Tensor(np.array(self.asnumpy() != other.asnumpy()))
         return tensor_operator_registry.get('__ne__')(self, other)
 
     def __hash__(self):
@@ -322,11 +339,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         return out
 
     def __round__(self):
-        out = tensor_operator_registry.get('round')(
+        out = tensor_operator_registry.get('round')(self)
         return out
 
     def __bool__(self):
-
+        if self.dtype == mstype.bfloat16:
+            data = self.float().asnumpy()
+        else:
+            data = self.asnumpy()
         if data.shape == ():
             return bool(data)
         if data.shape == (1,):
@@ -342,15 +362,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             raise ValueError(message)
 
     def __int__(self):
-
+        if self.dtype == mstype.bfloat16:
+            data = self.float().asnumpy()
+        else:
+            data = self.asnumpy()
         return self._convert_scalar_(data, int, "Only one element tensors can be converted to Python scalars")
 
     def __float__(self):
-
+        if self.dtype == mstype.bfloat16:
+            data = self.float().asnumpy()
+        else:
+            data = self.asnumpy()
         return self._convert_scalar_(data, float, "Only one element tensors can be converted to Python scalars")
 
     def __index__(self):
-
+        if self.dtype == mstype.bfloat16:
+            data = self.float().asnumpy()
+        else:
+            data = self.asnumpy()
         if data.dtype not in ["int8", "int16", "int32", "int64", "bool"]:
             raise ValueError("Only integer tensors of a single element can be converted to an index.")
         return self._convert_scalar_(data, int,
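NumPy has no native bfloat16, so the scalar conversions above now upcast through `.float()` before calling `asnumpy()`. A sketch of the effect, assuming a backend build where bfloat16 tensors can be created:

    import mindspore as ms

    t = ms.Tensor([2.5], ms.bfloat16)  # bfloat16 support depends on the build (assumption)
    print(float(t))                    # routed through t.float().asnumpy() per the hunk above
    print(int(t))                      # same upcast path before the scalar conversion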
@@ -360,7 +389,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         return self
 
     def __abs__(self):
-        self._init_check()
         return tensor_operator_registry.get('abs')(self)
 
     def __add__(self, other):
@@ -544,6 +572,83 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return len(self._shape)
 
+    @property
+    def grad(self):
+        r"""
+        Get the gradient value.
+        """
+        return self._grad
+
+    @grad.setter
+    def grad(self, grad):
+        r"""
+        Set the gradient value.
+        """
+        self._grad = grad
+
+    @property
+    def grad_fn(self):
+        r"""
+        The function for backward.
+        """
+        return self._grad_fn
+
+    @grad_fn.setter
+    def grad_fn(self, grad_fn):
+        r"""
+        Set the function for backward.
+        """
+        self._grad_fn = grad_fn
+
+    @property
+    def is_leaf(self):
+        r"""
+        Whether the stub tensor is leaf.
+        They will be a leaf if they have requires_grad and requires_grad is False,
+        Or they were created by user.
+        """
+        return self._requires_grad is False or self._grad_fn is None
+
+    @property
+    def requires_grad(self):
+        r"""
+        Whether the stub tensor need requires grad.
+        """
+        return self._requires_grad
+
+    @requires_grad.setter
+    def requires_grad(self, requires_grad):
+        r"""
+        Mark the stub tensor whether need requires gradient.
+        """
+        self._requires_grad = requires_grad
+
+    def retain_grad(self):
+        r"""
+        Enable the stub tensor which is not non-leaf to have the grad during backward().
+        """
+        if not self._requires_grad:
+            RuntimeError("can't retain_grad on Tensor that has requires_grad = False.")
+        self._retain_grad = self._grad_fn is not None
+
+    @property
+    def retains_grad(self):
+        r"""
+        Is True if the stub tensor is non-leaf and its grad is enabled to be populated during backward().
+        """
+        return self._retain_grad
+
+    def backward(self, grad=None):
+        r"""
+        Calculate the gradient.
+        """
+        if grad is None:
+            grad = Tensor(np.ones(self.shape), self.dtype)
+        if self._grad_fn is not None:
+            self._grad_fn.apply(grad)
+        elif self._requires_grad:
+            self._grad = grad
+
     @property
     def H(self):
         """
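The block above gives Tensor a PyTorch-style autograd surface (`grad`, `grad_fn`, `requires_grad`, `retain_grad`, `backward`). Reading only the code in this hunk: calling `backward()` on a leaf that has `requires_grad` set and no `grad_fn` simply stores the incoming gradient, which defaults to all ones. A minimal sketch of that path:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.ones((2, 2)), ms.float32)
    x.requires_grad = True  # mark the leaf for gradient tracking
    x.backward()            # no grad_fn on a leaf: stores a ones tensor into x.grad (see hunk above)
    print(x.grad)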
@@ -644,6 +749,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1 3]
          [2 4]]
         """
+        if self.ndim <= 1:
+            return self
         return self.transpose()
 
     @staticmethod
@@ -710,28 +817,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.arccosh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('acosh')(self)
 
     def arcsin(self):
         r"""
         For details, please refer to :func:`mindspore.ops.arcsin`.
         """
-        self._init_check()
         return tensor_operator_registry.get('asin')(self)
 
     def arctan(self):
         r"""
         For details, please refer to :func:`mindspore.ops.arctan`.
         """
-        self._init_check()
         return tensor_operator_registry.get('atan')(self)
 
     def arctan2(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.arctan2`.
         """
-        self._init_check()
         return tensor_operator_registry.get('atan2')(self, other)
 
     def cauchy(self, median=0.0, sigma=1.0):
@@ -766,7 +869,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[8.79836142e-01, 9.37541723e-01]])
 
         """
-        self._init_check()
         out = tensor_operator_registry.get('cauchy')(list(self.shape), median, sigma)()
         return out.astype(self.dtype)
 
@@ -804,7 +906,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1.2788825 2.3305743]
          [14.944194 0.16303174]]
         """
-        self._init_check()
         return tensor_operator_registry.get('log_normal')(mean, std)(self)
 
     @jit_forbidden_register
@@ -837,29 +938,23 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.bincount`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bincount')(self, weights, minlength)
 
     def chunk(self, chunks, axis=0):
         r"""
         For details, please refer to :func:`mindspore.ops.chunk`.
         """
-        self._init_check()
         return tensor_operator_registry.get('chunk')(self, chunks, axis)
 
     def item(self, index=None):
         """
         Get the item at the specified index of the tensor.
 
-        Note:
-            Tensor.item returns a Tensor scalar instead of a Python scalar. And if the tensor is a Tensor scalar,
-            Tensor.item will return the numpy.ndarray.
-
         Args:
             index (Union[None, int, tuple(int)]): The index in Tensor. Default: ``None``.
 
         Returns:
-            A
+            A scalar, type is defined by the dtype of the Tensor.
 
         Raises:
             ValueError: If the length of the `index` is not equal to self.ndim.
@@ -877,7 +972,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(x.item())
         1.2
         """
-
+
+        if index is not None:
+            output = self.asnumpy().item(index)
+        else:
+            output = self.asnumpy().item()
         return output
 
     def itemset(self, *args):
@@ -936,7 +1035,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(x.get_bytes())
         b'\x01\x00\x02\x00\x03\x00'
         """
-        self._init_check()
         return Tensor_.get_bytes(self)
 
     def asnumpy(self):
@@ -958,7 +1056,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(y)
         [11. 2.]
         """
-        self.
+        if self.has_init:
+            self.init_data()
         return Tensor_.asnumpy(self)
 
     def numpy(self):
@@ -1002,21 +1101,18 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.slice_scatter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('slice_scatter')(self, src, axis, start, end, step)
 
     def select_scatter(self, src, axis, index):
         """
         For details, please refer to :func:`mindspore.ops.select_scatter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('select_scatter')(self, src, axis, index)
 
     def histc(self, bins=100, min=0., max=0.):
         """
         For details, please refer to :func:`mindspore.ops.histc`.
         """
-        self._init_check()
         validator.check_value_type('min', min, (int, float,), 'Tensor.histc')
         validator.check_value_type('max', max, (int, float,), 'Tensor.histc')
         return tensor_operator_registry.get('histc')(self, bins, float(min), float(max))
@@ -1025,7 +1121,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.geqrf`.
         """
-        self._init_check()
         return tensor_operator_registry.get('geqrf')(self)
 
     def slice_shape_of_persistent_data(self):
@@ -1067,14 +1162,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> from mindspore import Tensor, ops
         >>> x = Tensor([[1, 2, 3], [4, 5, 6]], dtype=ms.float32)
         >>> y = ops.transpose(x, (1, 0))
-        >>> y.contiguous()
-        >>>
-
-        [[1. 2. 3.]
-         [4. 5. 6.]]
+        >>> z = y.contiguous()
+        >>> print(z.is_contiguous())
+        True
         """
-
-        return self
+        return tensor_operator_registry.get('contiguous')(self)
 
     def is_contiguous(self):
         """
@@ -1094,6 +1186,95 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return Tensor_.is_contiguous(self)
 
+    def stride(self, dim=None):
+        """
+        The stride to jump from one element to the next in the input dim.
+        When no parameters are passed in, a list of stride for all dimensions is returned.
+
+        Args:
+            dim (int): The dim of stride from one element to the next.
+
+        Returns:
+            Int, the stride of tensor.
+
+        Raises:
+            TypeError: `dim` is not an int.
+
+        Examples:
+            >>> import mindspore as ms
+            >>> x = ms.Tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], dtype=ms.float32)
+            >>> x.stride()
+            [5, 1]
+        """
+        stride = Tensor_.stride(self)
+        if dim is None:
+            return stride
+        return stride[dim]
+
+    def storage_offset(self):
+        """
+        Tensor's offset in the underlying storage in terms of the number of storage elements.
+
+        Returns:
+            int, tensor's offset in the underlying storage in terms of number of storage elements.
+
+        Examples:
+            >>> import mindspore as ms
+            >>> x = ms.Tensor([1, 2, 3, 4, 5], dtype=ms.float32)
+            >>> ret = x.storage_offset()
+            >>> print(ret)
+            0
+        """
+        return Tensor_.storage_offset(self)
+
+    def register_hook(self, hook_fn):
+        """
+        Registers a backward hook for tensor.
+
+        Note:
+            - The `register_backward_hook(hook_fn)` does not work in graph mode or functions decorated with 'jit'.
+            - The 'hook_fn' must be defined as the following code. `grad` is the gradient passed to the tensor,
+              which may be modified by returning a new output gradient.
+            - The 'hook_fn' should have the following signature:
+              hook_fn(grad) -> New output gradient, but can not return None or not set return value.
+
+        Args:
+            hook_fn (function): Python function. Tensor backward hook function.
+
+        Returns:
+            A handle corresponding to the `hook_fn` . The handle can be used to remove the added `hook_fn` by calling
+            `handle.remove()` .
+
+        Raises:
+            TypeError: If the `hook_fn` is not a function of python.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import mindspore as ms
+            >>> from mindspore import Tensor
+            >>> ms.set_context(mode=ms.PYNATIVE_MODE)
+            >>> def hook_fn(grad):
+            ...     return grad * 2
+            ...
+            >>> def hook_test(x, y):
+            ...     z = x * y
+            ...     z.register_hook(hook_fn)
+            ...     z = z * y
+            ...     return z
+            ...
+            >>> ms_grad = ms.grad(hook_test, grad_position=(0,1))
+            >>> output = ms_grad(Tensor(1, ms.float32), Tensor(2, ms.float32))
+            >>> print(output)
+            (Tensor(shape=[], dtype=Float32, value=8), Tensor(shape=[], dtype=Float32, value=6))
+        """
+        if not check_hook_fn("register_hook", hook_fn):
+            return _TensorHookHandle()
+        handle = _TensorHookHandle()
+        handle.id = Tensor_.register_hook(self, hook_fn)
+        return handle
+
     def flush_from_cache(self):
         """
         Flush cache data to host if tensor is cache enable.
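Beyond the docstring examples, the three methods added above compose naturally: `stride()` and `storage_offset()` expose the underlying layout, and the handle returned by `register_hook` can detach the hook again. A small sketch, assuming PyNative mode:

    import mindspore as ms

    x = ms.Tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    print(x.stride())          # element strides per dimension, e.g. [3, 1] for this 2x3 tensor
    print(x.storage_offset())  # 0 for a freshly created tensor

    handle = x.register_hook(lambda grad: grad * 2)  # double any gradient flowing into x
    handle.remove()                                  # detach the hook again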
@@ -1106,35 +1287,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(y)
         None
         """
-        self._init_check()
         Tensor_._flush_from_cache(self)
 
     def addcdiv(self, tensor1, tensor2, value=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addcdiv`.
         """
-
-        return tensor_operator_registry.get('addcdiv')()(self, tensor1, tensor2, value)
+        return tensor_operator_registry.get('addcdiv')(self, tensor1, tensor2, value)
 
     def addcmul(self, tensor1, tensor2, value=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addcmul`.
         """
-
-        return tensor_operator_registry.get('addcmul')()(self, tensor1, tensor2, value)
+        return tensor_operator_registry.get('addcmul')(self, tensor1, tensor2, value)
 
     def add(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.add`.
         """
-
-        return tensor_operator_registry.get('add')()(self, other)
+        return tensor_operator_registry.get('add')(self, other)
 
     def subtract(self, other, *, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.subtract`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sub')(self, alpha * other)
 
     def true_divide(self, value):
@@ -1142,7 +1318,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Alias for Tensor.div() with :math:`rounding\_mode=None`.
         For details, please refer to :func:`mindspore.ops.div`.
         """
-        self._init_check()
         return tensor_operator_registry.get('div')(self, value, rounding_mode=None)
 
     def triu(self, diagonal=0):
@@ -1153,7 +1328,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             This is an experimental API that is subject to change or deletion.
 
         """
-        self._init_check()
         validator.check_value_type('diagonal', diagonal, [int], 'triu')
         return tensor_operator_registry.get('triu')(self, diagonal)
 
@@ -1161,65 +1335,56 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.addbmm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('addbmm')(self, batch1, batch2, beta=beta, alpha=alpha)
 
     def addmm(self, mat1, mat2, *, beta=1, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addmm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('addmm')(self, mat1, mat2, beta=beta, alpha=alpha)
 
     def addr(self, vec1, vec2, beta=1, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.addr`.
         """
-        self._init_check()
         return tensor_operator_registry.get('addr')(self, vec1, vec2, beta=beta, alpha=alpha)
 
     def adjoint(self):
         r"""
         For details, please refer to :func:`mindspore.ops.adjoint`.
         """
-        self._init_check()
         return tensor_operator_registry.get('adjoint')(self)
 
     def all(self, axis=None, keep_dims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.all`.
         """
-        self._init_check()
         return tensor_operator_registry.get('all')(self, axis, keep_dims)
 
     def angle(self):
         r"""
         For details, please refer to :func:`mindspore.ops.angle`.
         """
-        self._init_check()
         return tensor_operator_registry.get('angle')(self)
 
     def any(self, axis=None, keep_dims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.any`.
         """
-        self._init_check()
         if axis is None:
             axis = ()
-        return tensor_operator_registry.get('any')(
+        return tensor_operator_registry.get('any')(self, axis, keep_dims)
 
     def atan2(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.atan2`.
         """
-        self._init_check()
         return tensor_operator_registry.get('atan2')(self, other)
 
     def baddbmm(self, batch1, batch2, beta=1, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.baddbmm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('baddbmm')(self, batch1, batch2, beta=beta, alpha=alpha)
 
     def view(self, *shape):
@@ -1243,7 +1408,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
          [3. 2.]
          [3. 4.]]
         """
-        self._init_check()
         if not shape:
             raise ValueError("The shape variable should not be empty")
         if isinstance(shape[0], tuple):
@@ -1277,7 +1441,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output)
         [1. 2. 3. 2. 3. 4.]
         """
-        self._init_check()
         if not isinstance(other, (Tensor, Tensor_)):
             raise TypeError(f"For view_as, the input other must be a Tensor, but got {type(other)}")
         return self.view(other.shape)
@@ -1286,42 +1449,36 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.t`.
         """
-        self._init_check()
         return tensor_operator_registry.get("t")(self)
 
     def bitwise_and(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_and`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bitwise_and')(self, other)
 
     def bitwise_or(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_or`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bitwise_or')(self, other)
 
     def bitwise_xor(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_xor`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bitwise_xor')(self, other)
 
     def bitwise_left_shift(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_left_shift`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bitwise_left_shift')(self, other)
 
     def bitwise_right_shift(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_right_shift`.
         """
-        self._init_check()
         _cast = tensor_operator_registry.get('cast')
         other = _cast(other, self.dtype)
         return tensor_operator_registry.get('bitwise_right_shift')(self, other)
@@ -1330,50 +1487,43 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.scatter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('scatter')(self, axis, index, src)
 
     def scatter_mul(self, indices, updates):
         """
         For details, please refer to :func:`mindspore.ops.scatter_mul`.
         """
-        self._init_check()
         return tensor_operator_registry.get('tensor_scatter_mul')(self, indices, updates)
 
     def scatter_div(self, indices, updates):
         """
         For details, please refer to :func:`mindspore.ops.scatter_div`.
         """
-        self._init_check()
         return tensor_operator_registry.get('tensor_scatter_div')(self, indices, updates)
 
     def ger(self, vec2):
         """
         For details, please refer to :func:`mindspore.ops.ger`.
         """
-        self._init_check()
         return tensor_operator_registry.get('ger')(self, vec2)
 
     def gt(self, x):
         """
         For details, please refer to :func:`mindspore.ops.gt`.
         """
-
-        return tensor_operator_registry.get('gt')()(self, x)
+        return tensor_operator_registry.get('gt')(self, x)
 
     def ge(self, x):
         """
         For details, please refer to :func:`mindspore.ops.ge`.
         """
-
-        return tensor_operator_registry.get('ge')()(self, x)
+        return tensor_operator_registry.get('ge')(self, x)
 
     def broadcast_to(self, shape):
         """
         For details, please refer to :func:`mindspore.ops.broadcast_to`.
         """
-
-        return tensor_operator_registry.get('broadcast_to')(shape)(self)
+        return tensor_operator_registry.get('broadcast_to')(self, shape)
 
     def expand_as(self, x):
         """
@@ -1397,84 +1547,72 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1. 2. 3.]
          [1. 2. 3.]]
         """
-
-        return tensor_operator_registry.get('broadcast_to')(x.shape)(self)
+        return tensor_operator_registry.get('broadcast_to')(self, x.shape)
 
     def exp(self):
         """
         For details, please refer to :func:`mindspore.ops.exp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('exp')(self)
 
     def real(self):
         r"""
         For details, please refer to :func:`mindspore.ops.real`.
         """
-        self._init_check()
         return tensor_operator_registry.get('real')(self)
 
     def rsqrt(self):
         r"""
         For details, please refer to :func:`mindspore.ops.rsqrt`.
         """
-        self._init_check()
         return tensor_operator_registry.get('rsqrt')(self)
 
     def reciprocal(self):
         r"""
         For details, please refer to :func:`mindspore.ops.reciprocal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('reciprocal')(self)
 
     def sqrt(self):
         """
         For details, please refer to :func:`mindspore.ops.sqrt`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sqrt')(self)
 
     def square(self):
         """
         For details, please refer to :func:`mindspore.ops.square`.
         """
-        self._init_check()
         return tensor_operator_registry.get('square')(self)
 
     def sub(self, y):
         r"""
         For details, please refer to :func:`mindspore.ops.sub`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sub')(self, y)
 
     def tan(self):
         """
         For details, please refer to :func:`mindspore.ops.tan`.
         """
-
-        return tensor_operator_registry.get('tan')()(self)
+        return tensor_operator_registry.get('tan')(self)
 
     def tanh(self):
         r"""
         For details, please refer to :func:`mindspore.ops.tanh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('tanh')(self)
 
     def cosh(self):
         r"""
         For details, please refer to :func:`mindspore.ops.cosh`.
         """
-
-        return tensor_operator_registry.get('cosh')()(self)
+        return tensor_operator_registry.get('cosh')(self)
 
     def acos(self):
         r"""
         For details, please refer to :func:`mindspore.ops.acos`.
         """
-        self._init_check()
         return tensor_operator_registry.get('acos')(self)
 
     def arccos(self):
@@ -1487,35 +1625,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.cos`.
         """
-        self._init_check()
         return tensor_operator_registry.get('cos')(self)
 
     def cov(self, *, correction=1, fweights=None, aweights=None):
         r"""
         For details, please refer to :func:`mindspore.ops.cov`.
         """
-        self._init_check()
         return tensor_operator_registry.get('cov')(self, correction=correction, fweights=fweights, aweights=aweights)
 
     def acosh(self):
         """
         For details, please refer to :func:`mindspore.ops.acosh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('acosh')(self)
 
     def asin(self):
         r"""
         For details, please refer to :func:`mindspore.ops.asin`.
         """
-        self._init_check()
         return tensor_operator_registry.get('asin')(self)
 
     def abs(self):
         """
         For details, please refer to :func:`mindspore.ops.abs`.
         """
-        self._init_check()
         return tensor_operator_registry.get('abs')(self)
 
     def absolute(self):
@@ -1528,14 +1661,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.ceil`.
         """
-
-        return tensor_operator_registry.get('ceil')()(self)
+        return tensor_operator_registry.get('ceil')(self)
 
     def floor(self):
         """
         For details, please refer to :func:`mindspore.ops.floor`.
         """
-        self._init_check()
         return tensor_operator_registry.get('floor')(self)
 
     def floor_divide(self, other):
@@ -1545,21 +1676,18 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         .. warning::
             This is an experimental API that is subject to change or deletion.
         """
-        self._init_check()
         return tensor_operator_registry.get('floor_divide')(self, other)
 
     def lerp(self, end, weight):
         """
         For details, please refer to :func:`mindspore.ops.lerp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('lerp')(self, end, weight)
 
     def negative(self):
         r"""
         For details, please refer to :func:`mindspore.ops.negative`.
         """
-        self._init_check()
         return tensor_operator_registry.get("negative")(self)
 
     # pylint: disable=redefined-builtin
@@ -1567,14 +1695,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.norm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('norm')(self, ord, dim, keepdim, dtype=dtype)
 
     def renorm(self, p, axis, maxnorm):
         """
         For details, please refer to :func:`mindspore.ops.renorm`.
         """
-        self._init_check()
         return tensor_operator_registry.get("renorm")(self, p, axis, maxnorm)
 
     def approximate_equal(self, other, tolerance=1e-5):
@@ -1584,7 +1710,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         validator.check_isinstance("x", self, Tensor)
         validator.check_isinstance("y", other, Tensor)
         validator.check_isinstance("tolerance", tolerance, float)
-        self._init_check()
         input_x = self.copy() if self.dtype == mstype.float32 else self.astype(mstype.float16)
         input_y = other.copy() if other.dtype == mstype.float32 else other.astype(mstype.float16)
         return tensor_operator_registry.get('__lt__')(tensor_operator_registry.get('abs')(
@@ -1595,14 +1720,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.log1p`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log1p')(self)
 
     def logit(self, eps=None):
         r"""
         For details, please refer to :func:`mindspore.ops.logit`.
         """
-        self._init_check()
         if eps is None:
             eps = -1.0
         validator.check_value_type('eps', eps, (float,), 'Tensor.logit')
@@ -1612,14 +1735,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.logaddexp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logaddexp')(self, other)
 
     def logaddexp2(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.logaddexp2`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logaddexp2')(self, other)
 
     def logcumsumexp(self, axis):
@@ -1629,149 +1750,128 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         .. warning::
             This is an experimental API that is subject to change or deletion.
         """
-        self._init_check()
         return tensor_operator_registry.get('logcumsumexp')(self, axis)
 
     def logsumexp(self, axis, keepdims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.logsumexp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logsumexp')(self, axis, keepdims)
 
     def logdet(self):
         r"""
         For details, please refer to :func:`mindspore.ops.logdet`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logdet')(self)
 
     def i0(self):
         r"""
         For details, please refer to :func:`mindspore.ops.i0`.
         """
-        self._init_check()
         return tensor_operator_registry.get('i0')(self)
 
     def isclose(self, x2, rtol=1e-05, atol=1e-08, equal_nan=False):
         """
         For details, please refer to :func:`mindspore.ops.isclose`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isclose')(self, x2, rtol, atol, equal_nan)
 
     def isneginf(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isneginf`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isneginf')(self)
 
     def isposinf(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isposinf`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isposinf')(self)
 
     def isreal(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isreal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isreal')(self)
 
     def isfinite(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isfinite`.
         """
-
-        return tensor_operator_registry.get('isfinite')()(self)
+        return tensor_operator_registry.get('isfinite')(self)
 
     def is_complex(self):
         r"""
         For details, please refer to :func:`mindspore.ops.is_complex`.
         """
-        self._init_check()
         return tensor_operator_registry.get('is_complex')(self)
 
     def inv(self):
         r"""
         For details, please refer to :func:`mindspore.ops.inv`.
         """
-        self._init_check()
         return tensor_operator_registry.get('inv')(self)
 
     def inverse(self):
         r"""
         For details, please refer to :func:`mindspore.ops.inverse`.
         """
-        self._init_check()
         return tensor_operator_registry.get('inverse')(self)
 
     def invert(self):
         r"""
         For details, please refer to :func:`mindspore.ops.invert`.
         """
-        self._init_check()
         return tensor_operator_registry.get('invert')(self)
 
     def pow(self, exponent):
         r"""
         For details, please refer to :func:`mindspore.ops.pow`.
         """
-
-        return tensor_operator_registry.get('pow')()(self, exponent)
+        return tensor_operator_registry.get('pow')(self, exponent)
 
     def log(self):
         """
         For details, please refer to :func:`mindspore.ops.log`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log')(self)
 
     def log10(self):
         r"""
         For details, please refer to :func:`mindspore.ops.log10`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log10')(self)
 
     def log2(self):
         r"""
         For details, please refer to :func:`mindspore.ops.log2`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log2')(self)
 
     def mean(self, axis=None, keep_dims=False):
         """
         For details, please refer to :func:`mindspore.ops.mean`.
         """
-        self._init_check()
         return tensor_operator_registry.get('mean')(self, axis, keep_dims)
 
     def amin(self, axis=None, keepdims=False, *, initial=None, where=None):
         """
         For details, please refer to :func:`mindspore.ops.amin`.
         """
-        self._init_check()
         if axis is None:
             axis = ()
         return tensor_operator_registry.get('amin')(self, axis, keepdims, initial=initial, where=where)
 
     def reverse(self, axis):
         """
-        For details, please refer to :func:`mindspore.ops.
+        For details, please refer to :func:`mindspore.ops.flip`.
         """
-
-        return tensor_operator_registry.get('reverse')(axis)(self)
+        return tensor_operator_registry.get('flip')(self, axis)
 
     def amax(self, axis=None, keepdims=False, *, initial=None, where=None):
         """
         For details, please refer to :func:`mindspore.ops.amax`.
         """
-        self._init_check()
         if axis is None:
             axis = ()
         return tensor_operator_registry.get('amax')(self, axis, keepdims, initial=initial, where=where)
@@ -1780,28 +1880,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.aminmax`.
         """
-        self._init_check()
         return tensor_operator_registry.get('aminmax')(self, axis=axis, keepdims=keepdims)
 
     def reverse_sequence(self, seq_lengths, seq_dim=0, batch_dim=0):
         """
         For details, please refer to :func:`mindspore.ops.reverse_sequence`.
         """
-
-        return tensor_operator_registry.get("reverse_sequence")(seq_dim, batch_dim)(self, seq_lengths)
+        return tensor_operator_registry.get("reverse_sequence")(self, seq_lengths, seq_dim, batch_dim)
 
-    def prod(self, axis=None, keep_dims=False):
+    def prod(self, axis=None, keep_dims=False, dtype=None):
         """
         For details, please refer to :func:`mindspore.ops.prod`.
         """
-
-        return tensor_operator_registry.get('prod')(self, axis, keep_dims)
+        return tensor_operator_registry.get('prod')(self, axis, keep_dims, dtype)
 
     def select(self, condition, y):
         r"""
         For details, please refer to :func:`mindspore.ops.select`.
         """
-        self._init_check()
         if not isinstance(condition, Tensor):
             raise TypeError(f"For 'Tensor.select', the argument 'condition' should be Tensor,"
                             f" but got {type(condition)}.")
@@ -1816,7 +1912,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
1816
1912
|
f" then the tensor type should be float32 but got {self.dtype}")
|
|
1817
1913
|
input_y = y
|
|
1818
1914
|
if isinstance(y, (int, float)):
|
|
1819
|
-
input_y = tensor_operator_registry.get('zeros_like')(
|
|
1915
|
+
input_y = tensor_operator_registry.get('zeros_like')(self) + y
|
|
1820
1916
|
if isinstance(y, int):
|
|
1821
1917
|
input_y = tensor_operator_registry.get('cast')(input_y, mstype.int32)
|
|
1822
1918
|
else:
|
|
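When `y` is a Python scalar, `select` now materializes it as a tensor shaped like `self` via `zeros_like(self) + y` before dispatching. A small sketch of that broadcasting trick, illustrative only:

    # Sketch: expanding a scalar to a tensor matching another tensor's shape,
    # the trick Tensor.select uses for a scalar `y`.
    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1.0, 2.0], [3.0, 4.0]]), ms.float32)
    y_scalar = 7

    # zeros_like(x) has x's shape and dtype; adding the scalar broadcasts it.
    y_tensor = ops.zeros_like(x) + y_scalar
    cond = x > 2.0
    out = ops.select(cond, x, y_tensor)
    print(out)   # elements of x where cond holds, 7.0 elsewhere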
@@ -1827,22 +1923,46 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.transpose`.
         """
-        self._init_check()
         perm = validator.check_transpose_axis(axes, self.ndim)
-        return tensor_operator_registry.get('transpose')(
+        return tensor_operator_registry.get('transpose')(self, perm)
 
     def col2im(self, output_size, kernel_size, dilation, padding_value, stride):
         """
         For details, please refer to :func:`mindspore.ops.col2im`.
         """
-        self._init_check()
         return tensor_operator_registry.get('col2im')(self, output_size, kernel_size, dilation, padding_value, stride)
 
     def reshape(self, *shape):
+        r"""
+        Rearranges the input Tensor based on the given `shape` .
+
+        The `shape` can only have one -1 at most, in which case it's inferred from the remaining dimensions and
+        the number of elements in the input.
+
+        Args:
+            shape (Union[int, tuple[int], list[int]]): If `shape` is a tuple or list, its elements should be
+                integers, and only constant value is allowed. i.e., :math:`(y_1, y_2, ..., y_S)`.
+
+        Returns:
+            Tensor, If the given `shape` does not contain -1, the `shape` of tensor is :math:`(y_1, y_2, ..., y_S)`.
+            If the k-th position in the given `shape` is -1, the `shape` of tensor is :math:`(y_1, ..., y_{k-1},
+            \frac{\prod_{i=1}^{R}x_{i}}{y_1\times ...\times y_{k-1}\times y_{k+1}\times...\times y_S} , y_{k+1},
+            ..., y_S)`, where the shape of the input tensor is :math:`(x_1, x_2, ..., x_R)`.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import mindspore
+            >>> import numpy as np
+            >>> from mindspore import Tensor, ops
+            >>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
+            >>> output = input.reshape(3, 2)
+            >>> print(output)
+            [[-0.1  0.3]
+             [ 3.6  0.4]
+             [ 0.5 -3.2]]
         """
-        For details, please refer to :func:`mindspore.ops.reshape`.
-        """
-        self._init_check()
         new_shape = validator.check_reshape_shp(shape)
         return tensor_operator_registry.get('reshape')(self, new_shape)
 
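The formula in the new `reshape` docstring just says that a -1 placeholder is inferred so the element count is preserved: inferred_dim = total_elements / product(other_dims). A quick sketch:

    # Sketch: -1 in reshape is inferred from the remaining dimensions.
    import numpy as np
    import mindspore as ms
    from mindspore import Tensor

    x = Tensor(np.arange(24), ms.float32)   # 24 elements
    a = x.reshape(2, 3, 4)                  # explicit shape
    b = x.reshape(2, -1, 4)                 # -1 inferred as 24 / (2*4) = 3
    print(a.shape, b.shape)                 # (2, 3, 4) (2, 3, 4)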
@@ -1871,7 +1991,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             [ 3.6  0.4]
             [ 0.5 -3.2]]
         """
-        self._init_check()
         return tensor_operator_registry.get('reshape')(self, other.shape)
 
     def ravel(self):
@@ -1881,13 +2000,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Returns:
             Tensor, a 1-D tensor, containing the same elements of the input.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
         See also:
-            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.flatten`: Return a copy of the tensor collapsed into one dimension.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -1897,7 +2015,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(output.shape)
             (24,)
         """
-        self._init_check()
         reshape_op = tensor_operator_registry.get('reshape')
         return reshape_op(self, (-1,))
 
@@ -1905,77 +2022,66 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.round`.
         """
-
-        return tensor_operator_registry.get('round')()(self)
+        return tensor_operator_registry.get('round')(self)
 
     def roll(self, shifts, dims):
         """
         For details, please refer to :func:`mindspore.ops.roll`.
         """
-        self._init_check()
         return tensor_operator_registry.get('roll')(shifts, dims)(self)
 
     def rot90(self, k, dims):
         r"""
         For details, please refer to :func:`mindspore.ops.rot90`.
         """
-        self._init_check()
         return tensor_operator_registry.get('rot90')(self, k, dims)
 
     def deg2rad(self):
         r"""
         For details, please refer to :func:`mindspore.ops.deg2rad`.
         """
-        self._init_check()
         return tensor_operator_registry.get('deg2rad')(self)
 
     def dot(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.dot`.
         """
-        self._init_check()
         return tensor_operator_registry.get('dot')(self, other)
 
     def outer(self, vec2):
         r"""
         For details, please refer to :func:`mindspore.ops.outer`.
         """
-        self._init_check()
         return tensor_operator_registry.get('outer')(self, vec2)
 
     def rad2deg(self):
         r"""
         For details, please refer to :func:`mindspore.ops.rad2deg`.
         """
-        self._init_check()
         return tensor_operator_registry.get('rad2deg')(self)
 
     def copysign(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.copysign`.
         """
-        self._init_check()
         return tensor_operator_registry.get('copysign')(self, other)
 
     def nelement(self):
         r"""
         Alias for :func:`mindspore.Tensor.numel`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nelement')(self)
 
     def numel(self):
         r"""
         For details, please refer to :func:`mindspore.ops.numel`.
         """
-        self._init_check()
         return tensor_operator_registry.get('numel')(self)
 
     def permute(self, *axis):
         """
         For details, please refer to :func:`mindspore.ops.permute`.
         """
-        self._init_check()
         perm = validator.check_transpose_axis(axis, self.ndim)
         return tensor_operator_registry.get('permute')(self, perm)
 
@@ -1983,98 +2089,84 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.positive`.
         """
-        self._init_check()
         return tensor_operator_registry.get("positive")(self)
 
     def remainder(self, divisor):
         r"""
         For details, please refer to :func:`mindspore.ops.remainder`.
         """
-        self._init_check()
         return tensor_operator_registry.get('remainder')(self, divisor)
 
     def flatten(self, order='C', *, start_dim=0, end_dim=-1):
         r"""
         For details, please refer to :func:`mindspore.ops.flatten`.
         """
-        self._init_check()
         return tensor_operator_registry.get('flatten')(self, order, start_dim=start_dim, end_dim=end_dim)
 
     def float_power(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.float_power`.
         """
-        self._init_check()
         return tensor_operator_registry.get('float_power')(self, other)
 
     def fmax(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.fmax`.
         """
-        self._init_check()
         return tensor_operator_registry.get('fmax')(self, other)
 
     def fmin(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.fmin`.
         """
-        self._init_check()
         return tensor_operator_registry.get('fmin')(self, other)
 
     def fmod(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.fmod`.
         """
-        self._init_check()
         return tensor_operator_registry.get('fmod')(self, other)
 
     def narrow(self, axis, start, length):
         """
         For details, please refer to :func:`mindspore.ops.narrow`.
         """
-        self._init_check()
         return tensor_operator_registry.get('narrow')(self, axis, start, length)
 
     def swapaxes(self, axis0, axis1):
         """
         For details, please refer to :func:`mindspore.ops.swapaxes`.
         """
-        self._init_check()
         return tensor_operator_registry.get('swapaxes')(self, axis0, axis1)
 
     def swapdims(self, dim0, dim1):
         """
         For details, please refer to :func:`mindspore.ops.swapdims`.
         """
-        self._init_check()
         return tensor_operator_registry.get('swapdims')(self, dim0, dim1)
 
     def squeeze(self, axis=None):
         """
         For details, please refer to :func:`mindspore.ops.squeeze`.
         """
-        self._init_check()
         return tensor_operator_registry.get('squeeze')(self, axis)
 
     def slogdet(self):
         """
         For details, please refer to :func:`mindspore.ops.slogdet`.
         """
-        self._init_check()
         return tensor_operator_registry.get('slogdet')(self)
 
     def tril(self, diagonal=0):
         """
         For details, please refer to :func:`mindspore.ops.tril`.
         """
-        self._init_check()
         return tensor_operator_registry.get('tril')(self, diagonal)
 
     def unsqueeze(self, dim):
         """
         For details, please refer to :func:`mindspore.ops.unsqueeze`.
         """
-        self._init_check()
         validator.check_is_int(dim, 'dim')
         validator.check_int_range(dim, -self.ndim - 1, self.ndim + 1, validator.INC_LEFT, 'dim')
         return tensor_operator_registry.get('unsqueeze')(self, dim)
@@ -2083,7 +2175,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.expand_dims`.
         """
-        self._init_check()
         validator.check_is_int(axis, 'axis')
         validator.check_int_range(axis, -self.ndim - 1, self.ndim + 1, validator.INC_LEFT, 'axis')
         return tensor_operator_registry.get('expand_dims')(self, axis)
@@ -2116,7 +2207,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(x.dtype)
             Int32
         """
-        self._init_check()
         dtype = _check_astype_and_convert(dtype)
         if not copy and dtype == self.dtype:
             return self
@@ -2126,7 +2216,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.argmax`.
         """
-        self._init_check()
         out = tensor_operator_registry.get('argmax')(self, axis, keepdims)
         return out
 
@@ -2134,7 +2223,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.argmin`.
         """
-        self._init_check()
         out = tensor_operator_registry.get('argmin')(self, axis, keepdims)
         return out
 
@@ -2185,7 +2273,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         if self.shape == ():
             return (self, Tensor(0))
-        self._init_check()
         return tensor_operator_registry.get('argmax_with_value')(self, axis, keep_dims)
 
     def argmin_with_value(self, axis=0, keep_dims=False):
@@ -2233,7 +2320,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         if self.shape == ():
             return (self, Tensor(0))
-        self._init_check()
         return tensor_operator_registry.get('argmin_with_value')(self, axis, keep_dims)
 
     def cumsum(self, axis=None, dtype=None):
@@ -2275,15 +2361,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.index_select`.
         """
-        self._init_check()
         return tensor_operator_registry.get('index_select')(self, axis, index)
 
     def inplace_update(self, v, indices):
         """
         For details, please refer to :func:`mindspore.ops.inplace_update`.
         """
-
-        return tensor_operator_registry.get('inplace_update')()(self, indices, v)
+        return tensor_operator_registry.get('inplace_update')(self, v, indices)
 
     def copy(self):
         """
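Note the argument reorder above: the functional form puts `v` before `indices`, matching the method's own signature. A sketch, assuming the public `ops.inplace_update(x, v, indices)` signature:

    # Sketch: rows of x listed in `indices` are replaced by the rows of v.
    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]), ms.float32)
    v = Tensor(np.array([[9.0, 9.0]]), ms.float32)

    out = ops.inplace_update(x, v, (1,))   # replace row 1 of x with v's row
    print(out)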
@@ -2357,15 +2441,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Raises:
             TypeError: If arguments have types not specified above.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
         See also:
-            :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
-
-            :func:`mindspore.Tensor.
+            - :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
+            - :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
+            - :func:`mindspore.Tensor.min`: Return the minimum of a tensor or minimum along an axis.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -2380,7 +2462,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(indices)
             [1 1]
         """
-        self._init_check()
         if isinstance(axis, (list, tuple)):
             reduce_ = tensor_operator_registry.get("reduce")
             reduce_max = tensor_operator_registry.get("reduce_max")
@@ -2428,15 +2509,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Raises:
             TypeError: If arguments have types not specified above.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
        See also:
-            :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
-
-            :func:`mindspore.Tensor.
+            - :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
+            - :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
+            - :func:`mindspore.Tensor.max`: Return the maximum of a tensor or maximum along an axis.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -2460,12 +2539,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(indices)
             [0 0]
         """
-        self._init_check()
         if isinstance(axis, (list, tuple)):
             reduce_ = tensor_operator_registry.get("reduce")
             reduce_min = tensor_operator_registry.get("reduce_min")
             minimum = tensor_operator_registry.get("minimum")
-            return reduce_(self, reduce_min(keepdims), cmp_fn=minimum
+            return reduce_(self, reduce_min(keepdims), cmp_fn=minimum, axis=axis, keepdims=keepdims,
                            initial=initial, where=where)
         values, indices = tensor_operator_registry.get("min")(self, axis, keepdims, initial=initial, where=where)
         if not return_indices:
@@ -2476,7 +2554,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.scatter_add`.
         """
-        self._init_check()
         return tensor_operator_registry.get("tensor_scatter_add")(self, indices, updates)
 
     def scatter_sub(self, indices, updates):
@@ -2489,7 +2566,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
         The last axis of `indices` is the depth of each index vectors. For each index vector,
         there must be a corresponding value in `updates`. The shape of `updates` should be
-        equal to the shape of `self[indices]`. For more details, see
+        equal to the shape of `self[indices]`. For more details, see Examples.
 
         Note:
             On GPU, if some values of the `indices` are out of bound, instead of raising an index error,
@@ -2524,28 +2601,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             [[-3.3000002 0.3 3.6 ]
              [ 0.4 0.5 -3.2 ]]
         """
-        self._init_check()
         return tensor_operator_registry.get('tensor_scatter_sub')(self, indices, updates)
 
     def scatter_min(self, indices, updates):
         """
         For details, please refer to :func:`mindspore.ops.scatter_min`.
         """
-
-        return tensor_operator_registry.get('tensor_scatter_min')()(self, indices, updates)
+        return tensor_operator_registry.get('tensor_scatter_min')(self, indices, updates)
 
     def scatter_max(self, indices, updates):
         """
         For details, please refer to :func:`mindspore.ops.scatter_max`.
         """
-
-        return tensor_operator_registry.get('tensor_scatter_max')()(self, indices, updates)
+        return tensor_operator_registry.get('tensor_scatter_max')(self, indices, updates)
+
+    def softmax(self, axis, dtype=None):
+        """
+        For details, please refer to :func:`mindspore.ops.softmax`.
+        """
+        return tensor_operator_registry.get('softmax')(self, axis, dtype=dtype)
 
     def fill(self, value):
         """
         `Tensor.fill` is deprecated, please use `ops.fill` instead.
         """
-        self._init_check()
         if value is None:
             if self.dtype not in (mstype.float16, mstype.float32, mstype.float64):
                 raise TypeError("For 'Tensor.fill', if the argument 'value' is None, the type of the original "
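The hunk above also introduces a new `Tensor.softmax` method delegating to the functional op. A quick usage sketch, assuming the method lands as shown:

    # Sketch: softmax normalizes values along `axis` so each slice sums to 1.
    import numpy as np
    import mindspore as ms
    from mindspore import Tensor

    logits = Tensor(np.array([[1.0, 2.0, 3.0]]), ms.float32)
    probs = logits.softmax(axis=-1)
    print(probs.sum(axis=-1))   # ~1.0 per row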
@@ -2558,7 +2637,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         `Tensor.fills` is deprecated, please use `ops.fill` instead.
         """
-        self._init_check()
         return tensor_operator_registry.get('fills')(self, value)
 
     def fill_diagonal(self, fill_value, wrap=False):
@@ -2600,14 +2678,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             [5. 1. 1.]
             [1. 5. 1.]]
         """
-        self._init_check()
         return tensor_operator_registry.get('fill_diagonal')(fill_value, wrap)(self)
 
     def masked_fill(self, mask, value):
         """
         For details, please refer to :func:`mindspore.ops.masked_fill`.
         """
-        self._init_check()
         if isinstance(value, (float, int)):
             value = tensor_operator_registry.get("scalar_to_tensor")(value, self.dtype)
         if not isinstance(mask, Tensor):
@@ -2663,13 +2739,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.minimum`.
         """
-        return tensor_operator_registry.get('minimum')(
+        return tensor_operator_registry.get('minimum')(self, other)
 
     def clamp(self, min=None, max=None):
         r"""
         For details, please refer to :func:`mindspore.ops.clamp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('clamp')(self, min, max)
 
     def clip(self, min=None, max=None):
@@ -2678,10 +2753,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return self.clamp(min, max)
 
-    def _init_check(self):
-        if self.has_init:
-            self.init_data()
-
     def init_data(self, slice_index=None, shape=None, opt_shard_group=None):
         """
         Get the tensor format data of this Tensor.
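The removed helper explains why so many `self._init_check()` calls disappear throughout this file: it merely materialized lazily initialized tensors (`has_init`) by calling `init_data()`. A hedged sketch of that lazy materialization from user code; where 2.3 triggers it internally is not shown in this diff:

    # Sketch: a Tensor created from an initializer holds no data until
    # init_data() (or first real use) materializes it.
    import mindspore as ms
    from mindspore import Tensor
    from mindspore.common.initializer import One

    t = Tensor(shape=(2, 3), dtype=ms.float32, init=One())
    t.init_data()           # materialize the buffer explicitly
    print(t.asnumpy())      # [[1. 1. 1.] [1. 1. 1.]]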
@@ -2698,7 +2769,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             opt_shard_group(str): Optimizer shard group which is used in auto or semi auto parallel mode
                 to get one shard of a parameter's slice. For more information about optimizer parallel, please refer to:
                 `Optimizer Parallel
-                <https://www.mindspore.cn/tutorials/experts/en/
+                <https://www.mindspore.cn/tutorials/experts/en/master/parallel/optimizer_parallel.html>`_.
                 Default: ``None``.
 
         Returns:
@@ -2803,13 +2874,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Returns:
             Tensor.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
         See also:
-            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.repeat`: Repeat elements of a tensor.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -2836,7 +2906,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         diff_size = new_size - cur_size
         if diff_size > 0:
             pad_val = tensor_operator_registry.get('fill')(self.dtype, (diff_size,), 0)
-            res = tensor_operator_registry.get('concatenate')(
+            res = tensor_operator_registry.get('concatenate')((flattened, pad_val), 0)
         else:
             res = flattened[:new_size]
         return res.reshape(new_shape)
@@ -2845,70 +2915,60 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.det`.
         """
-        self._init_check()
         return tensor_operator_registry.get('det')(self)
 
     def diff(self, n=1, axis=-1, prepend=None, append=None):
         r"""
         For details, please refer to :func:`mindspore.ops.diff`.
         """
-        self._init_check()
         return tensor_operator_registry.get('diff')(self, n, axis, prepend, append)
 
     def frac(self):
         r"""
         For details, please refer to :func:`mindspore.ops.frac`.
         """
-        self._init_check()
         return tensor_operator_registry.get('frac')(self)
 
     def argwhere(self):
         r"""
         For details, please refer to :func:`mindspore.ops.argwhere`.
         """
-        self._init_check()
         return tensor_operator_registry.get('argwhere')(self)
 
     def moveaxis(self, source, destination):
         r"""
         For details, please refer to :func:`mindspore.ops.moveaxis`.
         """
-        self._init_check()
         return tensor_operator_registry.get('moveaxis')(self, source, destination)
 
     def movedim(self, source, destination):
         r"""
         For details, please refer to :func:`mindspore.ops.movedim`.
         """
-        self._init_check()
         return tensor_operator_registry.get('movedim')(self, source, destination)
 
     def digamma(self):
         r"""
         For details, please refer to :func:`mindspore.ops.digamma`.
         """
-        self._init_check()
         return tensor_operator_registry.get('digamma')(self)
 
     def lgamma(self):
         r"""
         For details, please refer to :func:`mindspore.ops.lgamma`.
         """
-        self._init_check()
         return tensor_operator_registry.get('lgamma')(self)
 
     def diagonal(self, offset=0, axis1=0, axis2=1):
         """
         For details, please refer to :func:`mindspore.ops.diagonal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('diagonal')(self, offset, axis1, axis2)
 
     def diagonal_scatter(self, src, offset=0, dim1=0, dim2=1):
         r"""
         For details, please refer to :func:`mindspore.ops.diagonal_scatter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('diagonal_scatter')(self, src, offset, dim1, dim2)
 
     def trace(self, offset=0, axis1=0, axis2=1, dtype=None):
@@ -2933,12 +2993,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Raises:
             ValueError: If the input tensor has less than two dimensions.
 
+        See also:
+            - :func:`mindspore.Tensor.diagonal`: Return specified diagonals.
+
         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
 
-        See also:
-            :func:`mindspore.Tensor.diagonal`: Return specified diagonals.
-
         Examples:
             >>> import numpy as np
             >>> from mindspore import Tensor
@@ -2947,7 +3007,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             3.0
         """
         if offset == 0 and axis1 == 0 and axis2 == 1 and dtype is None:
-            self._init_check()
             return tensor_operator_registry.get('trace')(self)
         d = self.diagonal(offset, axis1=axis1, axis2=axis2)
         shape = d.shape
@@ -3020,7 +3079,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         shape_indices = tuple(size_indices if i == axis else 1 for i in range(ndim))
         indices = indices.reshape(shape_indices)
         shape_indices = shape_ni + (indices.size,) + shape_nk
-        indices = tensor_operator_registry.get('broadcast_to')(shape_indices)
+        indices = tensor_operator_registry.get('broadcast_to')(indices, shape_indices)
 
         res = tensor_operator_registry.get('gather_d')(a, axis, indices)
         return res.reshape(shape_out)
@@ -3065,7 +3124,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         if isinstance(choices, Tensor):
             shape_choice = validator.infer_out_shape(self.shape, choices.shape[1:])
-            choices = tensor_operator_registry.get('broadcast_to')((choices.shape[0],) + shape_choice)
+            choices = tensor_operator_registry.get('broadcast_to')(choices, (choices.shape[0],) + shape_choice)
         else:
             # broadcasts choices to the same shape if choices is a sequence
             choicelist = []
@@ -3078,14 +3137,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             shape_choice = validator.infer_out_shape(self.shape, *shapes)
             tmp = []
             for choice in choicelist:
-                tmp.append(tensor_operator_registry.get('broadcast_to')(shape_choice)
+                tmp.append(tensor_operator_registry.get('broadcast_to')(choice, shape_choice))
             choices = tensor_operator_registry.get('stack')(tmp, 0)
 
         if self.ndim == 0 or choices.ndim == 0:
             raise ValueError(f"For 'Tensor.choose', the original tensor and the argument 'choices' cannot be scalars."
                              f" Their dimensions should all be > 0, but got the original tensor's dimension "
                              f"{self.ndim}, 'choices' dimension {choices.ndim}.")
-        a = tensor_operator_registry.get('broadcast_to')(shape_choice)
+        a = tensor_operator_registry.get('broadcast_to')(self, shape_choice)
         dtype = choices.dtype
         # adjusts dtype for F.tensor_mul and F.gather_nd
         a = a.astype(mstype.int32)
@@ -3097,10 +3156,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         for i in range(ndim):
             dim_grid = Tensor(list(range(a.shape[i])), mstype.int32)
             dim_shape = validator.expanded_shape(ndim, a.shape[i], i)
-            dim_grid = tensor_operator_registry.get('broadcast_to')(
+            dim_grid = tensor_operator_registry.get('broadcast_to')(dim_grid.reshape(dim_shape), a.shape)
             grids.append(dim_grid)
         grid = tensor_operator_registry.get('stack')(grids, -1)
-        indices = tensor_operator_registry.get('concatenate')(
+        indices = tensor_operator_registry.get('concatenate')((a.reshape(a.shape + (1,)), grid), -1)
         return tensor_operator_registry.get('gather_nd')(choices, indices).astype(dtype)
 
     def searchsorted(self, v, side='left', sorter=None):
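Several hunks above migrate `broadcast_to` from a configured-primitive call, `broadcast_to(shape)(x)`, to the functional form `broadcast_to(x, shape)`. Usage sketch with the public functional API:

    # Sketch: functional broadcast_to takes the tensor first, then the target shape.
    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1.0], [2.0]]), ms.float32)   # shape (2, 1)
    y = ops.broadcast_to(x, (2, 3))                    # shape (2, 3)
    print(y.shape)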
@@ -3166,7 +3225,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.gather_nd`.
         """
-        self._init_check()
         validator.check_value_type('indices', indices, (Tensor, Tensor_,), 'Tensor.gather_nd')
         return tensor_operator_registry.get('gather_nd')(self, indices)
 
@@ -3174,7 +3232,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.gather`.
         """
-        self._init_check()
         validator.check_is_int(axis, 'axis')
         validator.check_is_int(batch_dims, "batch_dims")
         return tensor_operator_registry.get('gather')(self, input_indices, axis, batch_dims)
@@ -3202,13 +3259,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Returns:
             Variance tensor.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
         See also:
-            :func:`mindspore.Tensor.mean`: Reduce a dimension of a tensor by averaging all elements in the dimension.
+            - :func:`mindspore.Tensor.mean`: Reduce a dimension of a tensor by averaging all elements in the dimension.
+            - :func:`mindspore.Tensor.std`: Compute the standard deviation along the specified axis.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -3255,40 +3311,40 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Return sum of tensor elements over a given axis.
 
         Note:
-            Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and
-            `
+            Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are not supported.
+            The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
 
         Args:
-            axis (Union[None, int, tuple(int), list(int)]): Axis or axes along which a sum is performed.
+            axis (Union[None, int, tuple(int), list(int), Tensor]): Axis or axes along which a sum is performed.
                 Default: ``None`` .
-                If None, sum all the elements of the input tensor.
-                If the axis is negative, it counts from the last to the first axis
-                If the axis is a tuple or list of ints, a sum is performed on all the axes specified in the tuple
-                or list instead of a single axis or all the axes as before.
+                If ``None`` , sum all the elements of the input tensor.
+                If the `axis` is negative, it counts from the last to the first `axis`.
+                If the `axis` is a tuple or list of ints, a sum is performed on all the axes specified in the tuple
+                or list instead of a single `axis` or all the axes as before.
             dtype (:class:`mindspore.dtype`, optional): defaults to ``None`` . Overrides the dtype of the
                 output Tensor.
             keepdims (bool): If this is set to ``True`` , the axes which are reduced are left in the result as
                 dimensions with size one. With this option, the result will broadcast correctly against the input
-                array. If the default value is passed, then keepdims will not be passed through to the sum method
+                array. If the default value is passed, then `keepdims` will not be passed through to the sum method
                 of sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
-                implement keepdims any exceptions will be raised. Default: ``False`` .
+                implement `keepdims` any exceptions will be raised. Default: ``False`` .
             initial (scalar): Starting value for the sum. Default: ``None`` .
 
         Returns:
-            Tensor. A tensor with the same shape as input, with the specified axis removed.
-            If the input tensor is a 0-d array, or if the axis is ``None`` , a scalar is returned.
+            Tensor. A tensor with the same shape as input, with the specified `axis` removed.
+            If the input tensor is a 0-d array, or if the `axis` is ``None`` , a scalar is returned.
 
         Raises:
-            TypeError: If input is not array_like, or `axis` is not int, tuple of ints
+            TypeError: If input is not array_like, or `axis` is not int, tuple of ints, list of ints or Tensor,
                 or `keepdims` is not integer, or `initial` is not scalar.
-            ValueError: If any axis is out of range or duplicate axes exist.
+            ValueError: If any `axis` is out of range or duplicate axes exist.
+
+        See also:
+            - :func:`mindspore.Tensor.cumsum`: Return the cumulative sum of the elements along a given `axis`.
 
         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
 
-        See also:
-            :func:`mindspore.Tensor.cumsum`: Return the cumulative sum of the elements along a given axis.
-
         Examples:
             >>> import numpy as np
             >>> from mindspore import Tensor
@@ -3299,14 +3355,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(input_x.sum(axis=1))
             [10. 35.]
         """
-        if initial is
-
-
-        if initial is not None:
-            res += initial
-        if dtype is not None:
-            res = res.astype(dtype)
-        return res
+        if initial is None:
+            return tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype)
+        return tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype) + initial
 
     def sum_to_size(self, *size):
         r"""
@@ -3333,7 +3384,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(output.shape)
             (1, 3, 1, 3)
         """
-        self._init_check()
         x = self
         if len(size) == 1 and isinstance(size[0], tuple):
             size = size[0]
@@ -3357,21 +3407,18 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.nansum`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nansum')(self, axis=axis, keepdims=keepdims, dtype=dtype)
 
     def nanmean(self, axis=None, keepdims=False, *, dtype=None):
         r"""
         For details, please refer to :func:`mindspore.ops.nanmean`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nanmean')(self, axis, keepdims, dtype=dtype)
 
     def nanmedian(self, axis=-1, keepdims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.nanmedian`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nanmedian')(self, axis, keepdims)
 
     def repeat(self, repeats, axis=None):
@@ -3391,13 +3438,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             ValueError: If the axis is out of range.
             TypeError: If arguments have types not specified above.
 
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
         See also:
-            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
+            - :func:`mindspore.Tensor.resize`: Changes shape and size of tensor in-place.
 
-
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> import numpy as np
@@ -3446,27 +3492,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         for sub, rep in zip(subs, repeats):
             if rep != 0:
                 repeated_subs.append(tensor_operator_registry.get('repeat_elements')(sub, rep, axis))
-        return tensor_operator_registry.get('concatenate')(axis)
+        return tensor_operator_registry.get('concatenate')(repeated_subs, axis)
 
     def repeat_interleave(self, repeats, dim=None):
         """
         For details, please refer to :func:`mindspore.ops.repeat_interleave`.
         """
-        self._init_check()
         return tensor_operator_registry.get('repeat_interleave')(self, repeats, dim)
 
     def bernoulli(self, p=0.5, seed=None):
         r"""
         For details, please refer to :func:`mindspore.ops.bernoulli`.
         """
-        self._init_check()
         return tensor_operator_registry.get('bernoulli')(self, p, seed)
 
     def random_categorical(self, num_sample, seed=0, dtype=mstype.int64):
         r"""
         For details, please refer to :func:`mindspore.ops.random_categorical`.
         """
-        self._init_check()
         validator.check_is_int(num_sample, 'num_sample')
         validator.check_is_int(seed, 'seed')
         return tensor_operator_registry.get('random_categorical')(self, num_sample, seed, dtype)
@@ -3475,14 +3518,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.masked_select`.
         """
-        self._init_check()
         return tensor_operator_registry.get('masked_select')(self, mask)
 
     def gather_elements(self, dim, index):
         """
         For details, please refer to :func:`mindspore.ops.gather_elements`.
         """
-        self._init_check()
         validator.check_value_type('index', index, (Tensor, Tensor_,), 'Tensor.gather_elements')
         return tensor_operator_registry.get('gather_elements')(self, dim, index)
 
@@ -3490,7 +3531,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.nonzero`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nonzero')(self)
 
     def svd(self, full_matrices=False, compute_uv=True):
@@ -3508,42 +3548,36 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.hardshrink`.
         """
-
-        return tensor_operator_registry.get('hardshrink')(lambd)(self)
+        return tensor_operator_registry.get('hardshrink')(self, lambd)
 
     def heaviside(self, values):
         r"""
         For details, please refer to :func:`mindspore.ops.heaviside`.
         """
-        self._init_check()
         return tensor_operator_registry.get('heaviside')(self, values)
 
     def hypot(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.hypot`.
         """
-        self._init_check()
         return tensor_operator_registry.get('hypot')(self, other)
 
     def soft_shrink(self, lambd=0.5):
         r"""
         For details, please refer to :func:`mindspore.ops.soft_shrink`.
         """
-        self._init_check()
         return tensor_operator_registry.get('soft_shrink')(self, lambd)
 
     def matrix_determinant(self):
         r"""
         For details, please refer to :func:`mindspore.ops.matrix_determinant`.
         """
-        self._init_check()
         return tensor_operator_registry.get('matrix_determinant')(self)
 
     def log_matrix_determinant(self):
         r"""
         For details, please refer to :func:`mindspore.ops.log_matrix_determinant`.
         """
-        self._init_check()
         return tensor_operator_registry.get('log_matrix_determinant')(self)
 
     def to_coo(self):
@@ -3577,7 +3611,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             [1 0]] [ 1. -5.] (2, 2)
 
         """
-        self._init_check()
         return tensor_operator_registry.get('dense_to_sparse_coo')(self)
 
     def to_csr(self):
@@ -3610,7 +3643,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(output.indptr, output.indices, output.values, output.shape)
             [0 1 2] [0 0] [ 1. -5.] (2, 2)
         """
-        self._init_check()
         return tensor_operator_registry.get('dense_to_sparse_csr')(self)
 
     def tolist(self):
@@ -3633,42 +3665,36 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             >>> print(out2)
             1
         """
-        self._init_check()
         return self.asnumpy().tolist()
 
     def unbind(self, dim=0):
         r"""
         For details, please refer to :func:`mindspore.ops.unbind`.
         """
-
-        return tensor_operator_registry.get('unbind')(dim)(self)
+        return tensor_operator_registry.get('unbind')(self, dim)
 
     def unsorted_segment_min(self, segment_ids, num_segments):
         r"""
         For details, please refer to :func:`mindspore.ops.unsorted_segment_min`.
         """
-        self._init_check()
         return tensor_operator_registry.get('unsorted_segment_min')(self, segment_ids, num_segments)
 
     def unsorted_segment_max(self, segment_ids, num_segments):
         r"""
         For details, please refer to :func:`mindspore.ops.unsorted_segment_max`.
         """
-        self._init_check()
         return tensor_operator_registry.get('unsorted_segment_max')(self, segment_ids, num_segments)
 
     def unsorted_segment_prod(self, segment_ids, num_segments):
         r"""
         For details, please refer to :func:`mindspore.ops.unsorted_segment_prod`.
         """
-        self._init_check()
         return tensor_operator_registry.get('unsorted_segment_prod')(self, segment_ids, num_segments)
 
     def unique_consecutive(self, return_idx=False, return_counts=False, axis=None):
         """
         For details, please refer to :func:`mindspore.ops.unique_consecutive`.
         """
-        self._init_check()
         output, idx, counts = tensor_operator_registry.get("unique_consecutive")(return_idx, return_counts, axis)(self)
         if return_idx and return_counts:
             return output, idx, counts
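`unbind` likewise moves to the functional form; it removes the given dimension and yields one tensor per slice:

    # Sketch: unbind(dim=0) splits a (2, 3) tensor into two (3,) tensors.
    import numpy as np
    import mindspore as ms
    from mindspore import Tensor

    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), ms.float32)
    rows = x.unbind(dim=0)        # tuple of two tensors, each of shape (3,)
    print(len(rows), rows[0])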
@@ -3682,29 +3708,25 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
3682
3708
|
"""
|
|
3683
3709
|
For details, please refer to :func:`mindspore.ops.unique_with_pad`.
|
|
3684
3710
|
"""
|
|
3685
|
-
|
|
3686
|
-
return tensor_operator_registry.get("unique_with_pad")()(self, pad_num)
|
|
3711
|
+
return tensor_operator_registry.get("unique_with_pad")(self, pad_num)
|
|
3687
3712
|
|
|
3688
3713
|
def diag(self):
|
|
3689
3714
|
r"""
|
|
3690
3715
|
For details, please refer to :func:`mindspore.ops.diag`.
|
|
3691
3716
|
"""
|
|
3692
|
-
|
|
3693
|
-
return tensor_operator_registry.get('diag')()(self)
|
|
3717
|
+
return tensor_operator_registry.get('diag')(self)
|
|
3694
3718
|
|
|
3695
3719
|
def diagflat(self, offset=0):
|
|
3696
3720
|
r"""
|
|
3697
3721
|
For details, please refer to :func:`mindspore.ops.diagflat`.
|
|
3698
3722
|
"""
|
|
3699
|
-
self._init_check()
|
|
3700
3723
|
return tensor_operator_registry.get('diagflat')(self, offset)
|
|
3701
3724
|
|
|
3702
3725
|
def xdivy(self, y):
|
|
3703
3726
|
r"""
|
|
3704
3727
|
For details, please refer to :func:`mindspore.ops.xdivy`.
|
|
3705
3728
|
"""
|
|
3706
|
-
|
|
3707
|
-
return tensor_operator_registry.get("xdivy")()(self, y)
|
|
3729
|
+
return tensor_operator_registry.get("xdivy")(self, y)
|
|
3708
3730
|
|
|
3709
3731
|
def split(self, split_size_or_sections, axis=0):
|
|
3710
3732
|
"""
|
|
@@ -3716,7 +3738,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
3716
3738
|
"""
|
|
3717
3739
|
For details, please refer to :func:`mindspore.ops.tensor_split`.
|
|
3718
3740
|
"""
|
|
3719
|
-
self._init_check()
|
|
3720
3741
|
return tensor_operator_registry.get('tensor_split')(self, indices_or_sections, axis)
|
|
3721
3742
|
|
|
3722
3743
|
def vsplit(self, indices_or_sections):
|
|
@@ -3724,28 +3745,25 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
3724
3745
|
For details, please refer to :func:`mindspore.ops.vsplit`.
|
|
3725
3746
|
"""
|
|
3726
3747
|
|
|
3727
|
-
self._init_check()
|
|
3728
3748
|
return tensor_operator_registry.get('vsplit')(self, indices_or_sections)
|
|
3729
3749
|
|
|
3730
3750
|
def hsplit(self, indices_or_sections):
|
|
3731
3751
|
"""
|
|
3732
3752
|
For details, please refer to :func:`mindspore.ops.hsplit`.
|
|
3733
3753
|
"""
|
|
3734
|
-
self._init_check()
|
|
3735
3754
|
return tensor_operator_registry.get('hsplit')(self, indices_or_sections)
|
|
3736
3755
|
|
|
3737
3756
|
def dsplit(self, indices_or_sections):
|
|
3738
3757
|
"""
|
|
3739
3758
|
For details, please refer to :func:`mindspore.ops.dsplit`.
|
|
3740
3759
|
"""
|
|
3741
|
-
self._init_check()
|
|
3742
3760
|
return tensor_operator_registry.get('dsplit')(self, indices_or_sections)
|
|
3743
3761
|
|
|
3744
3762
|
def xlogy(self, y):
|
|
3745
3763
|
r"""
|
|
3746
3764
|
For details, please refer to :func:`mindspore.ops.xlogy`.
|
|
3747
3765
|
"""
|
|
3748
|
-
return tensor_operator_registry.get("xlogy")(
|
|
3766
|
+
return tensor_operator_registry.get("xlogy")(self, y)
|
|
3749
3767
|
|
|
3750
3768
|
def eigvals(self):
|
|
3751
3769
|
r"""
|
|
@@ -3760,13 +3778,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
3760
3778
|
r"""
|
|
3761
3779
|
For details, please refer to :func:`mindspore.ops.erf`.
|
|
3762
3780
|
"""
|
|
3763
|
-
return tensor_operator_registry.get("erf")(
|
|
3781
|
+
return tensor_operator_registry.get("erf")(self)
|
|
3764
3782
|
|
|
3765
3783
|
def erfc(self):
|
|
3766
3784
|
r"""
|
|
3767
3785
|
For details, please refer to :func:`mindspore.ops.erfc`.
|
|
3768
3786
|
"""
|
|
3769
|
-
return tensor_operator_registry.get("erfc")(
|
|
3787
|
+
return tensor_operator_registry.get("erfc")(self)
|
|
3770
3788
|
|
|
3771
3789
|
def tile(self, reps):
|
|
3772
3790
|
r"""
|
|
@@ -3778,29 +3796,26 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
3778
3796
|
r"""
|
|
3779
3797
|
For details, please refer to :func:`mindspore.ops.topk`.
|
|
3780
3798
|
"""
|
|
3781
|
-
self._init_check()
|
|
3782
3799
|
return tensor_operator_registry.get("topk")(self, k, dim, largest, sorted)
|
|
3783
3800
|
|
|
3784
3801
|
def top_k(self, k, sorted=True):
|
|
3785
3802
|
r"""
|
|
3786
3803
|
`Tensor.top_k` is deprecated, please use `Tensor.topk` instead.
|
|
3787
3804
|
"""
|
|
3788
|
-
self._init_check()
|
|
3789
3805
|
validator.check_is_int(k, 'k')
|
|
3790
3806
|
validator.check_bool(sorted, 'sorted')
|
|
3791
|
-
return tensor_operator_registry.get("top_k")(
|
|
3807
|
+
return tensor_operator_registry.get("top_k")(self, k, sorted)
|
|
3792
3808
|
|
|
3793
3809
|
def sigmoid(self):
|
|
3794
3810
|
r"""
|
|
3795
3811
|
For details, please refer to :func:`mindspore.ops.sigmoid`.
|
|
3796
3812
|
"""
|
|
3797
|
-
return tensor_operator_registry.get("sigmoid")(
|
|
3813
|
+
return tensor_operator_registry.get("sigmoid")(self)
|
|
3798
3814
|
|
|
3799
3815
|
def median(self, axis=-1, keepdims=False):
|
|
3800
3816
|
r"""
|
|
3801
3817
|
For details, please refer to :func:`mindspore.ops.median`.
|
|
3802
3818
|
"""
|
|
3803
|
-
self._init_check()
|
|
3804
3819
|
validator.check_axis_in_range(axis, self.ndim)
|
|
3805
3820
|
return tensor_operator_registry.get('median')(False, axis, keepdims)(self)
|
|
3806
3821
|
|
|
@@ -3808,49 +3823,42 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
3808
3823
|
r"""
|
|
3809
3824
|
For details, please refer to :func:`mindspore.ops.addmv`.
|
|
3810
3825
|
"""
|
|
3811
|
-
self._init_check()
|
|
3812
3826
|
return tensor_operator_registry.get('addmv')(self, mat, vec, beta=beta, alpha=alpha)
|
|
3813
3827
|
|
|
3814
3828
|
def asinh(self):
|
|
3815
3829
|
r"""
|
|
3816
3830
|
For details, please refer to :func:`mindspore.ops.asinh`.
|
|
3817
3831
|
"""
|
|
3818
|
-
self._init_check()
|
|
3819
3832
|
return tensor_operator_registry.get('asinh')(self)
|
|
3820
3833
|
|
|
3821
3834
|
def arcsinh(self):
|
|
3822
3835
|
r"""
|
|
3823
3836
|
Alias for :func:`mindspore.Tensor.asinh`.
|
|
3824
3837
|
"""
|
|
3825
|
-
self._init_check()
|
|
3826
3838
|
return tensor_operator_registry.get('arcsinh')(self)
|
|
3827
3839
|
|
|
3828
3840
|
def atan(self):
|
|
3829
3841
|
r"""
|
|
3830
3842
|
For details, please refer to :func:`mindspore.ops.atan`.
|
|
3831
3843
|
"""
|
|
3832
|
-
self._init_check()
|
|
3833
3844
|
return tensor_operator_registry.get('atan')(self)
|
|
3834
3845
|
|
|
3835
3846
|
def atanh(self):
|
|
3836
3847
|
r"""
|
|
3837
3848
|
For details, please refer to :func:`mindspore.ops.atanh`.
|
|
3838
3849
|
"""
|
|
3839
|
-
self._init_check()
|
|
3840
3850
|
return tensor_operator_registry.get('atanh')(self)
|
|
3841
3851
|
|
|
3842
3852
|
def arctanh(self):
|
|
3843
3853
|
r"""
|
|
3844
3854
|
Alias for :func:`mindspore.Tensor.atanh`.
|
|
3845
3855
|
"""
|
|
3846
|
-
self._init_check()
|
|
3847
3856
|
return tensor_operator_registry.get('arctanh')(self)
|
|
3848
3857
|
|
|
3849
3858
|
def bmm(self, mat2):
|
|
3850
3859
|
r"""
|
|
3851
3860
|
For details, please refer to :func:`mindspore.ops.bmm`.
|
|
3852
3861
|
"""
|
|
3853
|
-
self._init_check()
|
|
3854
3862
|
return tensor_operator_registry.get('bmm')(self, mat2)
|
|
3855
3863
|
|
|
3856
3864
|
def to(self, dtype):
|
|
@@ -3880,8 +3888,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
3880
3888
|
>>> print(output.dtype)
|
|
3881
3889
|
Int32
|
|
3882
3890
|
"""
|
|
3883
|
-
|
|
3884
|
-
return tensor_operator_registry.get('to')()(self, dtype)
|
|
3891
|
+
return tensor_operator_registry.get('to')(self, dtype)
|
|
3885
3892
|
|
|
3886
3893
|
def type(self, dtype=None):
|
|
3887
3894
|
r"""
|
|
@@ -3907,7 +3914,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
3907
3914
|
[[1 2]
|
|
3908
3915
|
[3 4]]
|
|
3909
3916
|
"""
|
|
3910
|
-
self._init_check()
|
|
3911
3917
|
if dtype is None:
|
|
3912
3918
|
return str(self.dtype)
|
|
3913
3919
|
return self.astype(dtype)
|
|
@@ -3934,7 +3940,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
3934
3940
|
>>> print(x.dtype)
|
|
3935
3941
|
Int32
|
|
3936
3942
|
"""
|
|
3937
|
-
self._init_check()
|
|
3938
3943
|
return self.astype(other.dtype)
|
|
3939
3944
|
|
|
3940
3945
|
def bool(self):
|
|
@@ -3957,8 +3962,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output.dtype)
         Bool
         """
-
-        return tensor_operator_registry.get('bool')()(self, mstype.bool_)
+        return tensor_operator_registry.get('bool')(self, mstype.bool_)

     def float(self):
         r"""
@@ -3979,8 +3983,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output.dtype)
         Float32
         """
-
-        return tensor_operator_registry.get('float')()(self, mstype.float32)
+        return tensor_operator_registry.get('float')(self, mstype.float32)

     def half(self):
         r"""
@@ -4001,8 +4004,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output.dtype)
         Float16
         """
-
-        return tensor_operator_registry.get('half')()(self, mstype.float16)
+        return tensor_operator_registry.get('half')(self, mstype.float16)

     def int(self):
         r"""
@@ -4023,8 +4025,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output.dtype)
         Int32
         """
-
-        return tensor_operator_registry.get('int')()(self, mstype.int32)
+        return tensor_operator_registry.get('int')(self, mstype.int32)

     def long(self):
         r"""
@@ -4045,8 +4046,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output.dtype)
         Int64
         """
-
-        return tensor_operator_registry.get('long')()(self, mstype.int64)
+        return tensor_operator_registry.get('long')(self, mstype.int64)

     def short(self):
         r"""
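The same factory-to-function simplification is applied uniformly to the dtype-conversion helpers above. A quick sketch exercising them:

    import mindspore as ms

    x = ms.Tensor([1, 2, 3], ms.int64)
    print(x.bool().dtype)   # Bool
    print(x.float().dtype)  # Float32
    print(x.half().dtype)   # Float16
    print(x.int().dtype)    # Int32
    print(x.long().dtype)   # Int64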
@@ -4068,22 +4068,19 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> output
         Tensor(shape=[5], dtype=Int16, value= [1, 2, 3, 4, 5])
         """
-        self._init_check()
         return tensor_operator_registry.get('cast')(self, mstype.int16)

     def cholesky(self, upper=False):
         r"""
         For details, please refer to :func:`mindspore.ops.cholesky`.
         """
-
-        return tensor_operator_registry.get('cholesky')(upper=upper)(self)
+        return tensor_operator_registry.get('cholesky')(self, upper=upper)

     def cholesky_inverse(self, upper=False):
         r"""
         For details, please refer to :func:`mindspore.ops.cholesky_inverse`.
         """
-
-        return tensor_operator_registry.get('cholesky_inverse')(upper=upper)(self)
+        return tensor_operator_registry.get('cholesky_inverse')(self, upper=upper)

     def cholesky_solve(self, input2, upper=False):
         r"""
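`cholesky` and `cholesky_inverse` previously went through a two-step call: `get('cholesky')(upper=upper)` built the operator, which was then applied to `self`. Both now call the functional op directly with the tensor as the first argument. Usage is unaffected; a sketch assuming a symmetric positive-definite input:

    import numpy as np
    import mindspore as ms

    a = ms.Tensor(np.array([[4.0, 2.0], [2.0, 3.0]]), ms.float32)
    l = a.cholesky()  # lower-triangular factor L with L @ L.T == a
    print(np.allclose((l @ l.T).asnumpy(), a.asnumpy()))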
@@ -4092,63 +4089,54 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         .. warning::
             This is an experimental API that is subject to change or deletion.
         """
-        self._init_check()
         return tensor_operator_registry.get('cholesky_solve')(self, input2, upper)

     def conj(self):
         r"""
         For details, please refer to :func:`mindspore.ops.conj`.
         """
-        self._init_check()
         return tensor_operator_registry.get('conj')(self)

     def count_nonzero(self, axis=(), keep_dims=False, dtype=mstype.int32):
         r"""
         For details, please refer to :func:`mindspore.ops.count_nonzero`.
         """
-        self._init_check()
         return tensor_operator_registry.get('count_nonzero')(self, axis, keep_dims, dtype)

     def cross(self, other, dim=None):
         r"""
         For details, please refer to :func:`mindspore.ops.cross`.
         """
-        self._init_check()
         return tensor_operator_registry.get('cross')(self, other, dim)

     def erfinv(self):
         r"""
         For details, please refer to :func:`mindspore.ops.erfinv`.
         """
-        self._init_check()
         return tensor_operator_registry.get('erfinv')(self)

     def less_equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.less_equal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('less_equal')(self, other)

     def lcm(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.lcm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('lcm')(self, other)

     def ldexp(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.ldexp`.
         """
-        self._init_check()
         return tensor_operator_registry.get('ldexp')(self, other)

     def fold(self, output_size, kernel_size, dilation=1, padding=0, stride=1):
         r"""
         For details, please refer to :func:`mindspore.ops.fold`.
         """
-        self._init_check()
         return tensor_operator_registry.get('fold')(self, output_size, kernel_size, dilation, padding, stride)

     def unfold(self, kernel_size, dilation=1, padding=0, stride=1):
@@ -4159,70 +4147,62 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             This is an experimental API that is subject to change or deletion.

         """
-        self._init_check()
         return tensor_operator_registry.get('unfold')(self, kernel_size, dilation, padding, stride)

     def expand(self, size):
         r"""
         For details, please refer to :func:`mindspore.ops.broadcast_to`.
         """
-
+        if isinstance(size, Tensor):
+            size = tensor_operator_registry.get('tensortotuple')()(size)
         return tensor_operator_registry.get('expand')(self, size)

     def cumprod(self, dim, dtype=None):
         r"""
         For details, please refer to :func:`mindspore.ops.cumprod`.
         """
-        self._init_check()
         return tensor_operator_registry.get('cumprod')(self, dim, dtype)

     def multiply(self, value):
         r"""
         For details, please refer to :func:`mindspore.ops.multiply`.
         """
-        self._init_check()
         return tensor_operator_registry.get('multiply')(self, value)

     def div(self, value, *, rounding_mode=None):
         r"""
         For details, please refer to :func:`mindspore.ops.div`.
         """
-        self._init_check()
         return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)

     def divide(self, value, *, rounding_mode=None):
         r"""
         Alias for :func:`mindspore.Tensor.div`.
         """
-        self._init_check()
         return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)

     def eq(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.eq`.
         """
-        self._init_check()
         return tensor_operator_registry.get('equal')(self, other)

     def equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.equal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('equal')(self, other)

     def expm1(self):
         r"""
         For details, please refer to :func:`mindspore.ops.expm1`.
         """
-        self._init_check()
         return tensor_operator_registry.get('expm1')(self)

     def index_add(self, dim, index, source, *, alpha=1):
         r"""
         For details, please refer to :func:`mindspore.ops.index_add`.
         """
-        self._init_check()
         check_is_number(alpha, (int, float))
         source = tensor_operator_registry.get('__mul__')(source, alpha)
         return tensor_operator_registry.get('index_add')(self, indices=index, y=source, axis=dim)
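New behavior in the hunk above: `expand` now accepts a `Tensor` for `size`, normalizing it to a tuple via the 'tensortotuple' registry entry before dispatching to `broadcast_to`. A sketch:

    import mindspore as ms

    x = ms.Tensor([[1.0], [2.0], [3.0]], ms.float32)
    size = ms.Tensor([3, 4], ms.int32)  # converted to (3, 4) internally
    print(x.expand(size).shape)         # (3, 4)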
@@ -4231,42 +4211,36 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.greater`.
         """
-        self._init_check()
         return tensor_operator_registry.get('greater')(self, other)

     def greater_equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.greater_equal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('greater_equal')(self, other)

     def igamma(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.igamma`.
         """
-        self._init_check()
         return tensor_operator_registry.get('igamma')(self, other)

     def igammac(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.igammac`.
         """
-        self._init_check()
         return tensor_operator_registry.get('igammac')(self, other)

     def isinf(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isinf`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isinf')(self)

     def isnan(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isnan`.
         """
-        self._init_check()
         return tensor_operator_registry.get('isnan')(self)

     def flip(self, dims):
@@ -4320,14 +4294,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.le`.
         """
-        self._init_check()
         return tensor_operator_registry.get('le')(self, other)

     def less(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.less`.
         """
-        self._init_check()
         return tensor_operator_registry.get('less')(self, other)

     def lt(self, other):
@@ -4340,35 +4312,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_and`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logical_and')(self, other)

     def logical_not(self):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_not`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logical_not')(self)

     def logical_or(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_or`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logical_or')(self, other)

     def logical_xor(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_xor`.
         """
-        self._init_check()
         return tensor_operator_registry.get('logical_xor')(self, other)

     def lstsq(self, A):
         r"""
         For details, please refer to :func:`mindspore.ops.lstsq`.
         """
-        self._init_check()
         return tensor_operator_registry.get('lstsq')(self, A)

     @property
@@ -4392,28 +4359,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.mvlgamma`.
         """
-        self._init_check()
         return tensor_operator_registry.get('mvlgamma')(self, p)

     def matmul(self, tensor2):
         r"""
         For details, please refer to :func:`mindspore.ops.matmul`.
         """
-        self._init_check()
         return tensor_operator_registry.get('matmul')(self, tensor2)

     def inner(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.inner`.
         """
-        self._init_check()
         return tensor_operator_registry.get('inner')(self, other)

     def multinomial(self, num_samples, replacement=True, seed=None):
         r"""
         For details, please refer to :func:`mindspore.ops.multinomial`.
         """
-        self._init_check()
         return tensor_operator_registry.get('multinomial')(self, num_samples, replacement, seed)

     def matrix_power(self, n):
@@ -4424,35 +4387,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             This is an experimental API that is subject to change or deletion.

         """
-        self._init_check()
         return tensor_operator_registry.get('matrix_power')(self, n)

     def maximum(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.maximum`.
         """
-        self._init_check()
         return tensor_operator_registry.get('maximum')(self, other)

     def mm(self, mat2):
         r"""
         For details, please refer to :func:`mindspore.ops.mm`.
         """
-        self._init_check()
         return tensor_operator_registry.get('mm')(self, mat2)

     def msort(self):
         r"""
         For details, please refer to :func:`mindspore.ops.msort`.
         """
-        self._init_check()
         return tensor_operator_registry.get('msort')(self)

     def mul(self, value):
         r"""
         For details, please refer to :func:`mindspore.ops.mul`.
         """
-        self._init_check()
         return tensor_operator_registry.get('mul')(self, value)

     def nan_to_num(self, nan=0.0, posinf=None, neginf=None):
@@ -4465,31 +4423,29 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.neg`.
         """
-        self._init_check()
         return tensor_operator_registry.get('neg')(self)

     def ne(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.ne`.
         """
-        self._init_check()
         return tensor_operator_registry.get('ne')(self, other)

     def not_equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.not_equal`.
         """
-        self._init_check()
         return tensor_operator_registry.get('not_equal')(self, other)

-    def new_zeros(self, size,
+    def new_zeros(self, size, dtype=None):
         r"""
         Return a tensor of `size` filled with zeros.

-
-
+        .. warning::
+            For argument `size`, Tensor type input will be deprecated in the future version.

-
+        Args:
+            size (Union[int, tuple, list, Tensor]): An int, list or tuple of integers defining the output shape.
             dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned tensor has
                 the same dtype as `self`. Default: ``None``.

@@ -4497,7 +4453,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Tensor, the shape and dtype is defined above and filled with zeros.

         Raises:
-            TypeError: If `size` is
+            TypeError: If `size` is neither an int nor a tuple/list/Tensor of int.

         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
@@ -4512,21 +4468,17 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[0. 0.]
          [0. 0.]]
         """
-
-        if isinstance(size, list):
-            size = tuple(size)
-        self._init_check()
-        _dtype = self.dtype if dtype is None else dtype
-        return tensor_operator_registry.get('zeros')(size, _dtype)
+        return tensor_operator_registry.get('zeros')(size, dtype)

-    def new_ones(self, size,
+    def new_ones(self, size, dtype=None):
         r"""
         Return a tensor of `size` filled with ones.

-
-
+        .. warning::
+            For argument `size`, Tensor type input will be deprecated in the future version.

-
+        Args:
+            size (Union[int, tuple, list, Tensor]): An int, list or tuple of integers defining the output shape.
             dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned
                 tensor has the same dtype as `self`. Default: ``None``.

@@ -4534,7 +4486,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Tensor, the shape and dtype is defined above and filled with ones.

         Raises:
-            TypeError: If `size` is
+            TypeError: If `size` is neither an int nor a tuple/list/Tensor of int.

         Supported Platforms:
             ``Ascend`` ``GPU`` ``CPU``
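`new_zeros` and `new_ones` drop their explicit list-to-tuple conversion and `_dtype` resolution, passing `size` and `dtype` straight to the registered `zeros`/`ones` ops. A usage sketch with an explicit dtype:

    import mindspore as ms

    x = ms.Tensor([[1.0, 2.0], [3.0, 4.0]], ms.float32)
    print(x.new_zeros((2, 2), dtype=ms.int32))
    print(x.new_ones(3, dtype=ms.float32))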
@@ -4549,109 +4501,90 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1. 1.]
          [1. 1.]]
         """
-
-        if isinstance(size, list):
-            size = tuple(size)
-        self._init_check()
-        _dtype = self.dtype if dtype is None else dtype
-        return tensor_operator_registry.get('ones')(size, _dtype)
+        return tensor_operator_registry.get('ones')(size, dtype)

     def sign(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sign`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sign')(self)

     def signbit(self):
         """
         For details, please refer to :func:`mindspore.ops.signbit`.
         """
-        self._init_check()
         return tensor_operator_registry.get('signbit')(self)

     def sgn(self):
         """
         For details, please refer to :func:`mindspore.ops.sgn`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sgn')(self)

     def sin(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sin`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sin')(self)

     def sinc(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sinc`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sinc')(self)

     def sinh(self):
         r"""
         For details, please refer to :func:`mindspore.ops.sinh`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sinh')(self)

     def sort(self, axis=-1, descending=False):
         r"""
         For details, please refer to :func:`mindspore.ops.sort`.
         """
-        self._init_check()
         return tensor_operator_registry.get('sort')(self, axis=axis, descending=descending)

     def argsort(self, axis=-1, descending=False):
         """
         For details, please refer to :func:`mindspore.ops.argsort`.
         """
-        self._init_check()
         return tensor_operator_registry.get('argsort')(self, axis, descending)

     def trunc(self):
         r"""
         For details, please refer to :func:`mindspore.ops.trunc`.
         """
-        self._init_check()
         return tensor_operator_registry.get('trunc')(self)

     def where(self, condition, y):
         r"""
         For details, please refer to :func:`mindspore.ops.where`.
         """
-        self._init_check()
         return tensor_operator_registry.get('where')(condition, self, y)

     def imag(self):
         r"""
         For details, please refer to :func:`mindspore.ops.imag`.
         """
-        self._init_check()
         return tensor_operator_registry.get('imag')(self)

     def quantile(self, q, axis=None, keepdims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.quantile`.
         """
-        self._init_check()
         return tensor_operator_registry.get('quantile')(self, q, axis, keepdims)

     def nanquantile(self, q, axis=None, keepdims=False):
         """
         For details, please refer to :func:`mindspore.ops.nanquantile`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nanquantile')(self, q, axis, keepdims)

     def orgqr(self, input2):
         r"""
         For details, please refer to :func:`mindspore.ops.orgqr`.
         """
-        self._init_check()
         return tensor_operator_registry.get('orgqr')(self, input2)

     def lu_solve(self, LU_data, LU_pivots):
@@ -4661,7 +4594,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         .. warning::
             This is an experimental API that is subject to change or deletion.
         """
-        self._init_check()
         return tensor_operator_registry.get('lu_solve')(self, LU_data, LU_pivots)


@@ -4669,14 +4601,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         r"""
         For details, please refer to :func:`mindspore.ops.nextafter`.
         """
-        self._init_check()
         return tensor_operator_registry.get('nextafter')(self, other)

     def qr(self, some=True):
         r"""
         For details, please refer to :func:`mindspore.ops.qr`.
         """
-        self._init_check()
         validator.check_value_type('some', some, bool, 'Tensor.qr')
         return tensor_operator_registry.get('qr')(self, 'reduced' if some else 'complete')

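`Tensor.qr` keeps its boolean `some` flag and maps it onto the string modes of the functional op ('reduced' when True, 'complete' otherwise). A sketch of the two modes:

    import numpy as np
    import mindspore as ms

    a = ms.Tensor(np.random.rand(4, 3).astype(np.float32))
    q, r = a.qr()              # 'reduced': q is 4x3, r is 3x3
    q2, r2 = a.qr(some=False)  # 'complete': q2 is 4x4, r2 is 4x3
    print(q.shape, r.shape, q2.shape, r2.shape)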
@@ -4686,7 +4616,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         For details, please refer to :func:`mindspore.ops.ormqr`.
         Args `input2` and `input3` correspond to the args `tau` and `other` of :func:`mindspore.ops.ormqr`.
         """
-        self._init_check()
         return tensor_operator_registry.get('ormqr')(self, input2, input3, left, transpose)


@@ -4728,7 +4657,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(output)
         [5. 6. 3. 7.]
         """
-        self._init_check()
         return tensor_operator_registry.get('masked_scatter')()(self, mask, x)


@@ -4780,7 +4708,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [[1 5 3]
          [4 8 9]]
         """
-        self._init_check()
         validator.check_value_type('accumulate', accumulate, bool, 'Tensor.index_put')
         _index_put = tensor_operator_registry.get('index_put')(0 if accumulate is False else 1)
         return _index_put(self, values, indices)
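Unlike most entries, 'index_put' (and 'masked_scatter' above) is still registered as a factory: the registry call instantiates the operator with the accumulate flag folded to 0/1, and the instance is then applied to the runtime inputs. A usage sketch that reproduces the docstring output shown in the hunk above:

    import mindspore as ms

    x = ms.Tensor([[1, 2, 3], [4, 5, 6]], ms.int32)
    values = ms.Tensor([5, 8, 9], ms.int32)
    indices = [ms.Tensor([0, 1, 1], ms.int32), ms.Tensor([1, 1, 2], ms.int32)]
    print(x.index_put(indices, values))
    # [[1 5 3]
    #  [4 8 9]]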
@@ -4799,7 +4726,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> x = ms.Tensor([1, 2, 3], ms.int64)
         >>> x._offload()
         """
-        self._init_check()
         return Tensor_._offload(self)


@@ -4841,9 +4767,9 @@ def _check_tensor_input(input_data=None, dtype=None, shape=None, init=None):
         raise ValueError("init, dtype and shape must have values at the same time.")

     if input_data is not None:
-        if isinstance(input_data, np.ndarray) and input_data.ndim
+        if isinstance(input_data, np.ndarray) and input_data.ndim >= 1 and input_data.size == 0:
             raise ValueError("input_data can not contain zero dimension.")
-        if isinstance(input_data, (tuple, list)) and np.array(input_data).ndim
+        if isinstance(input_data, (tuple, list)) and np.array(input_data).ndim >= 1 \
                 and np.array(input_data).size == 0:
             raise ValueError("input_data can not contain zero dimension.")

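The restored guard in `_check_tensor_input` rejects empty inputs of rank one or higher. A standalone sketch of the condition it applies (the helper name is illustrative, not from the source):

    import numpy as np

    def contains_zero_dimension(input_data):
        # Mirrors the restored check: rank >= 1 but no elements.
        arr = np.array(input_data) if isinstance(input_data, (tuple, list)) else input_data
        return isinstance(arr, np.ndarray) and arr.ndim >= 1 and arr.size == 0

    print(contains_zero_dimension(np.ones((0, 3))))  # True  -> raises ValueError
    print(contains_zero_dimension([[], []]))         # True  -> raises ValueError
    print(contains_zero_dimension(np.float32(1.0)))  # False -> accepted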