mindspore-2.0.0a0-cp37-none-any.whl → mindspore-2.0.0rc1-cp37-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +9064 -0
- mindspore/__init__.py +4 -2
- mindspore/_akg/akg/composite/build_module.py +11 -0
- mindspore/_akg/akg/config/repository_cuda.json +11 -0
- mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -3
- mindspore/_c_dataengine.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +16 -1
- mindspore/_extends/parse/parser.py +107 -22
- mindspore/_extends/parse/resources.py +0 -7
- mindspore/_extends/parse/standard_method.py +885 -413
- mindspore/_mindspore_offline_debug.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +52 -57
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +38 -20
- mindspore/boost/dim_reduce.py +3 -3
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +4 -6
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +41 -7
- mindspore/common/api.py +215 -141
- mindspore/common/dtype.py +8 -1
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +4 -2
- mindspore/common/jit_config.py +17 -13
- mindspore/common/mutable.py +33 -13
- mindspore/common/parameter.py +23 -21
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +62 -41
- mindspore/common/tensor.py +852 -1154
- mindspore/communication/__init__.py +2 -2
- mindspore/communication/_comm_helper.py +11 -4
- mindspore/communication/management.py +22 -21
- mindspore/config/op_info.config +501 -1008
- mindspore/config/super_bar_config.json +512 -0
- mindspore/context.py +201 -23
- mindspore/dataset/__init__.py +6 -6
- mindspore/dataset/audio/__init__.py +7 -7
- mindspore/dataset/audio/transforms.py +670 -30
- mindspore/dataset/audio/utils.py +47 -4
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/core/config.py +210 -14
- mindspore/dataset/core/validator_helpers.py +2 -2
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +322 -66
- mindspore/dataset/engine/datasets_audio.py +80 -76
- mindspore/dataset/engine/datasets_standard_format.py +51 -38
- mindspore/dataset/engine/datasets_text.py +232 -118
- mindspore/dataset/engine/datasets_user_defined.py +41 -17
- mindspore/dataset/engine/datasets_vision.py +746 -225
- mindspore/dataset/engine/graphdata.py +75 -10
- mindspore/dataset/engine/iterators.py +45 -5
- mindspore/dataset/engine/offload.py +48 -28
- mindspore/dataset/engine/validators.py +117 -8
- mindspore/dataset/text/__init__.py +6 -5
- mindspore/dataset/text/transforms.py +86 -3
- mindspore/dataset/text/utils.py +6 -4
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +3 -2
- mindspore/dataset/transforms/c_transforms.py +1 -1
- mindspore/dataset/transforms/transforms.py +2 -2
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +2 -3
- mindspore/dataset/vision/c_transforms.py +9 -9
- mindspore/dataset/vision/py_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +2 -0
- mindspore/dataset/vision/transforms.py +160 -161
- mindspore/dataset/vision/utils.py +3 -3
- mindspore/experimental/map_parameter.py +38 -26
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +44 -9
- mindspore/include/api/delegate.h +1 -1
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_parallel_runner.h +2 -2
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +19 -3
- mindspore/include/api/types.h +3 -3
- mindspore/include/dataset/constants.h +7 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/include/mindapi/base/type_id.h +1 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libicudata.so.69 +0 -0
- mindspore/lib/libicui18n.so.69 +0 -0
- mindspore/lib/libicuuc.so.69 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/lib/plugin/{libmindspore_ascend.so → libmindspore_ascend.so.2} +0 -0
- mindspore/log.py +1 -1
- mindspore/mindrecord/filereader.py +18 -0
- mindspore/mindrecord/filewriter.py +197 -34
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
- mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
- mindspore/mindrecord/tools/csv_to_mr.py +3 -3
- mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
- mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/nn/__init__.py +0 -4
- mindspore/nn/cell.py +204 -132
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +7 -6
- mindspore/nn/layer/__init__.py +5 -4
- mindspore/nn/layer/activation.py +40 -89
- mindspore/nn/layer/basic.py +255 -624
- mindspore/nn/layer/channel_shuffle.py +7 -6
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +41 -4
- mindspore/nn/layer/conv.py +64 -28
- mindspore/nn/layer/dense.py +9 -8
- mindspore/nn/layer/embedding.py +27 -25
- mindspore/nn/layer/image.py +53 -46
- mindspore/nn/layer/math.py +97 -105
- mindspore/nn/layer/normalization.py +117 -86
- mindspore/nn/layer/padding.py +185 -95
- mindspore/nn/layer/pooling.py +817 -414
- mindspore/nn/layer/rnn_cells.py +10 -15
- mindspore/nn/layer/rnns.py +37 -38
- mindspore/nn/layer/thor_layer.py +11 -12
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +5 -4
- mindspore/nn/loss/loss.py +334 -199
- mindspore/nn/optim/ada_grad.py +6 -6
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +4 -5
- mindspore/nn/optim/adam.py +126 -62
- mindspore/nn/optim/adamax.py +3 -4
- mindspore/nn/optim/adasum.py +6 -6
- mindspore/nn/optim/asgd.py +2 -2
- mindspore/nn/optim/ftrl.py +67 -38
- mindspore/nn/optim/lamb.py +4 -5
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +43 -4
- mindspore/nn/optim/momentum.py +6 -5
- mindspore/nn/optim/optimizer.py +3 -1
- mindspore/nn/optim/proximal_ada_grad.py +2 -2
- mindspore/nn/optim/rmsprop.py +1 -1
- mindspore/nn/optim/rprop.py +8 -9
- mindspore/nn/optim/sgd.py +19 -13
- mindspore/nn/optim/thor.py +10 -15
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +4 -4
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +9 -15
- mindspore/nn/probability/distribution/bernoulli.py +3 -3
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +5 -7
- mindspore/nn/probability/distribution/cauchy.py +3 -3
- mindspore/nn/probability/distribution/distribution.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +3 -3
- mindspore/nn/probability/distribution/half_normal.py +15 -11
- mindspore/nn/probability/distribution/laplace.py +16 -13
- mindspore/nn/probability/distribution/logistic.py +2 -2
- mindspore/nn/probability/distribution/normal.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/student_t.py +20 -15
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +27 -10
- mindspore/nn/wrap/grad_reducer.py +2 -2
- mindspore/nn/wrap/loss_scale.py +40 -24
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +35 -30
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +22 -19
- mindspore/numpy/utils.py +1 -1
- mindspore/numpy/utils_const.py +108 -58
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +86 -117
- mindspore/ops/_grad/grad_base.py +23 -1
- mindspore/ops/_grad/grad_clip_ops.py +2 -3
- mindspore/ops/_grad/grad_comm_ops.py +34 -24
- mindspore/ops/_grad/grad_implementations.py +9 -45
- mindspore/ops/_grad/grad_inner_ops.py +47 -4
- mindspore/ops/_grad/grad_math_ops.py +142 -117
- mindspore/ops/_grad/grad_nn_ops.py +71 -165
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +7 -6
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
- mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
- mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
- mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -611
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_register_for_op.py +1 -0
- mindspore/ops/_utils/__init__.py +1 -2
- mindspore/ops/_utils/utils.py +19 -40
- mindspore/ops/_vmap/vmap_array_ops.py +116 -38
- mindspore/ops/_vmap/vmap_base.py +16 -9
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
- mindspore/ops/_vmap/vmap_image_ops.py +12 -5
- mindspore/ops/_vmap/vmap_math_ops.py +46 -5
- mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
- mindspore/ops/_vmap/vmap_random_ops.py +1 -1
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
- mindspore/ops/composite/__init__.py +7 -8
- mindspore/ops/composite/base.py +101 -47
- mindspore/ops/composite/math_ops.py +188 -158
- mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
- mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
- mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
- mindspore/ops/function/__init__.py +152 -8
- mindspore/ops/function/array_func.py +2555 -674
- mindspore/ops/function/clip_func.py +209 -13
- mindspore/ops/function/debug_func.py +2 -2
- mindspore/ops/function/grad/__init__.py +2 -1
- mindspore/ops/function/grad/grad_func.py +147 -62
- mindspore/ops/function/image_func.py +54 -38
- mindspore/ops/function/linalg_func.py +167 -16
- mindspore/ops/function/math_func.py +4849 -1492
- mindspore/ops/function/nn_func.py +2573 -988
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +3 -3
- mindspore/ops/function/random_func.py +790 -73
- mindspore/ops/function/sparse_func.py +98 -78
- mindspore/ops/function/sparse_unary_func.py +54 -53
- mindspore/ops/function/spectral_func.py +27 -24
- mindspore/ops/function/vmap_func.py +22 -2
- mindspore/ops/functional.py +97 -37
- mindspore/ops/op_info_register.py +70 -28
- mindspore/ops/operations/__init__.py +47 -14
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +5 -5
- mindspore/ops/operations/_grad_ops.py +276 -187
- mindspore/ops/operations/_inner_ops.py +319 -113
- mindspore/ops/operations/_ms_kernel.py +10 -8
- mindspore/ops/operations/_ocr_ops.py +9 -9
- mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
- mindspore/ops/operations/_quant_ops.py +137 -102
- mindspore/ops/operations/_rl_inner_ops.py +121 -60
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1004 -2
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +801 -466
- mindspore/ops/operations/comm_ops.py +51 -49
- mindspore/ops/operations/control_ops.py +2 -2
- mindspore/ops/operations/custom_ops.py +123 -44
- mindspore/ops/operations/debug_ops.py +24 -24
- mindspore/ops/operations/image_ops.py +240 -153
- mindspore/ops/operations/inner_ops.py +34 -50
- mindspore/ops/operations/linalg_ops.py +31 -9
- mindspore/ops/operations/math_ops.py +988 -757
- mindspore/ops/operations/nn_ops.py +965 -819
- mindspore/ops/operations/other_ops.py +51 -40
- mindspore/ops/operations/random_ops.py +204 -122
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +254 -93
- mindspore/ops/operations/spectral_ops.py +35 -3
- mindspore/ops/primitive.py +111 -9
- mindspore/parallel/_auto_parallel_context.py +189 -83
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +99 -7
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +7 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
- mindspore/parallel/_utils.py +1 -2
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +37 -34
- mindspore/parallel/shard.py +17 -18
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +69 -47
- mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
- mindspore/profiler/parser/base_timeline_generator.py +49 -56
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
- mindspore/profiler/parser/hwts_log_parser.py +1 -1
- mindspore/profiler/parser/integrator.py +15 -14
- mindspore/profiler/parser/minddata_analyzer.py +2 -2
- mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +2 -1
- mindspore/profiler/profiling.py +218 -186
- mindspore/rewrite/__init__.py +3 -1
- mindspore/rewrite/api/node.py +1 -114
- mindspore/rewrite/api/node_type.py +3 -0
- mindspore/rewrite/api/pattern_engine.py +31 -1
- mindspore/rewrite/api/scoped_value.py +4 -4
- mindspore/rewrite/api/symbol_tree.py +3 -78
- mindspore/rewrite/api/tree_node_helper.py +1 -1
- mindspore/rewrite/ast_creator_register.py +1 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -2
- mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
- mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
- mindspore/rewrite/namespace.py +0 -2
- mindspore/rewrite/node.py +157 -11
- mindspore/rewrite/parsers/assign_parser.py +231 -53
- mindspore/rewrite/parsers/class_def_parser.py +187 -109
- mindspore/rewrite/parsers/for_parser.py +24 -14
- mindspore/rewrite/parsers/function_def_parser.py +21 -4
- mindspore/rewrite/parsers/if_parser.py +6 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +256 -133
- mindspore/rewrite/symbol_tree_builder.py +38 -1
- mindspore/run_check/_check_version.py +69 -63
- mindspore/run_check/run_check.py +2 -1
- mindspore/scipy/linalg.py +10 -114
- mindspore/scipy/ops.py +2 -2
- mindspore/scipy/ops_wrapper.py +1 -1
- mindspore/scipy/optimize/_bfgs.py +1 -1
- mindspore/scipy/optimize/_lagrange.py +200 -0
- mindspore/scipy/optimize/line_search.py +3 -2
- mindspore/scipy/optimize/minimize.py +41 -2
- mindspore/scipy/sparse/__init__.py +2 -2
- mindspore/scipy/sparse/linalg.py +4 -464
- mindspore/scipy/utils.py +1 -1
- mindspore/scipy/utils_const.py +7 -1
- mindspore/train/__init__.py +1 -1
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +273 -102
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +2 -2
- mindspore/train/callback/_checkpoint.py +3 -3
- mindspore/train/callback/_early_stop.py +3 -3
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +29 -31
- mindspore/train/callback/_loss_monitor.py +3 -3
- mindspore/train/callback/_on_request_exit.py +3 -3
- mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
- mindspore/train/callback/_summary_collector.py +23 -16
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +15 -3
- mindspore/train/dataset_helper.py +10 -15
- mindspore/train/loss_scale_manager.py +8 -11
- mindspore/train/metrics/__init__.py +1 -1
- mindspore/train/metrics/bleu_score.py +1 -1
- mindspore/train/metrics/confusion_matrix.py +1 -1
- mindspore/train/metrics/cosine_similarity.py +1 -1
- mindspore/train/metrics/dice.py +2 -2
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +4 -3
- mindspore/train/metrics/mean_surface_distance.py +2 -2
- mindspore/train/metrics/occlusion_sensitivity.py +1 -1
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +1 -1
- mindspore/train/metrics/recall.py +1 -1
- mindspore/train/metrics/roc.py +2 -2
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +116 -37
- mindspore/train/model.py +45 -28
- mindspore/train/serialization.py +295 -188
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -13
- mindspore/train/train_thor/convert_utils.py +2 -2
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/version.py +1 -1
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +648 -574
- mindspore/compression/__init__.py +0 -19
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -515
- mindspore/compression/quant/__init__.py +0 -28
- mindspore/compression/quant/qat.py +0 -634
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -140
- mindspore/nn/probability/dpn/vae/vae.py +0 -124
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
- mindspore/ops/composite/array_ops.py +0 -241
- mindspore/ops/composite/clip_ops.py +0 -134
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
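
Two structural moves in the listing above are easy to miss: the public mindspore/nn/transformer package moved to the private mindspore/parallel/_transformer (with a new mindspore/nn/layer/transformer.py taking over), and mindspore/compression plus mindspore/parallel/nn were deleted outright. Below is a hedged sketch of the import-path consequence; that mindspore.nn re-exports Transformer from the new nn/layer/transformer.py is an assumption inferred from the file moves, not from release notes.

# Hedged sketch: probe which transformer layout a given install ships.
# Assumption: mindspore.nn re-exports the classes defined in the new
# nn/layer/transformer.py; verify against the installed version's docs.
try:
    from mindspore.nn.transformer import Transformer   # 2.0.0a0 public package (moved in rc1)
except ImportError:
    from mindspore.nn import Transformer               # 2.0.0rc1: new nn/layer/transformer.py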
mindspore/common/tensor.py
CHANGED
|
@@ -16,21 +16,24 @@
|
|
|
16
16
|
|
|
17
17
|
__all__ = ['Tensor']
|
|
18
18
|
|
|
19
|
+
import abc
|
|
19
20
|
import math
|
|
20
21
|
import numbers
|
|
21
22
|
import numpy as np
|
|
22
23
|
|
|
23
|
-
from mindspore.communication.management import
|
|
24
|
+
from mindspore.communication.management import get_group_size
|
|
24
25
|
from mindspore.common._utils import is_shape_unknown
|
|
25
26
|
from mindspore.common.seed import get_seed
|
|
26
27
|
from mindspore import context
|
|
27
28
|
from mindspore import log as logger
|
|
28
29
|
from mindspore.common import dtype as mstype
|
|
29
|
-
|
|
30
|
+
|
|
31
|
+
from mindspore.common._utils import get_slice_num
|
|
30
32
|
from mindspore.common._register_for_tensor import tensor_operator_registry
|
|
31
33
|
from mindspore._c_expression import Tensor as Tensor_
|
|
32
|
-
from mindspore
|
|
33
|
-
from mindspore._checkparam import
|
|
34
|
+
from mindspore import _checkparam as validator
|
|
35
|
+
from mindspore._checkparam import check_is_number, is_stub_tensor
|
|
36
|
+
from mindspore._check_jit_forbidden_api import jit_forbidden_register
|
|
34
37
|
|
|
35
38
|
np_types = (np.int8, np.int16, np.int32, np.int64,
|
|
36
39
|
np.uint8, np.uint16, np.uint32, np.uint64, np.float16,
|
|
@@ -40,14 +43,14 @@ np_types = (np.int8, np.int16, np.int32, np.int64,
|
|
|
40
43
|
def _check_input_data_type(input_data):
|
|
41
44
|
"""Check the type of input_data for Tensor"""
|
|
42
45
|
validator.check_value_type('input_data', input_data,
|
|
43
|
-
(Tensor_, np.ndarray, np.str_, list, tuple, float, int, bool, complex),
|
|
46
|
+
(Tensor_, Tensor, np.ndarray, np.str_, list, tuple, float, int, bool, complex),
|
|
44
47
|
'Tensor')
|
|
45
48
|
valid_dtypes = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64,
|
|
46
49
|
np.float16, np.float32, np.float64, np.bool_, np.str_, np.complex64, np.complex128)
|
|
47
50
|
if isinstance(input_data, np.ndarray) and input_data.dtype not in valid_dtypes and \
|
|
48
51
|
input_data.dtype.kind != 'U' and input_data.dtype.kind != 'S': # Support dtype np.str_
|
|
52
|
+
new_line = '\n'
|
|
49
53
|
for index, x in np.ndenumerate(input_data):
|
|
50
|
-
new_line = '\n'
|
|
51
54
|
if np.array(x).dtype not in valid_dtypes:
|
|
52
55
|
raise TypeError(f"initializing tensor by numpy array failed, because the "
|
|
53
56
|
f"element type '{type(x)}' of array is not supported.\n"
|
|
@@ -65,7 +68,13 @@ def _check_input_data_type(input_data):
|
|
|
65
68
|
f"For Tensor, the input_data is {input_data} that contain unsupported element.")
|
|
66
69
|
|
|
67
70
|
|
|
68
|
-
class
|
|
71
|
+
class _TensorMeta(type(Tensor_), abc.ABCMeta):
|
|
72
|
+
"""
|
|
73
|
+
Meta class for Tensor. Used internally.
|
|
74
|
+
"""
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
69
78
|
"""
|
|
70
79
|
Tensor is a data structure that stores an n-dimensional array.
|
|
71
80
|
|
|
@@ -95,6 +104,10 @@ class Tensor(Tensor_):
|
|
|
95
104
|
Outputs:
|
|
96
105
|
Tensor.
|
|
97
106
|
|
|
107
|
+
Note:
|
|
108
|
+
The default value None of `input_data` works as a placeholder, it does not mean that we can create a NoneType
|
|
109
|
+
Tensor.
|
|
110
|
+
|
|
98
111
|
Examples:
|
|
99
112
|
>>> import numpy as np
|
|
100
113
|
>>> import mindspore as ms
|
|
@@ -149,8 +162,12 @@ class Tensor(Tensor_):
|
|
|
149
162
|
|
|
150
163
|
def __init__(self, input_data=None, dtype=None, shape=None, init=None, internal=False, const_arg=False):
|
|
151
164
|
self.init_finished = False
|
|
165
|
+
if is_stub_tensor(input_data):
|
|
166
|
+
input_data = input_data.stub_sync()
|
|
167
|
+
|
|
152
168
|
if internal:
|
|
153
|
-
|
|
169
|
+
if input_data is not None:
|
|
170
|
+
Tensor_.__init__(self, input_data)
|
|
154
171
|
else:
|
|
155
172
|
# If input data is numpy number, convert it to np array
|
|
156
173
|
if isinstance(input_data, np_types):
|
|
@@ -168,8 +185,8 @@ class Tensor(Tensor_):
|
|
|
168
185
|
else:
|
|
169
186
|
_check_input_data_type(input_data)
|
|
170
187
|
if dtype is not None:
|
|
171
|
-
validator.check_type_name(
|
|
172
|
-
|
|
188
|
+
validator.check_type_name('dtype', dtype, mstype.number_type +
|
|
189
|
+
(mstype.bool_, mstype.string), "Tensor")
|
|
173
190
|
else:
|
|
174
191
|
dtype = self._set_default_dtype(input_data, dtype)
|
|
175
192
|
|
|
@@ -180,8 +197,8 @@ class Tensor(Tensor_):
|
|
|
180
197
|
Tensor_.__init__(self, input_data, dtype)
|
|
181
198
|
else:
|
|
182
199
|
Tensor_.__init__(self, input_data)
|
|
200
|
+
validator.check_value_type('const_arg', const_arg, bool, 'Tensor')
|
|
183
201
|
|
|
184
|
-
validator.check_value_type('const_arg', const_arg, bool, 'Tensor')
|
|
185
202
|
self.const_arg = const_arg
|
|
186
203
|
self.virtual_flag = False
|
|
187
204
|
self.init = init
|
|
@@ -196,6 +213,16 @@ class Tensor(Tensor_):
|
|
|
196
213
|
self.slice_num_of_persistent_data_ = None
|
|
197
214
|
self.slice_shape_of_persistent_data_ = None
|
|
198
215
|
|
|
216
|
+
@classmethod
|
|
217
|
+
def __subclasshook__(cls, sub):
|
|
218
|
+
"""
|
|
219
|
+
Subclass with stub_sync attr will be instance of Tensor
|
|
220
|
+
"""
|
|
221
|
+
if cls is Tensor:
|
|
222
|
+
if any("stub_sync" in s.__dict__ for s in sub.__mro__):
|
|
223
|
+
return True
|
|
224
|
+
return NotImplemented
|
|
225
|
+
|
|
199
226
|
@staticmethod
|
|
200
227
|
def _set_default_dtype(input_data, dtype):
|
|
201
228
|
"""Set tensor default dtype"""
|
|
@@ -301,22 +328,16 @@ class Tensor(Tensor_):
|
|
|
301
328
|
return tensor_operator_registry.get('__add__')(self, other)
|
|
302
329
|
|
|
303
330
|
def __and__(self, other):
|
|
304
|
-
if Tensor._use_logical_kernel(self, other):
|
|
305
|
-
return tensor_operator_registry.get('logical_and')(self, other)
|
|
306
331
|
if isinstance(other, (int, bool, float, Tensor)):
|
|
307
332
|
return tensor_operator_registry.get('bitwise_and')(self, other)
|
|
308
333
|
raise TypeError("Unsupported operand type(s) for &: 'Tensor' and '{}'".format(type(other)))
|
|
309
334
|
|
|
310
335
|
def __xor__(self, other):
|
|
311
|
-
if Tensor._use_logical_kernel(self, other):
|
|
312
|
-
return tensor_operator_registry.get('logical_xor')(self, other)
|
|
313
336
|
if isinstance(other, (int, bool, float, Tensor)):
|
|
314
337
|
return tensor_operator_registry.get('bitwise_xor')(self, other)
|
|
315
338
|
raise TypeError("Unsupported operand type(s) for ^: 'Tensor' and '{}'".format(type(other)))
|
|
316
339
|
|
|
317
340
|
def __or__(self, other):
|
|
318
|
-
if Tensor._use_logical_kernel(self, other):
|
|
319
|
-
return tensor_operator_registry.get('logical_or')(self, other)
|
|
320
341
|
if isinstance(other, (int, bool, float, Tensor)):
|
|
321
342
|
return tensor_operator_registry.get('bitwise_or')(self, other)
|
|
322
343
|
raise TypeError("Unsupported operand type(s) for |: 'Tensor' and '{}'".format(type(other)))
|
|
@@ -449,6 +470,20 @@ class Tensor(Tensor_):
|
|
|
449
470
|
"""Return the number of tensor dimensions."""
|
|
450
471
|
return len(self._shape)
|
|
451
472
|
|
|
473
|
+
@property
|
|
474
|
+
def H(self):
|
|
475
|
+
"""
|
|
476
|
+
Returns a view of a matrix (2-D tensor) conjugated and transposed.
|
|
477
|
+
x.H is equivalent to `mindspore.Tensor.swapaxes(0, 1).conj()` for complex matrices and
|
|
478
|
+
`mindspore.Tensor.swapaxes(0, 1)` for real matrices.
|
|
479
|
+
"""
|
|
480
|
+
if self.ndim != 2:
|
|
481
|
+
raise ValueError(f"For tensor.H only support 2-D Tensor, but got {self.ndim}-D.")
|
|
482
|
+
output = self.swapaxes(0, 1)
|
|
483
|
+
if self.dtype in (mstype.complex64, mstype.complex128):
|
|
484
|
+
return output.conj()
|
|
485
|
+
return output
|
|
486
|
+
|
|
452
487
|
@property
|
|
453
488
|
def has_init(self):
|
|
454
489
|
"""Whether tensor is initialized."""
|
|
@@ -500,168 +535,13 @@ class Tensor(Tensor_):
|
|
|
500
535
|
|
|
501
536
|
return Tensor(Tensor_.from_numpy(array))
|
|
502
537
|
|
|
503
|
-
@staticmethod
|
|
504
|
-
def frombuffer(buffer, dtype=mstype.float64, count=-1, offset=0):
|
|
505
|
-
r"""
|
|
506
|
-
Creates a 1-dimensional :class:`Tensor` from an object that implements
|
|
507
|
-
the Python buffer protocol.
|
|
508
|
-
Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of
|
|
509
|
-
the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count`
|
|
510
|
-
elements.
|
|
511
|
-
|
|
512
|
-
Args:
|
|
513
|
-
buffer (object): a Python object that exposes the buffer interface.
|
|
514
|
-
dtype (mindspore.dtype): the desired data type of returned tensor.
|
|
515
|
-
count (int, optional): the number of desired elements to be read. If negative,
|
|
516
|
-
all the elements (until the end of the buffer) will be read. Default: -1.
|
|
517
|
-
offset (int, optional): the number of bytes to skip at the start of the buffer. Default: 0.
|
|
518
|
-
|
|
519
|
-
Returns:
|
|
520
|
-
a 1-dimensional Tensor from an object that implements the Python buffer protocol.
|
|
521
|
-
|
|
522
|
-
Supported Platforms:
|
|
523
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
524
|
-
|
|
525
|
-
Examples:
|
|
526
|
-
>>> from array import array
|
|
527
|
-
>>> import numpy as np
|
|
528
|
-
>>> import mindspore
|
|
529
|
-
>>> from mindspore import Tensor
|
|
530
|
-
>>> input_array = array("d", [1, 2, 3, 4])
|
|
531
|
-
>>> input_array
|
|
532
|
-
array('d', [1.0, 2.0, 3.0, 4.0])
|
|
533
|
-
>>> output = Tensor.frombuffer(input_array, mindspore.int32)
|
|
534
|
-
>>> print(output)
|
|
535
|
-
[1 2 3 4]
|
|
536
|
-
"""
|
|
537
|
-
res = np.frombuffer(buffer=buffer, dtype=np.float64, count=count, offset=offset)
|
|
538
|
-
result = Tensor(res, dtype=dtype)
|
|
539
|
-
return result
|
|
540
|
-
|
|
541
|
-
@staticmethod
|
|
542
|
-
def empty_strided(size, stride, dtype=mstype.float64, seed=None):
|
|
543
|
-
r"""
|
|
544
|
-
Creates a tensor with the specified :attr:`size` and :attr:`stride` and filled with undefined data.
|
|
545
|
-
|
|
546
|
-
Args:
|
|
547
|
-
size (tuple of python:ints): the shape of the output tensor.
|
|
548
|
-
stride (tuple of python:ints): the strides of the output tensor.
|
|
549
|
-
dtype (mindspore.dtype, optional): the desired data type of returned tensor.
|
|
550
|
-
|
|
551
|
-
Returns:
|
|
552
|
-
a tensor with the specified size and stride and filled with undefined data.
|
|
553
|
-
|
|
554
|
-
Supported Platforms:
|
|
555
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
556
|
-
|
|
557
|
-
Examples:
|
|
558
|
-
>>> from mindspore import Tensor
|
|
559
|
-
>>> size = (3, 3)
|
|
560
|
-
>>> stride = (1, 3)
|
|
561
|
-
>>> output = Tensor.empty_strided(size, stride, seed = 0)
|
|
562
|
-
>>> print(output)
|
|
563
|
-
[[0.00000000e+00 7.15189366e+10 0.00000000e+00]
|
|
564
|
-
[0.00000000e+00 0.00000000e+00 6.45894113e+10]
|
|
565
|
-
[0.00000000e+00 8.91773001e+10 9.63662761e+10]]
|
|
566
|
-
"""
|
|
567
|
-
np.random.seed(seed)
|
|
568
|
-
tensor_ = Tensor(np.random.uniform(low=0, high=10e10, size=size))
|
|
569
|
-
tensor_array = tensor_.asnumpy()
|
|
570
|
-
stride_tensor = tensor_.as_strided(shape=size, strides=stride)
|
|
571
|
-
stride_array = stride_tensor.asnumpy()
|
|
572
|
-
stride_array.resize(len(stride_array) * len(stride_array[0]))
|
|
573
|
-
for i in range(size[0]):
|
|
574
|
-
for j in range(size[1]):
|
|
575
|
-
if not sum(stride_array - tensor_array[i][j]) < 0.01:
|
|
576
|
-
tensor_array[i][j] = 0.0
|
|
577
|
-
return Tensor(tensor_array, dtype=dtype)
|
|
578
|
-
|
|
579
|
-
@staticmethod
|
|
580
|
-
def poisson(shape, mean, seed=0, seed2=0):
|
|
581
|
-
r"""
|
|
582
|
-
Returns a tensor of the same size as `input` with each element sampled from a Poisson
|
|
583
|
-
distribution with rate parameter given by the corresponding element in `input` i.e.,
|
|
584
|
-
\text{out}_i \sim \text{Poisson}(\text{input}_i)out*i*∼Poisson(input*i*),
|
|
585
|
-
and self as a tensor is the μ parameter .the distribution was constructed with.
|
|
586
|
-
The parameter defines mean number of occurrences of the event.
|
|
587
|
-
It must be greater than 0. With float32 data type.
|
|
588
|
-
|
|
589
|
-
Args:
|
|
590
|
-
seed (int, option): set the random seed (0 to 2**32)
|
|
591
|
-
seed2 (int, option): set the random seed2 (0 to 2**32)
|
|
592
|
-
|
|
593
|
-
Inputs:
|
|
594
|
-
- **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
|
|
595
|
-
|
|
596
|
-
Returns:
|
|
597
|
-
out (Union[Tensor, int]), with the same shape as input_tensor.
|
|
598
|
-
|
|
599
|
-
Raises:
|
|
600
|
-
TypeError: If neither `seed` nor `seed2` is an int.
|
|
601
|
-
TypeError: If `shape` is not a tuple.
|
|
602
|
-
TypeError: If `mean` is not a Tensor whose dtype is not float32.
|
|
603
|
-
|
|
604
|
-
Supported Platforms:
|
|
605
|
-
``Ascend``
|
|
606
|
-
|
|
607
|
-
Examples:
|
|
608
|
-
>>> shape = (4, 1)
|
|
609
|
-
>>> mean = Tensor(np.array([5.0, 10.0]), mstype.float32)
|
|
610
|
-
>>> output = Tensor.Poisson(shape, mean, seed=5)
|
|
611
|
-
>>> result = output.shape
|
|
612
|
-
>>> print(result)
|
|
613
|
-
(4, 2)
|
|
614
|
-
"""
|
|
615
|
-
return tensor_operator_registry.get('poisson')(seed, seed2)(shape, mean)
|
|
616
|
-
|
|
617
|
-
@staticmethod
|
|
618
|
-
def as_tensor(data, dtype=None):
|
|
619
|
-
r"""
|
|
620
|
-
convert data to tensor in mindspore.
|
|
621
|
-
|
|
622
|
-
Args:
|
|
623
|
-
data (array_like): Initial data for the tensor. Can be a list, tuple,
|
|
624
|
-
NumPy ndarray, scalar, and other types.
|
|
625
|
-
dtype (mindspore.dtype, optional): the desired data type of returned tensor.
|
|
626
|
-
Default: if None, infers data type from data.
|
|
627
|
-
|
|
628
|
-
Returns:
|
|
629
|
-
Tensor contains the data and the dtype is in mindspore.
|
|
630
|
-
|
|
631
|
-
Supported Platforms:
|
|
632
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
633
|
-
|
|
634
|
-
Examples:
|
|
635
|
-
>>> import numpy as np
|
|
636
|
-
>>> import mindspore as ms
|
|
637
|
-
>>> import mindspore.nn as nn
|
|
638
|
-
>>> from mindspore import Tensor
|
|
639
|
-
>>> input_data = np.array([1, 2, 3])
|
|
640
|
-
>>> ms_tensor = Tensor.as_tensor(input_data)
|
|
641
|
-
>>> ms_tensor
|
|
642
|
-
Tensor(shape=[3], dtype=Int64, value= [1, 2, 3])
|
|
643
|
-
"""
|
|
644
|
-
return Tensor(data, dtype=dtype)
|
|
645
|
-
|
|
646
|
-
@staticmethod
|
|
647
|
-
def _use_logical_kernel(me, other) -> bool:
|
|
648
|
-
"""
|
|
649
|
-
Decide to use logical kernel or bitwise kernel for &|^ operations.
|
|
650
|
-
If self or other is bool or bool tensor, then return true, use logical kernel,
|
|
651
|
-
else false to use bitwise kernel.
|
|
652
|
-
"""
|
|
653
|
-
def _is_bool_or_bool_tensor(data):
|
|
654
|
-
return isinstance(data, bool) or (isinstance(data, Tensor) and data.dtype == mstype.bool_)
|
|
655
|
-
if _is_bool_or_bool_tensor(me) and _is_bool_or_bool_tensor(other):
|
|
656
|
-
return True
|
|
657
|
-
return False
|
|
658
|
-
|
|
659
538
|
def ndimension(self):
|
|
660
539
|
r"""
|
|
661
540
|
Alias for :func:`mindspore.Tensor.ndim`.
|
|
662
541
|
"""
|
|
663
542
|
return len(self._shape)
|
|
664
543
|
|
|
544
|
+
@jit_forbidden_register
|
|
665
545
|
def set_const_arg(self, const_arg=True):
|
|
666
546
|
"""
|
|
667
547
|
Specify whether the tensor is a constant when it is used for the argument of a network.
|
|
@@ -727,9 +607,25 @@ class Tensor(Tensor_):
|
|
|
727
607
|
Returns:
|
|
728
608
|
Tensor, Tensor that's been assigned.
|
|
729
609
|
"""
|
|
610
|
+
if is_stub_tensor(value):
|
|
611
|
+
value = value.stub_sync()
|
|
730
612
|
self.assign_value_cpp(value)
|
|
731
613
|
return self
|
|
732
614
|
|
|
615
|
+
def bincount(self, weights=None, minlength=0):
|
|
616
|
+
r"""
|
|
617
|
+
For details, please refer to :func:`mindspore.ops.bincount`.
|
|
618
|
+
"""
|
|
619
|
+
self._init_check()
|
|
620
|
+
return tensor_operator_registry.get('bincount')(self, weights, minlength)
|
|
621
|
+
|
|
622
|
+
def chunk(self, chunks, axis=0):
|
|
623
|
+
r"""
|
|
624
|
+
For details, please refer to :func:`mindspore.ops.chunk`.
|
|
625
|
+
"""
|
|
626
|
+
self._init_check()
|
|
627
|
+
return tensor_operator_registry.get('chunk')(self, chunks, axis)
|
|
628
|
+
|
|
733
629
|
def item(self, index=None):
|
|
734
630
|
"""
|
|
735
631
|
Get the item at the specified index of the tensor.
|
|
@@ -765,7 +661,7 @@ class Tensor(Tensor_):
|
|
|
765
661
|
Insert scalar into a tensor (scalar is cast to tensor's dtype, if possible).
|
|
766
662
|
|
|
767
663
|
There must be at least 1 argument, and define the last argument as item.
|
|
768
|
-
Then, tensor.itemset(\*args) is equivalent to :math:`
|
|
664
|
+
Then, tensor.itemset(\*args) is equivalent to :math:`Tensor[args] = item`.
|
|
769
665
|
|
|
770
666
|
Args:
|
|
771
667
|
args (Union[(numbers.Number), (int/tuple(int), numbers.Number)]): The arguments that
|
|
@@ -776,7 +672,7 @@ class Tensor(Tensor_):
|
|
|
776
672
|
It is either an int or a tuple.
|
|
777
673
|
|
|
778
674
|
Returns:
|
|
779
|
-
A new tensor that doesn't affect the original tensor, with value set by :math:`
|
|
675
|
+
A new tensor that doesn't affect the original tensor, with value set by :math:`Tensor[args] = item`.
|
|
780
676
|
|
|
781
677
|
Raises:
|
|
782
678
|
ValueError: If the length of the first argument is not equal to self.ndim.
|
|
@@ -823,8 +719,7 @@ class Tensor(Tensor_):
|
|
|
823
719
|
|
|
824
720
|
def numpy(self):
|
|
825
721
|
"""
|
|
826
|
-
|
|
827
|
-
<https://www.mindspore.cn/docs/en/r2.0.0-alpha/api_python/mindspore/Tensor/mindspore.Tensor.asnumpy.html>`_.
|
|
722
|
+
Alias for :func:`mindspore.Tensor.asnumpy`.
|
|
828
723
|
"""
|
|
829
724
|
return self.asnumpy()
|
|
830
725
|
|
|
@@ -859,6 +754,22 @@ class Tensor(Tensor_):
|
|
|
859
754
|
"""
|
|
860
755
|
return self.slice_num_of_persistent_data_
|
|
861
756
|
|
|
757
|
+
def histc(self, bins=100, min=0., max=0.):
|
|
758
|
+
"""
|
|
759
|
+
For details, please refer to :func:`mindspore.ops.histc`.
|
|
760
|
+
"""
|
|
761
|
+
self._init_check()
|
|
762
|
+
validator.check_value_type('min', min, (int, float,), 'Tensor.histc')
|
|
763
|
+
validator.check_value_type('max', max, (int, float,), 'Tensor.histc')
|
|
764
|
+
return tensor_operator_registry.get('histc')(self, bins, float(min), float(max))
|
|
765
|
+
|
|
766
|
+
def geqrf(self):
|
|
767
|
+
"""
|
|
768
|
+
For details, please refer to :func:`mindspore.ops.geqrf`.
|
|
769
|
+
"""
|
|
770
|
+
self._init_check()
|
|
771
|
+
return tensor_operator_registry.get('geqrf')(self)
|
|
772
|
+
|
|
862
773
|
def slice_shape_of_persistent_data(self):
|
|
863
774
|
"""
|
|
864
775
|
Get slice shape of tensor after cut to slice size.
|
|
@@ -900,28 +811,26 @@ class Tensor(Tensor_):
|
|
|
900
811
|
self._init_check()
|
|
901
812
|
Tensor_._flush_from_cache(self)
|
|
902
813
|
|
|
903
|
-
def addcdiv(self,
|
|
814
|
+
def addcdiv(self, tensor1, tensor2, value=1):
|
|
904
815
|
r"""
|
|
905
816
|
For details, please refer to :func:`mindspore.ops.addcdiv`.
|
|
906
817
|
"""
|
|
907
818
|
self._init_check()
|
|
908
|
-
return tensor_operator_registry.get('addcdiv')()(self,
|
|
819
|
+
return tensor_operator_registry.get('addcdiv')()(self, tensor1, tensor2, value)
|
|
909
820
|
|
|
910
|
-
def addcmul(self,
|
|
821
|
+
def addcmul(self, tensor1, tensor2, value=1):
|
|
911
822
|
r"""
|
|
912
823
|
For details, please refer to :func:`mindspore.ops.addcmul`.
|
|
913
824
|
"""
|
|
914
|
-
|
|
915
825
|
self._init_check()
|
|
916
|
-
return tensor_operator_registry.get('addcmul')()(self,
|
|
826
|
+
return tensor_operator_registry.get('addcmul')()(self, tensor1, tensor2, value)
|
|
917
827
|
|
|
918
|
-
def add(self,
|
|
828
|
+
def add(self, other):
|
|
919
829
|
r"""
|
|
920
830
|
For details, please refer to :func:`mindspore.ops.add`.
|
|
921
831
|
"""
|
|
922
|
-
|
|
923
832
|
self._init_check()
|
|
924
|
-
return tensor_operator_registry.get('add')()(self,
|
|
833
|
+
return tensor_operator_registry.get('add')()(self, other)
|
|
925
834
|
|
|
926
835
|
def subtract(self, other, *, alpha=1):
|
|
927
836
|
r"""
|
|
@@ -936,29 +845,15 @@ class Tensor(Tensor_):
         For details, please refer to :func:`mindspore.ops.div`.
         """
         self._init_check()
-        return tensor_operator_registry.get('div')(self, value, None)
+        return tensor_operator_registry.get('div')(self, value, rounding_mode=None)
 
     def triu(self, diagonal=0):
         r"""
-
-
-        Args:
-            diagonal (int): The index of diagonal. Default: 0.
-
-        Returns:
-            Tensor, a tensor has the same shape and data type as input.
-
-        Raises:
-            TypeError: If `diagonal` is not an int.
-            TypeError: If `x` is not an Tensor.
-            ValueError: If length of shape of x is less than 1.
-
-        Supported Platforms:
-            ``GPU`` ``CPU``
+        For details, please refer to :func:`mindspore.ops.triu`.
         """
         self._init_check()
         validator.check_value_type('diagonal', diagonal, [int], 'triu')
-        return tensor_operator_registry.get('triu')(diagonal)
+        return tensor_operator_registry.get('triu')(self, diagonal)
 
     def addbmm(self, batch1, batch2, *, beta=1, alpha=1):
         r"""
@@ -988,34 +883,10 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('adjoint')(self)
 
-    def all(self, axis=
-        """
-
-
-        Args:
-            axis (Union[None, int, tuple(int)]): Dimensions of reduction.
-                When the axis is None or empty tuple, reduce all dimensions. When the axis is int or
-                tuple(int), if the dimension of Tensor is dim, the value range is [-dim, dim). Default: ().
-            keep_dims (bool): Whether to keep the reduced dimensions. Default: False.
-
-        Returns:
-            Tensor, if all tensor elements along the given axis evaluate to True, its value is True,
-            otherwise its value is False. If the axis is None or empty tuple, reduce all dimensions.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        See also:
-            :func:`mindspore.Tensor.any`: Check any tensor element along a given axis evaluate to True.
-
-        Examples:
-            >>> from mindspore import Tensor
-            >>> a = Tensor([True, True, False])
-            >>> output = a.all()
-            >>> print(output)
-            False
+    def all(self, axis=None, keep_dims=False):
+        r"""
+        For details, please refer to :func:`mindspore.ops.all`.
         """
-
         self._init_check()
         if axis is None:
             axis = ()
@@ -1028,45 +899,21 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('angle')(self)
 
-    def any(self, axis=
-        """
-
-
-        Args:
-            axis (Union[None, int, tuple(int)]): Dimensions of reduction.
-                When the axis is None or empty tuple, reduce all dimensions. When the axis is int or
-                tuple(int), if the dimension of Tensor is dim, the value range is [-dim, dim). Default: ().
-            keep_dims (bool): Whether to keep the reduced dimensions. Default: False.
-
-        Returns:
-            Tensor, if any tensor element along the given axis evaluates to True, its value is True,
-            otherwise its value is False. If the axis is None or empty tuple, reduce all dimensions.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        See also:
-            :func:`mindspore.Tensor.all`: Check all tensor elements along a given axis evaluate to True.
-
-        Examples:
-            >>> from mindspore import Tensor
-            >>> a = Tensor([True, True, False])
-            >>> output = a.any()
-            >>> print(output)
-            True
+    def any(self, axis=None, keep_dims=False):
+        r"""
+        For details, please refer to :func:`mindspore.ops.any`.
         """
-
         self._init_check()
         if axis is None:
             axis = ()
         return tensor_operator_registry.get('any')(keep_dims)(self, axis)
 
-    def atan2(self,
+    def atan2(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.atan2`.
         """
         self._init_check()
-        return tensor_operator_registry.get('atan2')(self,
+        return tensor_operator_registry.get('atan2')(self, other)
 
     def baddbmm(self, batch1, batch2, beta=1, alpha=1):
         r"""
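The collapsed docstrings drop the inline examples; for reference, the removed `all`/`any` examples reduce to:

    from mindspore import Tensor

    a = Tensor([True, True, False])
    print(a.all())   # False: not every element is True
    print(a.any())   # True: at least one element is True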
@@ -1105,26 +952,84 @@ class Tensor(Tensor_):
             shape = shape[0]
         return tensor_operator_registry.get('reshape')()(self, shape)
 
-    def
+    def view_as(self, other):
+        r"""
+        View self Tensor as the same shape as `other` .
+
+        Args:
+            other(Tensor): The returned Tensor has the same shape as `other`.
+
+        Returns:
+            Tensor, has the same shape as `other`.
+
+        Raises:
+            TypeError: If `other` is not a Tensor.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> a = Tensor([[1, 2, 3], [2, 3, 4]], mstype.float32)
+            >>> b = Tensor([1, 1, 1, 1, 1, 1], mstype.float32)
+            >>> output = a.view_as(b)
+            >>> print(output)
+            [1. 2. 3. 2. 3. 4.]
+        """
+        self._init_check()
+        if not isinstance(other, (Tensor, Tensor_)):
+            raise TypeError(f"For view_as, the input other must be a Tensor, but got {type(other)}")
+        return self.view(other.shape)
+
+    def t(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.t`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get("t")(self)
+
+    def bitwise_and(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_and`.
         """
         self._init_check()
-        return tensor_operator_registry.get('bitwise_and')(self,
+        return tensor_operator_registry.get('bitwise_and')(self, other)
 
-    def bitwise_or(self,
+    def bitwise_or(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_or`.
         """
         self._init_check()
-        return tensor_operator_registry.get('bitwise_or')(self,
+        return tensor_operator_registry.get('bitwise_or')(self, other)
 
-    def bitwise_xor(self,
+    def bitwise_xor(self, other):
         """
         For details, please refer to :func:`mindspore.ops.bitwise_xor`.
         """
         self._init_check()
-        return tensor_operator_registry.get('bitwise_xor')(self,
+        return tensor_operator_registry.get('bitwise_xor')(self, other)
+
+    def bitwise_left_shift(self, other):
+        """
+        For details, please refer to :func:`mindspore.ops.bitwise_left_shift`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('bitwise_left_shift')(self, other)
+
+    def bitwise_right_shift(self, other):
+        """
+        For details, please refer to :func:`mindspore.ops.bitwise_right_shift`.
+        """
+        self._init_check()
+        _cast = tensor_operator_registry.get('cast')
+        other = _cast(other, self.dtype)
+        return tensor_operator_registry.get('bitwise_right_shift')(self, other)
+
+    def scatter(self, axis, index, src):
+        """
+        For details, please refer to :func:`mindspore.ops.scatter`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('scatter')(self, axis, index, src)
 
     def scatter_mul(self, indices, updates):
         """
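A short sketch of the new shift wrappers; note above that `bitwise_right_shift` first casts `other` to `self.dtype` before dispatch. Sample values and printed results are illustrative:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.array([1, 2, 4], dtype=np.int32))
    one = ms.Tensor(1, ms.int32)
    print(x.bitwise_left_shift(one))    # [2 4 8]
    print(x.bitwise_right_shift(one))   # [0 1 2]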
@@ -1140,12 +1045,12 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('tensor_scatter_div')(self, indices, updates)
 
-    def ger(self,
+    def ger(self, vec2):
         """
         For details, please refer to :func:`mindspore.ops.ger`.
         """
         self._init_check()
-        return tensor_operator_registry.get('ger')(self,
+        return tensor_operator_registry.get('ger')(self, vec2)
 
     def gt(self, x):
         """
@@ -1200,6 +1105,27 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('exp')()(self)
 
+    def real(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.real`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('real')(self)
+
+    def rsqrt(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.rsqrt`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('rsqrt')(self)
+
+    def reciprocal(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.reciprocal`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('reciprocal')(self)
+
     def sqrt(self):
         """
         For details, please refer to :func:`mindspore.ops.sqrt`.
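The new element-wise wrappers are thin; a quick sketch of what they compute (`rsqrt` is `1/sqrt(x)`, `reciprocal` is `1/x`):

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.array([1., 4., 16.]), ms.float32)
    print(x.rsqrt())        # 1 / sqrt(x) -> [1.   0.5    0.25]
    print(x.reciprocal())   # 1 / x       -> [1.   0.25   0.0625]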
@@ -1257,55 +1183,21 @@ class Tensor(Tensor_):
 
     def cos(self):
         r"""
-
-
-        .. math::
-            out_i = cos(x_i)
-
-        .. warning::
-            Currently support Float16, Float32 data type. If use Float64, there may
-            be a problem of missing precision.
-
-        Returns:
-            Tensor, has the same shape as `x`.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> from mindspore import Tensor
-            >>> a = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
-            >>> output = a.cos()
-            >>> print(output)
-            [0.971338 0.6748758 0.95233357 0.9959527]
+        For details, please refer to :func:`mindspore.ops.cos`.
         """
         self._init_check()
         return tensor_operator_registry.get('cos')(self)
 
-    def
+    def cov(self, *, correction=1, fweights=None, aweights=None):
         r"""
-
-
-
-            out_i = \cosh^{-1}(input_i)
-
-        .. warning::
-            Given an input tensor x, the function computes inverse hyperbolic cosine of every element.
-            Input range is [1, inf].
-
-        Returns:
-            Tensor, has the same shape as `x`.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
+        For details, please refer to :func:`mindspore.ops.cov`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('cov')(self, correction=correction, fweights=fweights, aweights=aweights)
 
-
-
-
-            >>> output = a.acosh()
-            >>> print(output)
-            [0. 0.9624237 1.7627472 5.298292]
+    def acosh(self):
+        """
+        For details, please refer to :func:`mindspore.ops.acosh`.
         """
         self._init_check()
         return tensor_operator_registry.get('acosh')(self)
@@ -1358,19 +1250,20 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get("negative")(self)
 
-
+    # pylint: disable=redefined-builtin
+    def norm(self, ord=None, dim=None, keepdim=False, *, dtype=None):
         """
         For details, please refer to :func:`mindspore.ops.norm`.
         """
         self._init_check()
-        return tensor_operator_registry.get('norm')(self,
+        return tensor_operator_registry.get('norm')(self, ord, dim, keepdim, dtype=dtype)
 
-    def renorm(self, p,
+    def renorm(self, p, axis, maxnorm):
         """
         For details, please refer to :func:`mindspore.ops.renorm`.
         """
         self._init_check()
-        return tensor_operator_registry.get("renorm")(self, p,
+        return tensor_operator_registry.get("renorm")(self, p, axis, maxnorm)
 
     def approximate_equal(self, other, tolerance=1e-5):
         r"""
@@ -1386,13 +1279,6 @@ class Tensor(Tensor_):
             tensor_operator_registry.get('__sub__')(input_x, input_y)
         ), tolerance)
 
-    def matrix_determinant(self):
-        r"""
-        For details, please refer to :func:`mindspore.ops.matrix_determinant`.
-        """
-        self._init_check()
-        return tensor_operator_registry.get('matrix_determinant')(self)
-
     def log1p(self):
         r"""
         For details, please refer to :func:`mindspore.ops.log1p`.
@@ -1401,7 +1287,6 @@ class Tensor(Tensor_):
         return tensor_operator_registry.get('log1p')(self)
 
     def logit(self, eps=None):
-
         r"""
         For details, please refer to :func:`mindspore.ops.logit`.
         """
@@ -1415,26 +1300,29 @@ class Tensor(Tensor_):
         r"""
         For details, please refer to :func:`mindspore.ops.logaddexp`.
         """
+        self._init_check()
         return tensor_operator_registry.get('logaddexp')(self, other)
 
     def logaddexp2(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.logaddexp2`.
         """
+        self._init_check()
         return tensor_operator_registry.get('logaddexp2')(self, other)
 
-    def logsumexp(self,
+    def logsumexp(self, axis, keepdims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.logsumexp`.
         """
-
+        self._init_check()
+        return tensor_operator_registry.get('logsumexp')(self, axis, keepdims)
 
-    def
+    def logdet(self):
         r"""
-        For details, please refer to :func:`mindspore.ops.
+        For details, please refer to :func:`mindspore.ops.logdet`.
         """
         self._init_check()
-        return tensor_operator_registry.get('
+        return tensor_operator_registry.get('logdet')(self)
 
     def i0(self):
         r"""
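`logsumexp` computes `log(sum(exp(x)))` along `axis`; the point of a dedicated op is numerical stability, since naive `exp` overflows for large inputs. A minimal NumPy reference under that assumption (`logsumexp_ref` is a hypothetical helper, not MindSpore API):

    import numpy as np

    def logsumexp_ref(x, axis, keepdims=False):
        # Stable reference: shift by the max so exp() cannot overflow.
        m = np.max(x, axis=axis, keepdims=True)
        out = m + np.log(np.sum(np.exp(x - m), axis=axis, keepdims=True))
        return out if keepdims else np.squeeze(out, axis=axis)

    x = np.array([[1000., 1000.], [0., 0.]])
    print(logsumexp_ref(x, axis=1))   # [1000.6931  0.6931], no overflow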
@@ -1450,6 +1338,27 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('isclose')(self, x2, rtol, atol, equal_nan)
 
+    def isneginf(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.isneginf`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('isneginf')(self)
+
+    def isposinf(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.isposinf`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('isposinf')(self)
+
+    def isreal(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.isreal`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('isreal')(self)
+
     def isfinite(self):
         r"""
         For details, please refer to :func:`mindspore.ops.isfinite`.
@@ -1457,6 +1366,13 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('isfinite')()(self)
 
+    def is_complex(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.is_complex`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('is_complex')(self)
+
     def inv(self):
         r"""
         For details, please refer to :func:`mindspore.ops.inv`.
@@ -1464,6 +1380,13 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('inv')(self)
 
+    def inverse(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.inverse`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('inverse')(self)
+
     def invert(self):
         r"""
         For details, please refer to :func:`mindspore.ops.invert`.
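Worth keeping the three similarly named methods apart: `inv` is the element-wise reciprocal, the new `inverse` is the matrix inverse, and `invert` is bitwise NOT on integer tensors. A sketch contrasting the first two, assuming those ops semantics:

    import numpy as np
    import mindspore as ms

    m = ms.Tensor(np.array([[4., 0.], [0., 2.]]), ms.float32)
    print(m.inv())       # element-wise 1/x: [[0.25 inf] [inf 0.5]]
    print(m.inverse())   # matrix inverse:   [[0.25 0.] [0. 0.5]]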
@@ -1471,12 +1394,12 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('invert')(self)
 
-    def pow(self,
+    def pow(self, exponent):
         r"""
         For details, please refer to :func:`mindspore.ops.pow`.
         """
         self._init_check()
-        return tensor_operator_registry.get('pow')()(self,
+        return tensor_operator_registry.get('pow')()(self, exponent)
 
     def log(self):
         """
@@ -1499,7 +1422,7 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('log2')(self)
 
-    def mean(self, axis=
+    def mean(self, axis=None, keep_dims=False):
         """
         For details, please refer to :func:`mindspore.ops.mean`.
         """
@@ -1508,12 +1431,14 @@ class Tensor(Tensor_):
             axis = ()
         return tensor_operator_registry.get('mean')(keep_dims)(self, axis)
 
-    def amin(self, axis=
+    def amin(self, axis=None, keepdims=False, *, initial=None, where=None):
         """
         For details, please refer to :func:`mindspore.ops.amin`.
         """
         self._init_check()
-
+        if axis is None:
+            axis = ()
+        return tensor_operator_registry.get('amin')(self, axis, keepdims, initial=initial, where=where)
 
     def reverse(self, axis):
         """
@@ -1522,13 +1447,15 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('reverse')(axis)(self)
 
-    def amax(self, axis=
+    def amax(self, axis=None, keepdims=False, *, initial=None, where=None):
         """
         For details, please refer to :func:`mindspore.ops.amax`.
         """
         self._init_check()
-
-
+        if axis is None:
+            axis = ()
+        return tensor_operator_registry.get('amax')(self, axis, keepdims, initial=initial, where=where)
+
     def reverse_sequence(self, seq_lengths, seq_dim=0, batch_dim=0):
         """
         For details, please refer to :func:`mindspore.ops.reverse_sequence`.
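With `axis=None` now normalized to `()` inside the wrapper, `amin`/`amax` reduce over all axes by default. A small usage sketch:

    import numpy as np
    import mindspore as ms

    a = ms.Tensor(np.arange(4).reshape(2, 2).astype(np.float32))   # [[0. 1.] [2. 3.]]
    print(a.amin(axis=0))                  # column-wise minima: [0. 1.]
    print(a.amax(axis=1, keepdims=True))   # row-wise maxima, shape (2, 1)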
@@ -1536,7 +1463,7 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get("reverse_sequence")(seq_dim, batch_dim)(self, seq_lengths)
 
-    def prod(self, axis=
+    def prod(self, axis=None, keep_dims=False):
         """
         For details, please refer to :func:`mindspore.ops.prod`.
         """
@@ -1663,39 +1590,7 @@ class Tensor(Tensor_):
 
     def rot90(self, k, dims):
         r"""
-
-        Rotation direction is from the first towards the second axis if k > 0,
-        and from the second towards the first for k < 0.
-
-        Args:
-            k (int): Number of times to rotate.
-            dims (Union[list(int), tuple(int)]): Axis to rotate.
-
-        Returns:
-            Tensor.
-
-        Raises:
-            TypeError: If `x` is not a Tensor.
-            TypeError: If `k` is not an integer.
-            TypeError: If `dims` is not a list or a tuple of integers.
-            ValueError: If the length of `dims` is not `2`.
-            ValueError: If any dims is out of range of [-self.ndim, self.ndim).
-            RuntimeError: If rotation dims are not different.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU``
-
-        Examples:
-            >>> import numpy as np
-            >>> import mindspore as ms
-            >>> from mindspore import Tensor
-            >>> x = Tensor(np.array([[0, 1], [2, 3]])).astype(np.float32)
-            >>> k = 1
-            >>> dims = [0, 1]
-            >>> output = x.rot90(k, dims)
-            >>> print(output)
-            [[1. 3.]
-             [0. 2.]]
+        For details, please refer to :func:`mindspore.ops.rot90`.
         """
         self._init_check()
         return tensor_operator_registry.get('rot90')(self, k, dims)
@@ -1707,6 +1602,13 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('deg2rad')(self)
 
+    def dot(self, other):
+        r"""
+        For details, please refer to :func:`mindspore.ops.dot`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('dot')(self, other)
+
     def rad2deg(self):
         r"""
         For details, please refer to :func:`mindspore.ops.rad2deg`.
@@ -1735,16 +1637,13 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('numel')(self)
 
-    def permute(self, *
+    def permute(self, *axis):
         """
         For details, please refer to :func:`mindspore.ops.permute`.
         """
         self._init_check()
-
-
-        if len(dims) == 1:
-            return tensor_operator_registry.get("permute")(self, *dims)
-        return tensor_operator_registry.get("permute")(self, dims)
+        perm = validator.check_transpose_axis(axis, self.ndim)
+        return tensor_operator_registry.get('permute')(self, perm)
 
     def positive(self):
         """
@@ -1760,20 +1659,26 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('remainder')(self, divisor)
 
-    def flatten(self, order='C'):
+    def flatten(self, order='C', *, start_dim=0, end_dim=-1):
         r"""
         For details, please refer to :func:`mindspore.ops.flatten`.
         """
         self._init_check()
-
-        trans_op = tensor_operator_registry.get('transpose')()
+        return tensor_operator_registry.get('flatten')(self, order, start_dim=start_dim, end_dim=end_dim)
 
-
-
-
+    def float_power(self, other):
+        r"""
+        For details, please refer to :func:`mindspore.ops.float_power`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('float_power')(self, other)
 
-
-
+    def fmod(self, other):
+        r"""
+        For details, please refer to :func:`mindspore.ops.fmod`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('fmod')(self, other)
 
     def narrow(self, axis, start, length):
         """
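The rewritten `flatten` delegates to `ops.flatten` and gains keyword-only `start_dim`/`end_dim`. A sketch of the shape behavior, assuming torch-style dim-range flattening:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.ones((2, 3, 4), dtype=np.float32))
    print(x.flatten().shape)              # (24,)
    print(x.flatten(start_dim=1).shape)   # (2, 12): only dims 1..-1 are merged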
@@ -1782,49 +1687,19 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('narrow')(self, axis, start, length)
 
-    def swapaxes(self,
+    def swapaxes(self, axis0, axis1):
         """
-
-
-        Args:
-            axis1 (int): First axis.
-            axis2 (int): Second axis.
-
-        Returns:
-            Transposed tensor, has the same data type as the input.
-
-        Raises:
-            TypeError: If `axis1` or `axis2` is not integer.
-            ValueError: If `axis1` or `axis2` is not in the range of :math:`[-ndim, ndim-1]`.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
-            >>> output = x.swapaxes(0, 2)
-            >>> print(output.shape)
-            (4,3,2)
+        For details, please refer to :func:`mindspore.ops.swapaxes`.
         """
         self._init_check()
-
-
-        if axis1 == axis2:
-            return self
-        if axis1 > axis2:
-            axis1, axis2 = axis2, axis1
-
-        perm = tuple(range(0, self.ndim))
-        if axis2 + 1 < self.ndim:
-            new_perm = perm[0:axis1] + perm[axis2:axis2 + 1] + \
-                perm[axis1 + 1:axis2] + perm[axis1:axis1 + 1] + perm[axis2 + 1:]
-        else:
-            new_perm = perm[0:axis1] + perm[axis2:axis2 + 1] + \
-                perm[axis1 + 1:axis2] + perm[axis1:axis1 + 1]
+        return tensor_operator_registry.get('swapaxes')(self, axis0, axis1)
 
-
+    def swapdims(self, dim0, dim1):
+        """
+        For details, please refer to :func:`mindspore.ops.swapdims`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('swapdims')(self, dim0, dim1)
 
     def squeeze(self, axis=None):
         """
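The hand-rolled permutation logic is gone; `swapaxes` now dispatches to `ops.swapaxes`, and `swapdims` is its dim-named twin. The removed docstring example still holds:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.ones((2, 3, 4), dtype=np.float32))
    print(x.swapaxes(0, 2).shape)   # (4, 3, 2)
    print(x.swapdims(0, 2).shape)   # (4, 3, 2)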
@@ -1836,13 +1711,27 @@ class Tensor(Tensor_):
         new_shape = validator.prepare_shape_for_squeeze(self.shape, axis)
         return tensor_operator_registry.get('reshape')()(self, new_shape)
 
+    def slogdet(self):
+        """
+        For details, please refer to :func:`mindspore.ops.slogdet`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('slogdet')(self)
+
+    def tril(self, diagonal=0):
+        """
+        For details, please refer to :func:`mindspore.ops.tril`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('tril')(self, diagonal)
+
     def unsqueeze(self, dim):
         """
         For details, please refer to :func:`mindspore.ops.unsqueeze`.
         """
         self._init_check()
         validator.check_is_int(dim, 'dim')
-        validator.check_int_range(dim, -self.ndim - 1, self.ndim + 1,
+        validator.check_int_range(dim, -self.ndim - 1, self.ndim + 1, validator.INC_LEFT, 'dim')
         return tensor_operator_registry.get('unsqueeze')(self, dim)
 
     def expand_dims(self, axis):
@@ -1851,7 +1740,7 @@ class Tensor(Tensor_):
         """
         self._init_check()
         validator.check_is_int(axis, 'axis')
-        validator.check_int_range(axis, -self.ndim - 1, self.ndim + 1,
+        validator.check_int_range(axis, -self.ndim - 1, self.ndim + 1, validator.INC_LEFT, 'axis')
         return tensor_operator_registry.get('expand_dims')(self, axis)
 
     def astype(self, dtype, copy=True):
@@ -1860,7 +1749,7 @@ class Tensor(Tensor_):
 
         Args:
             dtype (Union[:class:`mindspore.dtype`, numpy.dtype, str]): Designated tensor dtype, can be in
-                format of
+                format of `mindspore.dtype.float32` or `numpy.float32` or `float32`.
             copy (bool, optional): By default, astype always returns a newly allocated
                 tensor. If this is set to false, the input tensor is returned instead
                 of a copy. Default: True.
@@ -1962,12 +1851,12 @@ class Tensor(Tensor_):
 
         Examples:
             >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
-            >>>
-            >>> print(
-
-            >>>
-            >>> print(
-            [
+            >>> output, index = x.argmax_with_value()
+            >>> print(output, index)
+            0.7 3
+            >>> output, index = x.argmax_with_value(keep_dims=True)
+            >>> print(output, index)
+            [0.7] [3]
         """
         if self.shape == ():
             return (Tensor(0), self)
@@ -2009,12 +1898,12 @@ class Tensor(Tensor_):
 
         Examples:
             >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
-            >>>
-            >>> print(
-            0 0
-            >>>
-            >>> print(
-            [0] [0
+            >>> output, index = x.argmin_with_value()
+            >>> print(output, index)
+            0.0 0
+            >>> output, index = x.argmin_with_value(keep_dims=True)
+            >>> print(output, index)
+            [0.0] [0]
         """
         if self.shape == ():
             return (Tensor(0), self)
@@ -2050,18 +1939,25 @@ class Tensor(Tensor_):
         """
         return tensor_operator_registry.get('cummax')(self, axis)
 
-    def index_fill(self,
+    def index_fill(self, axis, index, value):
         """
         For details, please refer to :func:`mindspore.ops.index_fill`.
         """
-        return tensor_operator_registry.get('index_fill')(self,
+        return tensor_operator_registry.get('index_fill')(self, axis, index, value)
+
+    def index_select(self, axis, index):
+        """
+        For details, please refer to :func:`mindspore.ops.index_select`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('index_select')(self, axis, index)
 
     def inplace_update(self, v, indices):
         """
         For details, please refer to :func:`mindspore.ops.inplace_update`.
         """
         self._init_check()
-        return tensor_operator_registry.get('inplace_update')(
+        return tensor_operator_registry.get('inplace_update')()(self, indices, v)
 
     def copy(self):
         """
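A minimal sketch of the new `index_select` wrapper, assuming `ops.index_select(x, axis, index)` picks whole slices along `axis`:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.arange(9).reshape(3, 3).astype(np.float32))
    idx = ms.Tensor(np.array([0, 2]), ms.int32)
    print(x.index_select(0, idx))   # rows 0 and 2: [[0. 1. 2.] [6. 7. 8.]]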
@@ -2099,7 +1995,7 @@ class Tensor(Tensor_):
             x = x.astype(origin_dtype)
         return x
 
-    def max(self, axis=None, keepdims=False, initial=None, where=True):
+    def max(self, axis=None, keepdims=False, *, initial=None, where=True, return_indices=False):
         """
         Return the maximum of a tensor or maximum along an axis.
 
@@ -2112,13 +2008,17 @@ class Tensor(Tensor_):
                 If this is set to True, the axes which are reduced are left in the
                 result as dimensions with size one. With this option, the result will
                 broadcast correctly against the input array. Default: False.
+
+        Keyword Args:
             initial (scalar, optional):
                 The minimum value of an output element. Must be present to allow
                 computation on empty slice. Default: None.
-            where (bool
+            where (Tensor[bool], optional):
                 A boolean tensor which is broadcasted to match the dimensions of array,
                 and selects elements to include in the reduction. If non-default value
                 is passed, initial must also be provided. Default: True.
+            return_indices (bool, optional): Whether to return the index of the maximum value. Default: False.
+                If `axis` is a list or tuple of ints, it must be False.
 
         Returns:
             Tensor or scalar, maximum of input tensor. If `axis` is None, the result is a scalar
@@ -2144,22 +2044,99 @@ class Tensor(Tensor_):
             >>> output = a.max()
             >>> print(output)
             3.0
+            >>> value, indices = a.max(axis=0, return_indices=True)
+            >>> print(value)
+            [2. 3.]
+            >>> print(indices)
+            [1 1]
         """
-
-
-
-
-
+        self._init_check()
+        if isinstance(axis, (list, tuple)):
+            reduce_ = tensor_operator_registry.get("reduce")
+            reduce_max = tensor_operator_registry.get("reduce_max")
+            maximum = tensor_operator_registry.get("maximum")
+            return reduce_(self, reduce_max(keepdims), cmp_fn=maximum, axis=axis, keepdims=keepdims,
+                           initial=initial, where=where)
+        values, indices = tensor_operator_registry.get("max")(self, axis, keepdims, initial=initial, where=where)
+        if not return_indices:
+            return values
+        return values, indices
 
-    def min(self, axis=None, keepdims=False, initial=None, where=True):
+    def min(self, axis=None, keepdims=False, *, initial=None, where=True, return_indices=False):
         """
-
-
-
-
-
-
+        Return the minimum of a tensor or minimum along an axis.
+
+        Args:
+            axis (Union[None, int, list, tuple of ints], optional): An axis or
+                axes along which to operate. By default, flattened input is used. If
+                `axis` is a tuple of ints, the minimum is selected over multiple axes,
+                instead of a single axis or all the axes as before. Default: None.
+            keepdims (bool, optional):
+                If True, the axes which are reduced are left in the
+                result as dimensions with size one. With this option, the result will
+                broadcast correctly against the input array. Default: False.
+
+        Keyword Args:
+            initial (scalar, optional):
+                The minimum value of an output element. Must be present to allow
+                computation on empty slice. Default: None.
+            where (Tensor[bool], optional):
+                A boolean tensor which is broadcasted to match the dimensions of array,
+                and selects elements to include in the reduction. If non-default value
+                is passed, initial must also be provided. Default: True.
+            return_indices (bool, optional): Whether to return the index of the minimum value. Default: False.
+                If `axis` is a list or tuple of ints, it must be False.
+
+        Returns:
+            Tensor or scalar, minimum of input tensor. If `axis` is None, the result is a scalar
+            value. If `axis` is given, the result is a tensor of dimension ``self.ndim - 1``.
+
+        Raises:
+            TypeError: If arguments have types not specified above.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        See also:
+            :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
+
+            :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
+
+            :func:`mindspore.Tensor.max`: Return the maximum of a tensor or maximum along an axis.
+
+        Examples:
+            >>> import numpy as np
+            >>> from mindspore import Tensor
+            >>> a = Tensor(np.arange(4).reshape((2, 2)).astype('float32'))
+            >>> output = a.min()
+            >>> print(output)
+            0.0
+            >>> output = a.min(axis=0)
+            >>> print(output)
+            [0. 1.]
+            >>> output = a.min(axis=0, initial=9, where=Tensor([False]))
+            >>> print(output)
+            [9. 9.]
+            >>> output = a.min(axis=0, initial=9, where=Tensor([False, True]))
+            >>> print(output)
+            [9. 1.]
+            >>> value, indices = a.min(axis=0, return_indices=True)
+            >>> print(value)
+            [0. 1.]
+            >>> print(indices)
+            [0 0]
+        """
+        self._init_check()
+        if isinstance(axis, (list, tuple)):
+            reduce_ = tensor_operator_registry.get("reduce")
+            reduce_min = tensor_operator_registry.get("reduce_min")
+            minimum = tensor_operator_registry.get("minimum")
+            return reduce_(self, reduce_min(keepdims), cmp_fn=minimum(), axis=axis, keepdims=keepdims,
+                           initial=initial, where=where)
+        values, indices = tensor_operator_registry.get("min")(self, axis, keepdims, initial=initial, where=where)
+        if not return_indices:
+            return values
+        return values, indices
 
     def scatter_add(self, indices, updates):
         """
@@ -2232,7 +2209,7 @@ class Tensor(Tensor_):
 
     def fill(self, value):
         """
-
+        `Tensor.fill` is deprecated, please use `ops.fill` instead.
         """
         if value is None:
             if self.dtype not in (mstype.float16, mstype.float32, mstype.float64):
@@ -2247,7 +2224,7 @@ class Tensor(Tensor_):
 
     def fills(self, value):
         """
-
+        `Tensor.fills` is deprecated, please use `ops.fill` instead.
        """
        self._init_check()
        return tensor_operator_registry.get('fills')(self, value)
@@ -2314,68 +2291,18 @@ class Tensor(Tensor_):
         """
         return tensor_operator_registry.get('minimum')()(self, other)
 
-    def
+    def clamp(self, min=None, max=None):
+        r"""
+        For details, please refer to :func:`mindspore.ops.clamp`.
         """
-
-
-        Given an interval, values outside the interval are clipped to the interval edges.
-        For example, if an interval of :math:`[0, 1]` is specified, values smaller than 0 become 0,
-        and values larger than 1 become 1.
-
-        Note:
-            Currently, clip with `xmin=nan` or `xmax=nan` is not supported.
-
-        Args:
-            xmin (Tensor, scalar, None): Minimum value. If None, clipping is not performed
-                on the lower interval edge. Not more than one of `xmin` and `xmax` may be None.
-            xmax (Tensor, scalar, None): Maximum value. If None, clipping is not performed
-                on the upper interval edge. Not more than one of `xmin` and `xmax` may be None.
-                If `xmin` or `xmax` are tensors, then `xmin`, `xmax` and the given tensor
-                will be broadcasted to match their shapes.
-            dtype (:class:`mindspore.dtype`, optional): Overrides the dtype of the
-                output Tensor. Default is None.
-
-        Returns:
-            Tensor, a tensor with the elements of the input tensor, but where values
-            < `xmin` are replaced with `xmin`, and those > `xmax` with `xmax`.
-
-        Raises:
-            TypeError: If inputs have types not specified above.
-            ValueError: If the shapes of `x1` and `x2` cannot broadcast, or both `xmin` and `xmax` are `None`.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
+        self._init_check()
+        return tensor_operator_registry.get('clamp')(self, min, max)
 
-
-
-
-            >>> y = x.clip(0, 2)
-            >>> print(y)
-            [1. 2. 2. 0. 0. 2. 2. 0.]
-            >>> t = Tensor([1, 1, 1, 1, 1, 1, 1, 1])
-            >>> y = x.clip(t, 2)
-            >>> print(y)
-            [1. 2. 2. 1. 1. 2. 2. 1.]
+    def clip(self, min=None, max=None):
+        r"""
+        Alias for :func:`mindspore.Tensor.clamp`.
         """
-
-        raise ValueError("For 'Tensor.clip', the argument 'xmin' and 'xman' cannot all be None.")
-        x = self
-        # F.maximum/minimum does not support when both operands are scalar
-        if xmin is not None:
-            xmin = Tensor(xmin).astype(x.dtype)
-            if x.ndim == 0 and xmin.ndim == 0:
-                x = tensor_operator_registry.get("maximum")(x.reshape((1,)), xmin).squeeze()
-            else:
-                x = tensor_operator_registry.get("maximum")(x, xmin)
-        if xmax is not None:
-            xmax = Tensor(xmax).astype(x.dtype)
-            if x.ndim == 0 and xmax.ndim == 0:
-                x = tensor_operator_registry.get("minimum")()(x.reshape((1,)), xmax).squeeze()
-            else:
-                x = tensor_operator_registry.get("minimum")()(x, xmax)
-        if dtype is not None and dtype != x.dtype:
-            return x.astype(dtype)
-        return x
+        return self.clamp(min, max)
 
     def _init_check(self):
         if self.has_init:
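`clamp` replaces the old in-class `clip` implementation, and `clip` becomes a pure alias. A sketch reproducing the removed example's outputs; the sample tensor is hypothetical, since the old docstring's definition of `x` was elided:

    import mindspore as ms

    # Hypothetical data chosen to match the removed example's printed results.
    x = ms.Tensor([1., 2., 3., -4., 0., 3., 2., 0.], ms.float32)
    print(x.clamp(0, 2))   # [1. 2. 2. 0. 0. 2. 2. 0.]
    print(x.clip(0, 2))    # identical: clip now forwards to clamp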
@@ -2420,7 +2347,7 @@ class Tensor(Tensor_):
         # At embedding cache scenes, we need limit the size of memory for tensor.
         # And save out of range data to persistent storage to support TB-Level size of tensor.
         data_shape = list(shape)
-        slice_num_of_persistent_data =
+        slice_num_of_persistent_data = get_slice_num(self.dtype, shape)
         if slice_num_of_persistent_data > 1:
             slice_first_dim = math.ceil(shape[0] / slice_num_of_persistent_data)
             data_shape[0] = slice_first_dim
@@ -2443,9 +2370,9 @@ class Tensor(Tensor_):
         self._np_seed = np.random.get_state()[1][0]
         self.need_set_seed = (slice_index is not None)
         self._global_seed = global_seed
-        self.
+        self._seed_offset = 1
         if self.need_set_seed:
-            self.
+            self._seed_offset = get_group_size() * 2
 
     def __enter__(self):
         if self.need_set_seed:
@@ -2456,7 +2383,7 @@ class Tensor(Tensor_):
             else:
                 np.random.seed(slice_index + Tensor.delta_seed)
                 self.init.seed = slice_index + Tensor.delta_seed
-                Tensor.delta_seed += self.
+                Tensor.delta_seed += self._seed_offset
 
     def __exit__(self, ptype, value, trace):
         if self.need_set_seed:
@@ -2465,10 +2392,6 @@ class Tensor(Tensor_):
 
         with seed_context(self.init):
             self.init(data)
-        if opt_shard_group:
-            rank = get_rank(opt_shard_group)
-            size = get_group_size(opt_shard_group)
-            data = np.split(data, size)[rank]
         self.init = None
 
         # At embedding cache scenes. When size of tensor is out of range, we store data to persistent storage
@@ -2478,44 +2401,6 @@ class Tensor(Tensor_):
             self.assign_value(Tensor_.from_numpy(data))
         return self
 
-    def to_tensor(self, slice_index=None, shape=None, opt_shard_group=None):
-        """
-        Return init_data() and get the tensor format data of this Tensor.
-
-        Note:
-            The usage of `to_tensor` is deprecated. Please use `init_data`.
-
-        Args:
-            slice_index (int): Slice index of a parameter's slices.
-                It is used when initialize a slice of a parameter, it guarantees that devices
-                using the same slice can generate the same tensor. Default: None.
-            shape (list[int]): Shape of the slice, it is used when initialize a slice of the parameter. Default: None.
-            opt_shard_group(str): Optimizer shard group which is used in auto or semi auto parallel mode
-                to get one shard of a parameter's slice. Default: None.
-
-        Returns:
-            Initialized Tensor.
-
-        Raises:
-            TypeError: `indices` is neither int32 nor int64.
-            ValueError: The length of the shape of the tensor is less than the last dimension of `indices`.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import mindspore as ms
-            >>> from mindspore.common.initializer import initializer, Constant
-            >>> x = initializer(Constant(1), [2, 2], ms.float32)
-            >>> out = x.to_tensor()
-            >>> print(out)
-            [[1. 1.]
-             [1. 1.]]
-        """
-        logger.warning("WARN_DEPRECATED: The usage of to_tensor is deprecated."
-                       " Please use init_data")
-        return self.init_data(slice_index, shape, opt_shard_group)
-
     def resize(self, *new_shape):
         """
         Changes shape and size of tensor in-place.
@@ -2575,10 +2460,59 @@ class Tensor(Tensor_):
 
     def det(self):
         r"""
-
+        For details, please refer to :func:`mindspore.ops.det`.
         """
         self._init_check()
-        return tensor_operator_registry.get('
+        return tensor_operator_registry.get('det')(self)
+
+    def diff(self, n=1, axis=-1, prepend=None, append=None):
+        r"""
+        For details, please refer to :func:`mindspore.ops.diff`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('diff')(self, n, axis, prepend, append)
+
+    def frac(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.frac`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('frac')(self)
+
+    def argwhere(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.argwhere`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('argwhere')(self)
+
+    def moveaxis(self, source, destination):
+        r"""
+        For details, please refer to :func:`mindspore.ops.moveaxis`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('moveaxis')(self, source, destination)
+
+    def movedim(self, source, destination):
+        r"""
+        For details, please refer to :func:`mindspore.ops.movedim`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('movedim')(self, source, destination)
+
+    def digamma(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.digamma`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('digamma')(self)
+
+    def lgamma(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.lgamma`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('lgamma')(self)
 
     def diagonal(self, offset=0, axis1=0, axis2=1):
         """
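Quick sketches for two of the new wrappers: `diff` takes the n-th discrete difference along `axis` (out[i] = x[i+1] - x[i] for n=1), and `moveaxis` relocates an axis:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.array([1., 3., 6., 10.]), ms.float32)
    print(x.diff())   # first discrete difference: [2. 3. 4.]

    y = ms.Tensor(np.zeros((3, 4, 5), np.float32))
    print(y.moveaxis(0, -1).shape)   # (4, 5, 3)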
@@ -2607,11 +2541,11 @@ class Tensor(Tensor_):
         elif offset != 0:
             e = e.astype(mstype.float32)
             if offset > 0:
-                e_left = tensor_operator_registry.get('fill')(
+                e_left = tensor_operator_registry.get('fill')(mstype.float32, (n, offset), 0)
                 e_right = e[..., 0:m - offset:1]
                 e = tensor_operator_registry.get('concatenate')(1)((e_left, e_right)).astype(dtype)
             elif offset < 0:
-                e_upper = tensor_operator_registry.get('fill')(
+                e_upper = tensor_operator_registry.get('fill')(mstype.float32, (-offset, m), 0)
                 e_lower = e[0:n + offset:1, ...]
                 e = tensor_operator_registry.get('concatenate')(0)((e_upper, e_lower)).astype(dtype)
         e = tensor_operator_registry.get('broadcast_to')(shape)(e)
@@ -2668,6 +2602,9 @@ class Tensor(Tensor_):
         >>> print(x.trace())
         3.0
         """
+        if offset == 0 and axis1 == 0 and axis2 == 1 and dtype is None:
+            self._init_check()
+            return tensor_operator_registry.get('trace')(self)
         d = self.diagonal(offset, axis1=axis1, axis2=axis2)
         shape = d.shape
         if dtype is None:
@@ -2873,7 +2810,7 @@ class Tensor(Tensor_):
         i = tensor_operator_registry.get('fill')(mstype.int32, shape, 0)
         j = tensor_operator_registry.get('fill')(mstype.int32, shape, a.size)
 
-        sort_range = tuple(range(
+        sort_range = tuple(range(math.ceil(math.log2(tensor_operator_registry.get('shape_mul')(a.shape) + 1))))
         for _ in sort_range:
             mid = (i - -j) // 2
             mask = less_op(v, tensor_operator_registry.get('gather_nd')(a, mid.reshape(mid.shape + (1,))))
@@ -2886,16 +2823,17 @@ class Tensor(Tensor_):
         For details, please refer to :func:`mindspore.ops.gather_nd`.
         """
         self._init_check()
-        validator.check_value_type('indices', indices, (Tensor_,), 'Tensor.gather_nd')
+        validator.check_value_type('indices', indices, (Tensor, Tensor_,), 'Tensor.gather_nd')
         return tensor_operator_registry.get('gather_nd')(self, indices)
 
-    def gather(self, input_indices, axis):
+    def gather(self, input_indices, axis, batch_dims=0):
         r"""
         For details, please refer to :func:`mindspore.ops.gather`.
         """
         self._init_check()
         validator.check_is_int(axis, 'axis')
-
+        validator.check_is_int(batch_dims, "batch_dims")
+        return tensor_operator_registry.get('gather')(self, input_indices, axis, batch_dims)
 
     def var(self, axis=None, ddof=0, keepdims=False):
         """
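`gather` now accepts `batch_dims`. With the default `batch_dims=0` it keeps its old slice-gathering behavior, so existing call sites are unaffected:

    import numpy as np
    import mindspore as ms

    params = ms.Tensor(np.array([1., 2., 3., 4.]), ms.float32)
    idx = ms.Tensor(np.array([0, 2]), ms.int32)
    print(params.gather(idx, 0))   # [1. 3.]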
@@ -3038,6 +2976,55 @@ class Tensor(Tensor_):
             res += initial
         return res.astype(dtype)
 
+    def sum_to_size(self, *size):
+        r"""
+        Sum self Tensor to the `size`. `size` must be expandable to the Tensor size.
+
+        Args:
+            size (Union[tuple(int), int]): The expected shape of output Tensor.
+
+        Returns:
+            Tensor, the sum result of self Tensor according to the `size`.
+
+        Raises:
+            ValueError: If `size` is not expandable to the size of self Tensor.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> x = Tensor(np.random.randn(3, 3, 3, 3, 3, 3), mindspore.float32)
+            >>> output = x.sum_to_size((1, 3, 1, 3))
+            >>> print(output.shape)
+            (1, 3, 1, 3)
+        """
+        self._init_check()
+        x = self
+        if len(size) == 1 and isinstance(size[0], tuple):
+            size = size[0]
+        shape_x = x.shape
+        if len(size) > x.ndim:
+            raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_x}.")
+        if len(size) < x.ndim:
+            pre_axis = tuple([axis for axis in range(x.ndim - len(size))])
+            x = x.sum(pre_axis)
+        axes = []
+        for i, element in enumerate(size):
+            if element != x.shape[i] and element == 1:
+                axes.append(i)
+            elif element != x.shape[i]:
+                raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_x}.")
+        if axes:
+            return x.sum(tuple(axes), keepdims=True)
+        return x
+
+    def nansum(self, axis=None, keepdims=False, dtype=None):
+        """
+        For details, please refer to :func:`mindspore.ops.nansum`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('nansum')(self, axis=axis, keepdims=keepdims, dtype=dtype)
+
     def repeat(self, repeats, axis=None):
         """
         Repeat elements of a tensor.
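`nansum` sums while treating NaN as zero; a one-line sketch:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.array([1., np.nan, 2.]), ms.float32)
    print(x.nansum())   # 3.0: NaN entries are treated as zero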
@@ -3105,7 +3092,7 @@ class Tensor(Tensor_):
             raise ValueError(f"For 'Tensor.repeat', the length of 'repeats' must be the same as the shape of the "
                              f"original tensor in the 'axis' dimension, but got the length of 'repeats' "
                              f"{len(repeats)}, the shape of the original tensor in the 'axis' dimension {size}.")
-        subs = tensor_operator_registry.get('
+        subs = tensor_operator_registry.get('tensor_split')(input_x, size, axis)
         repeated_subs = []
         for sub, rep in zip(subs, repeats):
             if rep != 0:
@@ -3117,10 +3104,9 @@ class Tensor(Tensor_):
         For details, please refer to :func:`mindspore.ops.repeat_interleave`.
         """
         self._init_check()
-        dim = dim if dim is not None else 0
         return tensor_operator_registry.get('repeat_interleave')(self, repeats, dim)
 
-    def bernoulli(self, p=0.5, seed
+    def bernoulli(self, p=0.5, seed=None):
         r"""
         For details, please refer to :func:`mindspore.ops.bernoulli`.
         """
@@ -3128,278 +3114,44 @@ class Tensor(Tensor_):
|
|
|
3128
3114
|
validator.check_is_int(seed, 'seed')
|
|
3129
3115
|
return tensor_operator_registry.get('bernoulli')(self, p, seed)
|
|
3130
3116
|
|
|
3131
|
-
def
|
|
3117
|
+
def random_categorical(self, num_sample, seed=0, dtype=mstype.int64):
|
|
3132
3118
|
r"""
|
|
3133
|
-
|
|
3134
|
-
|
|
3135
|
-
|
|
3136
|
-
|
|
3137
|
-
|
|
3138
|
-
|
|
3139
|
-
containing the cumsum of probabilities, must be 1 or 2 dimensions.
|
|
3140
|
-
|
|
3141
|
-
Args:
|
|
3142
|
-
seed (int): Random seed, must be non-negative. Default: 0.
|
|
3143
|
-
seed2 (int): Random seed2, must be non-negative. Default: 0.
|
|
3144
|
-
|
|
3145
|
-
Inputs:
|
|
3146
|
-
- **num_samples** (int32) - number of samples to draw.
|
|
3147
|
-
|
|
3148
|
-
Outputs:
|
|
3149
|
-
Tensor with the same rows as `self`, each row has num_samples sampled indices.
|
|
3150
|
-
|
|
3151
|
-
Raises:
|
|
3152
|
-
TypeError: If neither `seed` nor `seed2` is an int.
|
|
3153
|
-
TypeError: If `self` is not a Tensor whose dtype is float32.
|
|
3154
|
-
TypeError: If dtype of `num_samples` is not int32.
|
|
3119
|
+
For details, please refer to :func:`mindspore.ops.random_categorical`.
|
|
3120
|
+
"""
|
|
3121
|
+
self._init_check()
|
|
3122
|
+
validator.check_is_int(num_sample, 'num_sample')
|
|
3123
|
+
validator.check_is_int(seed, 'seed')
|
|
3124
|
+
return tensor_operator_registry.get('random_categorical')(self, num_sample, seed, dtype)
|
|
3155
3125
|
|
|
3156
|
-
|
|
3157
|
-
|
|
3126
|
+
def masked_select(self, mask):
|
|
3127
|
+
"""
|
|
3128
|
+
For details, please refer to :func:`mindspore.ops.masked_select`.
|
|
3129
|
+
"""
|
|
3130
|
+
self._init_check()
|
|
3131
|
+
return tensor_operator_registry.get('masked_select')(self, mask)
|
|
3158
3132
|
|
|
3159
|
-
|
|
3160
|
-
|
|
3161
|
-
|
|
3162
|
-
>>> x = Tensor([0., 9., 4., 0.], mindspore.float32)
|
|
3163
|
-
>>> output = x.multinomial(num_samples=2,seed=10)
|
|
3164
|
-
>>> print(output)
|
|
3165
|
-
[2 1]
|
|
3133
|
+
def gather_elements(self, dim, index):
|
|
3134
|
+
"""
|
|
3135
|
+
For details, please refer to :func:`mindspore.ops.gather_elements`.
|
|
3166
3136
|
"""
|
|
3167
3137
|
self._init_check()
|
|
3168
|
-
validator.
|
|
3169
|
-
|
|
3170
|
-
return tensor_operator_registry.get('multinomial')(seed, seed2)(self, num_samples)
|
|
3138
|
+
validator.check_value_type('index', index, (Tensor, Tensor_,), 'Tensor.gather_elements')
|
|
3139
|
+
return tensor_operator_registry.get('gather_elements')(self, dim, index)
|
|
3171
3140
|
|
|
3172
|
-
def
|
|
3173
|
-
|
|
3174
|
-
|
|
3175
|
-
|
|
3141
|
+
def nonzero(self):
|
|
3142
|
+
"""
|
|
3143
|
+
For details, please refer to :func:`mindspore.ops.nonzero`.
|
|
3144
|
+
"""
|
|
3145
|
+
self._init_check()
|
|
3146
|
+
return tensor_operator_registry.get('nonzero')(self)
|
|
3176
3147
|
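`gather_elements` now accepts both `Tensor` and `Tensor_` for `index`. Its semantics follow `mindspore.ops.gather_elements`: with `dim=1`, `output[i][j] = x[i][index[i][j]]`. A sketch:

```python
import numpy as np
import mindspore
from mindspore import Tensor

x = Tensor(np.array([[1., 2.], [3., 4.]]), mindspore.float32)
index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
# Row 0 picks x[0][0] twice; row 1 picks x[1][1] then x[1][0].
print(x.gather_elements(1, index))  # [[1. 1.] [4. 3.]]
```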
-
-
-
-
-
-
-
-        Raises:
-            TypeError: If dtype of the input_tensor is not int or float.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import mindspore
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> input_x = Tensor(np.array([[1, 2, 3, 9], [1, 2, 3, 9]]), mindspore.int8)
-            >>> output = input_x.rand_like(seed = 0)
-            >>> print(output)
-            [[0.5488135 0.71518937 0.60276338 0.54488318]
-             [0.4236548 0.64589411 0.43758721 0.891773 ]]
-            >>> input_p = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
-            >>> output = input_p.rand_like(seed = 0)
-            >>> print(output)
-            [0.5488135 0.71518937 0.60276338]
-        """
-        input_tensor = self
-        input_tensor = np.array(input_tensor)
-        shape_ = input_tensor.shape
-        input_tensor = input_tensor.reshape(-1)
-        x = len(input_tensor)
-        np.random.seed(seed)
-        return Tensor(np.array([np.random.rand(1) for i in range(x)]).reshape(shape_))
-
-    def randint_like(self, high, low=0, seed=None):
-        r"""
-        Returns a tensor with the same size as the input tensor,
-        and the numerical value is a random number on the interval [low, high],
-        if only one int type data is entered, the default value is high,
-        if two integers are entered, they are low and high respectively.
-
-        Args:
-            input_tensor (Union[Tensor, int, float]): the size of input will determine size of the output tensor.
-            low (int, optional) – Lowest integer to be drawn from the distribution. Default: 0.
-            high (int) – One above the highest integer to be drawn from the distribution.
-            seed (int, optional): set the random seed (0 to 2**32).
-
-        Returns:
-            out (Union[Tensor, int]), with the same shape as input_tensor.
-
-        Raises:
-            TypeError: If dtype of the input_tensor is not int or float.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import mindspore
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> input_x = Tensor(np.array([1., 2., 3., 4., 5.]), mindspore.float32)
-            >>> output = input_x.randint_like(20, seed = 0)
-            >>> print(output)
-            [12 15 0 3 3]
-            >>> output = input_x.randint_like(20, 100, seed = 0)
-            >>> print(output)
-            [64 67 84 87 87]
-        """
-        input_tensor = self
-        input_tensor = np.array(input_tensor)
-        shape_ = input_tensor.shape
-        input_tensor = input_tensor.reshape(-1)
-        if low > high:
-            high, low = low, high
-        x = len(input_tensor)
-        np.random.seed(seed)
-        return Tensor(np.array([np.random.randint(low, high) for i in range(x)]).reshape(shape_))
-
-    def randn_like(self, seed=None):
-        r"""
-        Returns a tensor with the same size as input that is filled with random
-        numbers from a normal distribution with mean 0 and variance 1.
-
-        Args:
-            input_tensor (Union[Tensor, int, float]): the size of input will determine size of the output tensor.
-            seed (int, optional): set the random seed (0 to 2**32).
-
-        Returns:
-            out (Union[Tensor, int]), with the same shape as input_tensor.
-
-        Raises:
-            TypeError: If dtype of the input_tensor is not int or float.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import mindspore
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> input_x = Tensor(np.array([1., 2., 3., 4., 5.]), mindspore.float32)
-            >>> output = input_x.randn_like(seed = 0)
-            >>> print(output)
-            [1.7640524 0.4001572 0.978738 2.2408931 1.867558 ]
-            >>> input_p = Tensor(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]), mindspore.int32)
-            >>> output = input_p.randn_like(seed = 0)
-            >>> print(output)
-            [[ 1.7640524 0.4001572 0.978738 2.2408931 1.867558 ]
-             [-0.9772779 0.95008844 -0.1513572 -0.10321885 0.41059852]]
-        """
-        input_tensor = np.array(self)
-        shape_ = input_tensor.shape
-        input_tensor = input_tensor.reshape(-1)
-        x = len(input_tensor)
-        np.random.seed(seed)
-        return Tensor([np.random.randn() for i in range(x)]).reshape(shape_)
-
-    def as_strided(self, shape=None, strides=None, subok=False, writeable=True):
-        r"""
-        as_strided(input, size, stride, storage_offset=0) -> Tensor
-        Create a view of an existing `mindspore.Tensor` :attr:`x` with specified
-        :attr:`shape`, :attr:`stride` and :attr:`subok`.
-
-        Args:
-            x (Tensor): the input tensor.
-            shape (tuple or ints): the shape of the output tensor
-            stride (tuple or ints): the stride of the output tensor
-            subok (int, optional): the offset in the underlying storage of the output tensor
-
-        Returns:
-            Tensor viewed by strides and subok.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> X = np.arange(9, dtype=np.int32).reshape(3,3)
-            >>> output = Tensor(X).as_strided((2, 2), (1, 1))
-            >>> print(output)
-            [[0 1]
-             [1 2]]
-        """
-        dtype_ = self.dtype
-        x = self.asnumpy()
-        n = x.strides[1]
-        strides = tuple(np.array(strides) * n)
-        return Tensor(np.lib.stride_tricks.as_strided(x, shape, strides, subok, writeable), dtype=dtype_)
-
-    def randperm(self, max_length=1, pad=-1):
-        r"""
-        Generates n random samples from 0 to n-1 without repeating. If `max_length` > n,
-        the last `max_length-n` elements will be filled with `pad`.
-
-        Args:
-            max_length (int): Number of items expected to get and the number must be greater than 0. Default: 1.
-            pad (int): The pad value to be filled. Default: -1.
-            dtype (mindspore.dtype): The type of output. Default: mindspore.int32.
-
-        Inputs:
-            - **n** (Tensor[int32]) - The input tensor with shape: (1,) and the number must be in [0, `max_length`].
-
-        Outputs:
-            - **output** (Tensor) - The output Tensor with shape: (`max_length`,) and type: `dtype`.
-
-        Raises:
-            TypeError: If neither `max_length` nor `pad` is an int.
-            TypeError: If `self` has non-Int elements.
-            TypeError: If `self` has negative elements.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU``
-
-        Examples:
-            >>> # The result of every execution is different because this operator will generate n random samples.
-            >>> from mindspore import Tensor
-            >>> import mindspore
-            >>> n = Tensor([20], dtype=mindspore.int32)
-            >>> output = n.randperm(max_length=30, pad=-1)
-            >>> print(output)
-            [15 6 11 19 14 16 9 5 13 18 4 10 8 0 17 2 1 12 3 7
-             -1 -1 -1 -1 -1 -1 -1 -1 -1 -1]
-        """
-        self._init_check()
-        return tensor_operator_registry.get('randperm')(max_length, pad)(self)
-
-    def random_categorical(self, num_sample, seed=0, dtype=mstype.int64):
-        r"""
-        For details, please refer to :func:`mindspore.ops.random_categorical`.
-        """
-        self._init_check()
-        validator.check_is_int(num_sample, 'num_sample')
-        validator.check_is_int(seed, 'seed')
-        return tensor_operator_registry.get('random_categorical')(self, num_sample, seed, dtype)
-
-    def masked_select(self, mask):
-        """
-        For details, please refer to :func:`mindspore.ops.masked_select`.
-        """
-        self._init_check()
-        return tensor_operator_registry.get('masked_select')(self, mask)
-
-    def gather_elements(self, dim, index):
-        """
-        For details, please refer to :func:`mindspore.ops.gather_elements`.
-        """
-        self._init_check()
-        validator.check_value_type('index', index, (Tensor_,), 'Tensor.gather_elements')
-        return tensor_operator_registry.get('gather_elements')(self, dim, index)
-
-    def nonzero(self):
-        """
-        For details, please refer to :func:`mindspore.ops.nonzero`.
-        """
-        self._init_check()
-        return tensor_operator_registry.get('nonzero')(self)
-
-    def svd(self, full_matrices=False, compute_uv=True):
-        """
-        For details, please refer to :func:`mindspore.ops.svd`.
-        """
-        svd_op = tensor_operator_registry.get("svd")
-        if compute_uv:
-            return svd_op(full_matrices, compute_uv)(self)
+    def svd(self, full_matrices=False, compute_uv=True):
+        """
+        For details, please refer to :func:`mindspore.ops.svd`.
+        """
+        svd_op = tensor_operator_registry.get("svd")
+        if compute_uv:
+            return svd_op(full_matrices, compute_uv)(self)
 
         s, _, _ = svd_op(full_matrices, compute_uv)(self)
         return s
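The relocated `svd` keeps both return conventions: an `(s, u, v)` tuple when `compute_uv=True`, and singular values only otherwise. A sketch:

```python
import numpy as np
import mindspore
from mindspore import Tensor

a = Tensor(np.array([[1., 2.], [3., 4.]]), mindspore.float32)
s, u, v = a.svd()                 # compute_uv=True (the default)
s_only = a.svd(compute_uv=False)  # singular values only
```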
@@ -3418,7 +3170,6 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('heaviside')(self, values)
 
-
     def hypot(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.hypot`.
@@ -3431,7 +3182,21 @@ class Tensor(Tensor_):
         For details, please refer to :func:`mindspore.ops.soft_shrink`.
         """
         self._init_check()
-        return tensor_operator_registry.get('soft_shrink')(lambd)
+        return tensor_operator_registry.get('soft_shrink')(self, lambd)
+
+    def matrix_determinant(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.matrix_determinant`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('matrix_determinant')(self)
+
+    def log_matrix_determinant(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.log_matrix_determinant`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('log_matrix_determinant')(self)
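`log_matrix_determinant` delegates to `mindspore.ops.log_matrix_determinant`, which returns the sign and the log of the absolute determinant as a pair; this is the numerically safer companion to `matrix_determinant` for large or ill-conditioned matrices. A sketch:

```python
import numpy as np
import mindspore
from mindspore import Tensor

a = Tensor(np.array([[2., 0.], [0., 3.]]), mindspore.float32)
sign, logdet = a.log_matrix_determinant()  # det = 6 -> sign = 1, logdet = ln(6)
print(a.matrix_determinant())              # 6.0
```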
 
     def to_coo(self):
         """
@@ -3556,6 +3321,13 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('diag')()(self)
 
+    def diagflat(self, offset=0):
+        r"""
+        For details, please refer to :func:`mindspore.ops.diagflat`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('diagflat')(self, offset)
+
     def xdivy(self, y):
         r"""
         For details, please refer to :func:`mindspore.ops.xdivy`.
@@ -3563,11 +3335,40 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get("xdivy")()(self, y)
 
-    def split(self, axis=0
+    def split(self, split_size_or_sections, axis=0):
         """
         For details, please refer to :func:`mindspore.ops.split`.
         """
-        return tensor_operator_registry.get('split')(
+        return tensor_operator_registry.get('split')(self, split_size_or_sections, axis)
+
+    def tensor_split(self, indices_or_sections, axis=0):
+        """
+        For details, please refer to :func:`mindspore.ops.tensor_split`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('tensor_split')(self, indices_or_sections, axis)
+
+    def vsplit(self, indices_or_sections):
+        """
+        For details, please refer to :func:`mindspore.ops.vsplit`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('vsplit')(self, indices_or_sections)
+
+    def hsplit(self, indices_or_sections):
+        """
+        For details, please refer to :func:`mindspore.ops.hsplit`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('hsplit')(self, indices_or_sections)
+
+    def dsplit(self, indices_or_sections):
+        """
+        For details, please refer to :func:`mindspore.ops.dsplit`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('dsplit')(self, indices_or_sections)
 
     def xlogy(self, y):
         r"""
@@ -3593,9 +3394,16 @@ class Tensor(Tensor_):
         """
         return tensor_operator_registry.get('tile')()(self, multiples)
 
+    def topk(self, k, dim=None, largest=True, sorted=True):
+        r"""
+        For details, please refer to :func:`mindspore.ops.topk`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get("topk")(self, k, dim, largest, sorted)
+
     def top_k(self, k, sorted=True):
         r"""
-
+        `Tensor.top_k` is deprecated, please use `Tensor.topk` instead.
         """
         self._init_check()
         validator.check_is_int(k, 'k')
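`topk` is the new entry point, and `top_k` is kept only as a deprecated alias. A migration sketch:

```python
import numpy as np
import mindspore
from mindspore import Tensor

x = Tensor(np.array([1., 3., 2.]), mindspore.float32)
values, indices = x.topk(2)  # replaces x.top_k(2, sorted=True)
print(values, indices)       # [3. 2.] [1 2]
```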
@@ -3603,7 +3411,7 @@ class Tensor(Tensor_):
         return tensor_operator_registry.get("top_k")(sorted)(self, k)
 
     def sigmoid(self):
-        """
+        r"""
         For details, please refer to :func:`mindspore.ops.sigmoid`.
         """
         return tensor_operator_registry.get("sigmoid")()(self)
@@ -3618,47 +3426,10 @@ class Tensor(Tensor_):
 
     def addmv(self, mat, vec, beta=1, alpha=1):
         r"""
-
-
-        If `mat` is a :math:`(N, M)` tensor, `vec` is a 1-D tensor of size :math:`M`, then `x` must be broadcastable
-        with a 1-D tensor of size :math:`N` and `out` will be 1-D tensor of size :math:`N`.
-
-        The optional values `beta` and `alpha` are the matrix-vector product between `mat` and `vec` and the scale
-        factor for the added tensor `x` respectively. If `beta` is 0, then `x` will be ignored.
-
-        .. math::
-            output = β x + α (mat @ vec)
-
-        Args:
-            mat (Tensor): The first tensor to be multiplied. The shape of the tensor is :math:`(N, M)`.
-            vec (Tensor): The second tensor to be multiplied. The shape of the tensor is :math:`(M,)`.
-            beta (scalar[int, float, bool], optional): Multiplier for `x` (β). The `beta` must be int or
-                float or bool, Default: 1.
-            alpha (scalar[int, float, bool], optional): Multiplier for `mat` @ `vec` (α). The `alpha` must
-                be int or float or bool, Default: 1.
-
-        Returns:
-            Tensor, the shape of the output tensor is :math:`(N,)`, has the same dtype as `x`.
-
-        Raises:
-            TypeError: If `mat`, `vec`, `x` is not a Tensor.
-            TypeError: If input tensor and `x`, `mat`, `vec` are not the same dtype.
-            ValueError: If `mat` is not a 2-D Tensor.
-                If `x`, `vec` is not a 1-D Tensor.
-
-        Supported Platforms:
-            ``Ascend`` ``GPU`` ``CPU``
-
-        Examples:
-            >>> x = Tensor(np.array([2., 3.]).astype(np.float32))
-            >>> mat = Tensor(np.array([[2., 5., 3.], [4., 2., 2.]]).astype(np.float32))
-            >>> vec = Tensor(np.array([3., 2., 4.]).astype(np.float32))
-            >>> output = x.addmv(mat, vec)
-            >>> print(output)
-            [30. 27.]
+        For details, please refer to :func:`mindspore.ops.addmv`.
         """
         self._init_check()
-        return tensor_operator_registry.get('addmv')(self, mat, vec, beta=
+        return tensor_operator_registry.get('addmv')(self, mat, vec, beta=beta, alpha=alpha)
 
     def asinh(self):
         r"""
@@ -3671,7 +3442,8 @@ class Tensor(Tensor_):
         r"""
         Alias for :func:`mindspore.Tensor.asinh`.
         """
-
+        self._init_check()
+        return tensor_operator_registry.get('arcsinh')(self)
 
     def atan(self):
         r"""
@@ -3691,7 +3463,8 @@ class Tensor(Tensor_):
         r"""
         Alias for :func:`mindspore.Tensor.atanh`.
         """
-
+        self._init_check()
+        return tensor_operator_registry.get('arctanh')(self)
 
     def bmm(self, mat2):
         r"""
@@ -3705,7 +3478,7 @@ class Tensor(Tensor_):
         Performs tensor dtype conversion.
 
         Args:
-            dtype (
+            dtype (Number): The valid data type of the output tensor. Only constant value is allowed.
 
         Returns:
             Tensor, converted to the specified `dtype`.
@@ -3937,19 +3710,19 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('multiply')(self, value)
 
-    def div(self,
+    def div(self, value, *, rounding_mode=None):
         r"""
         For details, please refer to :func:`mindspore.ops.div`.
         """
         self._init_check()
-        return tensor_operator_registry.get('div')(self,
+        return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)
 
-    def divide(self,
+    def divide(self, value, *, rounding_mode=None):
         r"""
-
+        Alias for :func:`mindspore.Tensor.div`.
         """
         self._init_check()
-        return tensor_operator_registry.get('div')(self,
+        return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)
 
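`div` and its alias `divide` gain a keyword-only `rounding_mode`, matching `mindspore.ops.div` (`None`, `'floor'`, or `'trunc'`). A sketch of the three modes:

```python
import numpy as np
import mindspore
from mindspore import Tensor

x = Tensor(np.array([7., -7.]), mindspore.float32)
y = Tensor(np.array([2., 2.]), mindspore.float32)
print(x.div(y))                         # [ 3.5 -3.5]  true division
print(x.div(y, rounding_mode='floor'))  # [ 3. -4. ]   round toward -inf
print(x.div(y, rounding_mode='trunc'))  # [ 3. -3. ]   round toward zero
```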
     def equal(self, other):
         r"""
@@ -4078,6 +3851,12 @@ class Tensor(Tensor_):
         self._init_check()
         return tensor_operator_registry.get('less')(self, other)
 
+    def lt(self, other):
+        """
+        Alias for :func:`mindspore.Tensor.less`.
+        """
+        return self.less(other)
+
     def logical_and(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.logical_and`.
@@ -4108,354 +3887,256 @@ class Tensor(Tensor_):
 
     def lstsq(self, A):
         r"""
-
-        matrix `x` of size :math:`(m \times n)` and matrix `a` of size :math:`(m \times k)`.
-
-        If :math:`m \geq n`, `lstsq` solves the least-squares problem:
-
-        .. math::
-
-            \begin{array}{ll}
-            \min_y & \|xy-a\|_2.
-            \end{array}
-
-        If :math:`m < n`, `lstsq` solves the least-norm problem:
-
-        .. math::
-
-            \begin{array}{llll}
-            \min_y & \|y\|_2 & \text{subject to} & xy = a.
-            \end{array}
-
-        Args:
-            A (Tensor) - The m by k matrix equivalent to `a` in above.
-                The input tensor whose data type is float16, float32 or float64.
-
-        Returns:
-            Tensor, the least squares or minimum norm problems solution, which has shape :math:`(n \times k)`.
-            The data type is the same with `input`.
-
-        Raises:
-            TypeError: If `A` is not a Tensor.
-            TypeError: If dtype of input tensor or `A` is not one of: float16, float32, float64.
-            TypeError: If the dtypes of input tensor and `A` are not the same.
-            ValueError: If the dimension of input tensor is not equal to 2.
-            ValueError: If the dimension of `A` is not equal to 2 or 1.
-            ValueError: If the length of input_dims[0] is not equal to the length of A_dims[0].
-
-        Supported Platforms:
-            ``CPU``
-
-        Examples:
-            >>> x = Tensor(np.array([[2,1,5],[3,5,1],[1,1,1]]),mindspore.float32)
-            >>> a = Tensor(np.array([[10,5],[15,8],[7,4]]),mindspore.float32)
-            >>> output = x.lstsq(a)
-            >>> print(output)
-            [[17.000002 11.000002 ]
-             [-6.5000005 -4.500001 ]
-             [-3.500002 -2.5000017]]
+        For details, please refer to :func:`mindspore.ops.lstsq`.
         """
         self._init_check()
         return tensor_operator_registry.get('lstsq')(self, A)
 
-
+    @property
+    def mH(self):
         r"""
-
-
-
-
-        .. math::
-
-            \log (\Gamma_{p}(a))=C+\sum_{i=1}^{p} \log (\Gamma(a-\frac{i-1}{2}))
-
-        where :math:`C = \log(\pi) \times \frac{p(p-1)}{4}` and :math:`\Gamma(\cdot)` is the Gamma function.
-
-        Args:
-            p (int): The number of dimensions. And the value of `p` must be greater than or equal to 1.
-
-        Returns:
-            Tensor, has the same shape and type as input tensor.
-
-        Raises:
-            TypeError: If dtype of input tensor is neither float32 nor float64.
-            TypeError: If `p` is not an int.
-            ValueError: If `p` is not greater than or equal to 1.
-            ValueError: If all elements of input tensor are not greater than (p-1)/2.
+        Accessing this property is equivalent to calling self.adjoint().
+        For details, please refer to :func:`mindspore.ops.adjoint`.
+        """
+        return self.adjoint()
 
-
-
+    @property
+    def mT(self):
+        r"""
+        Returns the Tensor that exchanges the last two dimensions.
+        Accessing the attribute, x.mT, is equal to calling the method, x.swapaxes(-2, -1).
+        For details, please refer to :func:`mindspore.Tensor.swapaxes`.
+        """
+        return self.swapaxes(-2, -1)
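The new `mH` and `mT` properties mirror the familiar matrix-transpose conventions: `mT` swaps the last two axes, and `mH` additionally conjugates (it simply calls `adjoint()`). A sketch:

```python
import numpy as np
import mindspore
from mindspore import Tensor

x = Tensor(np.arange(6).reshape(2, 3), mindspore.float32)
print(x.mT.shape)  # (3, 2), same as x.swapaxes(-2, -1)
print(x.mH.shape)  # (3, 2); for real tensors mH equals mT
```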
 
-
-
-
-            >>> print(y)
-            [[2.694925 5.402975 9.140645]
-             [5.402975 1.596312 13.64045]]
+    def mvlgamma(self, p):
+        r"""
+        For details, please refer to :func:`mindspore.ops.mvlgamma`.
         """
         self._init_check()
         return tensor_operator_registry.get('mvlgamma')(self, p)
 
     def matmul(self, tensor2):
         r"""
-
-
-        Note:
-            Numpy arguments `out`, `casting`, `order`, `subok`, `signature`, and `extobj` are
-            not supported.
-            On CPU, the supported dtypes are np.float16 and np.float32.
-            On GPU, the supported dtypes are np.float16 and np.float32.
-
-        Args:
-            tensor2 (Tensor): Second input tensor, scalar not allowed.
-                The last dimension of input tensor must be the same size as the second last dimension of `tensor2`.
-                And the shape of input tensor and tensor2 could be broadcast.
-
-        Returns:
-            Tensor or scalar, the matrix product of the inputs. This is a scalar only
-            when both input tensor, `tensor2` are 1-d vectors.
-
-        Raises:
-            ValueError: If the last dimension of input tensor is not the same size as the
-                second-to-last dimension of `tensor2`, or if a scalar value is passed in.
-            ValueError: If the shape of input tensor and `tensor2` could not broadcast together.
-
-        Supported Platforms:
-            ``Ascend`` ``CPU`` ``GPU``
-
-        Examples:
-            >>> x = Tensor(np.arange(2*3*4).reshape(2, 3, 4), mindspore.float32)
-            >>> y = Tensor(np.arange(4*5).reshape(4, 5), mindspore.float32)
-            >>> output = x.matmul(y)
-            >>> print(output)
-            [[[ 70. 76. 82. 88. 94.]
-              [ 190. 212. 234. 256. 278.]
-              [ 310. 348. 386. 424. 462.]]
-             [[ 430. 484. 538. 592. 646.]
-              [ 550. 620. 690. 760. 830.]
-              [ 670. 756. 842. 928. 1014.]]]
+        For details, please refer to :func:`mindspore.ops.matmul`.
         """
         self._init_check()
         return tensor_operator_registry.get('matmul')(self, tensor2)
 
-    def
+    def inner(self, other):
         r"""
-
-
-
-
-            types consistent.
-            - The inputs must be two tensors or one tensor and one scalar.
-            - When the inputs are two tensors,
-              dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
-            - When the inputs are one tensor and one scalar,
-              the scalar could only be a constant.
-            - Broadcasting is supported.
-            - If one of the elements being compared is a NaN, then that element is returned.
-
-        .. math::
-            output_i = max(input_i, other_i)
-
-        Args:
-            other (Union[Tensor, Number, bool]): The second input is a number or
-                a bool when the first input is a tensor or a tensor whose data type is number or bool.
-
-        Returns:
-            Tensor, the shape is the same as the one after broadcasting,
-            and the data type is the one with higher precision or higher digits among the two inputs.
+        For details, please refer to :func:`mindspore.ops.inner`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('inner')(self, other)
 
-
-
-
+    def multinomial(self, num_samples, replacement=True, seed=None):
+        r"""
+        For details, please refer to :func:`mindspore.ops.multinomial`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('multinomial')(self, num_samples, replacement, seed)
 
-
-
+    def matrix_power(self, n):
+        r"""
+        For details, please refer to :func:`mindspore.ops.matrix_power`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('matrix_power')(self, n)
 
-
-
-
-            >>> output = x.maximum(y)
-            >>> print(output)
-            [4. 5. 6.]
+    def maximum(self, other):
+        r"""
+        For details, please refer to :func:`mindspore.ops.maximum`.
         """
         self._init_check()
         return tensor_operator_registry.get('maximum')(self, other)
 
-    def
+    def mm(self, mat2):
         r"""
-
-
-
-
-            the data types consistent.
-            - The inputs must be two tensors or one tensor and one scalar.
-            - When the inputs are two tensors,
-              dtypes of them cannot be bool at the same time, and the shapes of them can be broadcast.
-            - When the inputs are one tensor and one scalar, the scalar could only be a constant.
-
-        Args:
-            value (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-                the second input should be a number.Number or bool value, or a Tensor whose data type is number
-                or bool\_. When the first input is Scalar, the second input must be a Tensor whose data type is
-                number or bool\_.
-
-        Returns:
-            Tensor, the shape is the same as the one after broadcasting,
-            and the data type is the one with higher precision or higher digits among the two inputs.
-
-        Raises:
-            TypeError: If input tensor and `value` is not one of the following: Tensor, number.Number, bool.
-            ValueError: If input tensor and `value` are not the same shape.
+        For details, please refer to :func:`mindspore.ops.mm`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('mm')(self, mat2)
 
-
-
+    def msort(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.msort`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('msort')(self)
 
-
-
-
-            >>> output = x.mul(y)
-            >>> print(output)
-            [ 4. 10. 18.]
+    def mul(self, value):
+        r"""
+        For details, please refer to :func:`mindspore.ops.mul`.
         """
         self._init_check()
         return tensor_operator_registry.get('mul')(self, value)
 
+    def nan_to_num(self, nan=0.0, posinf=None, neginf=None):
+        """
+        For details, please refer to :func:`mindspore.ops.nan_to_num`.
+        """
+        return tensor_operator_registry.get('nan_to_num')(self, nan, posinf, neginf)
+
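`nan_to_num` forwards to `mindspore.ops.nan_to_num`: NaN becomes `nan`, and infinities become `posinf`/`neginf` (when left as None, the dtype's extreme finite values are assumed to be used, as in the numpy convention). A sketch:

```python
import numpy as np
import mindspore
from mindspore import Tensor

x = Tensor(np.array([np.nan, np.inf, -np.inf, 5.0]), mindspore.float32)
# Replace NaN with 0 and clamp infinities to explicit finite values.
print(x.nan_to_num(nan=0.0, posinf=1e6, neginf=-1e6))  # [ 0.e+00  1.e+06 -1.e+06  5.e+00]
```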
     def neg(self):
         r"""
-
-
-        .. math::
-
-            out_{i} = - x_{i}
-
-        Returns:
-            Tensor, has the same shape and dtype as input.
-
-        Supported Platforms:
-            ``Ascend`` ``CPU`` ``GPU``
-
-        Examples:
-            >>> x = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
-            >>> output = x.neg()
-            >>> print(output)
-            [-1. -2. 1. -2. 0. 3.5]
+        For details, please refer to :func:`mindspore.ops.neg`.
         """
         self._init_check()
         return tensor_operator_registry.get('neg')(self)
 
     def ne(self, other):
         r"""
-
+        For details, please refer to :func:`mindspore.ops.ne`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('ne')(self, other)
 
-
-
-
-
-
-
-
+    def not_equal(self, other):
+        r"""
+        For details, please refer to :func:`mindspore.ops.not_equal`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('not_equal')(self, other)
+
+    def new_zeros(self, size, *, dtype=None):
+        r"""
+        Return a tensor of `size` filled with zeros.
 
         Args:
-
-
+            size (Union[int, tuple, list]): An int, list or tuple of integers defining the output shape.
+
+        Keyword Args:
+            dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned
+                tensor has the same dtype as `self`. Default: None.
 
         Returns:
-            Tensor, the shape
+            Tensor, the shape and dtype is defined above and filled with zeros.
 
         Raises:
-            TypeError: If
-            TypeError: If neither input tensor and `other` is a Tensor.
+            TypeError: If `size` is not an int, list or tuple of integers.
 
         Supported Platforms:
-            ``Ascend`` ``
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
             >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
-            >>> output = x.
+            >>> output = x.new_zeros((2, 2))
             >>> print(output)
-            [
+            [[0. 0.]
+             [0. 0.]]
         """
+        validator.check_value_type('size', size, [list, int, tuple], 'Tensor.new_zeros')
+        if isinstance(size, list):
+            size = tuple(size)
         self._init_check()
-
+        _dtype = self.dtype if dtype is None else dtype
+        return tensor_operator_registry.get('zeros')(size, _dtype)
 
-    def
+    def new_ones(self, size, *, dtype=None):
         r"""
-
+        Return a tensor of `size` filled with ones.
 
-
+        Args:
+            size (Union[int, tuple, list]): An int, list or tuple of integers defining the output shape.
 
-
+        Keyword Args:
+            dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned
+                tensor has the same dtype as `self`. Default: None.
 
         Returns:
-            Tensor,
+            Tensor, the shape and dtype is defined above and filled with ones.
+
+        Raises:
+            TypeError: If `size` is not an int, list or tuple of integers.
 
         Supported Platforms:
-            ``Ascend`` ``
+            ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
-            >>> x = Tensor(np.array([
-            >>> output = x.
+            >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
+            >>> output = x.new_ones((2, 2))
             >>> print(output)
-            [
+            [[1. 1.]
+             [1. 1.]]
         """
+        validator.check_value_type('size', size, [list, int, tuple], 'Tensor.new_ones')
+        if isinstance(size, list):
+            size = tuple(size)
         self._init_check()
-
+        _dtype = self.dtype if dtype is None else dtype
+        return tensor_operator_registry.get('ones')(size, _dtype)
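`new_zeros` and `new_ones` inherit the dtype of `self` unless overridden by the keyword-only `dtype`, and a list-valued `size` is normalized to a tuple before dispatch. A sketch:

```python
import numpy as np
import mindspore
from mindspore import Tensor

x = Tensor(np.array([1, 2, 3]), mindspore.float32)
z = x.new_zeros((2, 2))                        # float32, inherited from x
o = x.new_ones([2, 2], dtype=mindspore.int32)  # dtype overridden via keyword
```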
 
-    def
+    def sign(self):
         r"""
-
-
-
-
-            descending (bool, optional): Controls the sorting order. If descending is True, then the elements
-                are sorted in descending order by value. Default: False.
+        For details, please refer to :func:`mindspore.ops.sign`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('sign')(self)
 
-
-
-
+    def signbit(self):
+        """
+        For details, please refer to :func:`mindspore.ops.signbit`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('signbit')(self)
 
-
-
-
-
-
+    def sgn(self):
+        """
+        For details, please refer to :func:`mindspore.ops.sgn`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('sgn')(self)
 
-
-
+    def sin(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.sin`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('sin')(self)
 
-
-
-
-            >>> print(output)
-            (Tensor(shape=[3, 3], dtype=Float16, value=
-            [[ 1.0000e+00, 2.0000e+00, 8.0000e+00],
-             [ 3.0000e+00, 5.0000e+00, 9.0000e+00],
-             [ 4.0000e+00, 6.0000e+00, 7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int32, value=
-            [[2, 1, 0],
-             [2, 0, 1],
-             [0, 1, 2]]))
+    def sinc(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.sinc`.
         """
         self._init_check()
-        return tensor_operator_registry.get('
+        return tensor_operator_registry.get('sinc')(self)
 
-    def
+    def sinh(self):
         r"""
-
+        For details, please refer to :func:`mindspore.ops.sinh`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('sinh')(self)
 
-
-
+    def sort(self, axis=-1, descending=False):
+        r"""
+        For details, please refer to :func:`mindspore.ops.sort`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('sort')(self, axis=axis, descending=descending)
 
-
-
+    def argsort(self, axis=-1, descending=False):
+        """
+        For details, please refer to :func:`mindspore.ops.argsort`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('argsort')(self, axis, descending)
 
-
-
-
-            >>> print(output)
-            [3. 0. 0. -3.]
+    def trunc(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.trunc`.
         """
         self._init_check()
         return tensor_operator_registry.get('trunc')(self)
 
+    def where(self, condition, y):
+        r"""
+        For details, please refer to :func:`mindspore.ops.where`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('where')(condition, self, y)
+
     def imag(self):
         r"""
         Returns a new tensor containing imaginary value of the input tensor.
@@ -4465,7 +4146,7 @@ class Tensor(Tensor_):
         Tensor, the shape is the same as the input tensor.
 
         Supported Platforms:
-            ``
+            ``GPU`` ``CPU``
 
         Examples:
             >>> x = Tensor(np.asarray(np.complex(1.3 + 0.4j)), mindspore.complex64)
@@ -4477,6 +4158,23 @@ class Tensor(Tensor_):
         return tensor_operator_registry.get('imag')(self)
 
 
+    def nextafter(self, other):
+        r"""
+        For details, please refer to :func:`mindspore.ops.nextafter`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('nextafter')(self, other)
+
+
+    def qr(self, some=True):
+        r"""
+        For details, please refer to :func:`mindspore.ops.qr`.
+        """
+        self._init_check()
+        validator.check_value_type('some', some, bool, 'Tensor.qr')
+        return tensor_operator_registry.get('qr')(self, 'reduced' if some else 'complete')
+
+
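The new `qr` wrapper maps the boolean `some` onto the string mode expected by `mindspore.ops.qr`: `'reduced'` when True, `'complete'` when False. A sketch:

```python
import numpy as np
import mindspore
from mindspore import Tensor

a = Tensor(np.array([[1., 2.], [3., 4.], [5., 6.]]), mindspore.float32)
q, r = a.qr()              # 'reduced': q is (3, 2), r is (2, 2)
qf, rf = a.qr(some=False)  # 'complete': q is (3, 3), r is (3, 2)
```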
 def _vm_compare(*args):
     """Implement `vm_compare` for tensor."""
     obj_str = args[-1]