mindspore-2.0.0a0-cp37-cp37m-win_amd64.whl → mindspore-2.0.0rc1-cp37-cp37m-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
The registry flags this version of mindspore as potentially problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -2
- mindspore/_c_dataengine.cp37-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp37-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp37-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +16 -1
- mindspore/_extends/parse/parser.py +107 -22
- mindspore/_extends/parse/resources.py +0 -7
- mindspore/_extends/parse/standard_method.py +885 -413
- mindspore/amp.py +52 -57
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +38 -20
- mindspore/boost/dim_reduce.py +3 -3
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +4 -6
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +41 -7
- mindspore/common/api.py +215 -141
- mindspore/common/dtype.py +8 -1
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +4 -2
- mindspore/common/jit_config.py +17 -13
- mindspore/common/mutable.py +33 -13
- mindspore/common/parameter.py +23 -21
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +62 -41
- mindspore/common/tensor.py +852 -1154
- mindspore/communication/__init__.py +2 -2
- mindspore/communication/_comm_helper.py +11 -4
- mindspore/communication/management.py +22 -21
- mindspore/config/op_info.config +501 -1008
- mindspore/context.py +201 -23
- mindspore/dataset/__init__.py +6 -6
- mindspore/dataset/audio/__init__.py +7 -7
- mindspore/dataset/audio/transforms.py +670 -30
- mindspore/dataset/audio/utils.py +47 -4
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/core/config.py +210 -14
- mindspore/dataset/core/validator_helpers.py +2 -2
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +322 -66
- mindspore/dataset/engine/datasets_audio.py +80 -76
- mindspore/dataset/engine/datasets_standard_format.py +51 -38
- mindspore/dataset/engine/datasets_text.py +232 -118
- mindspore/dataset/engine/datasets_user_defined.py +41 -17
- mindspore/dataset/engine/datasets_vision.py +746 -225
- mindspore/dataset/engine/graphdata.py +75 -10
- mindspore/dataset/engine/iterators.py +45 -5
- mindspore/dataset/engine/offload.py +48 -28
- mindspore/dataset/engine/validators.py +117 -8
- mindspore/dataset/text/__init__.py +6 -5
- mindspore/dataset/text/transforms.py +86 -3
- mindspore/dataset/text/utils.py +6 -4
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +3 -2
- mindspore/dataset/transforms/c_transforms.py +1 -1
- mindspore/dataset/transforms/transforms.py +2 -2
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +2 -3
- mindspore/dataset/vision/c_transforms.py +9 -9
- mindspore/dataset/vision/py_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +2 -0
- mindspore/dataset/vision/transforms.py +160 -161
- mindspore/dataset/vision/utils.py +3 -3
- mindspore/experimental/map_parameter.py +38 -26
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +44 -9
- mindspore/include/api/delegate.h +1 -1
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_parallel_runner.h +2 -2
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +19 -3
- mindspore/include/api/types.h +3 -3
- mindspore/include/dataset/constants.h +7 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +1 -1
- mindspore/mindrecord/filereader.py +18 -0
- mindspore/mindrecord/filewriter.py +197 -34
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
- mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
- mindspore/mindrecord/tools/csv_to_mr.py +3 -3
- mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
- mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/__init__.py +0 -4
- mindspore/nn/cell.py +204 -132
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +7 -6
- mindspore/nn/layer/__init__.py +5 -4
- mindspore/nn/layer/activation.py +40 -89
- mindspore/nn/layer/basic.py +255 -624
- mindspore/nn/layer/channel_shuffle.py +7 -6
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +41 -4
- mindspore/nn/layer/conv.py +64 -28
- mindspore/nn/layer/dense.py +9 -8
- mindspore/nn/layer/embedding.py +27 -25
- mindspore/nn/layer/image.py +53 -46
- mindspore/nn/layer/math.py +97 -105
- mindspore/nn/layer/normalization.py +117 -86
- mindspore/nn/layer/padding.py +185 -95
- mindspore/nn/layer/pooling.py +817 -414
- mindspore/nn/layer/rnn_cells.py +10 -15
- mindspore/nn/layer/rnns.py +37 -38
- mindspore/nn/layer/thor_layer.py +11 -12
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +5 -4
- mindspore/nn/loss/loss.py +334 -199
- mindspore/nn/optim/ada_grad.py +6 -6
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +4 -5
- mindspore/nn/optim/adam.py +126 -62
- mindspore/nn/optim/adamax.py +3 -4
- mindspore/nn/optim/adasum.py +6 -6
- mindspore/nn/optim/asgd.py +2 -2
- mindspore/nn/optim/ftrl.py +67 -38
- mindspore/nn/optim/lamb.py +4 -5
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +43 -4
- mindspore/nn/optim/momentum.py +6 -5
- mindspore/nn/optim/optimizer.py +3 -1
- mindspore/nn/optim/proximal_ada_grad.py +2 -2
- mindspore/nn/optim/rmsprop.py +1 -1
- mindspore/nn/optim/rprop.py +8 -9
- mindspore/nn/optim/sgd.py +19 -13
- mindspore/nn/optim/thor.py +10 -15
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +4 -4
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +9 -15
- mindspore/nn/probability/distribution/bernoulli.py +3 -3
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +5 -7
- mindspore/nn/probability/distribution/cauchy.py +3 -3
- mindspore/nn/probability/distribution/distribution.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +3 -3
- mindspore/nn/probability/distribution/half_normal.py +15 -11
- mindspore/nn/probability/distribution/laplace.py +16 -13
- mindspore/nn/probability/distribution/logistic.py +2 -2
- mindspore/nn/probability/distribution/normal.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/student_t.py +20 -15
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +27 -10
- mindspore/nn/wrap/grad_reducer.py +2 -2
- mindspore/nn/wrap/loss_scale.py +40 -24
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +35 -30
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +22 -19
- mindspore/numpy/utils.py +1 -1
- mindspore/numpy/utils_const.py +108 -58
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +86 -117
- mindspore/ops/_grad/grad_base.py +23 -1
- mindspore/ops/_grad/grad_clip_ops.py +2 -3
- mindspore/ops/_grad/grad_comm_ops.py +34 -24
- mindspore/ops/_grad/grad_implementations.py +9 -45
- mindspore/ops/_grad/grad_inner_ops.py +47 -4
- mindspore/ops/_grad/grad_math_ops.py +142 -117
- mindspore/ops/_grad/grad_nn_ops.py +71 -165
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +7 -6
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
- mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
- mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
- mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -611
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_register_for_op.py +1 -0
- mindspore/ops/_utils/__init__.py +1 -2
- mindspore/ops/_utils/utils.py +19 -40
- mindspore/ops/_vmap/vmap_array_ops.py +116 -38
- mindspore/ops/_vmap/vmap_base.py +16 -9
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
- mindspore/ops/_vmap/vmap_image_ops.py +12 -5
- mindspore/ops/_vmap/vmap_math_ops.py +46 -5
- mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
- mindspore/ops/_vmap/vmap_random_ops.py +1 -1
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
- mindspore/ops/composite/__init__.py +7 -8
- mindspore/ops/composite/base.py +101 -47
- mindspore/ops/composite/math_ops.py +188 -158
- mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
- mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
- mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
- mindspore/ops/function/__init__.py +152 -8
- mindspore/ops/function/array_func.py +2555 -674
- mindspore/ops/function/clip_func.py +209 -13
- mindspore/ops/function/debug_func.py +2 -2
- mindspore/ops/function/grad/__init__.py +2 -1
- mindspore/ops/function/grad/grad_func.py +147 -62
- mindspore/ops/function/image_func.py +54 -38
- mindspore/ops/function/linalg_func.py +167 -16
- mindspore/ops/function/math_func.py +4849 -1492
- mindspore/ops/function/nn_func.py +2573 -988
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +3 -3
- mindspore/ops/function/random_func.py +790 -73
- mindspore/ops/function/sparse_func.py +98 -78
- mindspore/ops/function/sparse_unary_func.py +54 -53
- mindspore/ops/function/spectral_func.py +27 -24
- mindspore/ops/function/vmap_func.py +22 -2
- mindspore/ops/functional.py +97 -37
- mindspore/ops/op_info_register.py +70 -28
- mindspore/ops/operations/__init__.py +47 -14
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +5 -5
- mindspore/ops/operations/_grad_ops.py +276 -187
- mindspore/ops/operations/_inner_ops.py +319 -113
- mindspore/ops/operations/_ms_kernel.py +10 -8
- mindspore/ops/operations/_ocr_ops.py +9 -9
- mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
- mindspore/ops/operations/_quant_ops.py +137 -102
- mindspore/ops/operations/_rl_inner_ops.py +121 -60
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1004 -2
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +801 -466
- mindspore/ops/operations/comm_ops.py +51 -49
- mindspore/ops/operations/control_ops.py +2 -2
- mindspore/ops/operations/custom_ops.py +123 -44
- mindspore/ops/operations/debug_ops.py +24 -24
- mindspore/ops/operations/image_ops.py +240 -153
- mindspore/ops/operations/inner_ops.py +34 -50
- mindspore/ops/operations/linalg_ops.py +31 -9
- mindspore/ops/operations/math_ops.py +988 -757
- mindspore/ops/operations/nn_ops.py +965 -819
- mindspore/ops/operations/other_ops.py +51 -40
- mindspore/ops/operations/random_ops.py +204 -122
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +254 -93
- mindspore/ops/operations/spectral_ops.py +35 -3
- mindspore/ops/primitive.py +111 -9
- mindspore/parallel/_auto_parallel_context.py +189 -83
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +99 -7
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +7 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
- mindspore/parallel/_utils.py +1 -2
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +37 -34
- mindspore/parallel/shard.py +17 -18
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +69 -47
- mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
- mindspore/profiler/parser/base_timeline_generator.py +49 -56
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
- mindspore/profiler/parser/hwts_log_parser.py +1 -1
- mindspore/profiler/parser/integrator.py +15 -14
- mindspore/profiler/parser/minddata_analyzer.py +2 -2
- mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +2 -1
- mindspore/profiler/profiling.py +218 -186
- mindspore/rewrite/__init__.py +3 -1
- mindspore/rewrite/api/node.py +1 -114
- mindspore/rewrite/api/node_type.py +3 -0
- mindspore/rewrite/api/pattern_engine.py +31 -1
- mindspore/rewrite/api/scoped_value.py +4 -4
- mindspore/rewrite/api/symbol_tree.py +3 -78
- mindspore/rewrite/api/tree_node_helper.py +1 -1
- mindspore/rewrite/ast_creator_register.py +1 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -2
- mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
- mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
- mindspore/rewrite/namespace.py +0 -2
- mindspore/rewrite/node.py +157 -11
- mindspore/rewrite/parsers/assign_parser.py +231 -53
- mindspore/rewrite/parsers/class_def_parser.py +187 -109
- mindspore/rewrite/parsers/for_parser.py +24 -14
- mindspore/rewrite/parsers/function_def_parser.py +21 -4
- mindspore/rewrite/parsers/if_parser.py +6 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +256 -133
- mindspore/rewrite/symbol_tree_builder.py +38 -1
- mindspore/run_check/_check_version.py +69 -63
- mindspore/run_check/run_check.py +2 -1
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +1 -1
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +273 -102
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +2 -2
- mindspore/train/callback/_checkpoint.py +3 -3
- mindspore/train/callback/_early_stop.py +3 -3
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +29 -31
- mindspore/train/callback/_loss_monitor.py +3 -3
- mindspore/train/callback/_on_request_exit.py +3 -3
- mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
- mindspore/train/callback/_summary_collector.py +23 -16
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +15 -3
- mindspore/train/dataset_helper.py +10 -15
- mindspore/train/loss_scale_manager.py +8 -11
- mindspore/train/metrics/__init__.py +1 -1
- mindspore/train/metrics/bleu_score.py +1 -1
- mindspore/train/metrics/confusion_matrix.py +1 -1
- mindspore/train/metrics/cosine_similarity.py +1 -1
- mindspore/train/metrics/dice.py +2 -2
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +4 -3
- mindspore/train/metrics/mean_surface_distance.py +2 -2
- mindspore/train/metrics/occlusion_sensitivity.py +1 -1
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +1 -1
- mindspore/train/metrics/recall.py +1 -1
- mindspore/train/metrics/roc.py +2 -2
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +116 -37
- mindspore/train/model.py +45 -28
- mindspore/train/serialization.py +295 -188
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -13
- mindspore/train/train_thor/convert_utils.py +2 -2
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +610 -541
- mindspore/compression/__init__.py +0 -19
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -515
- mindspore/compression/quant/__init__.py +0 -28
- mindspore/compression/quant/qat.py +0 -634
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -140
- mindspore/nn/probability/dpn/vae/vae.py +0 -124
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
- mindspore/ops/composite/array_ops.py +0 -241
- mindspore/ops/composite/clip_ops.py +0 -134
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0

The rendered diff below is from `mindspore/ops/function/array_func.py` (+2555 −674 in the listing above), the largest single change in this release. Several removed lines were truncated by the diff renderer and are reproduced here as-is.

```diff
@@ -17,12 +17,20 @@
 from __future__ import absolute_import
 
 import builtins
-
+import operator
+import numbers
 import numpy as np
 
+import mindspore as ms
 import mindspore.common.dtype as mstype
 from mindspore.ops import operations as P
 from mindspore.ops.primitive import constexpr
+from mindspore.ops.primitive import _primexpr
+import mindspore.ops.function as ops
+from mindspore.ops import functional as F
+from mindspore.ops.operations._inner_ops import DynamicBroadcastTo
+from mindspore.ops.operations._sequence_ops import TupleToTensor
+from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
 
 from mindspore.ops.operations.array_ops import (
     UniqueConsecutive,
```
```diff
@@ -43,16 +51,19 @@ from mindspore.ops.operations.array_ops import (
     Expand,
     Lstsq,
     Mvlgamma,
+    Tril,
+    Argmax
 )
 from mindspore.ops.operations.array_ops import TensorScatterElements
 from mindspore.common import Tensor
 from mindspore.ops._primitive_cache import _get_cache_prim
-from mindspore
-from mindspore.
+from mindspore import _checkparam as validator
+from mindspore._c_expression import Tensor as Tensor_
 
+tuple_to_tensor_ = TupleToTensor()
 eye_ = P.Eye()
-fill_ = P.Fill()
 fills_ = Fills()
+fill_ = P.Fill()
 ones_ = P.Ones()
 ones_like_ = P.OnesLike()
 tile_ = P.Tile()
```
```diff
@@ -95,14 +106,16 @@ tensor_select_ = P.Select()
 index_fill_ = IndexFill()
 unsorted_segment_sum_ = P.UnsortedSegmentSum()
 population_count_ = P.PopulationCount()
+reduce_max = P.ReduceMax()
+reduce_min = P.ReduceMin()
 
 
-@
+@_primexpr
 def get_x_shape(x_shape):
-    if
-        return (-1,)
-    if -2 in x_shape:
+    if F.is_sequence_shape_unknown(x_shape):
         return (-2,)
+    if F.is_sequence_value_unknown(x_shape):
+        return (-1,)
     s = 1
     for i in x_shape:
         s = s * i
```
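
The rewritten `get_x_shape` swaps hard-coded sentinel checks for the new `F.is_sequence_shape_unknown` / `F.is_sequence_value_unknown` predicates, following MindSpore's dynamic-shape convention of `-2` for an unknown rank and `-1` for a known rank with unknown dimension values. A pure-Python mirror of the new control flow, where the two predicate stand-ins are assumptions for illustration and not the real `F` API:

```python
# Hypothetical mirror of the new get_x_shape logic: (-2,) flags an unknown
# rank, (-1,) flags unknown dimension values, and a fully known shape is
# collapsed to its total element count.
def get_x_shape_sketch(x_shape):
    if x_shape is None:              # stand-in for F.is_sequence_shape_unknown
        return (-2,)
    if any(d < 0 for d in x_shape):  # stand-in for F.is_sequence_value_unknown
        return (-1,)
    s = 1
    for i in x_shape:
        s = s * i
    return (s,)

print(get_x_shape_sketch((2, 3, 4)))  # (24,)
print(get_x_shape_sketch((2, -1)))    # (-1,)
print(get_x_shape_sketch(None))       # (-2,)
```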
```diff
@@ -114,6 +127,9 @@ def _check_attr_dtype(param_name, input_dtype, allow_dtypes, cls_name):
     validator.check_value_type(param_name, input_dtype, allow_dtypes, cls_name)
 
 
+check_flatten_order_const = constexpr(validator.check_flatten_order)
+
+
 ##############################
 # Tensor Creation Functions.
 ##############################
@@ -132,7 +148,7 @@ def _get_type(x):
     """get the dtype of input"""
     if isinstance(x, Tensor):
         return x.dtype
-    return
+    return F.typeof(x)
 
 
 def _get_max_type(start, end, step):
```
```diff
@@ -161,18 +177,18 @@ def arange(start=0, end=None, step=1, *, dtype=None):
     `step` up to but not including `end`.
 
     Args:
-        start (Union[float, int, Tensor], optional): The
+        start (Union[float, int, Tensor], optional): The start of the interval.
             If Tensor, the shape must be (). Default: 0.
-        end (Union[float, int, Tensor], optional):
+        end (Union[float, int, Tensor], optional): The end of the interval, exclusive.
             If Tensor, the shape must be ().
             Default: None. If None, it defaults to the value of `start`, and 0 is used as the starting value.
         step (Union[float, int, Tensor], optional): Number that increments `start`.
             If Tensor, the shape must be (). Default: 1.
 
     Keyword Args:
-        dtype (mindspore.dtype, optional): The
-            If
-
+        dtype (mindspore.dtype, optional): The required data type of returned Tensor. Default: None.
+            If the value is not specified or is None, the type with the highest precision in the
+            `start`, `end`, and `step` parameters is inferred.
 
     Returns:
         A 1-D Tensor, with the same type as the inputs.
```
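
The reworded docstring pins down the dtype rule: when `dtype` is omitted, the highest-precision type among `start`, `end`, and `step` wins. A minimal usage sketch of what that should mean in practice; the expected output is inferred from the docstring, not verified against this build:

```python
import mindspore as ms

# start and end are ints, step is a float: the float type should win promotion.
x = ms.ops.arange(0, 5, 1.5)
print(x)        # expected: [0.  1.5  3.  4.5]
print(x.dtype)  # expected: Float32, the highest-precision input type
```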
```diff
@@ -227,7 +243,60 @@ def arange(start=0, end=None, step=1, *, dtype=None):
     return data
 
 
-def
+def cat(tensors, axis=0):
+    r"""
+    Connect input tensors along with the given axis.
+
+    The input data is a tuple or a list of tensors. These tensors have the same rank :math:`R`.
+    Set the given axis as :math:`m`, and :math:`0 \le m < R`. Set the number of input tensors as :math:`N`.
+    For the :math:`i`-th tensor :math:`t_i`, it has the shape of :math:`(x_1, x_2, ..., x_{mi}, ..., x_R)`.
+    :math:`x_{mi}` is the :math:`m`-th dimension of the :math:`t_i`. Then, the shape of the output tensor is
+
+    .. math::
+
+        (x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)
+
+    Args:
+        tensors (Union[tuple, list]): A tuple or a list of input tensors.
+            Suppose there are two tensors in this tuple or list, namely t1 and t2.
+            To perform `concat` in the axis 0 direction, except for the :math:`0`-th axis,
+            all other dimensions should be equal, that is,
+            :math:`t1.shape[1] = t2.shape[1], t1.shape[2] = t2.shape[2], ..., t1.shape[R-1] = t2.shape[R-1]`,
+            where :math:`R` represents the rank of tensor.
+        axis (int): The specified axis, whose value is in range :math:`[-R, R)`. Default: 0.
+
+    Returns:
+        Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
+        The data type is the same with `tensors`.
+
+    Raises:
+        TypeError: If `axis` is not an int.
+        ValueError: If `tensors` have different dimension of tensor.
+        ValueError: If `axis` not in range :math:`[-R, R)`.
+        RuntimeError: If tensor's shape in `tensors` except for `axis` are different.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> input_x1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
+        >>> input_x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
+        >>> output = ops.cat((input_x1, input_x2))
+        >>> print(output)
+        [[0. 1.]
+         [2. 1.]
+         [0. 1.]
+         [2. 1.]]
+        >>> output = ops.cat((input_x1, input_x2), 1)
+        >>> print(output)
+        [[0. 1. 0. 1.]
+         [2. 1. 2. 1.]]
+    """
+    _concat = _get_cache_prim(P.Concat)(axis)
+    return _concat(tensors)
+
+
+def eye(n, m=None, dtype=None):
     """
     Creates a tensor with ones on the diagonal and zeros in the rest.
 
```
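
The shape rule in the new `cat` docstring is easy to sanity-check: sizes add along the concat axis and must match everywhere else. A small usage sketch assuming the `ops.cat` shown above:

```python
import numpy as np
import mindspore as ms

a = ms.Tensor(np.zeros((2, 3), np.float32))
b = ms.Tensor(np.ones((4, 3), np.float32))
out = ms.ops.cat((a, b), axis=0)
print(out.shape)  # (6, 3): the axis-0 sizes 2 and 4 add up; axis 1 must match
```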
```diff
@@ -238,12 +307,14 @@ def eye(n, m, t):
     Args:
         n (int): The number of rows of returned tensor. Constant value only.
         m (int): The number of columns of returned tensor. Constant value only.
-
-
+            Default: if None, the number of columns is as the same as n.
+        dtype (mindspore.dtype): MindSpore's dtype, the data type of the returned tensor.
+            The data type can be bool or Number.
+            Default: None, the data type of the returned tensor is mindspore.float32.
 
     Returns:
         Tensor, a tensor with ones on the diagonal and the rest of elements are zero. The shape of `output` depends on
-        the user's Inputs `n` and `m`. And the data type depends on Inputs `
+        the user's Inputs `n` and `m`. And the data type depends on Inputs `dtype`.
 
     Raises:
         TypeError: If `m` or `n` is not an int.
@@ -264,8 +335,135 @@ def eye(n, m, t):
         [[1. 0.]]
         >>> print(output.dtype)
         Float64
+        >>> output = ops.eye(2, dtype=mindspore.int32)
+        >>> print(output)
+        [[1 0]
+         [0 1]]
+        >>> print(output.dtype)
+        Int32
+        >>> output = ops.eye(2)
+        >>> print(output)
+        [[1. 0.]
+         [0. 1.]]
+        >>> print(output.dtype)
+        Float32
+    """
+    if m is None:
+        m = n
+    if dtype is None:
+        dtype = ms.float32
+    return eye_(n, m, dtype)
+
+
+def hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None):
+    r"""
+    Returns the Hamming window.
+
+    .. math::
+
+        w[n] = \alpha - \beta \cos \left( \frac{2 \pi n}{N - 1} \right),
+
+    where :math:`N` is the full window size.
+
+    Args:
+        window_length (int): The size of returned window. Must be a non negative integer.
+        periodic (bool, optional): If True, return a periodic window. If False, return a symmetric window.
+        alpha (float, optional): The coefficient α.
+        beta (float, optional): The coefficient β.
+
+    Keyword Args:
+        dtype (mindspore.dtype, optional): The output window data type. Default: None.
+
+    Returns:
+        Tensor, a 1-D tensor of size (window_length) containing the window.
+
+    Raises:
+        TypeError: If `window_length` is a negative integer.
+        TypeError: If `periodic` is not bool.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> print(ops.hamming_window(6, False))
+        [0.08 0.39785218 0.91214782 0.91214782 0.39785218 0.08]
+    """
+    if not isinstance(window_length, int):
+        raise TypeError(f"For array function 'hamming_window', 'window_length' must be int, but got" \
+                        f" {type(window_length)}.")
+    if window_length < 0:
+        raise ValueError(f"For array function 'hamming_window', 'window_length' must be non negative number.")
+    if not isinstance(periodic, bool):
+        raise TypeError(f"For array function 'hamming_window', 'periodic' must be bool, but got {type(periodic)}.")
+    if not isinstance(alpha, float):
+        raise TypeError(f"For array function 'hamming_window', 'alpha' must be float, but got {type(alpha)}.")
+    if not isinstance(beta, float):
+        raise TypeError(f"For array function 'hamming_window', 'beta' must be float, but got {type(beta)}.")
+    if window_length <= 1:
+        return Tensor(np.ones(window_length))
+    if dtype is not None and dtype not in mstype.float_type:
+        raise TypeError(f"For array function 'hamming_window', 'dtype' must be floating point dtypes, but got {dtype}.")
+
+    if periodic:
+        window_length += 1
+    n = arange(0, window_length)
+    w = alpha - beta * ops.cos((2 * np.pi / (window_length - 1)) * n)
+
+    if dtype is not None:
+        w = P.Cast()(w, dtype)
+    return w[:-1] if periodic else w
+
+
+def where(condition, x, y):
+    r"""
+    Selects elements from `x` or `y` based on `condition` and returns a tensor.
+
+    .. math::
+        output_i = \begin{cases} x_i,\quad &if\ condition_i \\ y_i,\quad &otherwise \end{cases}
+
+    Args:
+        condition (Tensor[bool]): If True, yield `x`, otherwise yield `y`.
+        x (Union[Tensor, Scalar]): When `condition` is True, values to select from.
+        y (Union[Tensor, Scalar]): When `condition` is False, values to select from.
+
+    Returns:
+        Tensor, elements are selected from `x` and `y`.
+
+    Raises:
+        TypeError: If `condition` is not a Tensor.
+        TypeError: If both `x` and `y` are scalars.
+        ValueError: If `condition`, `x` and `y` can not broadcast to each other.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> a = Tensor(np.arange(4).reshape((2, 2)), mstype.float32)
+        >>> b = Tensor(np.ones((2, 2)), mstype.float32)
+        >>> condition = a < 3
+        >>> output = ops.where(condition, a, b)
+        >>> print(output)
+        [[0. 1.]
+         [2. 1.]]
     """
-
+    if not isinstance(condition, Tensor):
+        raise TypeError(f"For 'where', 'condition' must be a Tensor, but got {type(condition)}.")
+    if isinstance(x, (int, float)):
+        if not isinstance(y, Tensor):
+            raise TypeError(f"For 'where', at least one of 'x' and 'y' should be Tensor, \
+                but got x:{type(x)}, y:{type(y)}.")
+        x = cast_(x, y.dtype)
+    elif isinstance(y, (int, float)):
+        if not isinstance(x, Tensor):
+            raise TypeError(f"For 'where', at least one of 'x' and 'y' should be Tensor, \
+                but got x:{type(x)}, y:{type(y)}.")
+        y = cast_(y, x.dtype)
+    output_shape = _calc_broadcast_shape(x.shape, y.shape, condition.shape)
+    condition = broadcast_to(condition, output_shape)
+    x = broadcast_to(x, output_shape)
+    y = broadcast_to(y, output_shape)
+    _select = P.Select()
+    return _select(condition, x, y)
 
 
 def reverse(x, axis):
```
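
The `periodic` handling in `hamming_window` above is the standard trick: build a symmetric window one sample longer, then drop the last sample. A NumPy cross-check of the same formula, independent of MindSpore:

```python
import numpy as np

def hamming_np(n_points, periodic=True, alpha=0.54, beta=0.46):
    """Plain-NumPy rerun of the formula used by the new hamming_window."""
    if n_points <= 1:
        return np.ones(n_points)
    # Periodic windows are symmetric windows of length N+1 minus the last sample.
    length = n_points + 1 if periodic else n_points
    n = np.arange(length)
    w = alpha - beta * np.cos(2 * np.pi * n / (length - 1))
    return w[:-1] if periodic else w

print(hamming_np(6, periodic=False))
# [0.08  0.39785218  0.91214782  0.91214782  0.39785218  0.08]
# matches the docstring example above
```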
```diff
@@ -277,10 +475,10 @@ def reverse(x, axis):
 
     Args:
         x (Tensor): The target tensor. The data type is Number except float64.
-            The shape is :math:`(N
+            The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
         axis (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.
 
-
+    Outputs:
         Tensor, has the same shape and type as `x`.
 
     Raises:
@@ -305,6 +503,33 @@ def reverse(x, axis):
     return P.ReverseV2(axis)(x)
 
 
+def ravel(input):
+    """
+    Expand the multidimensional Tensor into 1D along the 0 axis direction.
+
+    Args:
+        input (Tensor): A tensor to be flattened.
+
+    Returns:
+        Tensor, a 1-D tensor, containing the same elements of the input.
+
+    Raises:
+        TypeError: If argument `input` is not Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
+        >>> output = ops.ravel(x)
+        >>> print(output)
+        [0. 1. 2. 1.]
+        >>> print(output.shape)
+        (4,)
+    """
+    return ops.reshape(input, (-1,))
+
+
 def matrix_band_part(x, lower, upper):
     r"""
     Copy a tensor setting everything outside a central band in each innermost matrix to zero.
```
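
`ravel` is a thin alias for a flattening reshape, so it always returns a 1-D tensor of all elements in row-major order. The two calls below are equivalent by construction; usage sketch:

```python
import numpy as np
import mindspore as ms

x = ms.Tensor(np.array([[0, 1], [2, 1]], np.float32))
print(ms.ops.ravel(x))            # [0. 1. 2. 1.]
print(ms.ops.reshape(x, (-1,)))   # same result: ravel is reshape(input, (-1,))
```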
```diff
@@ -332,7 +557,7 @@ def matrix_band_part(x, lower, upper):
         ValueError: If the shape of `upper` is not equal to 0D.
 
     Supported Platforms:
-
+
 
     Examples:
         >>> x = Tensor(np.ones([2, 4, 4]).astype(np.float32))
@@ -382,6 +607,27 @@ def padding(x, pad_dim_size=8):
     return padding_(x)
 
 
+@constexpr
+def _check_axis_type(axis, type_int=True, type_tuple=True, type_list=True, ops_name="ops"):
+    """Check axis argument type."""
+    if type_int and isinstance(axis, int):
+        return True
+    if (type_tuple and isinstance(axis, tuple)) or (type_list and isinstance(axis, list)):
+        for ax in axis:
+            if not isinstance(ax, int):
+                raise TypeError(f"For {ops_name}, each axis must be integer, but got {type(ax)} in {axis}.")
+        return True
+
+    type_str = ""
+    if type_int:
+        type_str += "int, "
+    if type_tuple:
+        type_str += "tuple, "
+    if type_list:
+        type_str += "list, "
+    raise TypeError(f"For {ops_name}, the axis should be {type_str}, but got {type(axis)}.")
+
+
 def one_hot(indices, depth, on_value, off_value, axis=-1):
     r"""
     Computes a one-hot tensor.
```
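
The new `_check_axis_type` helper is a `@constexpr` validator, so it runs at graph-compile time; its contract is plain Python and can be exercised stand-alone. A hypothetical rerun of the same logic outside MindSpore, with the decorator removed:

```python
# Plain-Python sketch of the _check_axis_type contract, not the compiled validator.
def check_axis_type(axis, type_int=True, type_tuple=True, type_list=True, ops_name="ops"):
    if type_int and isinstance(axis, int):
        return True
    if (type_tuple and isinstance(axis, tuple)) or (type_list and isinstance(axis, list)):
        if not all(isinstance(ax, int) for ax in axis):
            raise TypeError(f"For {ops_name}, each axis must be integer, but got {axis}.")
        return True
    raise TypeError(f"For {ops_name}, the axis should be int, tuple or list, but got {type(axis)}.")

print(check_axis_type(1))        # True
print(check_axis_type((0, -1)))  # True
try:
    check_axis_type("0")
except TypeError as err:
    print(err)                   # rejected: str is none of the allowed types
```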
```diff
@@ -396,13 +642,14 @@ def one_hot(indices, depth, on_value, off_value, axis=-1):
         indices(Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
             Data type must be uint8, int32 or int64.
         depth(int): A scalar defining the depth of the one-hot dimension.
-        on_value(Tensor): A value to fill in output when `indices[j] = i`.
+        on_value(Union[Tensor, int, float]): A value to fill in output when `indices[j] = i`.
             Support uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64,
             bool, complex64, complex128.
-        off_value(Tensor): A value to fill in output when `indices[j] != i`.
+        off_value(Union[Tensor, int, float]): A value to fill in output when `indices[j] != i`.
             Has the same data type as `on_value`.
         axis(int): Position to insert the value. e.g. If shape of `self` is :math:`(N, C)`, and `axis` is -1,
-            the output shape will be :math:`(N, C,
+            the output shape will be :math:`(N, C, depth)`, If `axis` is 0,
+            the output shape will be :math:`(depth, N, C)`.
             Default: -1.
 
     Returns:
@@ -427,26 +674,30 @@ def one_hot(indices, depth, on_value, off_value, axis=-1):
         [0. 1. 0.]
         [0. 0. 1.]]
     """
+    if not isinstance(on_value, Tensor):
+        on_value = Tensor(on_value)
+    if not isinstance(off_value, Tensor):
+        off_value = Tensor(off_value)
     onehot = _get_cache_prim(P.OneHot)(axis)
     return onehot(indices, depth, on_value, off_value)
 
 
-def fill(type, shape, value):
+def fill(type, shape, value):  # pylint: disable=redefined-outer-name
     """
     Create a Tensor of the specified shape and fill it with the specified value.
 
     Args:
         type (mindspore.dtype): The specified type of output tensor. The data type only supports
-            `bool_ <https://www.mindspore.cn/docs/en/r2.0
-            `number <https://www.mindspore.cn/docs/en/r2.0
-        shape (tuple[int]): The specified shape of output tensor.
-        value (Union(number.Number, bool)): Value to fill the returned tensor.
+            `bool_ <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_ and
+            `number <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_ .
+        shape (Union(Tensor, tuple[int])): The specified shape of output tensor.
+        value (Union(Tensor, number.Number, bool)): Value to fill the returned tensor.
 
     Returns:
         Tensor.
 
     Raises:
-        TypeError: If `shape` is not a tuple.
+        TypeError: If `shape` is not a tuple or a tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
```
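
The four lines inserted at the top of `one_hot`'s body are what let the docstring widen `on_value`/`off_value` from `Tensor` to `Union[Tensor, int, float]`: scalars are wrapped in `Tensor(...)` before `P.OneHot` ever sees them. A usage sketch assuming this build:

```python
import mindspore as ms

indices = ms.Tensor([0, 1, 2], ms.int32)
# Plain Python scalars now work; previously both values had to be Tensors.
out = ms.ops.one_hot(indices, 3, 1.0, 0.0)
print(out)
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]]
```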
@@ -465,36 +716,159 @@ def fill(type, shape, value):
     return fill_(type, shape, value)
 
 
-def
+def full(size, fill_value, *, dtype=None):  # pylint: disable=redefined-outer-name
+    """
+    Create a Tensor of the specified shape and fill it with the specified value.
+
+    Args:
+        size (Union(tuple[int], list[int])): The specified shape of output tensor.
+        fill_value (number.Number): Value to fill the returned tensor. Complex numbers are not supported for now.
+
+    Keyword Args:
+        dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for details,
+            please refer to :class:`mindspore.dtype` . Default: None.
+
+    Returns:
+        Tensor.
+
+    Raises:
+        TypeError: If `size` is not a tuple or list.
+        ValueError: The element in `size` is less than 0.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> output = ops.full((2, 2), 1)
+        >>> print(output)
+        [[1. 1.]
+         [1. 1.]]
+        >>> output = ops.full((3, 3), 0)
+        >>> print(output)
+        [[0. 0. 0.]
+         [0. 0. 0.]
+         [0. 0. 0.]]
+    """
+    if not isinstance(size, (list, tuple)):
+        raise TypeError(f"For 'ops.full', 'size' must be a tuple or list of ints, but got {type(size)}.")
+    if dtype is None:
+        dtype = mstype.int64
+    if dtype not in mstype.all_types:
+        raise TypeError(f"For 'ops.full', 'dtype' must be mindspore.type, but got {dtype}.")
+    if isinstance(size, list):
+        size = tuple(size)
+    return fill_(dtype, size, fill_value)
+
+
+def full_like(input, fill_value, *, dtype=None):
     """
-
+    Return a Tensor of the same shape as `input` and filled with `fill_value`.
 
     Args:
-
-
-
-
+        input (Tensor): input Tensor and the output Tensor have the same shape as `input`.
+        fill_value (Number): Value to fill the returned Tensor. Complex numbers are not supported for now.
+
+    Keyword Args:
+        dtype (mindspore.dtype, optional): The specified type of output tensor. `bool_` and `number` are supported,
+            for details, please refer to :class:`mindspore.dtype` . Default: None.
 
     Returns:
-        Tensor
+        Tensor.
 
     Raises:
-        TypeError: If `
-        TypeError: If `value` has types not specified above.
-        RuntimeError: If `value` cannot be converted to the same type as `x`.
-        ValueError: If `value` is a tensor and the length of dimension is not 0.
+        TypeError: If `input` is not a Tensor.
 
     Supported Platforms:
-        ``GPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>>
-        >>>
-        >>> x = Tensor(np.arange(4).reshape((2, 2)).astype('float32'))
-        >>> output = ops.fills(x, 1)
+        >>> input = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
+        >>> output = ops.full_like(input, 1)
         >>> print(output)
         [[1. 1.]
          [1. 1.]]
+        >>> input = Tensor([[0, 1, 1], [2, 1, 2], [1, 3, 4]], dtype=mindspore.int32)
+        >>> output = ops.full_like(input, 0)
+        >>> print(output)
+        [[0. 0. 0.]
+         [0. 0. 0.]
+         [0. 0. 0.]]
+    """
+    if not isinstance(input, Tensor):
+        raise TypeError(f"For ops.full_like, the argument 'x' must be tensor, but got {type(input)}")
+    if dtype is None:
+        dtype = input.dtype
+    return full(input.shape, fill_value, dtype=dtype)
+
+
+def chunk(input, chunks, axis=0):
+    """
+    Cut the input Tensor into `chunks` sub-tensors along the specified axis.
+
+    Note:
+        This function may return less than the specified number of chunks!
+
+    Args:
+        input (Tensor): A Tensor to be cut.
+        chunks (int): Number of sub-tensors to cut.
+        axis (int, optional): Specify the axis along which to split. Default: 0.
+
+    Returns:
+        A tuple of sub-tensors.
+
+    Raises:
+        TypeError: If argument `input` is not Tensor.
+        TypeError: If argument `chunks` is not int.
+        TypeError: If argument `axis` is not int.
+        ValueError: If argument `axis` is out of range of :math:`[-input.ndim, input.ndim)` .
+        ValueError: If argument `chunks` is not a positive number.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> input_x = np.arange(9).astype("float32")
+        >>> output = ops.chunk(Tensor(input_x), 3)
+        >>> print(output)
+        (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
+        Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
+        Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
+    """
+    if not isinstance(input, Tensor):
+        raise TypeError(f'For ops.chunk parameter `input` must be Tensor, but got {type(input)}')
+    _check_axis_type(axis, True, False, False, "ops.chunk")
+    arr_axis = _canonicalize_axis(axis, input.ndim)
+
+    if not isinstance(chunks, int):
+        raise TypeError(f"For ops.chunk type of argument `chunks` should be integer, but got {type(chunks)}")
+    if chunks <= 0:
+        raise ValueError(f"For ops.chunk parameter 'chunks' must be greater than 0, but got {chunks}")
+
+    arr_shape = input.shape
+    length_along_dim = arr_shape[arr_axis]
+
+    if chunks > length_along_dim:
+        res = P.Split(arr_axis, length_along_dim)(input)
+    elif length_along_dim % chunks == 0:
+        res = P.Split(arr_axis, chunks)(input)
+    else:
+        block_size = int(np.ceil(length_along_dim / chunks))
+        true_chunks = int(length_along_dim // block_size)
+        length1 = true_chunks * block_size
+        length2 = length_along_dim - length1
+        start1 = _list_comprehensions(rank(input), 0, True)
+        size1 = _tuple_setitem(arr_shape, arr_axis, length1)
+        start2 = _tuple_setitem(start1, arr_axis, length1)
+        size2 = _tuple_setitem(arr_shape, arr_axis, length2)
+        res = P.Split(arr_axis, true_chunks)(tensor_slice(input, start1, size1))
+        if length2:
+            res += P.Split(arr_axis, 1)(tensor_slice(input, start2, size2))
+    return res
+
+
+def fills(x, value):
+    """
+    `fills` is deprecated, please use `ops.fill` instead.
     """
     if isinstance(value, float):
         value_ = value
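The newly added `chunk` notes that it may return fewer pieces than requested. A hedged pure-Python sketch of the same block-size arithmetic (`chunk_sizes` is a hypothetical helper written for illustration, not code from the diff):

    import math

    def chunk_sizes(length, chunks):
        # Same arithmetic as the new ops.chunk: split into blocks of
        # ceil(length / chunks), so fewer than `chunks` pieces may come back.
        block_size = math.ceil(length / chunks)
        true_chunks = length // block_size
        tail = length - true_chunks * block_size
        return [block_size] * true_chunks + ([tail] if tail else [])

    print(chunk_sizes(9, 4))  # [3, 3, 3] -- only 3 chunks, not 4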
@@ -511,16 +885,17 @@ def fills(x, value):
     return fills_(x, value_)
 
 
-def ones(shape,
+def ones(shape, dtype=None):  # pylint: disable=redefined-outer-name
     r"""
     Creates a tensor filled with value ones.
 
-    Creates a tensor with shape described by the first argument and
-
+    Creates a tensor with shape described by the first argument and fills it with value ones in type of the second
+    argument.
 
     Args:
         shape (Union[tuple[int], int]): The specified shape of output tensor. Only constant positive int is allowed.
-
+        dtype (:class:`mindspore.dtype`): The specified type of output tensor. If `dtype` is None,
+            `mindspore.float32` will be used. Default: None.
 
     Returns:
         Tensor, has the same type and shape as input shape value.
@@ -536,71 +911,162 @@ def ones(shape, type):
         >>> print(output)
         [[1. 1.]
          [1. 1.]]
-        >>> output = ops.ones((3, 3), mindspore.float32)
-        >>> print(output)
-        [[1. 1. 1.]
-         [1. 1. 1.]
-         [1. 1. 1.]]
     """
-
+    _dtype = mstype.float32 if dtype is None else dtype
+    ones_op = P.FillV2()
+    value = Tensor(1, _dtype)
+    if isinstance(shape, int):
+        shape = tuple([shape])
+    shape_tensor = shape
+    if isinstance(shape, (list, tuple)) and not shape:
+        shape_tensor = Tensor(shape, dtype=mstype.int64)
+    elif not isinstance(shape, Tensor):
+        shape_tensor = Tensor(shape)
+    if shape_tensor.ndim == 0 and shape_tensor.size == 1:
+        shape_tensor = shape_tensor.reshape(1)
+    output = ones_op(shape_tensor, value)
+    return output
 
 
-def ones_like(
+def ones_like(input, *, dtype=None):
     """
-    Returns a Tensor with a value of 1 and its shape
+    Returns a Tensor with a value of 1 and its shape is the same as the input.
 
     Args:
-
+        input (Tensor): Tensor of any dimension.
+
+    Keyword Args:
+        dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is None,
+            the dtype of the input tensor will be used. Default: None.
 
     Returns:
-        Tensor, has the same shape
+        Tensor, has the same shape as `input` but filled with ones.
 
     Raises:
-        TypeError: If `
+        TypeError: If `input` is not a Tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>>
-        >>> output = ops.ones_like(
+        >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
+        >>> output = ops.ones_like(x)
         >>> print(output)
         [[1 1]
          [1 1]]
     """
-
+    ones_like_op = P.OnesLike()
+    output = ones_like_op(input)
+    _dtype = input.dtype if dtype is None else dtype
+    output = cast_(output, _dtype)
+    return output
+
+
+def zeros(size, dtype=None):  # pylint: disable=redefined-outer-name
+    r"""
+    Creates a tensor of shape described by `size`, filled with value 0 in type of `dtype`.
+
+    Args:
+        size (Union[tuple[int], int]): The specified shape of output tensor. Only constant positive int is allowed.
+        dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor. If `dtype` is None,
+            mindspore.float32 will be used. Default: None.
+
+    Returns:
+        Tensor, has the same dtype and size as input.
+
+    Raises:
+        TypeError: If `size` is neither a tuple of int nor an int.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> output = ops.zeros((2, 2), mindspore.float32)
+        >>> print(output)
+        [[0. 0.]
+         [0. 0.]]
+    """
+    zero_op = P.FillV2()
+    _dtype = mstype.float32 if dtype is None else dtype
+    value = Tensor(0, _dtype)
+    if isinstance(size, int):
+        size = tuple([size])
+    shape_tensor = size
+    if isinstance(size, (list, tuple)) and not size:
+        shape_tensor = Tensor(size, dtype=mstype.int64)
+    elif not isinstance(size, Tensor):
+        shape_tensor = Tensor(size, dtype=mstype.int64)
+    if shape_tensor.ndim == 0 and shape_tensor.size == 1:
+        shape_tensor = shape_tensor.reshape(1)
+    output = zero_op(shape_tensor, value)
+    return output
+
+
+def zeros_like(input, *, dtype=None):
+    r"""
+    Creates a tensor filled with 0, with the same size as `input`, and the given dtype.
+
+    If `dtype = None`, the tensor will have the same dtype as `input`.
+
+    Args:
+        input (Tensor): Tensor of any dimension.
+
+    Keyword Args:
+        dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is None,
+            the dtype of the input tensor will be used. Default: None.
+
+    Returns:
+        Tensor, filled with 0.
+
+    Raises:
+        TypeError: If dtype is not a MindSpore dtype.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> x = Tensor(np.arange(4).reshape(2, 2))
+        >>> output = ops.zeros_like(x, dtype=mindspore.float32)
+        >>> print(output)
+        [[0. 0.]
+         [0. 0.]]
+    """
+    _dtype = input.dtype if dtype is None else dtype
+    zeros_like_op = P.ZerosLike()
+    output = zeros_like_op(input)
+    output = cast_(output, _dtype)
+    return output
 
 
-def tile(
+def tile(input, multiples):
     r"""
     Replicates an input tensor with given multiples times.
 
-    Creates a new tensor by replicating `
-    output tensor has `
+    Creates a new tensor by replicating `input` `multiples` times. The i'th dimension of
+    output tensor has `input.shape[i] * multiples[i]` elements, and the values of `input`
     are replicated `multiples[i]` times along the i'th dimension.
 
     Note:
-        The length of `multiples` must be greater or equal to the length of dimension in `
+        The length of `multiples` must be greater or equal to the length of dimension in `input`.
 
     Args:
-
+        input (Tensor): 1-D or higher dimensional Tensor. Set the shape of input tensor as
             :math:`(x_1, x_2, ..., x_S)` .
-
         multiples (tuple[int]): The parameter that specifies the number of replications,
             the parameter type is tuple, and the data type is int, i.e., :math:`(y_1, y_2, ..., y_S)`.
-            The length of `multiples` cannot be smaller than the length of the shape of `
+            The length of `multiples` cannot be smaller than the length of the shape of `input`.
             Only constant value is allowed.
 
     Returns:
-        Tensor, has the same data type as the `
-        the dimension of `
+        Tensor, has the same data type as the `input`. Suppose the length of `multiples` is `d`,
+        the dimension of `input` is `input.dim`, and the shape of `input` is :math:`(x_1, x_2, ..., x_S)`.
 
-        - If `
+        - If `input.dim = d`, then the shape of their corresponding positions can be multiplied, and
           the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_R)`.
-        - If `
-          lengths are consistent. Such as set the shape of `
+        - If `input.dim < d`, fill in multiple 1 in the length of the shape of `input` until their
+          lengths are consistent. Such as set the shape of `input` as :math:`(1, ..., x_1, x_2, ..., x_S)`,
           then the shape of their corresponding positions can be multiplied, and the shape of Outputs is
-          :math:`(1*y_1, ..., x_S*
+          :math:`(1*y_1, ..., x_R*y_R, x_S*y_S)`.
 
     Raises:
         TypeError: If `multiples` is not a tuple or its elements are not all int.
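Both rewritten factories (`ones` and `zeros`) now build the result through `P.FillV2` with a scalar value tensor, and a `dtype` of None falls back to float32. A short usage sketch under those assumptions (illustrative, not part of the diff):

    import mindspore as ms
    from mindspore import ops

    a = ops.ones((2, 3))        # dtype=None -> mstype.float32
    b = ops.zeros(4, ms.int32)  # an int shape is first wrapped into the tuple (4,)
    print(a.dtype, b.shape)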
@@ -635,13 +1101,13 @@ def tile(input_x, multiples):
          [1. 2. 1. 2.]
          [3. 4. 3. 4.]]]
     """
-    return tile_(
+    return tile_(input, multiples)
 
 
-def range(start,
+def range(start, end, step):
     r"""
     Creates a sequence of numbers that begins at `start` and extends by increments of
-    `
+    `step` up to but not including `end`.
 
     The types of all 3 inputs must be the same. The type of the resulting tensor is
     the same as the type of the inputs.
@@ -649,34 +1115,34 @@ def range(start, limit, delta):
     Args:
         start (Tensor): A scalar Tensor. The first number in the sequence. Must have
             type: int32 ,int64, float32 or float64.
-
+        end (Tensor): A scalar Tensor. Upper limit of the sequence, exclusive. Must
             have type: int32 ,int64, float32 or float64.
-
+        step (Tensor): A scalar Tensor. Number that increments `start`. Must have
             type: int32 ,int64, float32 or float64.
 
     Returns:
         A 1-D Tensor, with the same type as the inputs.
 
     Raises:
-        TypeError: If `start`, `
-        TypeError: If datatype of `start`, `
-        TypeError: If datatype of `start`, `
-        ValueError: If `
-        ValueError: If `start` >= `
-        ValueError: If `start` <= `
+        TypeError: If `start`, `end` or `step` is not scalar Tensor.
+        TypeError: If datatype of `start`, `end` or `step` is not same.
+        TypeError: If datatype of `start`, `end` or `step` is not supported.
+        ValueError: If `step` = 0.
+        ValueError: If `start` >= `end` when `step` > 0.
+        ValueError: If `start` <= `end` when `step` < 0.
 
     Supported Platforms:
-        ``
+        ``GPU`` ``CPU``
 
     Examples:
         >>> start = Tensor(0, mstype.int32)
-        >>>
-        >>>
-        >>> output = ops.range(start,
+        >>> end = Tensor(10, mstype.int32)
+        >>> step = Tensor(4, mstype.int32)
+        >>> output = ops.range(start, end, step)
         >>> print(output)
         [0 4 8]
     """
-    return range_(start,
+    return range_(start, end, step)
 
 
 ##############################
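To complement the docstring example, a sketch of the new `range` validation for a negative `step` (illustrative; values chosen so that `start` > `end`, as the new ValueError rules require):

    from mindspore import ops, Tensor
    from mindspore import dtype as mstype

    start = Tensor(10, mstype.int32)
    end = Tensor(0, mstype.int32)
    step = Tensor(-4, mstype.int32)  # step < 0 requires start > end
    print(ops.range(start, end, step))  # [10  6  2]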
@@ -684,7 +1150,7 @@ def range(start, limit, delta):
 ##############################
 
 
-def unique(
+def unique(input):
     """
     Returns the unique elements of input tensor and also return a tensor containing the index of each value of input
     tensor corresponding to the output unique tensor.
@@ -696,20 +1162,20 @@ def unique(x):
     To get the same shape between `idx` and `y`, please ref to :class:'mindspore.ops.UniqueWithPad' operator.
 
     Args:
-
+        input (Tensor): The input tensor.
             The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
 
     .. warning::
-        This is an experimental
+        This is an experimental API that is subject to change or deletion.
 
     Returns:
         Tuple, containing Tensor objects (`y`, `idx`), `y` is a tensor with the
-        same type as `
+        same type as `input`, and contains the unique elements in `input`.
         `idx` is a tensor containing indices of elements in
-        the input corresponding to the output tensor, have the same shape with `
+        the input corresponding to the output tensor, and has the same shape as `input`.
 
     Raises:
-        TypeError: If `
+        TypeError: If `input` is not a Tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -734,10 +1200,10 @@ def unique(x):
     unique_op = _get_cache_prim(P.Unique)()
     reshape_op = _get_cache_prim(P.Reshape)()
 
-    shape_x =
+    shape_x = input.shape
     length_x = get_x_shape(shape_x)
-
-    y, idx = unique_op(
+    input = reshape_op(input, length_x)
+    y, idx = unique_op(input)
     idx = reshape_op(idx, shape_x)
     return y, idx
 
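The two replaced lines flatten the input before `P.Unique` runs and then reshape `idx` back with `shape_x`, so `idx` keeps the input's original shape while `y` stays 1-D. A small sketch under that assumption (not part of the diff):

    import numpy as np
    from mindspore import ops, Tensor

    x = Tensor(np.array([[1, 2], [2, 3]], dtype=np.int32))
    y, idx = ops.unique(x)
    print(y.shape, idx.shape)  # y is 1-D, e.g. (3,); idx mirrors x, i.e. (2, 2)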
@@ -789,38 +1255,38 @@ def unique_with_pad(x, pad_num):
     return unique_with_pad_(x, pad_num)
 
 
-def unique_consecutive(
+def unique_consecutive(input, return_idx=False, return_counts=False, axis=None):
     """
     Returns the elements that are unique in each consecutive group of equivalent elements in the input tensor.
 
     Args:
-
-        return_idx (bool, optional): Whether to return the
-
+        input (Tensor): The input tensor.
+        return_idx (bool, optional): Whether to return the index of where the element in the original input
+            maps to the position in the output. Default: False.
         return_counts (bool, optional): Whether to return the counts of each unique element. Default: False.
         axis (int, optional): The dimension to apply unique. If None, the unique of the flattened input is
             returned. If specified, it must be int32 or int64. Default: None.
 
     Returns:
         A tensor or a tuple of tensors containing tensor objects (`output`, `idx`, `counts`). `output` has the
-        same type as `
-        True, there will be an additional returned tensor, `idx`, which has the same shape as `
+        same type as `input` and is used to represent the output list of unique scalar elements. If `return_idx` is
+        True, there will be an additional returned tensor, `idx`, which has the same shape as `input` and represents
        the index of where the element in the original input maps to the position in the output. If `return_counts`
         is True, there will be an additional returned tensor, `counts`, which represents the number of occurrences
         for each unique value or tensor.
 
     Raises:
-        TypeError: If `
-
+        TypeError: If `input` is not a Tensor.
+        TypeError: If dtype of `input` is not supported.
+        TypeError: If `return_idx` is not a bool.
+        TypeError: If `return_counts` is not a bool.
+        TypeError: If `axis` is not an int.
+        ValueError: If `axis` is not in the range of :math:`[-ndim, ndim-1]`.
 
     Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> import numpy as np
-        >>> from mindspore import ops
-        >>> from mindspore import Tensor
-        >>> from mindspore import dtype as mstype
         >>> x = Tensor(np.array([1, 1, 2, 2, 3, 1, 1, 2]), mstype.int32)
         >>> output, idx, counts = ops.unique_consecutive(x, True, True, None)
         >>> print(output)
@@ -830,8 +1296,11 @@ def unique_consecutive(x, return_idx=False, return_counts=False, axis=None):
         >>> print(counts)
         [2 2 1 2 1]
     """
+
+    if not isinstance(input, (Tensor, Tensor_)):
+        raise TypeError("For 'unique_consecutive', 'input' must be Tensor.")
     unique_consecutive_op = _get_cache_prim(UniqueConsecutive)(return_idx, return_counts, axis)
-    output, idx, counts = unique_consecutive_op(
+    output, idx, counts = unique_consecutive_op(input)
     if return_idx and return_counts:
         return output, idx, counts
     if return_idx:
@@ -843,35 +1312,36 @@ def unique_consecutive(x, return_idx=False, return_counts=False, axis=None):
 
 def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):
     """
-
-
-    indices.
+    Return the position indices such that after inserting the values into the `sorted_sequence`, the order of innermost
+    dimension of the `sorted_sequence` remains unchanged.
 
     Args:
-        sorted_sequence (Tensor): The
+        sorted_sequence (Tensor): The input tensor.
             It must contain a monotonically increasing sequence on the innermost dimension.
-        values (Tensor): The
+        values (Tensor): The value that should be inserted.
+
+    Keyword Args:
         out_int32 (bool, optional): Output datatype. If True, the output datatype will be int32;
             if False, the output datatype will be int64. Default: False.
         right (bool, optional): Search Strategy. If True, return the last suitable index found;
             if False, return the first such index. Default: False.
 
     Returns:
-        Tensor containing the indices from the innermost dimension of
-        if insert the corresponding value in the values tensor, the order of
-        whose datatype is int32 if out_int32 is True, otherwise int64, and shape is the same as the shape of values
+        Tensor containing the indices from the innermost dimension of `sorted_sequence` such that,
+        if the corresponding value in the `values` tensor were inserted, the order of `sorted_sequence` would be
+        preserved, whose datatype is int32 if out_int32 is True, otherwise int64, and shape is the same as `values`.
 
     Raises:
         ValueError: If the dimension of `sorted_sequence` isn't 1 and all dimensions except the last dimension of
             `sorted_sequence` and `values` are different.
 
     Supported Platforms:
-        ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> sorted_sequence = Tensor(np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]]), mindspore.float32)
         >>> values = Tensor(np.array([[3, 6, 9], [3, 6, 9]]), mindspore.float32)
-        >>> output = ops.
+        >>> output = ops.searchsorted(sorted_sequence, values)
         >>> print(output)
         [[2 4 5]
          [1 2 4]]
@@ -883,42 +1353,42 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):
     return search_sorted_(sorted_sequence, values)
 
 
-def ger(
+def ger(input, vec2):
     r"""
-    Ger product of `
-    shape :math:`(m,)` and `
+    Ger product of `input` and `vec2`. Calculate the outer product of two arrays. If `input` is a 1D Tensor of
+    shape :math:`(m,)` and `vec2` is a 1D Tensor of shape :math:`(n,)`, then `output` must be a 2D Tensor of shape
     :math:`(m, n)`.
 
     Note:
         Currently Ascend does not support float64 data input.
 
     Args:
-
-
+        input (Tensor): input Tensor, with dtype of float16, float32 or float64.
+        vec2 (Tensor): input Tensor, with dtype of float16, float32 or float64, must have the same dtype as `input`.
 
     Returns:
-        Tensor, output matrix with the same dtype as inputs. With `
-        `
+        Tensor, output matrix with the same dtype as inputs. With `input` shape :math:`(m,)` and
+        `vec2` shape of :math:`(n,)`, the `output` has shape :math:`(m, n)`.
 
     Raises:
-        TypeError: If `
-        TypeError: If the dtype of `
-        TypeError: If the dtype of `
+        TypeError: If `input` or `vec2` is not a 1-D Tensor.
+        TypeError: If the dtype of `input` and `vec2` is not float16, float32 or float64.
+        TypeError: If the dtype of `input` and `vec2` are not the same.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>>
-        >>>
-        >>> output = ops.ger(
+        >>> input = Tensor([1., 2., 3., 4.], mindspore.float32)
+        >>> vec2 = Tensor([1., 2., 3.], mindspore.float32)
+        >>> output = ops.ger(input, vec2)
         >>> print(output)
         [[ 1.  2.  3.]
          [ 2.  4.  6.]
          [ 3.  6.  9.]
          [ 4.  8. 12.]]
     """
-    return ger_(
+    return ger_(input, vec2)
 
 
 def size(input_x):
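For a quick cross-check of the renamed `ger(input, vec2)`, the same outer product is what NumPy exposes as `np.outer` for 1-D inputs (a sketch, not code from the diff):

    import numpy as np

    a = np.array([1., 2., 3., 4.], dtype=np.float32)
    b = np.array([1., 2., 3.], dtype=np.float32)
    print(np.outer(a, b))  # matches the ops.ger example output above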
@@ -928,7 +1398,7 @@ def size(input_x):
 
     Args:
         input_x (Tensor): Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
-            `number <https://www.mindspore.cn/docs/en/r2.0
+            `number <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_.
 
     Returns:
         int. A scalar representing the elements' size of `input_x`, tensor is the number of elements
@@ -951,11 +1421,7 @@ def size(input_x):
 
 def shape(input_x):
     """
-    Returns the shape of the input tensor.
-
-    static shape: A shape that can be obtained without running the graph. It is an inherent property of tensor and
-    may be unknown. The static shape information can be completed by artificial setting.
-    No matter what the input of the graph is, the static shape is not affected.
+    Returns the shape of the input tensor.
 
     Args:
         input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
@@ -984,10 +1450,10 @@ def dyn_shape(input_x):
     Returns the shape of the input tensor.
 
     Args:
-        input_x (Tensor): The
+        input_x (Tensor): The input Tensor.
 
     Returns:
-        Tensor
+        Tensor, the shape of `input_x` .
 
     Raises:
         TypeError: If `input_x` is not a Tensor.
@@ -1034,16 +1500,16 @@ def rank(input_x):
     return rank_(input_x)
 
 
-def reshape(
+def reshape(input, shape):
     """
     Rearranges the input Tensor based on the given shape.
 
-    The '
+    The 'shape' can only have one -1 at most, in which case it's inferred from the remaining dimensions and
     the number of elements in the input.
 
     Args:
-
-
+        input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
+        shape (Union[tuple[int], Tensor[int]]): Constructed by multiple
             integers, i.e., :math:`(y_1, y_2, ..., y_S)`. Only constant value is allowed.
 
     Returns:
@@ -1058,14 +1524,14 @@ def reshape(input_x, input_shape):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>>
-        >>> output = ops.reshape(
+        >>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
+        >>> output = ops.reshape(input, (3, 2))
         >>> print(output)
         [[-0.1  0.3]
          [ 3.6  0.4]
          [ 0.5 -3.2]]
     """
-    return reshape_(
+    return reshape_(input, shape)
 
 
 def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
@@ -1127,20 +1593,30 @@ def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
     return P.ReverseSequence(seq_dim=seq_dim, batch_dim=batch_dim)(x, seq_lengths)
 
 
-def flatten(
+def flatten(input, order='C', *, start_dim=1, end_dim=-1):
     r"""
-
+    Flatten a tensor along dimensions from `start_dim` to `end_dim`.
 
     Args:
-
+        input (Tensor): The input Tensor.
+        order (str, optional): Only 'C' and 'F' are supported. 'C' means to flatten in row-major (C-style) order.
+            'F' means to flatten in column-major (Fortran-style) order. Default: 'C'.
+
+    Keyword Args:
+        start_dim (int, optional): The first dimension to flatten. Default: 1.
+        end_dim (int, optional): The last dimension to flatten. Default: -1.
 
     Returns:
-        Tensor
-
+        Tensor. If no dimensions are flattened, returns the original `input`, otherwise return the flattened Tensor.
+        If `input` is a 0-dimensional Tensor, a 1-dimensional Tensor will be returned.
 
     Raises:
-        TypeError: If `
-
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `order` is not string type.
+        ValueError: If `order` is string type, but not 'C' or 'F'.
+        TypeError: If `start_dim` or `end_dim` is not int.
+        ValueError: If `start_dim` is greater than `end_dim` after being canonicalized.
+        ValueError: If `start_dim` or `end_dim` is not in range of [-input.dim, input.dim-1].
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1151,8 +1627,50 @@ def flatten(input_x):
         >>> print(output.shape)
         (1, 24)
     """
-
-
+    def canonicalize_axis(axis, x_rank):
+        ndim = x_rank if x_rank != 0 else 1
+        if axis < -ndim or axis >= ndim:
+            const_utils.raise_value_error("'start_dim' or 'end_dim' out of range.")
+        return axis if axis >= 0 else axis + ndim
+
+    # Check the types of arguments.
+    if not isinstance(input, Tensor):
+        raise TypeError(f"For 'flatten', argument 'input' must be Tensor.")
+    if not isinstance(start_dim, int) or not isinstance(end_dim, int):
+        raise TypeError(f"For 'flatten', both 'start_dim' and 'end_dim' must be int.")
+    check_flatten_order_const(order)
+    if order == 'F':
+        perm = F.make_range(0, F.rank(input))
+        new_order = F.tuple_reversed(perm)
+        input = _get_cache_prim(P.Transpose)()(input, new_order)
+
+    # Handle the default case.
+    x_shape = shape_(input)
+    x_rank = rank_(input)
+    if start_dim == 1 and end_dim == -1:
+        if x_rank in (0, 1):
+            return reshape_(input, (-1,))
+        return _get_cache_prim(P.Flatten)()(input)
+
+    # Check axis.
+    start_dim = canonicalize_axis(start_dim, x_rank)
+    end_dim = canonicalize_axis(end_dim, x_rank)
+    if start_dim > end_dim:
+        const_utils.raise_value_error("For 'flatten', 'start_dim' cannot come after 'end_dim'.")
+    # If input is a 0-dimensional Tensor, a 1-dimensional Tensor will be returned.
+    if x_rank in (0, 1):
+        return reshape_(input, (-1,))
+    # If no dimensions to flatten, return the original object.
+    if start_dim == end_dim:
+        return input
+    # Flatten elements along specified dimensions.
+    dim_length = 1
+    idx = start_dim
+    while idx <= end_dim:
+        dim_length *= x_shape[idx]
+        idx += 1
+    new_shape = x_shape[:start_dim] + (dim_length,) + x_shape[end_dim + 1:]
+    return reshape_(input, new_shape)
 
 
 @constexpr
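The shape arithmetic at the end of the new `flatten` collapses dims `start_dim..end_dim` into one axis of their product. A hedged pure-Python sketch (`flattened_shape` is a hypothetical helper for illustration, not code from the diff):

    def flattened_shape(shape, start_dim, end_dim):
        # Mirrors the new ops.flatten tail: dims in [start_dim, end_dim]
        # collapse into a single axis holding their product.
        dim_length = 1
        for d in shape[start_dim:end_dim + 1]:
            dim_length *= d
        return shape[:start_dim] + (dim_length,) + shape[end_dim + 1:]

    print(flattened_shape((2, 3, 4, 5), 1, 2))  # (2, 12, 5)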
@@ -1165,7 +1683,7 @@ def _check_select_type_match(scalar, tensor_type, scalar_name, tensor_name):
                              f"then the input[{tensor_name}] must be a Tensor of float32.")
 
 
-@
+@_primexpr
 def _check_select_shape_match(input_shape, cond_shape, tensor_name):
     if input_shape != cond_shape:
         raise ValueError(f"For functional operator[select], the cond shape must be same as {tensor_name} shape.")
@@ -1183,10 +1701,52 @@ def _check_select_type(is_cond_tensor, is_x_scalar, is_y_scalar, is_x_tensor, is
                         f"then the input[x] must be a Tensor.")
 
 
-
-
+@constexpr
+def _check_select_shape_same(cond_shape, x_shape, y_shape):
+    """Check if input of select has same shape."""
+    return cond_shape == x_shape and x_shape == y_shape and cond_shape == y_shape
+
+
+@constexpr
+def get_max_value(x, y, z):
+    """Get the maximum value of x, y and z."""
+    if x >= y and x >= z:
+        return x
+    if y >= x and y >= z:
+        return y
+    return z
+
+
+@constexpr
+def _calc_broadcast_shape(cond_shape, x_shape, y_shape):
+    """Calculate broadcast shape for select"""
+    converted_shape = []
+    cond_reverse = cond_shape[::-1]
+    x_reverse = x_shape[::-1]
+    y_reverse = y_shape[::-1]
+    max_len = get_max_value(len(cond_reverse), len(x_reverse), len(y_reverse))
+    i = 0
+    while i < max_len:
+        cond_element = 1 if i >= len(cond_reverse) else cond_reverse[i]
+        x_element = 1 if i >= len(x_reverse) else x_reverse[i]
+        y_element = 1 if i >= len(y_reverse) else y_reverse[i]
+        broadcast_element = get_max_value(cond_element, x_element, y_element)
+        if cond_element not in (1, broadcast_element):
+            raise ValueError(f"For select, condition input can not broadcast at index {i}")
+        if x_element not in (1, broadcast_element):
+            raise ValueError(f"For select, x input can not broadcast at index {i}")
+        if y_element not in (1, broadcast_element):
+            raise ValueError(f"For select, y input can not broadcast at index {i}")
+        converted_shape.append(broadcast_element)
+        i = i + 1
+    converted_shape.reverse()
+    return tuple(converted_shape)
+
+
+def select(cond, x, y):
+    r"""
     The conditional tensor determines whether the corresponding element in the output must be
-    selected from
+    selected from `x` (if true) or `y` (if false) based on the value of each element.
 
     It can be defined as:
 
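The per-axis rule in the new `_calc_broadcast_shape` is that every size must be either 1 or the (maximum) broadcast size. A sketch of that rule for equal-length shapes (`broadcast_dim` is a hypothetical helper; the real code also right-aligns shapes of different lengths by reversing them first):

    def broadcast_dim(c, x, y):
        # Each axis size must be 1 or the common broadcast size.
        m = max(c, x, y)
        for e in (c, x, y):
            if e not in (1, m):
                raise ValueError("can not broadcast")
        return m

    shape = tuple(broadcast_dim(c, x, y)
                  for c, x, y in zip((2, 1, 3), (1, 4, 3), (2, 4, 1)))
    print(shape)  # (2, 4, 3)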
@@ -1200,20 +1760,20 @@ def select(cond, x, y):
         cond (Tensor[bool]): The condition tensor, decides which element is chosen.
             The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
         x (Union[Tensor, int, float]): The first Tensor or number to be selected.
-            If x is a Tensor, the shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
-            it will be cast to the type of int32 or float32,
-            One of x and y must be a Tensor.
+            If x is a Tensor, the shape is or can be broadcast to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
+            If x is an int or a float, it will be cast to the type of int32 or float32,
+            and broadcast to the same shape as y. One of x and y must be a Tensor.
         y (Union[Tensor, int, float]): The second Tensor or number to be selected.
-            If y is a Tensor, The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
-            it will be cast to the type of int32 or float32,
-            One of x and y must be a Tensor.
+            If y is a Tensor, the shape is or can be broadcast to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
+            If y is an int or a float, it will be cast to the type of int32 or float32,
+            and broadcast to the same shape as x. One of x and y must be a Tensor.
 
     Returns:
         Tensor, has the same shape as `cond`.
 
     Raises:
         TypeError: If `x` or `y` is not a Tensor, int or float.
-        ValueError: The shapes of inputs
+        ValueError: The shapes of inputs cannot be broadcast.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1260,6 +1820,19 @@ def select(cond, x, y):
             input_y = cast_(input_y, mstype.int32)
         else:
             input_y = cast_(input_y, mstype.float32)
+
+    if is_x_tensor and is_y_tensor and is_cond_tensor:
+        x_shape = F.shape(x)
+        y_shape = F.shape(y)
+        cond_shape = F.shape(cond)
+        all_constant = F.isconstant(cond_shape) and F.isconstant(x_shape) and F.isconstant(y_shape)
+        if all_constant and not _check_select_shape_same(cond_shape, x_shape, y_shape):
+            broadcast_shape = _calc_broadcast_shape(cond_shape, x_shape, y_shape)
+            new_cond = F.broadcast_to(cond, broadcast_shape)
+            new_x = F.broadcast_to(x, broadcast_shape)
+            new_y = F.broadcast_to(y, broadcast_shape)
+            return tensor_select_(new_cond, new_x, new_y)
+
     return tensor_select_(cond, input_x, input_y)
 
 
@@ -1279,9 +1852,9 @@ def strided_slice(input_x,
     Starting from the beginning position, the fragment continues adding strides to the index until
     all dimensions are not less than the ending position.
 
-
-        - `begin`
-        - `begin`
+    .. warning::
+        - `begin` , `end` and `strides` must have the same shape.
+        - `begin` , `end` and `strides` are all 1-D Tensor, and their shape size
           must not greater than the dim of `input_x`.
 
     During the slicing process, the fragment (end-begin)/strides are extracted from each dimension.
@@ -1321,7 +1894,7 @@ def strided_slice(input_x,
 
     If the ith bit of `ellipsis_mask` is 1, as many unspecified dimensions as needed
     will be inserted between other dimensions. Only one non-zero bit is allowed
-    in `ellipsis_mask`. For
+    in `ellipsis_mask`. For Tensor `input_x` with shape :math:`(5, 6, 7, 8)`, `input_x[2:,...,:6]`
     is equivalent to `input_x[2:5,:,:,0:6]` , `input_x[2:,...]` is equivalent
     to `input_x[2:5,:,:,:]`.
 
@@ -1449,7 +2022,6 @@ def slice(input_x, begin, size):
 
     Args:
         input_x (Tensor): The target tensor.
-            The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
         begin (Union[tuple, list]): The beginning of the slice. Only constant value(>=0) is allowed.
         size (Union[tuple, list]): The size of the slice. Only constant value is allowed.
 
@@ -1488,81 +2060,34 @@ def slice(input_x, begin, size):
     return tensor_slice(input_x, begin, size)
 
 
-def concat(
-
-
-
-    The input data is a tuple of tensors. These tensors have the same rank :math:`R`. Set the given axis as :math:`m`,
-    and :math:`0 \le m < R`. Set the number of input tensors as :math:`N`. For the :math:`i`-th tensor :math:`t_i`,
-    it has the shape of :math:`(x_1, x_2, ..., x_{mi}, ..., x_R)`. :math:`x_{mi}` is the :math:`m`-th dimension of the
-    :math:`t_i`. Then, the shape of the output tensor is
-
-    .. math::
-
-        (x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)
-
-    Args:
-        input_x (tuple, list): A tuple or a list of input tensors.
-            Suppose there are two tensors in this tuple or list, namely t1 and t2.
-            To perform `concat` in the axis 0 direction, except for the :math:`0`-th axis,
-            all other dimensions should be equal, that is,
-            :math:`t1.shape[1] = t2.shape[1], t1.shape[2] = t2.shape[2], ..., t1.shape[R-1] = t2.shape[R-1]`,
-        axis (int): The specified axis, whose value is in range :math:`[-R, R)`. Default: 0.
-
-    Returns:
-        Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
-        The data type is the same with `input_x`.
-
-    Raises:
-        TypeError: If `axis` is not an int.
-        ValueError: If `input_x` have different dimension of tensor.
-        ValueError: If `axis` not in range :math:`[-R, R)`.
-        RuntimeError: If tensor's shape in `input_x` except for `axis` are different.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> input_x1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
-        >>> input_x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
-        >>> output = ops.concat((input_x1, input_x2))
-        >>> print(output)
-        [[0. 1.]
-         [2. 1.]
-         [0. 1.]
-         [2. 1.]]
-        >>> output = ops.concat((input_x1, input_x2), 1)
-        >>> print(output)
-        [[0. 1. 0. 1.]
-         [2. 1. 2. 1.]]
-    """
-    _concat = _get_cache_prim(P.Concat)(axis)
-    return _concat(input_x)
+def concat(tensors, axis=0):
+    """Alias for :func:`mindspore.ops.cat()`"""
+    return cat(tensors, axis)
 
 
-def stack(
+def stack(tensors, axis=0):
     r"""
     Stacks a list of tensors in specified axis.
 
     Stacks the list of input tensors with the same rank `R`, output is a tensor of rank `(R+1)`.
 
     Given input tensors of shape :math:`(x_1, x_2, ..., x_R)`. Set the number of input tensors as `N`.
-    If :math:`
+    If :math:`axis \ge 0`, the shape of the output tensor is
     :math:`(x_1, x_2, ..., x_{axis}, N, x_{axis+1}, ..., x_R)`.
 
     Args:
-
+        tensors (Union[tuple, list]): A Tuple or list of Tensor objects with the same shape and type.
         axis (int): Dimension to stack. Default: 0.
             Negative values wrap around. The range is [-(R+1), R+1).
 
     Returns:
-        Tensor. A stacked Tensor with the same type as `
+        Tensor. A stacked Tensor with the same type as `tensors`.
 
     Raises:
-        TypeError: If the data types of elements in `
-        ValueError: If the length of `
+        TypeError: If the data types of elements in `tensors` are not the same.
+        ValueError: If the length of `tensors` is not greater than 0;
             or if axis is out of the range [-(R+1), R+1);
-            or if the shapes of elements in
+            or if the shapes of elements in tensors are not the same.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
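With the long body removed, `concat` is now a thin alias, so both spellings below should behave identically (a sketch assuming `ops.cat` from r2.0rc1, which the new alias calls):

    import numpy as np
    from mindspore import ops, Tensor

    a = Tensor(np.ones((2, 2), np.float32))
    out1 = ops.concat((a, a), 1)  # delegates to cat(tensors, axis)
    out2 = ops.cat((a, a), 1)
    print(out1.shape, out2.shape)  # (2, 4) (2, 4)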
@@ -1576,7 +2101,7 @@ def stack(input_x, axis=0):
          [2. 3.]]
     """
     _stack = _get_cache_prim(P.Stack)(axis)
-    return _stack(
+    return _stack(tensors)
 
 
 def unstack(input_x, axis=0):
@@ -1615,17 +2140,17 @@ def unstack(input_x, axis=0):
     return _unstack(input_x)
 
 
-def unbind(
+def unbind(input, dim=0):
     r"""
     Removes a tensor dimension in specified axis.
 
     Unstacks a tensor of rank `R` along axis dimension, and output tensors will have rank `(R-1)`.
 
-    Given a tensor of shape :math:`(
-
+    Given a tensor of shape :math:`(n_1, n_2, ..., n_R)` and a specified `dim`,
+    shape of the output tensors is :math:`(n_1, n_2, ..., n_{dim}, n_{dim+2}, ..., n_R)`.
 
     Args:
-
+        input (Tensor): The shape is :math:`(n_1, n_2, ..., n_R)`.
             A tensor to be unstacked and the rank of the tensor must be greater than 0.
         dim (int): Dimension along which to unpack. Negative values wrap around. The range is [-R, R). Default: 0.
 
@@ -1646,7 +2171,7 @@ def unbind(x, dim=0):
         Tensor(shape=[3], dtype=Int64, value=[7, 8, 9]))
     """
     _unstack = _get_cache_prim(P.Unstack)(dim)
-    return _unstack(
+    return _unstack(input)
 
 
 def expand_dims(input_x, axis):
@@ -1684,27 +2209,23 @@ def expand_dims(input_x, axis):
     return expand_dims_(input_x, axis)
 
 
-def unsqueeze(
+def unsqueeze(input, dim):
     """
-    Adds an additional dimension to `
-
-    Note:
-        If the specified dim is a negative number, the index is counted
-        backward from the end and starts at 1.
+    Adds an additional dimension to `input` at the given dim.
 
     Args:
-
+        input (Tensor): The shape of tensor is :math:`(n_1, n_2, ..., n_R)`.
         dim (int): Specifies the dimension index at which to expand
-            the shape of `
-            `[-
+            the shape of `input`. The value of `dim` must be in the range
+            `[-input.ndim-1, input.ndim]`. Only constant value is allowed.
 
     Returns:
-        Tensor, the shape of tensor is :math:`(1,
-        value of `dim` is 0. It has the same data type as `
+        Tensor, the shape of tensor is :math:`(1, n_1, n_2, ..., n_R)` if the
+        value of `dim` is 0. It has the same data type as `input`.
 
     Raises:
         TypeError: If `dim` is not an int.
-        ValueError: If `dim` is not in the valid range :math:`[-
+        ValueError: If `dim` is not in the valid range :math:`[-input.ndim-1, input.ndim]`.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1716,16 +2237,16 @@ def unsqueeze(input_x, dim):
         [[[2. 2.]
           [2. 2.]]]
     """
-    return expand_dims_(
+    return expand_dims_(input, dim)
 
 
-def squeeze(
+def squeeze(input, axis=None):
     """
     Return the Tensor after deleting the dimension of size 1 in the specified `axis`.
 
-    If :math:`axis=
+    If :math:`axis=None`, it will remove all the dimensions of size 1.
     If `axis` is specified, it will remove the dimensions of size 1 in the given `axis`.
-    For example, if the dimension is not specified :math:`axis=
+    For example, if the dimension is not specified :math:`axis=None`, input shape is (A, 1, B, C, 1, D),
     then the shape of the output Tensor is (A, B, C, D). If the dimension is specified, the squeeze operation
     is only performed in the specified dimension. If input shape is (A, 1, B), input Tensor will not be
     changed when :math:`axis=0` , but when :math:`axis=1` , the shape of the input Tensor will be changed to (A, B).
@@ -1736,16 +2257,16 @@ def squeeze(input_x, axis=()):
         - The dimension index starts at 0 and must be in the range `[-input.ndim, input.ndim]`.
 
     Args:
-
+        input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
         axis (Union[int, tuple(int)]): Specifies the dimension indexes of shape to be removed, which will remove
             all the dimensions of size 1 in the given axis parameter. If specified, it must be int32 or int64.
-            Default:
+            Default: None, an empty tuple will be used.
 
     Returns:
         Tensor, the shape of tensor is :math:`(x_1, x_2, ..., x_S)`.
 
     Raises:
-        TypeError: If `
+        TypeError: If `input` is not a tensor.
         TypeError: If `axis` is neither an int nor tuple.
         TypeError: If `axis` is a tuple whose elements are not all int.
         ValueError: If the corresponding dimension of the specified axis isn't equal to 1.
@@ -1754,18 +2275,20 @@ def squeeze(input_x, axis=()):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>>
-        >>> output = ops.squeeze(
+        >>> input = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
+        >>> output = ops.squeeze(input)
         >>> print(output)
         [[1. 1.]
          [1. 1.]
         [1. 1.]]
     """
+    if axis is None:
+        axis = ()
     squeeze_ = _get_cache_prim(P.Squeeze)(axis)
-    return squeeze_(
+    return squeeze_(input)
 
 
-def transpose(
+def transpose(input, input_perm):
     """
     Permutes the dimensions of the input tensor according to input permutation.
 
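The two added lines in `squeeze` map the new `axis=None` default onto the empty tuple that `P.Squeeze` expects, which removes every size-1 dimension. A short sketch under that assumption (not part of the diff):

    import numpy as np
    import mindspore
    from mindspore import ops, Tensor

    x = Tensor(np.ones((3, 1, 2, 1)), mindspore.float32)
    print(ops.squeeze(x).shape)     # (3, 2) -- axis=None removes all size-1 dims
    print(ops.squeeze(x, 1).shape)  # (3, 2, 1)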
@@ -1773,35 +2296,35 @@ def transpose(input_x, input_perm):
     To convert a 1-D array into a 2D column vector please refer the class: mindspore.ops.ExpandDims.
     For a 2-D array, this is a standard matrix transpose. For an n-D array, if axes are given,
     their order indicates how the axes are permuted (see Examples).
-    If axes are not provided and a.shape
-    then a.transpose().shape
+    If axes are not provided and a.shape is :math:`(i[0], i[1], ... i[n-2], i[n-1])`,
+    then a.transpose().shape is :math:`(i[n-1], i[n-2], ... i[1], i[0])`.
 
     Note:
-        On GPU and CPU, if the value of `input_perm` is negative, its actual value is `input_perm[i] + rank(
+        On GPU and CPU, if the value of `input_perm` is negative, its actual value is `input_perm[i] + rank(input)`.
         Negative value of `input_perm` is not supported on Ascend.
 
     Args:
-
+        input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
         input_perm (tuple[int]): The permutation to be converted. The elements in `input_perm` are composed of
-            the indexes of each dimension of `
-            the same. Only constant value is allowed. Must be in the range [-rank(
+            the indexes of each dimension of `input`. The length of `input_perm` and the shape of `input` must be
+            the same. Only constant value is allowed. Must be in the range [-rank(input), rank(input)).
 
     Returns:
-        Tensor, the type of output tensor is the same as `
-        shape of `
+        Tensor, the type of output tensor is the same as `input` and the shape of output tensor is decided by the
+        shape of `input` and the value of `input_perm`.
 
     Raises:
         TypeError: If `input_perm` is not a tuple.
-        ValueError: If length of shape of `
+        ValueError: If length of shape of `input` is not equal to length of shape of `input_perm`.
         ValueError: If the same element exists in `input_perm`.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>>
+        >>> input = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
         >>> input_perm = (0, 2, 1)
-        >>> output = ops.transpose(
+        >>> output = ops.transpose(input, input_perm)
         >>> print(output)
         [[[ 1.  4.]
           [ 2.  5.]
@@ -1810,7 +2333,7 @@ def transpose(input_x, input_perm):
           [ 8. 11.]
           [ 9. 12.]]]
     """
-    return transpose_(
+    return transpose_(input, input_perm)
 
 
 def scatter_mul(input_x, indices, updates):
@@ -1914,9 +2437,20 @@ def scatter_max(input_x, indices, updates):
     Using given values to update tensor value through the max operation, along with the input indices.
     This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
 
+    for each :math:`i, ..., j` in `indices.shape`:
+
+    .. math::
+
+        \text{input_x}[\text{indices}[i, ..., j], :]
+        = max(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])
+
+    Inputs of `input_x` and `updates` follow the implicit type conversion rules to keep the data types consistent.
+    If they have different data types, the lower priority data type will be converted to the relatively highest
+    priority data type. A RuntimeError will be reported when `updates` does not support conversion to the data type
+    required by `input_x`.
+
     Args:
         input_x (Parameter): The target tensor, with data type of Parameter.
-            The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
         indices (Tensor): The index to do max operation whose data type must be mindspore.int32.
         updates (Tensor): The tensor doing the max operation with `input_x`,
             the data type is same as `input_x`, the shape is `indices.shape + x.shape[1:]`.
@@ -1933,7 +2467,7 @@ def scatter_max(input_x, indices, updates):
|
|
|
1933
2467
|
and `updates` is greater than 8 dimensions.
|
|
1934
2468
|
|
|
1935
2469
|
Supported Platforms:
|
|
1936
|
-
``Ascend`` ``
|
|
2470
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1937
2471
|
|
|
1938
2472
|
Examples:
|
|
1939
2473
|
>>> input_x = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32), name="input_x")
|
|
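The math block added above documents a row-wise maximum update. A NumPy sketch of that rule, as an illustration of the documented semantics rather than MindSpore's kernel:

    import numpy as np

    # For each position in indices: input_x[idx, :] = max(input_x[idx, :], updates[i, :])
    input_x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    indices = np.array([0, 1])
    updates = np.array([[9.0, 0.0, 0.0], [0.0, 9.0, 0.0]])
    for i, idx in enumerate(indices):
        input_x[idx] = np.maximum(input_x[idx], updates[i])
    print(input_x)  # [[9. 2. 3.] [4. 9. 6.]]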
@@ -1954,7 +2488,7 @@ def scatter_add(input_x, indices, updates):

 Args:
 input_x (Parameter): The target tensor, with data type of Parameter.
- The shape is :math:`(N
+ The shape is :math:`(N, *)` where :math:`*` means,any number of additional dimensions.
 indices (Tensor): The index to do add operation whose data type must be int32 or int64.
 updates (Tensor): The tensor doing the add operation with `input_x`,
 the data type is same as `input_x`, the shape is `indices.shape + x.shape[1:]`.
@@ -2007,7 +2541,6 @@ def scatter_min(input_x, indices, updates):

 Args:
 input_x (Parameter): The target tensor, with data type of Parameter.
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
 indices (Tensor): The index to do min operation whose data type must be mindspore.int32 or mindspore.int64.
 updates (Tensor): The tensor doing the min operation with `input_x`,
 the data type is same as `input_x`, the shape is `indices.shape + input_x.shape[1:]`.
@@ -2060,20 +2593,19 @@ def scatter_div(input_x, indices, updates):

 Args:
 input_x (Parameter): The target tensor, with data type of Parameter.
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
 indices (Tensor): The index to do divide operation whose data type must be mindspore.int32 or
 mindspore.int64.
- updates (Tensor): The tensor doing the divide operation with `input_x`,
- the
+ updates (Tensor): The tensor doing the divide operation with `input_x`, the data type is same as `input_x`,
+ the shape is `indices.shape + input_x.shape[1:]`.

 Returns:
- Tensor, the updated `input_x`, has the same
+ Tensor, the updated `input_x`, has the same type and shape as `input_x`.

 Raises:
- TypeError: If `indices` is not
+ TypeError: If the type of `indices` is not one of the following dtype: int32, int64.
 ValueError: If the shape of `updates` is not equal to `indices.shape + input_x.shape[1:]`.
- RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
-
+ RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter is required
+ when data type conversion of Parameter is not supported.
 RuntimeError: On the Ascend platform, the input data dimension of `input_x` , `indices`
 and `updates` is greater than 8 dimensions.

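These `scatter_*` functions mutate a `Parameter` in place and return it. A minimal `scatter_add` sketch following the `indices.shape + x.shape[1:]` shape rule above (the values are ours):

    import numpy as np
    import mindspore
    from mindspore import Tensor, Parameter, ops

    x = Parameter(Tensor(np.zeros((2, 3)), mindspore.float32), name="x")
    indices = Tensor(np.array([0, 1]), mindspore.int32)
    updates = Tensor(np.ones((2, 3)), mindspore.float32)  # indices.shape + x.shape[1:]
    out = ops.scatter_add(x, indices, updates)
    print(out)  # [[1. 1. 1.] [1. 1. 1.]], and x now holds the same values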
@@ -2246,7 +2778,6 @@ def scatter_update(input_x, indices, updates):

 Args:
 input_x (Parameter): The target tensor, with data type of Parameter.
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
 indices (Tensor): The index of input tensor. With int32 or int64 data type.
 If there are duplicates in indices, the order for updating is undefined.
 updates (Tensor): The tensor to update the input tensor, has the same type as input,
@@ -2297,7 +2828,6 @@ def scatter_nd_add(input_x, indices, updates, use_locking=False):

 Args:
 input_x (Parameter): The target tensor, with data type of Parameter.
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
 indices (Tensor): The index to do min operation whose data type must be mindspore.int32 or mindspore.int64.
 The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
 updates (Tensor): The tensor doing the addition operation with `input_x`,
@@ -2370,7 +2900,6 @@ def scatter_nd_sub(input_x, indices, updates, use_locking=False):

 Args:
 input_x (Parameter): The target tensor, with data type of Parameter.
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
 indices (Tensor): The index of input tensor, with int32 or int64 data type.
 The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
 updates (Tensor): The tensor doing the subtraction operation with `input_x`, has the same type as input.
@@ -2442,8 +2971,7 @@ def scatter_nd_mul(input_x, indices, updates, use_locking=False):
 :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`.

 Args:
- input_x (Parameter):
- The shape is :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
+ input_x (Parameter): Input parameter.
 indices (Tensor): The index to do multiplication operation whose data type must be mindspore.int32 or
 mindspore.int64. The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
 updates (Tensor): The tensor to do the multiplication operation with `input_x`.
@@ -2516,7 +3044,6 @@ def scatter_nd_div(input_x, indices, updates, use_locking=False):

 Args:
 input_x (Parameter): The target tensor, with data type of Parameter.
- The shape is :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
 indices (Tensor): The index to do div operation whose data type must be mindspore.int32 or mindspore.int64.
 The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
 updates (Tensor): The tensor to do the div operation with `input_x`.
@@ -2590,7 +3117,6 @@ def scatter_nd_max(input_x, indices, updates, use_locking=False):

 Args:
 input_x (Parameter): The target tensor, with data type of Parameter.
- The shape is :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
 indices (Tensor): The index to do maximum operation whose data type must be mindspore.int32 or mindspore.int64.
 The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
 updates (Tensor): The tensor to do the max operation with `input_x`.
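For the `scatter_nd_*` variants, `indices` has rank at least 2 and its last axis selects slices of `input_x`. A `scatter_nd_add` sketch (the values are ours):

    import numpy as np
    import mindspore
    from mindspore import Tensor, Parameter, ops

    x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
    indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)  # rank 2
    updates = Tensor(np.array([6, 7, 6, 7]), mindspore.float32)
    print(ops.scatter_nd_add(x, indices, updates))
    # [ 1.  8.  9.  4. 12.  6.  7. 15.]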
@@ -2725,31 +3251,32 @@ def sort(input_x, axis=-1, descending=False):
 Args:
 input_x(Tensor): The input tensor to sort.
 The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
- axis (int): The dimension to sort along. Default: -1.
- descending (bool): Controls the sort order. If descending is True
- are sorted in descending order
+ axis (int, optional): The dimension to sort along. Default: -1.
+ descending (bool, optional): Controls the sort order. If `descending` is True, the elements
+ are sorted in descending order, or else sorted in ascending order. Default: False.

 .. warning::
- Currently, the data types of Float16, UInt8, Int8, Int16, Int32, Int64 are supported.
+ Currently, the data types of Float16, UInt8, Int8, Int16, Int32, Int64 are well supported.
 If use Float32, it may cause loss of accuracy.

 Returns:
-
-
+
+ - y1, a tensor whose values are the sorted values, with the same shape and data type as input.
+ - y2, a tensor that consists of the indices of the elements in the original input tensor.
+ Data type is int32.

 Raises:
 TypeError: If `axis` is not an int.
 TypeError: If `descending` is not a bool.
- TypeError: If dtype of `
- ValueError: If `axis` is not in range of [-len(
+ TypeError: If dtype of `input_x` is neither float16, float32, uint8, int8, int16, int32, int64.
+ ValueError: If `axis` is not in range of [-len(input_x.shape), len(input_x.shape)).

 Supported Platforms:
 ``Ascend`` ``GPU`` ``CPU``

 Examples:
 >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
- >>>
- >>> output = sort(x)
+ >>> output = ops.sort(x)
 >>> # The output below is based on the Ascend platform.
 >>> print(output)
 (Tensor(shape=[3, 3], dtype=Float16, value=
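`sort` returns the (values, indices) pair described in the new Returns block; unpacking it directly, with the same data as the docstring example:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
    values, indices = ops.sort(x)   # ascending along the last axis
    print(values)   # [[1. 2. 8.] [3. 5. 9.] [4. 6. 7.]]
    print(indices)  # [[2 1 0] [2 0 1] [0 1 2]]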
@@ -2764,7 +3291,36 @@ def sort(input_x, axis=-1, descending=False):
 return _sort(input_x)


- def
+ def argsort(input, axis=-1, descending=False):
+ r"""
+ Sorts the input tensor along the given dimension in specified order and return the sorted indices.
+
+ Args:
+ input(Tensor): The input tensor to sort.
+ axis (int): The axis to sort along. Default: -1, means the last axis
+ descending (bool): The sort order. If `descending` is True then the elements
+ are sorted in descending order by value. Otherwise sort in ascending order. Default: False.
+
+ Returns:
+ Tensor, the indices of sorted input tensor. Data type is int32.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
+ >>> sort = ops.argsort(x)
+ >>> print(sort)
+ [[2 1 0]
+ [2 0 1]
+ [0 1 2]]
+ """
+ _sort = _get_cache_prim(P.Sort)(axis, descending)
+ _, arg_sort = _sort(input)
+ return arg_sort
+
+
+ def gather(input_params, input_indices, axis, batch_dims=0):
 r"""
 Returns the slice of the input tensor corresponding to the elements of `input_indices` on the specified `axis`.

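As the body above shows, the new `argsort` simply discards the sorted values and keeps the index half of `Sort`; a sketch of that equivalence:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
    _, idx_from_sort = ops.sort(x)
    idx = ops.argsort(x)
    print((idx.asnumpy() == idx_from_sort.asnumpy()).all())  # True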
@@ -2775,22 +3331,24 @@ def gather(input_params, input_indices, axis):
 where params represents the input `input_params`, and indices represents the index to be sliced `input_indices`.

 .. note::
-
-
+ 1. The value of input_indices must be in the range of `[0, input_param.shape[axis])`, the result is undefined
+ out of range.

-
-
-
+ 2. The data type of input_params cannot be
+ `bool_ <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_ on Ascend
+ platform currently.

 Args:
 input_params (Tensor): The original Tensor. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
 input_indices (Tensor): Index tensor to be sliced, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
 Specifies the indices of elements of the original Tensor. The data type can be int32 or int64.
- axis (int): Specifies the dimension index to gather indices.
+ axis (int): Specifies the dimension index to gather indices. It must be greater than or equal to `batch_dims`.
+ batch_dims (int): Specifies the number of batch dimensions. It must be less than or euqal to the rank
+ of `input_indices`. Default: 0.

 Returns:
 Tensor, the shape of tensor is
- :math:`input\_params.shape[:axis] + input\_indices.shape + input\_params.shape[axis + 1:]`.
+ :math:`input\_params.shape[:axis] + input\_indices.shape[batch\_dims:] + input\_params.shape[axis + 1:]`.

 Raises:
 TypeError: If `axis` is not an int.
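The new `batch_dims` argument treats the leading dimensions of `input_params` and `input_indices` as a shared batch, which is what produces the revised output-shape formula above. A sketch (the data is ours):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    # With batch_dims=1, row i of the output picks params[i, indices[i]]:
    # output.shape == params.shape[:axis] + indices.shape[batch_dims:] + params.shape[axis+1:]
    params = Tensor(np.arange(12).reshape(3, 4).astype(np.float32))
    indices = Tensor(np.array([0, 2, 1]), mindspore.int32)
    print(ops.gather(params, indices, 1, 1))  # [0. 6. 9.]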
@@ -2814,8 +3372,8 @@ def gather(input_params, input_indices, axis):
 >>> axis = 0
 >>> output = ops.gather(input_params, input_indices, axis)
 >>> print(output)
- [[
- [
+ [[1. 3.]
+ [3. 7.]]
 >>> # case3: input_indices is a Tensor with shape (2, ) and
 >>> # input_params is a Tensor with shape (3, 4) and axis is 0.
 >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
@@ -2823,20 +3381,20 @@ def gather(input_params, input_indices, axis):
 >>> axis = 0
 >>> output = ops.gather(input_params, input_indices, axis)
 >>> print(output)
- [[1. 2. 3. 4.]
- [9. 10. 11. 12.]]
+ [[ 1. 2. 3. 4.]
+ [ 9. 10. 11. 12.]]
 >>> # case4: input_indices is a Tensor with shape (2, ) and
- >>> # input_params is a Tensor with shape (3, 4) and axis is 1.
+ >>> # input_params is a Tensor with shape (3, 4) and axis is 1, batch_dims is 1.
 >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
- >>> input_indices = Tensor(np.array([0, 2]), mindspore.int32)
+ >>> input_indices = Tensor(np.array([0, 2, 1]), mindspore.int32)
 >>> axis = 1
- >>>
+ >>> batch_dims = 1
+ >>> output = ops.gather(input_params, input_indices, axis, batch_dims)
 >>> print(output)
- [
- [5. 7.]
- [9. 11.]]
+ [ 1. 7. 10.]
 """
-
+ _gather = _get_cache_prim(P.Gather)(batch_dims)
+ return _gather(input_params, input_indices, axis)


 def gather_d(x, dim, index):
@@ -2860,7 +3418,7 @@ def gather_d(x, dim, index):
 return gather_d_(x, dim, index)


- def gather_elements(
+ def gather_elements(input, dim, index):
 """
 Gathers elements along an axis specified by dim.

@@ -2874,25 +3432,26 @@ def gather_elements(x, dim, index):

 output[i][j][k] = x[i][j][index[i][j][k]] # if dim == 2

- `
- If `dim` = i, `
+ `input` and `index` have the same length of dimensions, and all dimensions except `dim` have the same size.
+ If `dim` = i, `input` is an n-D tensor with shape :math:`(z_0, z_1, ..., z_i, ..., z_{n-1})`,
 the `index` must be an n-D tensor with shape :math:`(z_0, z_1, ..., y, ..., z_{n-1})`
 where `y`>=1 and the output will have the same shape with `index`.

 Args:
-
- dim (int): The axis along which to index. It must be int32 or int64. The value range is [-
+ input (Tensor): The input tensor.
+ dim (int): The axis along which to index. It must be int32 or int64. The value range is [-input.ndim,
+ input.ndim).
 index (Tensor): The indices of elements to gather. It can be one of the following data types:
- int32, int64. The value range of each index element is [-
+ int32, int64. The value range of each index element is [-input.shape(dim), input.shape(dim)).

 Returns:
- Tensor, has the same shape as index tensor, the shape of tensor is :math:`(z_1,
- and has the same data type with `
+ Tensor, has the same shape as index tensor, the shape of tensor is :math:`(z_0, z_1, ..., y, ..., z_{n-1})`,
+ and has the same data type with `input`.

 Raises:
 TypeError: If dtype of `dim` or `index` is neither int32 nor int64.
- ValueError: If length of shape of `
- ValueError: If the size of the dimension except `dim` is not equal between `
+ ValueError: If length of shape of `input` is not equal to length of shape of `index`.
+ ValueError: If the size of the dimension except `dim` is not equal between `input` and `index`.
 ValueError: If the value of `dim` is not in the expected range.

 Supported Platforms:
@@ -2910,7 +3469,7 @@ def gather_elements(x, dim, index):
 [[1 1]
 [4 3]]
 """
- return gather_d_(
+ return gather_d_(input, dim, index)


 def gather_nd(input_x, indices):
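For readers coming from NumPy, `gather_elements` behaves like `np.take_along_axis`; a sketch reproducing the docstring example:

    import numpy as np

    # output[i][j] = x[i][index[i][j]] for dim == 1
    x = np.array([[1, 2], [3, 4]])
    index = np.array([[0, 0], [1, 0]])
    print(np.take_along_axis(x, index, axis=1))  # [[1 1] [4 3]]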
@@ -2930,7 +3489,6 @@ def gather_nd(input_x, indices):

 Args:
 input_x (Tensor): The target tensor to gather values.
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
 indices (Tensor): The index tensor, with int32 or int64 data type.

 Returns:
@@ -2984,6 +3542,7 @@ def tensor_scatter_add(input_x, indices, updates):
 Raises:
 TypeError: If dtype of `indices` is neither int32 nor int64.
 ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
+ RuntimeError: If a value of `indices` is not in `input_x`.

 Supported Platforms:
 ``Ascend`` ``GPU`` ``CPU``
@@ -3036,6 +3595,7 @@ def tensor_scatter_sub(input_x, indices, updates):
 Raises:
 TypeError: If dtype of `indices` is neither int32 nor int64.
 ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
+ RuntimeError: If a value of `indices` is not in `input_x`.

 Supported Platforms:
 ``Ascend`` ``GPU`` ``CPU``
@@ -3083,6 +3643,7 @@ def tensor_scatter_max(input_x, indices, updates):
 Raises:
 TypeError: If dtype of `indices` is neither int32 nor int64.
 ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
+ RuntimeError: If a value of `indices` is not in `input_x`.

 Supported Platforms:
 ``GPU`` ``CPU``
@@ -3134,6 +3695,7 @@ def tensor_scatter_min(input_x, indices, updates):
 Raises:
 TypeError: If dtype of `indices` is neither int32 nor int64.
 ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
+ RuntimeError: If a value of `indices` is not in `input_x`.

 Supported Platforms:
 ``Ascend`` ``GPU`` ``CPU``
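Unlike the Parameter-based `scatter_*` functions, the `tensor_scatter_*` family takes a plain Tensor and returns a new one; out-of-range indices now raise the `RuntimeError` documented above. A sketch:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
    indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
    updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
    print(ops.tensor_scatter_add(x, indices, updates))
    # [[ 3.1  0.3  3.6]
    #  [ 0.4  0.5 -3.2]] -- both updates accumulate into x[0, 0]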
@@ -3186,8 +3748,7 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
 the corresponding `updates` will not be updated to `input_x`.

 Args:
- input_x (Tensor): The target tensor.
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
+ input_x (Tensor): The target tensor. The rank of `input` must be at least 1.
 indices (Tensor): The index to do add operation whose data type must be mindspore.int32 or
 mindspore.int64. Same rank as input_x. And accepted range is [-s, s) where s is the size along axis.
 updates (Tensor): The tensor doing the add operation with `input_x`, has the same type as input_x,
@@ -3204,7 +3765,7 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
 ValueError: If the shape of `updates` is not equal to the shape of `indices`.
 ValueError: If the rank of `updates` is not equal to the rank of `input_x`.
 RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
-
+ is required when data type conversion of Parameter is not supported.

 Supported Platforms:
 ``Ascend`` ``GPU`` ``CPU``
@@ -3223,6 +3784,64 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
 return _tensor_scatter_elements(input_x, indices, updates)


+ def scatter(input, axis, index, src):
+ """
+ Update the value in `src` to `input` according to the specified index.
+ Refer to :func:`mindspore.ops.tensor_scatter_elements` for more details.
+
+ Args:
+ input (Tensor): The target tensor. The rank of `input` must be at least 1.
+ axis (int): Which axis to scatter. Accepted range is [-r, r) where r = rank(input).
+ index (Tensor): The index to do update operation whose data type must be mindspore.int32 or
+ mindspore.int64. Same rank as `input` . And accepted range is [-s, s) where s is the size along axis.
+ src (Tensor): The tensor doing the update operation with `input` , has the same type as `input` ,
+ and the shape of `src` should be equal to the shape of `index` .
+
+ Returns:
+ Tensor, has the same shape and type as `input` .
+
+ Raises:
+ TypeError: If `index` is neither int32 nor int64.
+ ValueError: If anyone of the rank among `input` , `index` and `src` less than 1.
+ ValueError: If the shape of `src` is not equal to the shape of `index` .
+ ValueError: If the rank of `src` is not equal to the rank of `input` .
+ RuntimeError: If the data type of `input` and `src` conversion of Parameter
+ is required when data type conversion of Parameter is not supported.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
+ >>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
+ >>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
+ >>> out = ops.scatter(input=input, axis=1, index=index, src=src)
+ >>> print(out)
+ [[1. 2. 8. 4. 8.]]
+ >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
+ >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
+ >>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64)
+ >>> out = ops.scatter(input=input, axis=0, index=index, src=src)
+ >>> print(out)
+ [[1. 2. 3. 0. 0.]
+ [0. 0. 0. 0. 0.]
+ [4. 5. 6. 0. 0.]
+ [0. 0. 0. 0. 0.]
+ [7. 8. 9. 0. 0.]]
+ >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
+ >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
+ >>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64)
+ >>> out = ops.scatter(input=input, axis=1, index=index, src=src)
+ >>> print(out)
+ [[1. 0. 2. 0. 3.]
+ [4. 0. 5. 0. 6.]
+ [7. 0. 8. 0. 9.]
+ [0. 0. 0. 0. 0.]
+ [0. 0. 0. 0. 0.]]
+ """
+ return F.tensor_scatter_elements(input_x=input, indices=index, updates=src, axis=axis)
+
+
 def space_to_batch_nd(input_x, block_size, paddings):
 r"""
 Divides a tensor's spatial dimensions into blocks and combines the block sizes with the original batch.
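As its body shows, the new `ops.scatter` is a thin argument-reordering wrapper over `tensor_scatter_elements` with the default "none" reduction; a sketch of that equivalence:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
    index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
    src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
    a = ops.scatter(input=x, axis=1, index=index, src=src)
    b = ops.tensor_scatter_elements(x, index, src, axis=1)
    print((a.asnumpy() == b.asnumpy()).all())  # True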
@@ -3231,8 +3850,7 @@ def space_to_batch_nd(input_x, block_size, paddings):
 and after division, the output tensor's spatial dimension is the corresponding number of blocks.
 The output tensor's batch dimension is the product of the original batch and the product of `block_size`.
 Before division, the spatial dimensions of the input are zero padded according to paddings if necessary.
- Assume input shape is :math:`(n, c_1, ... c_k, w_1, ..., w_M)
- :math:`block\_size` and :math:`paddings`, then the shape of the output tensor will be
+ Assume input shape is :math:`(n, c_1, ... c_k, w_1, ..., w_M)`, then the shape of the output tensor will be
 :math:`(n', c_1, ... c_k, w'_1, ..., w'_M)`, where

 .. math::
@@ -3294,11 +3912,13 @@ def batch_to_space_nd(input_x, block_shape, crops):
 respectively.

 If the input shape is :math:`(n, c_1, ... c_k, w_1, ..., w_M)`, the output shape is
- :math:`(n, c_1, ... c_k,
-
- :math:`n' = n//(block\_shape[0]*...*block\_shape[M-1])`
+ :math:`(n', c_1, ... c_k, w'_1, ..., w'_M)`, where

-
+ .. math::
+ \begin{array}{ll} \\
+ n' = n//(block\_shape[0]*...*block\_shape[M-1]) \\
+ w'_i = w_i*block\_shape[i-1]-crops[i-1][0]-crops[i-1][1]
+ \end{array}

 Args:
 input_x (Tensor): The input tensor. It must be greater or equal to 2-D tensor(equal to 4-D tensor on Ascend),
@@ -3311,17 +3931,10 @@ def batch_to_space_nd(input_x, block_shape, crops):
 Each contains 2 integer values. All values must be >= 0. crops[i] specifies the crops values for spatial
 dimension i, which corresponds to input dimension i + offset,where offset = N-M, and N is the number of
 input dimensions. It is required that
-
 :math:`input\_shape[i+offset]*block\_shape[i] > crops[i][0]+crops[i][1]`

 Returns:
- Tensor, the output tensor with the same type as input.
- :math:`(n, c_1, ... c_k, w_1, ..., w_M)` with block_shape and crops. The output shape will be
- :math:`(n', c_1, ... c_k, w'_1, ..., w'_M)`, where
-
- :math:`n' = n//(block\_shape[0]*...*block\_shape[M-1])`
-
- :math:`w'_i = w_i*block\_shape[i-1]-crops[i-1][0]-crops[i-1][1]`
+ Tensor, the output tensor with the same type as input.

 Raises:
 TypeError: If `block_shape` is not one of list, tuple, int.
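The restored math block pins down the output shape of `batch_to_space_nd`; the same arithmetic as a plain-Python sketch:

    # n' = n // prod(block_shape); w'_i = w_i * block_shape[i-1] - crops[i-1][0] - crops[i-1][1]
    def batch_to_space_nd_shape(shape, block_shape, crops):
        m = len(block_shape)
        n = shape[0]
        for b in block_shape:
            n //= b
        spatial = [w * b - c[0] - c[1]
                   for w, b, c in zip(shape[-m:], block_shape, crops)]
        return (n, *shape[1:-m], *spatial)

    print(batch_to_space_nd_shape((4, 1, 2, 2), (2, 2), [[0, 0], [0, 0]]))  # (1, 1, 4, 4)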
@@ -3351,19 +3964,19 @@ def batch_to_space_nd(input_x, block_shape, crops):
 return _batch_to_space_nd(input_x)


- def nonzero(
+ def nonzero(input):
 """
 Return a Tensor of the positions of all non-zero values.

 Args:
-
+ input (Tensor): The shape of Tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is int, float or bool.

 Returns:
 Tensor, a 2-D Tensor whose data type is int64, containing the positions of all non-zero values of the input.

 Raises:
- TypeError: If `
- ValueError: If
+ TypeError: If `input` is not Tensor.
+ ValueError: If dim of `x` equals to 0.

 Supported Platforms:
 ``Ascend`` ``GPU`` ``CPU``
@@ -3378,8 +3991,14 @@ def nonzero(x):
 >>> print(output)
 [[0 0 0]
 [0 1 0]]
+ >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
+ >>> output = ops.nonzero(x)
+ >>> print(output)
+ [[0]
+ [2]
+ [4]]
 """
- return nonzero_(
+ return nonzero_(input)


 def matrix_diag(x, k=0, num_rows=-1, num_cols=-1, padding_value=0, align="RIGHT_LEFT"):
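The second example added above shows the column-of-coordinates layout; `np.argwhere` is a useful mental model for it:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
    print(ops.nonzero(x))                          # [[0] [2] [4]]
    print(np.argwhere(np.array([1, 0, 2, 0, 3])))  # same (n_nonzero, ndim) coordinates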
@@ -3394,34 +4013,44 @@ def matrix_diag(x, k=0, num_rows=-1, num_cols=-1, padding_value=0, align="RIGHT_

 Args:
 x (Tensor): The diagonal Tensor.
- k (Union[int, Tensor], optional): A Tensor of type int32.
+ k (Union[int, Tensor], optional): Diagonal offsets. A Tensor of type int32. Positive value means superdiagonal,
 0 refers to the main diagonal, and negative value means subdiagonals. `k` can be a single integer
 (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band.
 k[0] must not be larger than k[1]. The value must be in the range of given or derivated `num_rows`
 and `num_cols`, meaning value of k must be in (-num_rows, num_cols). Default: 0.
- num_rows (Union[int, Tensor], optional):
-
+ num_rows (Union[int, Tensor], optional): The number of rows of the output Tensor. A Tensor of type int32 with
+ only one value. If `num_rows` is -1, indicating that the innermost matrix of the output Tensor is a square
 matrix, and the real number of rows will be derivated by other inputs. That is
 :math:`num\_rows = x.shape[-1] - min(k[1], 0)`. Otherwise, the value must be equal or greater than
 :math:`x.shape[-1] - min(k[1], 0)`. Default: -1.
- num_cols (Union[int, Tensor], optional):
-
-
+ num_cols (Union[int, Tensor], optional): The number of columns of
+ the output Tensor. A Tensor of type int32 with only one value.
+ If `num_cols` is -1, indicating that the innermost matrix of the output
 Tensor is a square matrix, and the real number of columns will be derivated by other inputs.
 That is :math:`num\_cols = x.shape[-1] + max(k[0], 0)`. Otherwise, the value must be equal or
 greater than :math:`x.shape[-1] - min(k[1], 0)`. Default: -1.
- padding_value (Union[int, float, Tensor], optional):
-
- align (str, optional):
- "
-
-
+ padding_value (Union[int, float, Tensor], optional): The number to fill the area outside the specified
+ diagonal band. A Tensor with only one value. Have the same dtype as x. Default: 0.
+ align (str, optional): specifies how superdiagonals and subdiagonals should be aligned.
+ Supported values:"RIGHT_LEFT", "LEFT_RIGHT", "LEFT_LEFT", "RIGHT_RIGHT".
+ Default: "RIGHT_LEFT".
+
+ - When set to "RIGHT_LEFT", the alignment of superdiagonals will be towards the right side
+ (padding the row on the left), while subdiagonals will be towards the left side
+ (padding the row on the right)
+ - When set to "LEFT_RIGHT", the alignment of superdiagonals will be towards the left side
+ (padding the row on the right), while subdiagonals will be towards the right side
+ (padding the row on the left)
+ - When set to "LEFT_LEFT", the alignment of both superdiagonals and subdiagonals will be towards
+ the left side(padding the row on the right).
+ - When set to "RIGHT_RIGHT", the alignment of both superdiagonals and subdiagonals will be towards
+ the right side(padding the row on the left).

 Returns:
 A Tensor. Has the same type as `x`.
- Suppose `x` has r dimensions with shape
-
- Otherwise, it has rank r with shape
+ Suppose `x` has r dimensions with shape :math:`(I, J, ..., M, N)` . The output Tensor has rank r + 1 with shape
+ :math:`(I, J, ..., M, num_rows, num_cols)` when only one diagonal is given (k is an integer or k[0] == k[1]).
+ Otherwise, it has rank r with shape :math:`(I, J, ..., num_rows, num_cols)` .

 Raises:
 TypeError: If `x` is not Tensor.
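Putting the expanded `matrix_diag` parameter docs together, a minimal shape-focused sketch (the values are ours):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    # A batch of two main diagonals (k=0) becomes a batch of 3x3 matrices.
    x = Tensor(np.array([[8, 2, 1], [5, 9, 3]]), mindspore.float32)
    out = ops.matrix_diag(x)
    print(out.shape)  # (2, 3, 3); entries off the main diagonal are padding_value (0)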
@@ -3495,11 +4124,11 @@ def matrix_diag_part(x, k=0, padding_value=0, align="RIGHT_LEFT"):

 Returns:
 A Tensor. Has the same type as `x`.
- Assume `x` has r dimensions :math:`
+ Assume `x` has r dimensions :math:`(I, J, ..., L, M, N)` . Let `max_diag_len` be the maximum length among all
 diagonals to be extracted, :math:`max\_diag\_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
 Let `num_diags` be the number of diagonals to extract, :math:`num\_diags = k[1] - k[0] + 1`.
- If :math:`num\_diags == 1`, the output tensor is of rank r - 1 with shape :math:`
- Otherwise, the output tensor has rank r with dimensions :math:`
+ If :math:`num\_diags == 1`, the output tensor is of rank r - 1 with shape :math:`(I, J, ..., L, max\_diag\_len)`
+ Otherwise, the output tensor has rank r with dimensions :math:`(I, J, ..., L, num\_diags, max\_diag\_len)` .

 Raises:
 TypeError: If `x` is not Tensor.
@@ -3534,7 +4163,7 @@ def matrix_diag_part(x, k=0, padding_value=0, align="RIGHT_LEFT"):
 return matrix_diag_part_v3(x, k, padding_value)


- def matrix_set_diag(x, diagonal, k=0, align="RIGHT_LEFT"):
+ def matrix_set_diag(x, diagonal, k=0, align="RIGHT_LEFT"): # pylint: disable=redefined-outer-name
 r"""
 Returns a batched matrix tensor with new batched diagonal values.
 Given x and diagonal, this operation returns a tensor with the same shape and values as x, except for the specified
@@ -3544,20 +4173,20 @@ def matrix_set_diag(x, diagonal, k=0, align="RIGHT_LEFT"):
 The diagonal :math:`shape[-1]` must be
 equal to the longest diagonal value max_diag_len calculated
 by :math:`min(x.shape[-2] + min(k[1], 0), x.shape[-1] + min(-k[0], 0))`.
- Let x have r + 1 dimensions :math:`
- The diagonal tensor has rank r with shape :math:`
+ Let x have r + 1 dimensions :math:`(I, J, ..., L, M, N)` .
+ The diagonal tensor has rank r with shape :math:`(I, J, ..., L, max\_diag\_len)`
 when k is an integer or :math:`k[0] == k[1]`. Otherwise, it has rank r + 1
- with shape :math:`
+ with shape :math:`(I, J, ... L, num\_diags, max\_diag\_len)` .

 Args:
 x (Tensor): Rank r + 1, where r >= 1.
- diagonal (Tensor): A Tensor. Have the same dtype as x. Rank r when k is an integer or k[0] == k[1]
+ diagonal (Tensor): A Tensor. Have the same dtype as x. Rank r when k is an integer or :math:`k[0] == k[1]`.
 Otherwise, it has rank r + 1.
 k (Union[int, Tensor], optional): A int32 Scalar or int32 Tensor. Diagonal offset(s). Positive value means
 superdiagonal, 0 refers to the main diagonal, and negative value means subdiagonals. k can be a
 single integer (for a single diagonal) or a pair of integers specifying the low and high ends of
 a matrix band. k[0] must not be larger than k[1].
- The alue of k has restructions, meaning value of k must be in (-x.shape[-2], x.shape[-1])
+ The alue of k has restructions, meaning value of k must be in :math:`(-x.shape[-2], x.shape[-1])`.
 Input k must be const Tensor when taking Graph mode.
 align (str, optional): An optional string from: "RIGHT_LEFT"(default), "LEFT_RIGHT", "LEFT_LEFT",
 "RIGHT_RIGHT". Align is a string specifying how superdiagonals and subdiagonals should be aligned,
@@ -3565,8 +4194,8 @@ def matrix_set_diag(x, diagonal, k=0, align="RIGHT_LEFT"):
 to the left (right-pads the row).

 Returns:
- Tensor, The same type as x. Let x has r+1 dimensions
- The output is a tensor of rank r+1 with dimensions
+ Tensor, The same type as x. Let x has r+1 dimensions :math:`(I, J, ..., L, M, N)` .
+ The output is a tensor of rank r+1 with dimensions :math:`(I, J, ..., L, M, N)` , the same as input x.

 Raises:
 TypeError: If input `x` or `diagonal` is not Tensor.
@@ -3579,13 +4208,14 @@ def matrix_set_diag(x, diagonal, k=0, align="RIGHT_LEFT"):
 ValueError: If k[1] is not greater equal to k[0] in case the size of `k` is 2.
 ValueError: If the `diagonal` rank size don't match with input `x` rank size.
 ValueError: If the `diagonal` shape value don't match with input `x` shape value.
- ValueError: If the diagonal
- ValueError: If the value of `k` is not in (-x.shape[-2], x.shape[-1])
- ValueError: If the diagonal.shape[-1] is not equal to the max_diag_len calculated by
-
+ ValueError: If the diagonal :math:`shape[-2]` is not equal to num_diags calculated by :math:`k[1]-k[0]+1`.
+ ValueError: If the value of `k` is not in :math:`(-x.shape[-2], x.shape[-1])`.
+ ValueError: If the diagonal.shape[-1] is not equal to the max_diag_len calculated by
+ :math:`min(x.shape[-2] + min(k[1],
+ 0), x.shape[-1] + min(-k[0], 0))`.

 Supported Platforms:
- ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``

 Examples:
 >>> x = Tensor(np.array([[7, 7, 7, 7],
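The `max_diag_len` formula above is easy to misread; spelled out as a plain-Python sketch for an M x N matrix and band `k = (k0, k1)`:

    # max_diag_len = min(M + min(k1, 0), N + min(-k0, 0))
    def max_diag_len(m, n, k0, k1):
        return min(m + min(k1, 0), n + min(-k0, 0))

    print(max_diag_len(3, 4, 0, 0))   # 3: main diagonal of a 3x4 matrix
    print(max_diag_len(3, 4, -1, 1))  # 3: longest diagonal in the band (-1, 1)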
@@ -3611,7 +4241,7 @@ def matrix_set_diag(x, diagonal, k=0, align="RIGHT_LEFT"):
 return matrix_set_diag_v3_op(x, diagonal, k)


- def meshgrid(inputs, indexing='xy'):
+ def meshgrid(*inputs, indexing='xy'):
 """
 Generates coordinate matrices from given coordinate tensors.

@@ -3619,14 +4249,16 @@ def meshgrid(inputs, indexing='xy'):
 coordinate tensors for evaluating expressions on an N-D grid.

 Args:
- inputs (
- The length of
-
-
-
-
+ inputs (List[Tensor]): List of 1-D tensors.
+ The length of inputs should be greater than 1. The data type is Number.
+
+ Keyword Args:
+ indexing (str, optional): Cartesian ('xy', default) or
+ matrix ('ij') indexing of output. Valid options: xy' or 'ij'. In the 2-D case with
+ inputs of length `M` and `N`, the outputs are of shape :math:`(N, M)`
+ for 'xy' indexing and :math:`(M, N)` for 'ij' indexing. In the 3-D
 case with inputs of length `M`, `N` and `P`, outputs are of shape
-
+ :math:`(N, M, P)` for 'xy' indexing and :math:`(M, N, P)` for 'ij' indexing. Default: 'xy'.

 Returns:
 Tensors, a Tuple of N N-D Tensor objects. The data type is the same with the Inputs.
@@ -3636,7 +4268,7 @@ def meshgrid(inputs, indexing='xy'):
 ValueError: If `indexing` is neither 'xy' nor 'ij'.

 Supported Platforms:
- ``Ascend`` ``
+ ``Ascend`` ``GPU`` ``CPU``

 Examples:
 >>> import numpy as np
@@ -3645,8 +4277,7 @@ def meshgrid(inputs, indexing='xy'):
 >>> x = Tensor(np.array([1, 2, 3, 4]).astype(np.int32))
 >>> y = Tensor(np.array([5, 6, 7]).astype(np.int32))
 >>> z = Tensor(np.array([8, 9, 0, 1, 2]).astype(np.int32))
- >>>
- >>> output = ops.meshgrid(inputs, indexing='xy')
+ >>> output = ops.meshgrid(x, y, z, indexing='xy')
 >>> print(output)
 (Tensor(shape=[3, 4, 5], dtype=Int32, value=
 [[[1, 1, 1, 1, 1],
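The signature change makes the coordinate tensors variadic (`*inputs`) with `indexing` keyword-only, so the old single-list call no longer applies; a sketch of the new call:

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([1, 2, 3, 4]).astype(np.int32))
    y = Tensor(np.array([5, 6, 7]).astype(np.int32))
    z = Tensor(np.array([8, 9, 0, 1, 2]).astype(np.int32))
    gx, gy, gz = ops.meshgrid(x, y, z, indexing='xy')
    print(gx.shape)  # (3, 4, 5): 'xy' swaps the first two axes of the 'ij' shape (4, 3, 5)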
@@ -3692,33 +4323,34 @@ def meshgrid(inputs, indexing='xy'):
 return meshgrid_op(inputs)


- def affine_grid(theta,
+ def affine_grid(theta, size, align_corners=False):
 r"""
-
+ Returns a 2D or 3D flow field (sampling grid) based on `theta`, a batch of affine matrices.

 Args:
 theta (Tensor): The input tensor of flow field whose dtype is float16, float32.
- Input batch of affine matrices with shape
-
- The value of target output with format
+ Input batch of affine matrices with shape :math:`(N, 2, 3)` for 2D grid or :math:`(N, 3, 4)` for 3D grid.
+ size (tuple[int]): The target output image size.
+ The value of target output with format :math:`(N, C, H, W)` for 2D grid or :math:`(N, C, D, H, W)` for 3D
+ grid.
 align_corners (bool, optional): Geometrically, each pixel of input is viewed as a squqre instead of dot.
 If True, consider extremum -1 and 1 referring to the centers of the pixels rather than pixel corners.
 The default value is False, extremum -1 and 1 refer to the corners of the pixels, so that sampling is
- irrelevant to resolution of the image.
+ irrelevant to resolution of the image. Default: False.
 Returns:
- Tensor, a tensor whose data type is same as 'theta', and the shape is
- or
+ Tensor, a tensor whose data type is same as 'theta', and the shape is :math:`(N, H, W, 2)` for 2D grid
+ or :math:`(N, D, H, W, 3)` for 3D grid.

 Raises:
- TypeError: If `theta` is not a Tensor or `
- ValueError: If the shape of `theta` is not
- ValueError: If the size of `
- ValueError: If the shape of `theta` is
- If the shape of `theta` is
- ValueError: If the
+ TypeError: If `theta` is not a Tensor or `size` is not a tuple.
+ ValueError: If the shape of `theta` is not :math:`(N, 2, 3)` or :math:`(N, 3, 4)`.
+ ValueError: If the size of `size` is not 4 or 5.
+ ValueError: If the shape of `theta` is :math:`(N, 2, 3)`, the size of `size` is not 4;
+ If the shape of `theta` is :math:`(N, 3, 4)`, the size of `size` is not 5.
+ ValueError: If the size[0] is not equal to the shape[0] of theta.

 Supported Platforms:
- ``GPU``
+ ``Ascend`` ``GPU`` ``CPU``

 Examples:
 >>> import mindspore
@@ -3736,10 +4368,10 @@ def affine_grid(theta, output_size, align_corners=False):
 [ 0.78333336 0.06666666]]]]
 """
 affine_grid_op = AffineGrid(align_corners)
- return affine_grid_op(theta,
+ return affine_grid_op(theta, size)


- def broadcast_to(
+ def broadcast_to(input, shape): # pylint: disable=redefined-outer-name
 """
 Broadcasts input tensor to a given shape. The dim of input shape must be smaller
 than or equal to that of target shape. Suppose input shape is :math:`(x_1, x_2, ..., x_m)`,
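The renamed `size` argument pairs an `(N, 2, 3)` `theta` with a 4-tuple; a minimal shape check for `affine_grid` (the theta values are ours):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    theta = Tensor(np.array([[[0.8, 0.5, 0.0],
                              [-0.5, 0.8, 0.0]]]), mindspore.float32)
    grid = ops.affine_grid(theta, (1, 3, 2, 3), align_corners=False)
    print(grid.shape)  # (1, 2, 3, 2): one (H=2, W=3) grid of 2-D sample points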
@@ -3757,6 +4389,8 @@ def broadcast_to(x, shape):

 Case 1: If the value of the target shape in the dimension is -1, the value of the
 output shape in the dimension is the value of the corresponding input shape in the dimension.
+ With an input shape :math:`(3, 3)`, target
+ shape :math:`(-1, 3)`, the output shape is :math:`(3, 3)`.

 Case 2: If the value of target shape in the dimension is not -1, but the corresponding
 value in the input shape is 1, then the corresponding value of the output shape
@@ -3775,18 +4409,17 @@ def broadcast_to(x, shape):
 input shape :math:`(1, 5, 9)`, the filled input shape will be :math:`(1, 1, 1, 1, 5, 9)` and thus the
 output shape is :math:`(3, 1, 4, 1, 5, 9)`.

- If the first :math:`*` dims of output shape have -1 in it, it implies this -1 is
+ If the first :math:`*` dims of output shape have -1 in it, it implies this -1 is corresponding to
 a non-existing dim so they're not broadcastable. With target shape :math:`(3, -1, 4, 1, 5, 9)`,
 input shape :math:`(1, 5, 9)`, instead of operating the dim-filling process first, it raises errors directly.

 Args:
-
- The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
+ input (Tensor): The input Tensor. Supported types are: float16, float32, int32, int8, uint8, bool.
 shape (tuple): The target shape to broadcast. Can be fully specified, or have -1 in one position
 where it will be substituted by the input tensor's shape in that position, see example.

 Returns:
- Tensor, with the given `shape` and the same data type as `
+ Tensor, with the given `shape` and the same data type as `input`.

 Raises:
 TypeError: If `shape` is not a tuple.
@@ -3810,8 +4443,11 @@ def broadcast_to(x, shape):
 [[1. 1.]
 [2. 2.]]
 """
+ if isinstance(shape, Tensor) or F.is_sequence_value_unknown(shape):
+ _dyn_broadcast_to = _get_cache_prim(DynamicBroadcastTo)()
+ return _dyn_broadcast_to(input, shape)
 _broadcast_to = _get_cache_prim(P.BroadcastTo)(shape)
- return _broadcast_to(
+ return _broadcast_to(input)


 def unsorted_segment_min(x, segment_ids, num_segments):
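The new dynamic-shape branch aside, Case 1 above (-1 in the target shape) works as documented; a sketch matching the docstring example:

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1], [2]]).astype(np.float32))  # shape (2, 1)
    print(ops.broadcast_to(x, (-1, 2)))
    # [[1. 1.]
    #  [2. 2.]] -- the -1 keeps the input's size 2 in that position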
@@ -3960,14 +4596,14 @@ def unsorted_segment_prod(x, segment_ids, num_segments):
 return unsorted_segment_prod_(x, segment_ids, num_segments)


- def index_fill(x,
+ def index_fill(x, axis, index, value):
 """
- Fills the elements under the `
+ Fills the elements under the `axis` dimension of the input Tensor `x` with the input `value`
 by selecting the indices in the order given in `index`.

 Args:
 x (Tensor): Input Tensor. The supported data type is Number or Bool.
-
+ axis (Union[int, Tensor]): Dimension along which to fill the input Tensor. Only supports
 an int number or a 0-dimensional Tensor, whose data type is int32 or int64.
 index (Tensor): Indices of the input Tensor to fill in. The dtype must be int32.
 value (Union[bool, int, float, Tensor]): Value to fill the returned Tensor. If `value` is
@@ -3979,20 +4615,20 @@ def index_fill(x, dim, index, value):

 Raises:
 TypeError: If `x` is not a Tensor.
- TypeError: If `
- TypeError: When `
+ TypeError: If `axis` is neither int number nor Tensor.
+ TypeError: When `axis` is a Tensor, its dtype is not int32 or int64.
 TypeError: If `index` is not a Tensor.
 TypeError: If dtype of `index` is not int32.
 TypeError: If `value` is not a bool, int, float, or Tensor.
 TypeError: When `value` is a Tensor, the dtype of `x` and `value` are not the same.
- ValueError: If `
+ ValueError: If `axis` is a Tensor and its rank is not equal to 0.
 ValueError: If the rank of `index` is greater than 1D.
 ValueError: When `value` is a Tensor and its rank is not equal to 0.
- RuntimeError: If the value of `
- RuntimeError: If the values of `index` are out the range of `[-x.shape[
+ RuntimeError: If the value of `axis` is out the range of `[-x.ndim, x.ndim - 1]`.
+ RuntimeError: If the values of `index` are out the range of `[-x.shape[axis], x.shape[axis]-1]`.

 Supported Platforms:
- ``GPU``
+ ``Ascend`` ``GPU`` ``CPU``

 Examples:
 >>> import mindspore
@@ -4008,11 +4644,69 @@ def index_fill(x, dim, index, value):
 [-2. 5. -2.]
 [-2. 8. -2.]]
 """
- if isinstance(
-
+ if isinstance(axis, int) and not isinstance(axis, bool):
+ axis = cast_(axis, mstype.int32)
 if isinstance(value, (bool, float, int)):
 value = cast_(value, x.dtype)
- return index_fill_(x,
+ return index_fill_(x, axis, index, value)
+
+
+ @constexpr
+ def _check_check_axis_in_range(axis, ndim):
+ """Checks axes are with the bounds of ndim"""
+ axis = validator.check_axis_in_range(axis, ndim)
+ return axis
+
+
+ def index_select(input, axis, index):
+ """
+ Generates a new Tensor that accesses the values of `input` along the specified `axis` dimension
+ using the indices specified in `index`. The new Tensor has the same number of dimensions as `input`,
+ with the size of the `axis` dimension being equal to the length of `index`, and the size of all other
+ dimensions will be unchanged from the original `input` Tensor.
+
+ .. note::
+ The value of index must be in the range of `[0, input.shape[axis])`, the result is undefined out of range.
+
+ Args:
+ input (Tensor): The input Tensor.
+ axis (int): The dimension to be indexed.
+ index (Tensor): A 1-D Tensor with the indices to access in `input` along the specified axis.
+
+ Returns:
+ Tensor, has the same dtype as input Tensor.
+
+ Raises:
+ TypeError: If `input` or `index` is not a Tensor.
+ TypeError: If `axis` is not int number.
+ ValueError: If the value of `axis` is out the range of `[-input.ndim, input.ndim - 1]`.
+ ValueError: If the dimension of `index` is not equal to 1.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops
+ >>> import numpy as np
+ >>> input = Tensor(np.arange(16).astype(np.float32).reshape(2, 2, 4))
+ >>> print(input)
+ [[[ 0. 1. 2. 3.]
+ [ 4. 5. 6. 7.]]
+ [[ 8. 9. 10. 11.]
+ [12. 13. 14. 15.]]]
+ >>> index = Tensor([0,], mindspore.int32)
+ >>> y = ops.index_select(input, 1, index)
+ >>> print(y)
+ [[[ 0. 1. 2. 3.]]
+ [[ 8. 9. 10. 11.]]]
+ """
+ if not (isinstance(input, Tensor) and isinstance(index, Tensor)):
+ raise TypeError(f"For 'index_select', `input` and `index` must be all tensors.")
+ if index.ndim != 1:
+ raise ValueError(f"For 'index_select', the dimension of `index` must be 1, but got {index.ndim}")
+ axis = _check_check_axis_in_range(axis, input.ndim)
+ return gather_(input, index, axis)
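As the last line of the new `index_select` shows, it validates its arguments and then delegates to `gather`; a sketch of that equivalence:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.arange(16).astype(np.float32).reshape(2, 2, 4))
    index = Tensor([0], mindspore.int32)
    a = ops.index_select(x, 1, index)
    b = ops.gather(x, index, 1)  # same result for a 1-D index
    print((a.asnumpy() == b.asnumpy()).all())  # True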
@@ -4049,6 +4743,28 @@ def population_count(input_x):
 ##############################


+ def is_tensor(obj):
+ r"""
+ Check whether the input object is a :class:`mindspore.Tensor` .
+
+ Args:
+ obj (Object): input object.
+
+ Returns:
+ Bool. Return True if `obj` is a Tensor, otherwise, return False.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> from mindspore import Tensor, ops
+ >>> a = Tensor([1.9, 2.2, 3.1])
+ >>> ops.is_tensor(a)
+ True
+ """
+ return isinstance(obj, Tensor)
+
+
 def scalar_cast(input_x, input_y):
 """
 Casts the input scalar to another type.
@@ -4101,6 +4817,7 @@ def tensor_scatter_mul(input_x, indices, updates):
 Raises:
 TypeError: If dtype of `indices` is neither int32 nor int64.
 ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
+ RuntimeError: If a value of `indices` is not in `input_x`.

 Supported Platforms:
 ``GPU`` ``CPU``
@@ -4156,6 +4873,7 @@ def tensor_scatter_div(input_x, indices, updates):
 Raises:
 TypeError: If dtype of `indices` is neither int32 nor int64.
 ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
+ RuntimeError: If a value of `indices` is not in `input_x`.

 Supported Platforms:
 ``GPU`` ``CPU``
@@ -4175,12 +4893,19 @@ def tensor_scatter_div(input_x, indices, updates):
 return tensor_scatter_div_(input_x, indices, updates)


+ def scalar_to_array(input_x):
+ """
+ The interface is deprecated. Please use the :func:`mindspore.ops.scalar_to_tensor` instead.
+ """
+ return P.ScalarToArray()(input_x)
+
+
 def scalar_to_tensor(input_x, dtype=mstype.float32):
 """
 Converts a scalar to a `Tensor`, and converts the data type to the specified type.

 Args:
- input_x (Union[int, float]): The input is a scalar. Only constant value is allowed.
+ input_x (Union[bool, int, float]): The input is a scalar. Only constant value is allowed.
 dtype (mindspore.dtype): The target data type. Default: mindspore.float32. Only
 constant value is allowed.

@@ -4188,7 +4913,7 @@ def scalar_to_tensor(input_x, dtype=mstype.float32):
 Tensor. 0-D Tensor and the content is the input.

 Raises:
- TypeError: If `input_x` is neither int nor float.
+ TypeError: If `input_x` is neither bool nor int nor float.

 Supported Platforms:
 ``Ascend`` ``GPU`` ``CPU``
@@ -4233,23 +4958,27 @@ def tuple_to_array(input_x):
|
|
|
4233
4958
|
>>> print(output)
|
|
4234
4959
|
[1 2 3]
|
|
4235
4960
|
"""
|
|
4236
|
-
|
|
4961
|
+
if isinstance(input_x[0], int):
|
|
4962
|
+
dtype = mstype.int32
|
|
4963
|
+
else:
|
|
4964
|
+
dtype = mstype.float32
|
|
4965
|
+
return tuple_to_tensor_(input_x, dtype)
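The dtype branch above keys off the first element only; a short sketch of that rule, assuming PyNative execution:

from mindspore import ops

# First element int -> int32 output; first element float -> float32 output.
print(ops.tuple_to_array((1, 2, 3)).dtype)   # Int32, per the isinstance branch
print(ops.tuple_to_array((1.0, 2.0)).dtype)  # Float32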
|
|
4237
4966
|
|
|
4238
4967
|
|
|
4239
|
-
def masked_select(
|
|
4968
|
+
def masked_select(input, mask):
|
|
4240
4969
|
"""
|
|
4241
4970
|
Returns a new 1-D Tensor which indexes the `input` tensor according to the boolean `mask`.
|
|
4242
4971
|
The shapes of the `mask` tensor and the `input` tensor don't need to match, but they must be broadcastable.
|
|
4243
4972
|
|
|
4244
4973
|
Args:
|
|
4245
|
-
|
|
4974
|
+
input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
|
|
4246
4975
|
mask (Tensor[bool]): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
|
|
4247
4976
|
|
|
4248
4977
|
Returns:
|
|
4249
|
-
A 1-D Tensor, with the same type as `
|
|
4978
|
+
A 1-D Tensor, with the same type as `input`.
|
|
4250
4979
|
|
|
4251
4980
|
Raises:
|
|
4252
|
-
TypeError: If `
|
|
4981
|
+
TypeError: If `input` or `mask` is not a Tensor.
|
|
4253
4982
|
TypeError: If dtype of `mask` is not bool.
|
|
4254
4983
|
|
|
4255
4984
|
Supported Platforms:
|
|
@@ -4265,7 +4994,7 @@ def masked_select(x, mask):
|
|
|
4265
4994
|
>>> print(output)
|
|
4266
4995
|
[1 3]
|
|
4267
4996
|
"""
|
|
4268
|
-
return masked_select_(
|
|
4997
|
+
return masked_select_(input, mask)
|
|
4269
4998
|
|
|
4270
4999
|
|
|
4271
5000
|
def masked_fill(input_x, mask, value):
|
|
@@ -4307,26 +5036,26 @@ def masked_fill(input_x, mask, value):
|
|
|
4307
5036
|
return masked_fill_(input_x, mask, value)
|
|
4308
5037
|
|
|
4309
5038
|
|
|
4310
|
-
def diag(
|
|
5039
|
+
def diag(input):
|
|
4311
5040
|
r"""
|
|
4312
5041
|
Constructs a diagonal tensor with a given diagonal values.
|
|
4313
5042
|
|
|
4314
|
-
Assume `
|
|
4315
|
-
rank 2k with dimensions :math:`
|
|
4316
|
-
:math:`output[i_1,..., i_k, i_1,..., i_k] =
|
|
5043
|
+
Assume `input` has dimensions :math:`(D_1, ..., D_k)` , the output is a tensor of
|
|
5044
|
+
rank 2k with dimensions :math:`(D_1,..., D_k, D_1,..., D_k)` where:
|
|
5045
|
+
:math:`output[i_1,..., i_k, i_1,..., i_k] = input[i_1,..., i_k]` and 0 everywhere else.
|
|
4317
5046
|
|
|
4318
5047
|
Args:
|
|
4319
|
-
|
|
5048
|
+
input (Tensor): The input tensor.
|
|
4320
5049
|
|
|
4321
5050
|
Returns:
|
|
4322
|
-
Tensor, has the same dtype as the `
|
|
5051
|
+
Tensor, has the same dtype as the `input`.
|
|
4323
5052
|
|
|
4324
5053
|
Raises:
|
|
4325
|
-
TypeError: If `
|
|
4326
|
-
ValueError: If rank of `
|
|
5054
|
+
TypeError: If `input` is not a Tensor.
|
|
5055
|
+
ValueError: If rank of `input` is less than 1.
|
|
4327
5056
|
|
|
4328
5057
|
Supported Platforms:
|
|
4329
|
-
``Ascend`` ``GPU``
|
|
5058
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
4330
5059
|
|
|
4331
5060
|
Examples:
|
|
4332
5061
|
>>> from mindspore import Tensor
|
|
@@ -4339,7 +5068,58 @@ def diag(input_x):
|
|
|
4339
5068
|
[0 0 3 0]
|
|
4340
5069
|
[0 0 0 4]]
|
|
4341
5070
|
"""
|
|
4342
|
-
return diag_(
|
|
5071
|
+
return diag_(input)
|
|
5072
|
+
|
|
5073
|
+
|
|
5074
|
+
def diagflat(input, offset=0):
|
|
5075
|
+
r"""
|
|
5076
|
+
Creates a 2-D Tensor whose diagonal is the flattened `input` .
|
|
5077
|
+
|
|
5078
|
+
Args:
|
|
5079
|
+
input (Tensor): Input Tensor, which is flattened and set as the diagonal of the output.
|
|
5080
|
+
offset (int, optional): `offset` controls which diagonal to choose. Default: 0.
|
|
5081
|
+
|
|
5082
|
+
- When `offset` is zero, the diagonal chosen is the main diagonal.
|
|
5083
|
+
- When `offset` is a positive integer, the diagonal chosen is above the main diagonal.
|
|
5084
|
+
- When `offset` is a negative integer, the diagonal chosen is below the main diagonal.
|
|
5085
|
+
|
|
5086
|
+
Returns:
|
|
5087
|
+
The 2-D Tensor, whose diagonal is the flattened `input`.
|
|
5088
|
+
|
|
5089
|
+
Raises:
|
|
5090
|
+
TypeError: If `input` is not a tensor.
|
|
5091
|
+
TypeError: If `offset` is not an integer.
|
|
5092
|
+
|
|
5093
|
+
Supported Platforms:
|
|
5094
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
5095
|
+
|
|
5096
|
+
Examples:
|
|
5097
|
+
>>> x = Tensor([1, 2], mindspore.float32)
|
|
5098
|
+
>>> output = ops.diagflat(x, 1)
|
|
5099
|
+
>>> print(output)
|
|
5100
|
+
[[0. 1. 0.]
|
|
5101
|
+
[0. 0. 2.]
|
|
5102
|
+
[0. 0. 0.]]
|
|
5103
|
+
"""
|
|
5104
|
+
if not isinstance(input, Tensor):
|
|
5105
|
+
raise TypeError(f"For diagflat, the input x must be tensor, but got {type(input)}")
|
|
5106
|
+
if not isinstance(offset, int):
|
|
5107
|
+
raise TypeError(f"For diagflat, the offset must be int, but got {type(offset)}")
|
|
5108
|
+
offset_abs = abs(offset)
|
|
5109
|
+
if input.size == 0:
|
|
5110
|
+
return zeros((offset_abs, offset_abs), input.dtype)
|
|
5111
|
+
input = input.ravel()
|
|
5112
|
+
res = diag(input)
|
|
5113
|
+
if offset != 0:
|
|
5114
|
+
pad_y = zeros((input.size + offset_abs, offset_abs), input.dtype)
|
|
5115
|
+
pad_x = zeros((offset_abs, input.size), input.dtype)
|
|
5116
|
+
if offset < 0:
|
|
5117
|
+
res = cat((pad_x, res), axis=0)
|
|
5118
|
+
res = cat((res, pad_y), axis=1)
|
|
5119
|
+
else:
|
|
5120
|
+
res = cat((res, pad_x), axis=0)
|
|
5121
|
+
res = cat((pad_y, res), axis=1)
|
|
5122
|
+
return res
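A hedged check of the padding arithmetic above against `numpy.diagflat`, which it is expected to match for these inputs:

import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor([1.0, 2.0], mindspore.float32)
for off in (-1, 0, 1):
    # For each offset, the MindSpore result should equal NumPy's reference.
    ms_out = ops.diagflat(x, off).asnumpy()
    np_out = np.diagflat(np.array([1.0, 2.0], np.float32), off)
    print(off, np.array_equal(ms_out, np_out))  # expected: True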
|
|
4343
5123
|
|
|
4344
5124
|
|
|
4345
5125
|
def col2im(input_x, output_size, kernel_size, dilation, padding_value, stride):
|
|
@@ -4371,7 +5151,7 @@ def col2im(input_x, output_size, kernel_size, dilation, padding_value, stride):
|
|
|
4371
5151
|
ValueError: If input_x.shape[3] does not match the calculated number of sliding blocks.
|
|
4372
5152
|
|
|
4373
5153
|
Supported Platforms:
|
|
4374
|
-
``GPU``
|
|
5154
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
4375
5155
|
|
|
4376
5156
|
Examples:
|
|
4377
5157
|
>>> x = Tensor(input_data=np.random.rand(16, 16, 4, 25), dtype=mstype.float32)
|
|
@@ -4384,153 +5164,636 @@ def col2im(input_x, output_size, kernel_size, dilation, padding_value, stride):
|
|
|
4384
5164
|
return c2i(input_x, output_size)
|
|
4385
5165
|
|
|
4386
5166
|
|
|
4387
|
-
def
|
|
4388
|
-
|
|
4389
|
-
Splits the input tensor into
|
|
4390
|
-
|
|
4391
|
-
|
|
4392
|
-
|
|
5167
|
+
def _split_int(x, split_size_or_sections, axis):
|
|
5168
|
+
"""
|
|
5169
|
+
Splits the input tensor `x` into multiple sub-tensors along the axis according to the given `split_size_or_sections`
|
|
5170
|
+
with int type.
|
|
5171
|
+
"""
|
|
5172
|
+
arr_shape = x.shape
|
|
5173
|
+
length_along_dim = arr_shape[axis]
|
|
5174
|
+
if split_size_or_sections > length_along_dim:
|
|
5175
|
+
res = P.Split(axis, 1)(x)
|
|
5176
|
+
elif length_along_dim % split_size_or_sections == 0:
|
|
5177
|
+
sections = length_along_dim // split_size_or_sections
|
|
5178
|
+
res = P.Split(axis, sections)(x)
|
|
5179
|
+
else:
|
|
5180
|
+
num_sections = length_along_dim // split_size_or_sections
|
|
5181
|
+
length1 = num_sections * split_size_or_sections
|
|
5182
|
+
length2 = length_along_dim - length1
|
|
5183
|
+
start1 = _list_comprehensions(rank(x), 0, True)
|
|
5184
|
+
size1 = _tuple_setitem(arr_shape, axis, length1)
|
|
5185
|
+
start2 = _tuple_setitem(start1, axis, length1)
|
|
5186
|
+
size2 = _tuple_setitem(arr_shape, axis, length2)
|
|
5187
|
+
res = P.Split(axis, num_sections)(tensor_slice(x, start1, size1)) + \
|
|
5188
|
+
P.Split(axis, 1)(tensor_slice(x, start2, size2))
|
|
5189
|
+
return res
|
|
5190
|
+
|
|
5191
|
+
|
|
5192
|
+
def _split_sub_tensors(x, split_size_or_sections, axis):
|
|
5193
|
+
"""
|
|
5194
|
+
Splits the input tensor `x` into multiple sub-tensors along the axis according to the given `split_size_or_sections`
|
|
5195
|
+
with type of tuple or list.
|
|
5196
|
+
"""
|
|
5197
|
+
new_indices = [0]
|
|
5198
|
+
for i, split_size in enumerate(split_size_or_sections):
|
|
5199
|
+
new_indices.append(new_indices[i] + split_size)
|
|
5200
|
+
new_indices = new_indices[1:]
|
|
5201
|
+
sub_tensors = []
|
|
5202
|
+
strides = _list_comprehensions(x.ndim, 1, True)
|
|
5203
|
+
begin = _list_comprehensions(x.ndim, 0)
|
|
5204
|
+
end = _list_comprehensions(x.shape)
|
|
5205
|
+
for i, idx in enumerate(new_indices):
|
|
5206
|
+
begin[axis] = 0 if i == 0 else new_indices[i - 1]
|
|
5207
|
+
end[axis] = idx
|
|
5208
|
+
sliced_tensor = strided_slice(x, tuple(begin), tuple(end), strides)
|
|
5209
|
+
sub_tensors.append(sliced_tensor)
|
|
5210
|
+
return sub_tensors
|
|
5211
|
+
|
|
5212
|
+
|
|
5213
|
+
def split(tensor, split_size_or_sections, axis=0):
|
|
5214
|
+
"""
|
|
5215
|
+
Splits the Tensor into chunks along the given axis.
|
|
4393
5216
|
|
|
4394
5217
|
Args:
|
|
4395
|
-
|
|
4396
|
-
|
|
4397
|
-
|
|
5218
|
+
tensor (Tensor): A Tensor to be divided.
|
|
5219
|
+
split_size_or_sections (Union[int, tuple(int), list(int)]):
|
|
5220
|
+
If `split_size_or_sections` is an int type, `tensor` will be split into equally sized chunks,
|
|
5221
|
+
each chunk of size `split_size_or_sections`. The last chunk will be smaller than `split_size_or_sections`
|
|
5222
|
+
if `tensor.shape[axis]` is not divisible by `split_size_or_sections`.
|
|
5223
|
+
If `split_size_or_sections` is a list type, then `tensor` will be split into len(split_size_or_sections)
|
|
5224
|
+
chunks with sizes `split_size_or_sections` along the given `axis`.
|
|
5225
|
+
axis (int): The axis along which to split. Default: 0.
|
|
4398
5226
|
|
|
4399
5227
|
Returns:
|
|
4400
|
-
tuple
|
|
4401
|
-
:math:`(y_1, y_2, ..., y_S)`. And the data type is the same with `input_x`.
|
|
5228
|
+
A tuple of sub-tensors.
|
|
4402
5229
|
|
|
4403
5230
|
Raises:
|
|
4404
|
-
TypeError: If
|
|
4405
|
-
|
|
4406
|
-
|
|
4407
|
-
|
|
5231
|
+
TypeError: If argument `tensor` is not Tensor.
|
|
5232
|
+
TypeError: If argument `axis` is not int.
|
|
5233
|
+
ValueError: If argument `axis` is out of range of :math:`[-tensor.ndim, tensor.ndim)` .
|
|
5234
|
+
TypeError: If each element in 'split_size_or_sections' is not an integer.
|
|
5235
|
+
TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
|
|
5236
|
+
ValueError: If the sum of 'split_size_or_sections' is not equal to `tensor.shape[axis]`.
|
|
4408
5237
|
|
|
4409
5238
|
Supported Platforms:
|
|
4410
5239
|
``Ascend`` ``GPU`` ``CPU``
|
|
4411
5240
|
|
|
4412
5241
|
Examples:
|
|
4413
|
-
>>>
|
|
4414
|
-
>>>
|
|
4415
|
-
|
|
4416
|
-
|
|
4417
|
-
|
|
4418
|
-
|
|
4419
|
-
|
|
4420
|
-
|
|
4421
|
-
|
|
4422
|
-
|
|
4423
|
-
|
|
4424
|
-
|
|
4425
|
-
|
|
4426
|
-
|
|
4427
|
-
|
|
4428
|
-
|
|
4429
|
-
|
|
4430
|
-
|
|
4431
|
-
|
|
4432
|
-
|
|
4433
|
-
|
|
4434
|
-
|
|
4435
|
-
|
|
4436
|
-
|
|
4437
|
-
|
|
4438
|
-
|
|
4439
|
-
|
|
4440
|
-
|
|
4441
|
-
|
|
4442
|
-
|
|
4443
|
-
|
|
4444
|
-
|
|
4445
|
-
|
|
4446
|
-
|
|
5242
|
+
>>> input_x = np.arange(9).astype("float32")
|
|
5243
|
+
>>> output = ops.split(Tensor(input_x), 3)
|
|
5244
|
+
>>> print(output)
|
|
5245
|
+
(Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
|
|
5246
|
+
Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
|
|
5247
|
+
Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
|
|
5248
|
+
"""
|
|
5249
|
+
if not isinstance(tensor, Tensor):
|
|
5250
|
+
raise TypeError(f'expect `tensor` is a Tensor, but got {type(tensor)}')
|
|
5251
|
+
if type(axis) is not int:
|
|
5252
|
+
raise TypeError(f"Type of Argument `axis` should be integer but got {type(axis)}")
|
|
5253
|
+
arr_axis = _canonicalize_axis(axis, tensor.ndim)
|
|
5254
|
+
|
|
5255
|
+
if type(split_size_or_sections) is int:
|
|
5256
|
+
if split_size_or_sections > 0:
|
|
5257
|
+
res = _split_int(tensor, split_size_or_sections, arr_axis)
|
|
5258
|
+
else:
|
|
5259
|
+
raise ValueError(f"For split, the value of 'split_size_or_sections' must be more than zero, "
|
|
5260
|
+
f"but got {split_size_or_sections}.")
|
|
5261
|
+
elif isinstance(split_size_or_sections, (list, tuple)):
|
|
5262
|
+
for item in split_size_or_sections:
|
|
5263
|
+
if type(item) is not int:
|
|
5264
|
+
raise TypeError(f"Each element in 'split_size_or_sections' should be integer, but got {type(item)}.")
|
|
5265
|
+
if item < 0:
|
|
5266
|
+
raise TypeError(f"Each element in 'split_size_or_sections' should be non-negative, "
|
|
5267
|
+
f"but got {split_size_or_sections}.")
|
|
5268
|
+
|
|
5269
|
+
if sum(split_size_or_sections) != tensor.shape[arr_axis]:
|
|
5270
|
+
raise ValueError(f"The sum of 'split_size_or_sections' should be equal to {tensor.shape[arr_axis]}, "
|
|
5271
|
+
f"but got {sum(split_size_or_sections)}.")
|
|
5272
|
+
res = _split_sub_tensors(tensor, split_size_or_sections, arr_axis)
|
|
5273
|
+
else:
|
|
5274
|
+
raise TypeError(f"Type of Argument `split_size_or_sections` should be integer, tuple(int) or list(int), " \
|
|
5275
|
+
f"but got {type(split_size_or_sections)}")
|
|
5276
|
+
return tuple(res)
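A short sketch of both accepted forms of `split_size_or_sections`; the list form must sum to `tensor.shape[axis]`, as the checks above enforce:

import numpy as np
from mindspore import Tensor, ops

t = Tensor(np.arange(10).astype(np.float32))
# int form: chunk size 4 over length 10 -> shapes (4,), (4,), (2,); the tail chunk is smaller.
print([s.shape for s in ops.split(t, 4)])
# list form: explicit sizes that sum to 10.
print([s.shape for s in ops.split(t, [3, 3, 4])])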
|
|
4447
5277
|
|
|
4448
|
-
.. warning::
|
|
4449
|
-
- If there are multiple maximum values, the index of the first maximum value is used.
|
|
4450
|
-
- The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "x".
|
|
4451
5278
|
|
|
4452
|
-
|
|
5279
|
+
def tril(input, diagonal=0): # pylint: disable=redefined-outer-name
|
|
5280
|
+
"""
|
|
5281
|
+
Returns the lower triangle part of `input` (elements that contain the diagonal and below),
|
|
5282
|
+
and sets the other elements to zero.
|
|
4453
5283
|
|
|
4454
5284
|
Args:
|
|
4455
|
-
|
|
4456
|
-
|
|
4457
|
-
|
|
4458
|
-
|
|
4459
|
-
the output will reduce dimension if false. Default: False.
|
|
5285
|
+
input (Tensor): A Tensor with shape :math:`(x_1, x_2, ..., x_R)`. The rank must be at least 2.
|
|
5286
|
+
Supporting all number types including bool.
|
|
5287
|
+
diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: 0,
|
|
5288
|
+
indicating the main diagonal.
|
|
4460
5289
|
|
|
4461
5290
|
Returns:
|
|
4462
|
-
|
|
4463
|
-
tensor.
|
|
4464
|
-
|
|
4465
|
-
- index (Tensor) - The index for the maximum value of the input tensor, with dtype int32. If `keep_dims`
|
|
4466
|
-
is true, the shape of output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`.
|
|
4467
|
-
Otherwise, the shape is :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
|
|
4468
|
-
- values (Tensor) - The maximum value of input tensor, with the same shape as index, and same dtype as x.
|
|
5291
|
+
Tensor, with the same shape and data type as `input`.
|
|
4469
5292
|
|
|
4470
5293
|
Raises:
|
|
4471
|
-
TypeError: If `x` is not Tensor.
|
|
4472
|
-
TypeError: If `
|
|
4473
|
-
TypeError: If `
|
|
5294
|
+
TypeError: If `input` is not a Tensor.
|
|
5295
|
+
TypeError: If `diagonal` is not an int.
|
|
5296
|
+
TypeError: If the type of `input` is neither number nor bool.
|
|
5297
|
+
ValueError: If the rank of `input` is less than 2.
|
|
4474
5298
|
|
|
4475
5299
|
Supported Platforms:
|
|
4476
5300
|
``Ascend`` ``GPU`` ``CPU``
|
|
4477
5301
|
|
|
4478
5302
|
Examples:
|
|
4479
|
-
>>> x = Tensor(np.array([
|
|
4480
|
-
|
|
4481
|
-
|
|
4482
|
-
|
|
4483
|
-
>>>
|
|
4484
|
-
>>> print(
|
|
4485
|
-
[
|
|
4486
|
-
|
|
4487
|
-
|
|
4488
|
-
|
|
4489
|
-
|
|
4490
|
-
|
|
4491
|
-
|
|
4492
|
-
|
|
4493
|
-
|
|
4494
|
-
|
|
4495
|
-
|
|
5303
|
+
>>> x = Tensor(np.array([[ 1, 2, 3, 4],
|
|
5304
|
+
... [ 5, 6, 7, 8],
|
|
5305
|
+
... [10, 11, 12, 13],
|
|
5306
|
+
... [14, 15, 16, 17]]))
|
|
5307
|
+
>>> result = ops.tril(x)
|
|
5308
|
+
>>> print(result)
|
|
5309
|
+
[[ 1 0 0 0]
|
|
5310
|
+
[ 5 6 0 0]
|
|
5311
|
+
[10 11 12 0]
|
|
5312
|
+
[14 15 16 17]]
|
|
5313
|
+
>>> x = Tensor(np.array([[ 1, 2, 3, 4],
|
|
5314
|
+
... [ 5, 6, 7, 8],
|
|
5315
|
+
... [10, 11, 12, 13],
|
|
5316
|
+
... [14, 15, 16, 17]]))
|
|
5317
|
+
>>> result = ops.tril(x, diagonal=1)
|
|
5318
|
+
>>> print(result)
|
|
5319
|
+
[[ 1 2 0 0]
|
|
5320
|
+
[ 5 6 7 0]
|
|
5321
|
+
[10 11 12 13]
|
|
5322
|
+
[14 15 16 17]]
|
|
5323
|
+
>>> x = Tensor(np.array([[ 1, 2, 3, 4],
|
|
5324
|
+
... [ 5, 6, 7, 8],
|
|
5325
|
+
... [10, 11, 12, 13],
|
|
5326
|
+
... [14, 15, 16, 17]]))
|
|
5327
|
+
>>> result = ops.tril(x, diagonal=-1)
|
|
5328
|
+
>>> print(result)
|
|
5329
|
+
[[ 0 0 0 0]
|
|
5330
|
+
[ 5 0 0 0]
|
|
5331
|
+
[10 11 0 0]
|
|
5332
|
+
[14 15 16 0]]
|
|
5333
|
+
"""
|
|
5334
|
+
tril_ = Tril(diagonal)
|
|
5335
|
+
return tril_(input)
|
|
5336
|
+
|
|
5337
|
+
|
|
5338
|
+
def triu(input, diagonal=0): # pylint: disable=redefined-outer-name
|
|
5339
|
+
r"""
|
|
5340
|
+
Returns the upper triangle part of `input` (elements that contain the diagonal and above),
|
|
5341
|
+
and sets the other elements to zero.
|
|
4496
5342
|
|
|
4497
5343
|
Args:
|
|
4498
|
-
|
|
4499
|
-
|
|
4500
|
-
|
|
4501
|
-
Default: None.
|
|
4502
|
-
keepdims (bool, optional): Whether the output tensor retains the specified
|
|
4503
|
-
dimension. Ignored if `axis` is None. Default: False.
|
|
5344
|
+
input (Tensor): The input tensor with shape :math:`(N, *)` where * means any number of additional dimensions.
|
|
5345
|
+
diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: 0,
|
|
5346
|
+
indicating the main diagonal.
|
|
4504
5347
|
|
|
4505
5348
|
Returns:
|
|
4506
|
-
Tensor,
|
|
5349
|
+
Tensor, with the same shape and data type as `input`.
|
|
4507
5350
|
|
|
4508
5351
|
Raises:
|
|
4509
|
-
|
|
5352
|
+
TypeError: If `diagonal` is not an int.
|
|
5353
|
+
TypeError: If `input` is not a Tensor.
|
|
5354
|
+
ValueError: If length of shape of `input` is less than 1.
|
|
4510
5355
|
|
|
4511
5356
|
Supported Platforms:
|
|
4512
|
-
``
|
|
5357
|
+
``GPU`` ``CPU``
|
|
4513
5358
|
|
|
4514
5359
|
Examples:
|
|
4515
|
-
>>> x = Tensor(np.array([[1,
|
|
4516
|
-
|
|
4517
|
-
|
|
4518
|
-
[
|
|
4519
|
-
|
|
4520
|
-
|
|
4521
|
-
|
|
4522
|
-
|
|
5360
|
+
>>> x = Tensor(np.array([[ 1, 2, 3, 4],
|
|
5361
|
+
... [ 5, 6, 7, 8],
|
|
5362
|
+
... [10, 11, 12, 13],
|
|
5363
|
+
... [14, 15, 16, 17]]))
|
|
5364
|
+
>>> result = ops.triu(x)
|
|
5365
|
+
>>> print(result)
|
|
5366
|
+
[[ 1 2 3 4]
|
|
5367
|
+
[ 0 6 7 8]
|
|
5368
|
+
[ 0 0 12 13]
|
|
5369
|
+
[ 0 0 0 17]]
|
|
5370
|
+
>>> x = Tensor(np.array([[ 1, 2, 3, 4],
|
|
5371
|
+
... [ 5, 6, 7, 8],
|
|
5372
|
+
... [10, 11, 12, 13],
|
|
5373
|
+
... [14, 15, 16, 17]]))
|
|
5374
|
+
>>> result = ops.triu(x, diagonal=1)
|
|
5375
|
+
>>> print(result)
|
|
5376
|
+
[[ 0 2 3 4]
|
|
5377
|
+
[ 0 0 7 8]
|
|
5378
|
+
[ 0 0 0 13]
|
|
5379
|
+
[ 0 0 0 0]]
|
|
5380
|
+
>>> x = Tensor(np.array([[ 1, 2, 3, 4],
|
|
5381
|
+
... [ 5, 6, 7, 8],
|
|
5382
|
+
... [10, 11, 12, 13],
|
|
5383
|
+
... [14, 15, 16, 17]]))
|
|
5384
|
+
>>> result = ops.triu(x, diagonal=-1)
|
|
5385
|
+
>>> print(result)
|
|
5386
|
+
[[ 1 2 3 4]
|
|
5387
|
+
[ 5 6 7 8]
|
|
5388
|
+
[ 0 11 12 13]
|
|
5389
|
+
[ 0 0 16 17]]
|
|
5390
|
+
"""
|
|
5391
|
+
return _get_cache_prim(P.Triu)(diagonal)(input)
|
|
5392
|
+
|
|
5393
|
+
|
|
5394
|
+
@constexpr
|
|
5395
|
+
def _canonicalize_axis(axis, ndim):
|
|
5396
|
+
"""
|
|
5397
|
+
Check that axes are within the number of dimensions of the tensor and normalize negative axes.
|
|
5398
|
+
|
|
5399
|
+
Args:
|
|
5400
|
+
axis (Union[int, tuple(int), list(int)]): Axes of the tensor.
|
|
5401
|
+
ndim (int): The number of dimensions of the tensor.
|
|
5402
|
+
|
|
5403
|
+
Return:
|
|
5404
|
+
Axis (Union[int, tuple(int)]). If input is integer, return integer, else tuple.
|
|
5405
|
+
"""
|
|
5406
|
+
if isinstance(axis, int):
|
|
5407
|
+
axis = [axis]
|
|
5408
|
+
for ax in axis:
|
|
5409
|
+
if not isinstance(ax, int):
|
|
5410
|
+
raise TypeError(f'axis should be integers, not {type(ax)}')
|
|
5411
|
+
if not -ndim <= ax < ndim:
|
|
5412
|
+
raise ValueError(f'axis {ax} is out of bounds for array of dimension {ndim}')
|
|
5413
|
+
|
|
5414
|
+
def canonicalizer(ax):
|
|
5415
|
+
return ax + ndim if ax < 0 else ax
|
|
5416
|
+
|
|
5417
|
+
axis = tuple([canonicalizer(ax) for ax in axis])
|
|
5418
|
+
if all(axis.count(el) <= 1 for el in axis):
|
|
5419
|
+
return tuple(sorted(axis)) if len(axis) > 1 else axis[0]
|
|
5420
|
+
raise ValueError(f"duplicate axis in {axis}.")
|
|
5421
|
+
|
|
5422
|
+
|
|
5423
|
+
@constexpr
|
|
5424
|
+
def _list_comprehensions(obj, item=None, return_tuple=False):
|
|
5425
|
+
"""
|
|
5426
|
+
Generates a new list or tuple by list comprehension.
|
|
5427
|
+
|
|
5428
|
+
Args:
|
|
5429
|
+
obj (Union[int, list, tuple]):
|
|
5430
|
+
If integer, it will be the length of the returned tuple/list.
|
|
5431
|
+
item: The value to be filled. Default: None.
|
|
5432
|
+
If None, the values in the new list/tuple are the same as obj
|
|
5433
|
+
or range(obj) when obj is integer.
|
|
5434
|
+
return_tuple(bool): If true, returns tuple, else returns list.
|
|
5435
|
+
|
|
5436
|
+
Returns:
|
|
5437
|
+
List or tuple.
|
|
5438
|
+
"""
|
|
5439
|
+
lst = obj
|
|
5440
|
+
if isinstance(obj, int):
|
|
5441
|
+
lst = np.arange(obj)
|
|
5442
|
+
if item is None:
|
|
5443
|
+
res = list(lst)
|
|
5444
|
+
else:
|
|
5445
|
+
res = [item for _ in lst]
|
|
5446
|
+
if return_tuple:
|
|
5447
|
+
return tuple(res)
|
|
5448
|
+
return res
|
|
5449
|
+
|
|
5450
|
+
|
|
5451
|
+
@constexpr
|
|
5452
|
+
def _tuple_setitem(tup, idx, value):
|
|
5453
|
+
"""
|
|
5454
|
+
Returns a tuple with specified `idx` set to `value`.
|
|
5455
|
+
"""
|
|
5456
|
+
tup = list(tup)
|
|
5457
|
+
tup[idx] = value
|
|
5458
|
+
return tuple(tup)
|
|
5459
|
+
|
|
5460
|
+
|
|
5461
|
+
def _tensor_split_sub_tensors(x, indices_or_sections, axis):
|
|
5462
|
+
"""
|
|
5463
|
+
Splits the input tensor `x` into multiple sub-tensors along the axis according to the given `indices_or_sections`
|
|
5464
|
+
with type of tuple or list.
|
|
5465
|
+
"""
|
|
5466
|
+
length_along_dim = x.shape[axis]
|
|
5467
|
+
indices_or_sections = tuple(indices_or_sections)
|
|
5468
|
+
indices_or_sections += (length_along_dim,)
|
|
5469
|
+
|
|
5470
|
+
sub_tensors = []
|
|
5471
|
+
strides = _list_comprehensions(x.ndim, 1, True)
|
|
5472
|
+
begin = _list_comprehensions(x.ndim, 0)
|
|
5473
|
+
end = _list_comprehensions(x.shape)
|
|
5474
|
+
for i, idx in enumerate(indices_or_sections):
|
|
5475
|
+
begin[axis] = 0 if i == 0 else indices_or_sections[i - 1]
|
|
5476
|
+
end[axis] = idx
|
|
5477
|
+
sliced_tensor = strided_slice(x, tuple(begin), tuple(end), strides)
|
|
5478
|
+
sub_tensors.append(sliced_tensor)
|
|
5479
|
+
return tuple(sub_tensors)
|
|
5480
|
+
|
|
5481
|
+
|
|
5482
|
+
def _tensor_split_sub_int(x, indices_or_sections, axis):
|
|
5483
|
+
"""
|
|
5484
|
+
Splits the input tensor `x` into multiple sub-tensors along the axis according to the given `indices_or_sections`
|
|
5485
|
+
with type of int.
|
|
5486
|
+
"""
|
|
5487
|
+
arr_shape = x.shape
|
|
5488
|
+
length_along_dim = arr_shape[axis]
|
|
5489
|
+
if indices_or_sections > length_along_dim:
|
|
5490
|
+
res = P.Split(axis, length_along_dim)(x)
|
|
5491
|
+
indices_or_sections_n = [length_along_dim, length_along_dim + 1]
|
|
5492
|
+
res2 = _tensor_split_sub_tensors(x, indices_or_sections_n, axis)
|
|
5493
|
+
for _ in np.arange(length_along_dim, indices_or_sections):
|
|
5494
|
+
res += tuple(res2)[1:]
|
|
5495
|
+
elif length_along_dim % indices_or_sections == 0:
|
|
5496
|
+
res = P.Split(axis, indices_or_sections)(x)
|
|
5497
|
+
else:
|
|
5498
|
+
num_long_tensor = length_along_dim % indices_or_sections
|
|
5499
|
+
num_short_tensor = indices_or_sections - num_long_tensor
|
|
5500
|
+
length1 = num_long_tensor * (length_along_dim // indices_or_sections + 1)
|
|
5501
|
+
length2 = length_along_dim - length1
|
|
5502
|
+
start1 = _list_comprehensions(rank(x), 0, True)
|
|
5503
|
+
size1 = _tuple_setitem(arr_shape, axis, length1)
|
|
5504
|
+
start2 = _tuple_setitem(start1, axis, length1)
|
|
5505
|
+
size2 = _tuple_setitem(arr_shape, axis, length2)
|
|
5506
|
+
res = P.Split(axis, num_long_tensor)(tensor_slice(x, start1, size1)) + \
|
|
5507
|
+
P.Split(axis, num_short_tensor)(tensor_slice(x, start2, size2))
|
|
5508
|
+
return res
|
|
5509
|
+
|
|
5510
|
+
|
|
5511
|
+
def tensor_split(input, indices_or_sections, axis=0):
|
|
5512
|
+
r"""
|
|
5513
|
+
Splits a tensor into multiple sub-tensors along the given axis.
|
|
5514
|
+
|
|
5515
|
+
Args:
|
|
5516
|
+
input (Tensor): A Tensor to be divided.
|
|
5517
|
+
indices_or_sections (Union[int, tuple(int), list(int)]):
|
|
5518
|
+
|
|
5519
|
+
- If `indices_or_sections` is an integer n, input tensor will be split into n sections.
|
|
5520
|
+
|
|
5521
|
+
- If :math:`input.size(axis)` can be divisible by n, sub-sections will have equal size
|
|
5522
|
+
:math:`input.size(axis) / n` .
|
|
5523
|
+
- If :math:`input.size(axis)` is not divisible by n, the first :math:`input.size(axis) % n` sections
|
|
5524
|
+
will have size :math:`input.size(axis) // n + 1` , and the rest will have
|
|
5525
|
+
size :math:`input.size(axis) // n` .
|
|
5526
|
+
|
|
5527
|
+
- If `indices_or_sections` is of type tuple(int) or list(int), the input tensor will be split at the
|
|
5528
|
+
indices in the list or tuple. For example, given parameters :math:`indices\_or\_sections=[1, 4]`
|
|
5529
|
+
and :math:`axis=0` , the input tensor will be split into sections :math:`input[:1]` ,
|
|
5530
|
+
:math:`input[1:4]` , and :math:`input[4:]` .
|
|
5531
|
+
|
|
5532
|
+
axis (int): The axis along which to split. Default: 0.
|
|
5533
|
+
|
|
5534
|
+
Returns:
|
|
5535
|
+
A tuple of sub-tensors.
|
|
5536
|
+
|
|
5537
|
+
Raises:
|
|
5538
|
+
TypeError: If argument `input` is not Tensor.
|
|
5539
|
+
TypeError: If argument `axis` is not int.
|
|
5540
|
+
ValueError: If argument `axis` is out of range of :math:`[-input.ndim, input.ndim)` .
|
|
5541
|
+
TypeError: If each element in 'indices_or_sections' is not an integer.
|
|
5542
|
+
TypeError: If argument `indices_or_sections` is not int, tuple(int) or list(int).
|
|
5543
|
+
|
|
5544
|
+
Supported Platforms:
|
|
5545
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
5546
|
+
|
|
5547
|
+
Examples:
|
|
5548
|
+
>>> input_x = np.arange(9).astype("float32")
|
|
5549
|
+
>>> output = ops.tensor_split(Tensor(input_x), 3)
|
|
5550
|
+
>>> print(output)
|
|
5551
|
+
(Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
|
|
5552
|
+
Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
|
|
5553
|
+
Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
|
|
5554
|
+
"""
|
|
5555
|
+
if not isinstance(input, Tensor):
|
|
5556
|
+
raise TypeError(f'expect `input` is a Tensor, but got {type(input)}')
|
|
5557
|
+
|
|
5558
|
+
if type(axis) is not int:
|
|
5559
|
+
raise TypeError(f"Type of Argument `axis` should be integer but got {type(axis)}")
|
|
5560
|
+
handle_axis = _canonicalize_axis(axis, input.ndim)
|
|
5561
|
+
if type(indices_or_sections) is int:
|
|
5562
|
+
if indices_or_sections > 0:
|
|
5563
|
+
res = _tensor_split_sub_int(input, indices_or_sections, handle_axis)
|
|
5564
|
+
else:
|
|
5565
|
+
raise ValueError(f"For tensor_split, the value of 'indices_or_sections' must be more than zero "
|
|
5566
|
+
f"but got {indices_or_sections}")
|
|
5567
|
+
elif isinstance(indices_or_sections, (list, tuple)):
|
|
5568
|
+
for item in indices_or_sections:
|
|
5569
|
+
if type(item) is not int:
|
|
5570
|
+
raise TypeError(f"Each element in 'indices_or_sections' should be integer, but got {type(item)}.")
|
|
5571
|
+
res = _tensor_split_sub_tensors(input, indices_or_sections, handle_axis)
|
|
5572
|
+
else:
|
|
5573
|
+
raise TypeError(f"Type of Argument `indices_or_sections` should be integer, tuple(int) or list(int), " \
|
|
5574
|
+
f"but got {type(indices_or_sections)}")
|
|
5575
|
+
|
|
5576
|
+
return res
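A hedged sketch of the list form documented above: indices [1, 4] on a length-9 tensor yield input[:1], input[1:4], input[4:]:

import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.arange(9).astype(np.float32))
parts = ops.tensor_split(x, [1, 4])
print([p.shape for p in parts])  # expected: [(1,), (3,), (5,)]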
|
|
5577
|
+
|
|
5578
|
+
|
|
5579
|
+
def vsplit(input, indices_or_sections):
|
|
5580
|
+
"""
|
|
5581
|
+
Splits `input` with two or more dimensions into multiple sub-tensors vertically
|
|
5582
|
+
according to `indices_or_sections`.
|
|
5583
|
+
|
|
5584
|
+
It is equivalent to `ops.tensor_split` with :math:`axis=0` .
|
|
5585
|
+
|
|
5586
|
+
Args:
|
|
5587
|
+
input (Tensor): A Tensor to be divided.
|
|
5588
|
+
indices_or_sections (Union[int, tuple(int), list(int)]): See argument in :func:`mindspore.ops.tensor_split`.
|
|
5589
|
+
|
|
5590
|
+
Returns:
|
|
5591
|
+
A tuple of sub-tensors.
|
|
5592
|
+
|
|
5593
|
+
Supported Platforms:
|
|
5594
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
5595
|
+
|
|
5596
|
+
Examples:
|
|
5597
|
+
>>> input_x = np.arange(9).reshape((3, 3)).astype('float32')
|
|
5598
|
+
>>> output = ops.vsplit(Tensor(input_x), 3)
|
|
5599
|
+
>>> print(output)
|
|
5600
|
+
(Tensor(shape=[1, 3], dtype=Float32, value=[[ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]]),
|
|
5601
|
+
Tensor(shape=[1, 3], dtype=Float32, value=[[ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]]),
|
|
5602
|
+
Tensor(shape=[1, 3], dtype=Float32, value=[[ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]]))
|
|
5603
|
+
"""
|
|
5604
|
+
if not isinstance(input, Tensor):
|
|
5605
|
+
raise TypeError(f'expect `input` is a Tensor, but got {type(input)}')
|
|
5606
|
+
if input.ndim < 2:
|
|
5607
|
+
raise ValueError(f'vsplit expect `input` is a Tensor with at least 2 dimensions, but got {input.ndim}')
|
|
5608
|
+
return tensor_split(input, indices_or_sections, 0)
|
|
5609
|
+
|
|
5610
|
+
|
|
5611
|
+
def hsplit(input, indices_or_sections):
|
|
5612
|
+
"""
|
|
5613
|
+
Splits a tensor into multiple sub-tensors horizontally.
|
|
5614
|
+
It is equivalent to `ops.tensor_split` with :math:`axis=1` .
|
|
5615
|
+
|
|
5616
|
+
Args:
|
|
5617
|
+
input (Tensor): A Tensor to be divided.
|
|
5618
|
+
indices_or_sections (Union[int, tuple(int), list(int)]): See argument in :func:`mindspore.ops.tensor_split`.
|
|
5619
|
+
|
|
5620
|
+
Returns:
|
|
5621
|
+
A tuple of sub-tensors.
|
|
5622
|
+
|
|
5623
|
+
Raises:
|
|
5624
|
+
TypeError: If `input` is not Tensor.
|
|
5625
|
+
ValueError: If dimension of `input` is less than 2.
|
|
5626
|
+
|
|
5627
|
+
Supported Platforms:
|
|
5628
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
5629
|
+
|
|
5630
|
+
Examples:
|
|
5631
|
+
>>> input_x = np.arange(6).reshape((2, 3)).astype('float32')
|
|
5632
|
+
>>> output = ops.hsplit(Tensor(input_x), 3)
|
|
5633
|
+
>>> print(output)
|
|
5634
|
+
(Tensor(shape=[2, 1], dtype=Float32, value=[[ 0.00000000e+00], [ 3.00000000e+00]]),
|
|
5635
|
+
Tensor(shape=[2, 1], dtype=Float32, value=[[ 1.00000000e+00], [ 4.00000000e+00]]),
|
|
5636
|
+
Tensor(shape=[2, 1], dtype=Float32, value=[[ 2.00000000e+00], [ 5.00000000e+00]]))
|
|
5637
|
+
"""
|
|
5638
|
+
if not isinstance(input, Tensor):
|
|
5639
|
+
raise TypeError(f'expect `input` is a Tensor, but got {type(input)}')
|
|
5640
|
+
if input.ndim < 2:
|
|
5641
|
+
raise ValueError(f'hsplit expect `input` is a Tensor with at least 2 dimensions, but got {input.ndim}')
|
|
5642
|
+
|
|
5643
|
+
return tensor_split(input, indices_or_sections, 1)
|
|
5644
|
+
|
|
5645
|
+
|
|
5646
|
+
def dsplit(input, indices_or_sections):
|
|
5647
|
+
"""
|
|
5648
|
+
Splits a tensor into multiple sub-tensors along the 3rd axis.
|
|
5649
|
+
It is equivalent to `ops.tensor_split` with :math:`axis=2` .
|
|
5650
|
+
|
|
5651
|
+
Args:
|
|
5652
|
+
input (Tensor): A Tensor to be divided.
|
|
5653
|
+
indices_or_sections (Union[int, tuple(int), list(int)]): See argument in :func:`mindspore.ops.tensor_split`.
|
|
5654
|
+
|
|
5655
|
+
Returns:
|
|
5656
|
+
A tuple of sub-tensors.
|
|
5657
|
+
|
|
5658
|
+
Supported Platforms:
|
|
5659
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
5660
|
+
|
|
5661
|
+
Examples:
|
|
5662
|
+
>>> input_x = np.arange(6).reshape((1, 2, 3)).astype('float32')
|
|
5663
|
+
>>> output = ops.dsplit(Tensor(input_x), 3)
|
|
5664
|
+
>>> print(output)
|
|
5665
|
+
(Tensor(shape=[1, 2, 1], dtype=Float32, value=[[[ 0.00000000e+00], [ 3.00000000e+00]]]),
|
|
5666
|
+
Tensor(shape=[1, 2, 1], dtype=Float32, value=[[[ 1.00000000e+00], [ 4.00000000e+00]]]),
|
|
5667
|
+
Tensor(shape=[1, 2, 1], dtype=Float32, value=[[[ 2.00000000e+00], [ 5.00000000e+00]]]))
|
|
5668
|
+
"""
|
|
5669
|
+
if not isinstance(input, Tensor):
|
|
5670
|
+
raise TypeError(f'expect `input` is a Tensor, but got {type(input)}')
|
|
5671
|
+
if input.ndim < 3:
|
|
5672
|
+
raise ValueError(f'dsplit expect `input` is a Tensor with at least 3 dimensions, but got {input.ndim}')
|
|
5673
|
+
|
|
5674
|
+
return tensor_split(input, indices_or_sections, 2)
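The three wrappers differ only in the fixed axis forwarded to `tensor_split`; a compact sketch:

import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.arange(8).reshape(2, 2, 2).astype(np.float32))
print([p.shape for p in ops.vsplit(x, 2)])  # axis 0 -> [(1, 2, 2), (1, 2, 2)]
print([p.shape for p in ops.hsplit(x, 2)])  # axis 1 -> [(2, 1, 2), (2, 1, 2)]
print([p.shape for p in ops.dsplit(x, 2)])  # axis 2 -> [(2, 2, 1), (2, 2, 1)]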
|
|
5675
|
+
|
|
5676
|
+
|
|
5677
|
+
def _init_and_select_elem(input, initial, where, cmp_fn): # pylint: disable=redefined-outer-name
|
|
5678
|
+
"""Initialize the input according to Initial, and select the element according to where."""
|
|
5679
|
+
if initial is not None:
|
|
5680
|
+
initial = ops.fill(input.dtype, input.shape, initial)
|
|
5681
|
+
input = cmp_fn(input, initial)
|
|
5682
|
+
|
|
5683
|
+
if isinstance(where, Tensor):
|
|
5684
|
+
if initial is None:
|
|
5685
|
+
raise ValueError('initial value must be provided for where masks')
|
|
5686
|
+
where = where.broadcast_to(input.shape)
|
|
5687
|
+
initial = initial.broadcast_to(input.shape)
|
|
5688
|
+
input = ops.select(where, input, initial)
|
|
5689
|
+
return input
|
|
5690
|
+
|
|
5691
|
+
|
|
5692
|
+
def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylint: disable=redefined-outer-name
|
|
5693
|
+
"""
|
|
5694
|
+
Calculates the maximum value along the given axis for the input tensor. It returns the maximum values and
|
|
5695
|
+
indices.
|
|
5696
|
+
|
|
5697
|
+
Note:
|
|
5698
|
+
In auto_parallel and semi_auto_parallel mode, the `index` output can not be used.
|
|
5699
|
+
|
|
5700
|
+
.. warning::
|
|
5701
|
+
- If there are multiple maximum values, the index of the first maximum value is used.
|
|
5702
|
+
- The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "input".
|
|
5703
|
+
|
|
5704
|
+
Also see: :class:`mindspore.ops.ArgMaxWithValue`.
|
|
5705
|
+
|
|
5706
|
+
Args:
|
|
5707
|
+
input (Tensor): The input tensor, can be any dimension. Complex tensor is not supported for now.
|
|
5708
|
+
axis (int): The dimension to reduce. Default: None.
|
|
5709
|
+
keepdims (bool): Whether to reduce dimension, if true, the output will keep same dimension with the input,
|
|
5710
|
+
the output will reduce dimension if false. Default: False.
|
|
5711
|
+
|
|
5712
|
+
Keyword Args:
|
|
5713
|
+
initial (scalar, optional): The minimum value of an output element. Must be present to allow computation
|
|
5714
|
+
on empty slice. Default: None.
|
|
5715
|
+
where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input`
|
|
5716
|
+
with the value in `initial`. If True, do not replace, otherwise replace. For every index where `where` is
|
|
5717
|
+
False, the corresponding value in `initial` must be provided. Default: None, which indicates True by default.
|
|
5718
|
+
|
|
5719
|
+
Returns:
|
|
5720
|
+
tuple (Tensor), tuple of 2 tensors, containing the maximum value of the input tensor and the corresponding
|
|
5721
|
+
index.
|
|
5722
|
+
|
|
5723
|
+
- values (Tensor) - The maximum value of input tensor, with the same shape as index, and same dtype as `input`.
|
|
5724
|
+
- index (Tensor) - The index for the maximum value of the input tensor, with dtype int32. If `keepdims`
|
|
5725
|
+
is true, the shape of output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`.
|
|
5726
|
+
Otherwise, the shape is :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
|
|
5727
|
+
|
|
5728
|
+
Raises:
|
|
5729
|
+
TypeError: If `input` is not Tensor.
|
|
5730
|
+
TypeError: If `keepdims` is not a bool.
|
|
5731
|
+
TypeError: If `axis` is not an int.
|
|
5732
|
+
TypeError: If `initial` is not a number.
|
|
5733
|
+
|
|
5734
|
+
Supported Platforms:
|
|
5735
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
5736
|
+
|
|
5737
|
+
Examples:
|
|
5738
|
+
>>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
|
|
5739
|
+
>>> output, index = ops.max(x, keepdims=True)
|
|
5740
|
+
>>> print(output, index)
|
|
5741
|
+
0.7 0
|
|
5742
|
+
"""
|
|
5743
|
+
if not input.shape:
|
|
5744
|
+
return (input, Tensor(0, dtype=mstype.int32))
|
|
4523
5745
|
if axis is None:
|
|
4524
|
-
|
|
4525
|
-
|
|
4526
|
-
|
|
4527
|
-
|
|
4528
|
-
|
|
4529
|
-
|
|
5746
|
+
return (reduce_max(input), Tensor(0, dtype=mstype.int32))
|
|
5747
|
+
if initial is not None and not isinstance(initial, numbers.Number):
|
|
5748
|
+
raise TypeError(f"For 'max', 'initial' must be a scalar, but got {type(initial)}")
|
|
5749
|
+
if axis is not None and not isinstance(axis, int):
|
|
5750
|
+
raise TypeError(f"For 'max', 'axis' must be int, but got {type(axis)}")
|
|
5751
|
+
input = _init_and_select_elem(input, initial, where, ops.maximum)
|
|
5752
|
+
argmax_with_value_op = ArgMaxWithValue(axis, keepdims)
|
|
5753
|
+
indices, values = argmax_with_value_op(input)
|
|
5754
|
+
return values, indices
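A hedged sketch of the keyword arguments: positions where `where` is False are replaced by `initial` before the arg-max, so they cannot win:

import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([[1.0, 9.0], [3.0, 2.0]]), mindspore.float32)
mask = Tensor(np.array([[True, False], [True, True]]))
# 9.0 is masked out and replaced by initial=-10.0, so the row maxima are 1.0 and 3.0.
values, indices = ops.max(x, axis=1, initial=-10.0, where=mask)
print(values, indices)  # expected: [1. 3.] [0 0]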
|
|
5755
|
+
|
|
5756
|
+
|
|
5757
|
+
def argmax(input, dim=None, keepdim=False):
|
|
5758
|
+
"""
|
|
5759
|
+
Return the indices of the maximum values of a tensor across a dimension.
|
|
5760
|
+
|
|
5761
|
+
Args:
|
|
5762
|
+
input (Tensor): Input tensor.
|
|
5763
|
+
dim (Union[int, None], optional): The dimension to reduce. If `dim` is None, the indices of the maximum
|
|
5764
|
+
value within the flattened input will be returned. Default: None.
|
|
5765
|
+
keepdim (bool, optional): Whether the output tensor retains the specified
|
|
5766
|
+
dimension. Ignored if `dim` is None. Default: False.
|
|
5767
|
+
|
|
5768
|
+
Returns:
|
|
5769
|
+
Tensor, indices of the maximum values across a dimension.
|
|
5770
|
+
|
|
5771
|
+
Raises:
|
|
5772
|
+
ValueError: If `dim` is out of range.
|
|
5773
|
+
|
|
5774
|
+
Supported Platforms:
|
|
5775
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
5776
|
+
|
|
5777
|
+
Examples:
|
|
5778
|
+
>>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
|
|
5779
|
+
>>> output = ops.argmax(x, dim=-1)
|
|
5780
|
+
>>> print(output)
|
|
5781
|
+
[1 0 0]
|
|
5782
|
+
"""
|
|
5783
|
+
if not input.shape:
|
|
5784
|
+
return Tensor(0)
|
|
5785
|
+
is_dim_none = False
|
|
5786
|
+
if dim is None:
|
|
5787
|
+
input = reshape_(input, (-1,))
|
|
5788
|
+
dim = 0
|
|
5789
|
+
is_dim_none = True
|
|
5790
|
+
out = _get_cache_prim(Argmax)(dim, mstype.int64)(input)
|
|
5791
|
+
if keepdim and not is_dim_none:
|
|
5792
|
+
out = expand_dims_(out, dim)
|
|
4530
5793
|
return out
|
|
4531
5794
|
|
|
4532
5795
|
|
|
4533
|
-
def min(
|
|
5796
|
+
def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pylint: disable=redefined-outer-name
|
|
4534
5797
|
"""
|
|
4535
5798
|
Calculates the minimum value along the given axis for the input tensor. It returns the minimum values and
|
|
4536
5799
|
indices.
|
|
@@ -4543,68 +5806,77 @@ def min(x, axis=0, keep_dims=False):
|
|
|
4543
5806
|
- The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "x".
|
|
4544
5807
|
|
|
4545
5808
|
Args:
|
|
4546
|
-
|
|
4547
|
-
|
|
4548
|
-
|
|
4549
|
-
|
|
4550
|
-
|
|
5809
|
+
input (Tensor): The input tensor, can be any dimension. Complex tensor is not supported for now.
|
|
5810
|
+
axis (int): The dimension to reduce. Default: None.
|
|
5811
|
+
keepdims (bool): Whether to reduce dimension, if true the output will keep the same dimension as the input,
|
|
5812
|
+
the output will reduce dimension if false. Default: False.
|
|
5813
|
+
|
|
5814
|
+
Keyword Args:
|
|
5815
|
+
initial (scalar, optional): The maximum value of an output element. Must be present to allow computation
|
|
5816
|
+
on empty slice. Default: None.
|
|
5817
|
+
where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input`
|
|
5818
|
+
with the value in `initial`. If True, do not replace, otherwise replace. For the index of True in `where`,
|
|
5819
|
+
the corresponding value in `initial` must be assigned. Default: None, which indicates True by default.
|
|
4551
5820
|
|
|
4552
5821
|
Returns:
|
|
4553
5822
|
tuple (Tensor), tuple of 2 tensors, containing the minimum value of the input tensor and the corresponding
|
|
4554
5823
|
index.
|
|
4555
5824
|
|
|
4556
|
-
- **
|
|
5825
|
+
- **values** (Tensor) - The minimum value of input tensor, with the same
|
|
5826
|
+
shape as `index`, and same dtype as `input`.
|
|
5827
|
+
- **index** (Tensor) - The index for the minimum value of the input tensor, with dtype int32. If `keepdims`
|
|
4557
5828
|
is true, the shape of output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`.
|
|
4558
5829
|
Otherwise, the shape is :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
|
|
4559
|
-
- **values** (Tensor) - The minimum value of input tensor, with the same shape
|
|
4560
|
-
as `index`, and same dtype as `x`.
|
|
4561
5830
|
|
|
4562
5831
|
Raises:
|
|
4563
5832
|
TypeError: If `input` is not a Tensor.
|
|
4564
|
-
TypeError: If `
|
|
5833
|
+
TypeError: If `keepdims` is not a bool.
|
|
4565
5834
|
TypeError: If `axis` is not an int.
|
|
5835
|
+
TypeError: If `initial` is not a number.
|
|
4566
5836
|
|
|
4567
5837
|
Supported Platforms:
|
|
4568
5838
|
``Ascend`` ``GPU`` ``CPU``
|
|
4569
5839
|
|
|
4570
5840
|
Examples:
|
|
4571
5841
|
>>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
|
|
4572
|
-
>>>
|
|
4573
|
-
>>> print(
|
|
4574
|
-
0 0
|
|
4575
|
-
>>> index, output = ops.min(x, keep_dims=True)
|
|
4576
|
-
>>> print(index, output)
|
|
4577
|
-
[0] [0.0]
|
|
5842
|
+
>>> output, index = ops.min(x, keepdims=True)
|
|
5843
|
+
>>> print(output, index)
|
|
5844
|
+
0.0 0
|
|
4578
5845
|
"""
|
|
4579
|
-
if
|
|
4580
|
-
return (Tensor(0
|
|
4581
|
-
|
|
4582
|
-
|
|
5846
|
+
if not input.shape:
|
|
5847
|
+
return (input, Tensor(0, dtype=mstype.int32))
|
|
5848
|
+
if axis is None:
|
|
5849
|
+
return (reduce_min(input), Tensor(0, dtype=mstype.int32))
|
|
5850
|
+
if initial is not None and not isinstance(initial, numbers.Number):
|
|
5851
|
+
raise TypeError(f"For 'min', 'initial' must be a scalar, but got {type(initial)}")
|
|
5852
|
+
if axis is not None and not isinstance(axis, int):
|
|
5853
|
+
raise TypeError(f"For 'min', 'axis' must be int, but got {type(axis)}")
|
|
5854
|
+
input = _init_and_select_elem(input, initial, where, ops.minimum)
|
|
5855
|
+
argmin_with_value_ = ArgMinWithValue(axis=axis, keep_dims=keepdims)
|
|
5856
|
+
indices, values = argmin_with_value_(input)
|
|
5857
|
+
return values, indices
|
|
4583
5858
|
|
|
4584
5859
|
|
|
4585
|
-
def aminmax(
|
|
5860
|
+
def aminmax(input, *, axis=0, keepdims=False):
|
|
4586
5861
|
"""
|
|
4587
|
-
|
|
4588
|
-
|
|
4589
|
-
Calculates the minimum value and maximum values along with the given axis for the input tensor.
|
|
4590
|
-
It returns the minimum values and maximum values.
|
|
4591
|
-
|
|
4592
|
-
.. warning::
|
|
4593
|
-
- The value range of "axis" is [-rank, rank). "rank" is the dimension length of "x".
|
|
5862
|
+
Returns the minimum and maximum values along the given axis of the input tensor.
|
|
4594
5863
|
|
|
4595
5864
|
Args:
|
|
4596
|
-
|
|
5865
|
+
input (Tensor): The input tensor, which can be of any dimension. Denote the shape of the input tensor as
|
|
4597
5866
|
:math:`(x_1, x_2, ..., x_N)` .
|
|
4598
|
-
|
|
4599
|
-
|
|
4600
|
-
|
|
5867
|
+
|
|
5868
|
+
Keyword Args:
|
|
5869
|
+
axis (int, optional): The dimension to reduce. The value range of `axis` is [-rank, rank),
|
|
5870
|
+
where "rank" is the dimension of `input`. Default: 0.
|
|
5871
|
+
keepdims (bool, optional): Whether to maintain dimension. When set to True, the output will keep the same
|
|
5872
|
+
dimension as the input; otherwise the dimension specified by `axis` is reduced. Default: False.
|
|
4601
5873
|
|
|
4602
5874
|
Returns:
|
|
4603
|
-
tuple (Tensor),
|
|
5875
|
+
tuple (Tensor), containing the minimum value and maximum value of the input tensor.
|
|
4604
5876
|
|
|
4605
|
-
- If `keepdims` is
|
|
5877
|
+
- If `keepdims` is True, the shape of output tensors is
|
|
4606
5878
|
:math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`.
|
|
4607
|
-
|
|
5879
|
+
- If `keepdims` is False, the shape of output tensors is
|
|
4608
5880
|
:math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
|
|
4609
5881
|
|
|
4610
5882
|
Raises:
|
|
@@ -4613,28 +5885,31 @@ def aminmax(x, *, axis=0, keepdims=False):
|
|
|
4613
5885
|
ValueError: If `axis` is not in range [-rank, rank).
|
|
4614
5886
|
|
|
4615
5887
|
Supported Platforms:
|
|
4616
|
-
``CPU``
|
|
5888
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
4617
5889
|
|
|
4618
5890
|
Examples:
|
|
4619
5891
|
>>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
|
|
4620
5892
|
>>> output0, output1 = ops.aminmax(x)
|
|
4621
5893
|
>>> print(output0, output1)
|
|
4622
5894
|
0.0 0.7
|
|
5895
|
+
>>> output2, output3 = ops.aminmax(x, axis=-1, keepdims=True)
|
|
5896
|
+
>>> print(output2, output3)
|
|
5897
|
+
[0.] [0.7]
|
|
4623
5898
|
"""
|
|
4624
5899
|
argmin_with_value_op = P.ArgMinWithValue(axis, keepdims)
|
|
4625
5900
|
argmax_with_value_op = P.ArgMaxWithValue(axis, keepdims)
|
|
4626
|
-
_, output0 = argmin_with_value_op(
|
|
4627
|
-
_, output1 = argmax_with_value_op(
|
|
5901
|
+
_, output0 = argmin_with_value_op(input)
|
|
5902
|
+
_, output1 = argmax_with_value_op(input)
|
|
4628
5903
|
return output0, output1
|
|
4629
5904
|
|
|
4630
5905
|
|
|
4631
|
-
def narrow(
|
|
5906
|
+
def narrow(input, axis, start, length):
|
|
4632
5907
|
"""
|
|
4633
5908
|
Returns a narrowed tensor from the input tensor, where
|
|
4634
5909
|
the dimension `axis` ranges from `start` to `start + length`.
|
|
4635
5910
|
|
|
4636
5911
|
Args:
|
|
4637
|
-
|
|
5912
|
+
input (Tensor): the tensor to narrow.
|
|
4638
5913
|
axis (int): the axis along which to narrow.
|
|
4639
5914
|
start (int): the starting dimension.
|
|
4640
5915
|
length (int): the number of elements to keep along `axis`, starting from `start`.
|
|
@@ -4665,15 +5940,15 @@ def narrow(inputs, axis, start, length):
|
|
|
4665
5940
|
[ 5 6]
|
|
4666
5941
|
[ 8 9]]
|
|
4667
5942
|
"""
|
|
4668
|
-
validator.check_axis_in_range(axis,
|
|
4669
|
-
validator.check_int_range(start, 0,
|
|
4670
|
-
validator.check_int_range(length, 1,
|
|
5943
|
+
validator.check_axis_in_range(axis, input.ndim)
|
|
5944
|
+
validator.check_int_range(start, 0, input.shape[axis], validator.INC_LEFT)
|
|
5945
|
+
validator.check_int_range(length, 1, input.shape[axis] - start, validator.INC_BOTH)
|
|
4671
5946
|
|
|
4672
|
-
begins = [0] *
|
|
5947
|
+
begins = [0] * input.ndim
|
|
4673
5948
|
begins[axis] = start
|
|
4674
|
-
sizes =
|
|
5949
|
+
sizes = list(input.shape)
|
|
4675
5950
|
sizes[axis] = length
|
|
4676
|
-
return P.Slice()(
|
|
5951
|
+
return P.Slice()(input, begins, sizes)
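`narrow` is a thin wrapper over `P.Slice`, so it should agree with basic slicing along one axis; a hedged equivalence check:

import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.arange(12).reshape(3, 4).astype(np.int32))
out = ops.narrow(x, 1, 1, 2)               # axis=1, start=1, length=2
ref = x.asnumpy()[:, 1:3]                  # the slice it is expected to match
print(np.array_equal(out.asnumpy(), ref))  # expected: True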
|
|
4677
5952
|
|
|
4678
5953
|
|
|
4679
5954
|
def unsorted_segment_sum(input_x, segment_ids, num_segments):
|
|
@@ -4681,7 +5956,8 @@ def unsorted_segment_sum(input_x, segment_ids, num_segments):
|
|
|
4681
5956
|
Computes the sum of a tensor along segments.
|
|
4682
5957
|
|
|
4683
5958
|
Calculates a tensor such that :math:`\text{output}[i] = \sum_{segment\_ids[j] == i} \text{data}[j, \ldots]`, where
|
|
4684
|
-
:math:`j
|
|
5959
|
+
:math:`j,...` is a tuple describing the index of an element in data.
|
|
5960
|
+
`segment_ids` selects which elements in data to sum
|
|
4685
5961
|
up. Segment_ids does not need to be sorted, and it does not need to cover all values in the entire valid value
|
|
4686
5962
|
range.
|
|
4687
5963
|
|
|
@@ -4732,73 +6008,107 @@ def unsorted_segment_sum(input_x, segment_ids, num_segments):
|
|
|
4732
6008
|
return unsorted_segment_sum_(input_x, segment_ids, num_segments)
|
|
4733
6009
|
|
|
4734
6010
|
|
|
4735
|
-
def
|
|
6011
|
+
def topk(input, k, dim=None, largest=True, sorted=True):
|
|
4736
6012
|
r"""
|
|
4737
|
-
Finds values and indices of the `k` largest entries along
|
|
6013
|
+
Finds values and indices of the `k` largest or smallest entries along a given dimension.
|
|
4738
6014
|
|
|
4739
6015
|
.. warning::
|
|
4740
|
-
- If sorted is set to
|
|
6016
|
+
- If sorted is set to False, it will use the aicpu operator, the performance may be reduced. In addition, due to
|
|
6017
|
+
different memory layout and traversal methods on different platforms, the display order of calculation results
|
|
6018
|
+
may be inconsistent when `sorted` is False.
|
|
4741
6019
|
|
|
4742
|
-
If the `
|
|
4743
|
-
and outputs its value and index as a Tensor.
|
|
6020
|
+
If the `input` is a one-dimensional Tensor, finds the `k` largest or smallest entries in the Tensor,
|
|
6021
|
+
and outputs their values and indices as Tensors. values[`k`] is the `k` largest item in `input`,
|
|
4744
6022
|
and its index is indices [`k`].
|
|
4745
6023
|
|
|
4746
6024
|
For a multi-dimensional matrix,
|
|
4747
|
-
calculates the first `k` entries in
|
|
6025
|
+
calculates the first or last `k` entries in a given dimension, therefore:
|
|
4748
6026
|
|
|
4749
6027
|
.. math::
|
|
4750
6028
|
|
|
4751
|
-
values.shape = indices.shape
|
|
6029
|
+
values.shape = indices.shape
|
|
4752
6030
|
|
|
4753
6031
|
If the two compared elements are the same, the one with the smaller index value is returned first.
|
|
4754
6032
|
|
|
4755
6033
|
Args:
|
|
4756
|
-
|
|
4757
|
-
k (int): The number of top elements to be computed along the last dimension, constant input is needed.
|
|
4758
|
-
|
|
4759
|
-
|
|
6034
|
+
input (Tensor): Input to be computed, data type must be float16, float32 or int32.
|
|
6035
|
+
k (int): The number of top or bottom elements to be computed along the given dimension, constant input is needed.
|
|
6036
|
+
dim (int, optional): The dimension to sort along. Default: None.
|
|
6037
|
+
largest (bool, optional): If largest is False then the k smallest elements are returned. Default: True.
|
|
6038
|
+
sorted (bool, optional): If True, the obtained elements will be sorted by the values in descending order.
|
|
6039
|
+
If False, the obtained elements will not be sorted. Default: True.
|
|
4760
6040
|
|
|
4761
6041
|
Returns:
|
|
4762
|
-
|
|
6042
|
+
A tuple consisting of `values` and `indices`.
|
|
4763
6043
|
|
|
4764
|
-
- values (Tensor): The `k` largest elements in each slice of the
|
|
6044
|
+
- values (Tensor): The `k` largest or smallest elements in each slice of the given dimension.
|
|
4765
6045
|
- indices (Tensor): The indices of values within the last dimension of input.
|
|
4766
6046
|
|
|
4767
6047
|
Raises:
|
|
4768
6048
|
TypeError: If `sorted` is not a bool.
|
|
4769
|
-
TypeError: If `
|
|
6049
|
+
TypeError: If `input` is not a Tensor.
|
|
4770
6050
|
TypeError: If `k` is not an int.
|
|
4771
|
-
TypeError: If dtype of `
|
|
6051
|
+
TypeError: If dtype of `input` is not one of the following: float16, float32 or int32.
|
|
4772
6052
|
|
|
4773
6053
|
Supported Platforms:
|
|
4774
6054
|
``Ascend`` ``GPU`` ``CPU``
|
|
4775
6055
|
|
|
4776
6056
|
Examples:
|
|
4777
|
-
>>>
|
|
6057
|
+
>>> import mindspore as ms
|
|
4778
6058
|
>>> from mindspore import ops
|
|
4779
|
-
>>>
|
|
4780
|
-
|
|
4781
|
-
|
|
4782
|
-
>>>
|
|
4783
|
-
>>> print(
|
|
4784
|
-
(Tensor(shape=[3], dtype=
|
|
4785
|
-
|
|
6059
|
+
>>> x = ms.Tensor([[0.5368, 0.2447, 0.4302, 0.9673],
|
|
6060
|
+
... [0.4388, 0.6525, 0.4685, 0.1868],
|
|
6061
|
+
... [0.3563, 0.5152, 0.9675, 0.8230]], dtype=ms.float32)
|
|
6062
|
+
>>> output = ops.topk(x, 2, dim=1)
|
|
6063
|
+
>>> print(output)
|
|
6064
|
+
(Tensor(shape=[3, 2], dtype=Float32, value=
|
|
6065
|
+
[[ 9.67299998e-01, 5.36800027e-01],
|
|
6066
|
+
[ 6.52499974e-01, 4.68499988e-01],
|
|
6067
|
+
[ 9.67499971e-01, 8.23000014e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
|
|
6068
|
+
[[3, 0],
|
|
6069
|
+
[1, 2],
|
|
6070
|
+
[2, 3]]))
|
|
6071
|
+
>>> output2 = ops.topk(x, 2, dim=1, largest=False)
|
|
6072
|
+
>>> print(output2)
|
|
6073
|
+
(Tensor(shape=[3, 2], dtype=Float32, value=
|
|
6074
|
+
[[ 2.44700000e-01, 4.30200011e-01],
|
|
6075
|
+
[ 1.86800003e-01, 4.38800007e-01],
|
|
6076
|
+
[ 3.56299996e-01, 5.15200019e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
|
|
6077
|
+
[[1, 2],
|
|
6078
|
+
[3, 0],
|
|
6079
|
+
[0, 1]]))
|
|
4786
6080
|
"""
|
|
4787
6081
|
top_k_ = _get_cache_prim(P.TopK)(sorted)
|
|
4788
|
-
|
|
6082
|
+
if not largest:
|
|
6083
|
+
input = -input
|
|
6084
|
+
if dim is None or dim == input.ndim - 1:
|
|
6085
|
+
if not largest:
|
|
6086
|
+
res = top_k_(input, k)
|
|
6087
|
+
values, indices = -res[0], res[1]
|
|
6088
|
+
return values, indices
|
|
6089
|
+
return top_k_(input, k)
|
|
6090
|
+
input = input.swapaxes(dim, input.ndim - 1)
|
|
6091
|
+
output = top_k_(input, k)
|
|
6092
|
+
values = output[0].swapaxes(dim, input.ndim - 1)
|
|
6093
|
+
indices = output[1].swapaxes(dim, input.ndim - 1)
|
|
6094
|
+
if not largest:
|
|
6095
|
+
res = (-values, indices)
|
|
6096
|
+
else:
|
|
6097
|
+
res = (values, indices)
|
|
6098
|
+
return res
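The negate-and-swap pattern above reduces every case to `P.TopK` over the last axis; a hedged check of the `largest=False` branch:

import numpy as np
import mindspore as ms
from mindspore import ops

x = ms.Tensor(np.array([[5.0, 1.0, 3.0], [2.0, 4.0, 6.0]]), ms.float32)
# Smallest 2 per row, obtained by negating, taking top-k, and negating back.
values, indices = ops.topk(x, 2, dim=1, largest=False)
print(values)   # expected: [[1. 3.] [2. 4.]]
print(indices)  # expected: [[1 2] [0 1]]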
|
|
4789
6099
|
|
|
4790
6100
|
|
|
4791
6101
|
def expand(input_x, size):
|
|
4792
6102
|
r"""
|
|
4793
|
-
Returns a new
|
|
6103
|
+
Returns a new tensor where singleton dimensions are expanded to a larger size.
|
|
4794
6104
|
|
|
4795
6105
|
Note:
|
|
4796
|
-
|
|
4797
|
-
|
|
4798
|
-
|
|
6106
|
+
- If the `size` for a dimension is -1, it means no change for the size of that dimension.
|
|
6107
|
+
- When a Tensor is expanded to a larger number of dimensions, the new ones will be appended at
|
|
6108
|
+
the front, and for the new dimensions, the `size` can not be -1.
|
|
4799
6109
|
|
|
4800
6110
|
Args:
|
|
4801
|
-
input_x (Tensor): The shape of tensor is (x_1, x_2, ..., x_R)
|
|
6111
|
+
input_x (Tensor): A Tensor to be expanded. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
|
|
4802
6112
|
size (Tensor): The expanded shape of `input_x`.
|
|
4803
6113
|
|
|
4804
6114
|
Returns:
|
|
@@ -4819,13 +6129,13 @@ def expand(input_x, size):
|
|
|
4819
6129
|
``Ascend`` ``CPU``
|
|
4820
6130
|
|
|
4821
6131
|
Examples:
|
|
4822
|
-
>>> input_x = Tensor(np.array([[
|
|
6132
|
+
>>> input_x = Tensor(np.array([[2], [3], [4]]), mindspore.float32)
|
|
4823
6133
|
>>> size = Tensor(np.array([3,4]), mindspore.int32)
|
|
4824
6134
|
>>> y = ops.expand(input_x, size)
|
|
4825
6135
|
>>> print(y)
|
|
4826
|
-
[[
|
|
4827
|
-
[
|
|
4828
|
-
[
|
|
6136
|
+
[[2. 2. 2. 2.]
|
|
6137
|
+
[3. 3. 3. 3.]
|
|
6138
|
+
[4. 4. 4. 4.]]
|
|
4829
6139
|
"""
|
|
4830
6140
|
expand_op = _get_cache_prim(Expand)()
|
|
4831
6141
|
return expand_op(input_x, size)
|
|
@@ -4836,7 +6146,7 @@ def _check_fold_param(param, param_name):
|
|
|
4836
6146
|
"""Check the parameters of fold op."""
|
|
4837
6147
|
validator.check_value_type(param_name, param, [int, list, tuple], 'fold')
|
|
4838
6148
|
param = (param, param) if isinstance(param, int) else param
|
|
4839
|
-
validator.check(param_name + " size", len(param), "", 2,
|
|
6149
|
+
validator.check(param_name + " size", len(param), "", 2, validator.EQ, 'fold')
|
|
4840
6150
|
if param_name == "padding":
|
|
4841
6151
|
validator.check_non_negative_int_sequence(param, param_name, 'fold')
|
|
4842
6152
|
else:
|
|
@@ -4864,7 +6174,7 @@ def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
|
|
|
4864
6174
|
for height and width. If type is int, it means that height equal with width. Default: 1.
|
|
4865
6175
|
|
|
4866
6176
|
Returns:
|
|
4867
|
-
A Tensor, with same type as `input` , format of the
|
|
6177
|
+
A Tensor, with same type as `input` , format of the Tensor is (N, C, H, W).
|
|
4868
6178
|
|
|
4869
6179
|
Raises:
|
|
4870
6180
|
TypeError: If `kernel_size`, `dilation`, `padding`, `stride` data type is not int, tuple or list.
|
|
@@ -4875,7 +6185,7 @@ def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
|
|
|
4875
6185
|
ValueError: If `input.shape[3]` does not match the calculated number of sliding blocks.
|
|
4876
6186
|
|
|
4877
6187
|
Supported Platforms:
|
|
4878
|
-
``
|
|
6188
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
4879
6189
|
|
|
4880
6190
|
Examples:
|
|
4881
6191
|
>>> x = Tensor(input_data=np.random.rand(16, 16, 4, 25), dtype=mstype.float32)
|
|
@@ -4885,9 +6195,9 @@ def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
|
|
|
4885
6195
|
(16, 16, 8, 8)
|
|
4886
6196
|
"""
|
|
4887
6197
|
kernel_size = _check_fold_param(kernel_size, "kernel_size")
|
|
4888
|
-
dilation = _check_fold_param(
|
|
4889
|
-
padding = _check_fold_param(
|
|
4890
|
-
stride = _check_fold_param(
|
|
6198
|
+
dilation = _check_fold_param(dilation, "dilation")
|
|
6199
|
+
padding = _check_fold_param(padding, "padding")
|
|
6200
|
+
stride = _check_fold_param(stride, "stride")
|
|
4891
6201
|
fold_op = _get_cache_prim(Col2Im)(kernel_size, dilation, padding, stride)
|
|
4892
6202
|
return fold_op(input, output_size)
|
|
4893
6203
|
|
|
@@ -4897,7 +6207,7 @@ def _check_unfold_params(param, param_name, param_size):
|
|
|
4897
6207
|
"""Check the parameters of unfold op."""
|
|
4898
6208
|
validator.check_value_type(param_name, param, [int, tuple, list], 'unfold')
|
|
4899
6209
|
param = (param, param) if isinstance(param, int) else param
|
|
4900
|
-
validator.check(param_name + " size", len(param), "", param_size,
|
|
6210
|
+
validator.check(param_name + " size", len(param), "", param_size, validator.IN, 'unfold')
|
|
4901
6211
|
if param_name == "padding":
|
|
4902
6212
|
validator.check_non_negative_int_sequence(param, param_name, 'unfold')
|
|
4903
6213
|
else:
|
|
@@ -4907,8 +6217,8 @@ def _check_unfold_params(param, param_name, param_size):
|
|
|
4907
6217
|
|
|
4908
6218
|
def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
|
|
4909
6219
|
"""
|
|
4910
|
-
|
|
4911
|
-
|
|
6220
|
+
Reshapes a tensor of format (N, C, H, W) by extracting sliding local blocks from the input Tensor
|
|
6221
|
+
and concatenating them along a new dimension.
|
|
4912
6222
|
|
|
4913
6223
|
.. warning::
|
|
4914
6224
|
- Currently, only 4-D input tensors (batched image-like tensors) are supported.
|
|
@@ -4920,10 +6230,9 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
|
|
|
4920
6230
|
dilation (Union[int, tuple[int], list[int]], optional): The dilation of the window, should be two int
|
|
4921
6231
|
for height and width. If type is int, it means that height equals width. Default: 1.
|
|
4922
6232
|
padding (Union[int, tuple[int], list[int]], optional): The pad of the window, that must be
|
|
4923
|
-
a tuple/list of one or two
|
|
6233
|
+
a tuple/list of one or two `int` for height and width.
|
|
4924
6234
|
If one int, pad_height = pad_width.
|
|
4925
6235
|
If two int, pad_height = padding[0], pad_width = padding[1].
|
|
4926
|
-
If four int, padding = [pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]
|
|
4927
6236
|
Default: 0.
|
|
4928
6237
|
stride (Union[int, tuple[int], list[int]], optional): The stride of the window, should be two int
|
|
4929
6238
|
for height and width. If type is int, it means that height equals width. Default: 1.
|
|
@@ -4938,13 +6247,13 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
|
|
|
4938
6247
|
ValueError: If `padding` value is less than zero.
|
|
4939
6248
|
|
|
4940
6249
|
Supported Platforms:
|
|
4941
|
-
``Ascend`` ``CPU``
|
|
6250
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
4942
6251
|
|
|
4943
6252
|
Examples:
|
|
4944
|
-
>>> x = Tensor(np.random.rand(4, 4, 32, 32), mindspore.
|
|
6253
|
+
>>> x = Tensor(np.random.rand(4, 4, 32, 32), mindspore.float64)
|
|
4945
6254
|
>>> output = ops.unfold(x, kernel_size=3, dilation=1, stride=1)
|
|
4946
6255
|
>>> print(output.shape)
|
|
4947
|
-
(4,
|
|
6256
|
+
(4, 4, 9, 900)
|
|
4948
6257
|
"""
|
|
4949
6258
|
kernel_size = _check_unfold_params(kernel_size, "kernel_size", [1, 2])
|
|
4950
6259
|
dilation = _check_unfold_params(dilation, "dilation", [1, 2])
|
|
@@ -4953,7 +6262,6 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
|
|
|
4953
6262
|
unfold_op = _get_cache_prim(Im2Col)(ksizes=kernel_size,
|
|
4954
6263
|
strides=stride,
|
|
4955
6264
|
dilations=dilation,
|
|
4956
|
-
padding_mode="CALCULATED",
|
|
4957
6265
|
pads=padding)
|
|
4958
6266
|
return unfold_op(input)
|
|
4959
6267
|
|
|
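A worked check of the example output shape (4, 4, 9, 900), assuming the standard im2col sliding-window count; plain Python arithmetic only:

>>> # positions per spatial dim = (size + 2*padding - dilation*(kernel-1) - 1)//stride + 1
>>> (32 + 2 * 0 - 1 * (3 - 1) - 1) // 1 + 1
30
>>> 30 * 30, 3 * 3  # 900 sliding blocks; 9 kernel elements per channel
(900, 9)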
@@ -4970,11 +6278,10 @@ def diagonal(input, offset=0, dim1=0, dim2=1):
|
|
|
4970
6278
|
Returns specified diagonals of `input`.
|
|
4971
6279
|
|
|
4972
6280
|
If `input` is 2-D, returns the diagonal of `input` with the given offset.
|
|
4973
|
-
If `
|
|
6281
|
+
If `input` has more than two
|
|
4974
6282
|
dimensions, then the axes specified by `dim1` and `dim2` are used to determine
|
|
4975
|
-
the 2-D sub-array whose diagonal is returned.
|
|
4976
|
-
|
|
4977
|
-
to the right equal to the size of the resulting diagonals.
|
|
6283
|
+
the 2-D sub-array whose diagonal is returned. In this case, the `dim1` and `dim2` dimensions of `input` are removed,
|
|
6284
|
+
and a new last dimension is appended, holding the diagonal elements determined by `dim1` and `dim2`.
|
|
4978
6285
|
|
|
4979
6286
|
Args:
|
|
4980
6287
|
input (Tensor): Array from which the diagonals are taken.
|
|
@@ -5027,14 +6334,14 @@ def diagonal(input, offset=0, dim1=0, dim2=1):
|
|
|
5027
6334
|
elif offset != 0:
|
|
5028
6335
|
e = e.astype(mstype.float32)
|
|
5029
6336
|
if offset > 0:
|
|
5030
|
-
e_left = fill_op(
|
|
6337
|
+
e_left = fill_op(mstype.float32, (n, offset), 0)
|
|
5031
6338
|
e_right = e[..., 0:m - offset:1]
|
|
5032
6339
|
e = _get_cache_prim(P.Concat)(1)((e_left, e_right)).astype(dtype)
|
|
5033
6340
|
elif offset < 0:
|
|
5034
|
-
e_upper = fill_op(
|
|
6341
|
+
e_upper = fill_op(mstype.float32, (-offset, m), 0)
|
|
5035
6342
|
e_lower = e[0:n + offset:1, ...]
|
|
5036
6343
|
e = _get_cache_prim(P.Concat)(0)((e_upper, e_lower)).astype(dtype)
|
|
5037
|
-
e =
|
|
6344
|
+
e = F.broadcast_to(e, x_shape)
|
|
5038
6345
|
|
|
5039
6346
|
prod_val = _get_cache_prim(P.Mul)()(input, e)
|
|
5040
6347
|
res = _get_cache_prim(P.ReduceSum)()(prod_val.astype(mstype.float32), -1)
|
|
@@ -5042,7 +6349,7 @@ def diagonal(input, offset=0, dim1=0, dim2=1):
|
|
|
5042
6349
|
begin = ()
|
|
5043
6350
|
for _ in np.arange((x_ndim - 2)):
|
|
5044
6351
|
begin += (0,)
|
|
5045
|
-
last_dim_begin = np.max((0, -offset))
|
|
6352
|
+
last_dim_begin = np.max((0, -offset)).astype(np.int64)
|
|
5046
6353
|
begin += (last_dim_begin,)
|
|
5047
6354
|
res_size = res.shape[:-1]
|
|
5048
6355
|
last_dim_end = np.min((x_shape[-2], np.max((0, (x_shape[-1] - offset))))) - last_dim_begin
|
|
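A minimal doctest-style sketch of the `offset` convention described above (the expected output assumes the usual convention that offset=1 selects the super-diagonal):

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.arange(4).reshape(2, 2), mindspore.float32)
>>> print(ops.diagonal(x, offset=1))
[1.]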
@@ -5074,10 +6381,12 @@ def lstsq(input, A):
|
|
|
5074
6381
|
\min_y & \|y\|_2 & \text{subject to} & xy = a.
|
|
5075
6382
|
\end{array}
|
|
5076
6383
|
|
|
6384
|
+
where `y` is the returned tensor.
|
|
6385
|
+
|
|
5077
6386
|
Args:
|
|
5078
|
-
input (Tensor): The m
|
|
6387
|
+
input (Tensor): The :math:`(m \times n)` matrix equivalent to :math:`x` in the formula above.
|
|
5079
6388
|
The input tensor whose data type is float16, float32 or float64.
|
|
5080
|
-
A (Tensor): The m
|
|
6389
|
+
A (Tensor): The :math:`(m \times k)` matrix equivalent to :math:`a` in the formula above.
|
|
5081
6390
|
The input tensor whose data type is float16, float32 or float64.
|
|
5082
6391
|
|
|
5083
6392
|
Returns:
|
|
@@ -5110,18 +6419,18 @@ def lstsq(input, A):
|
|
|
5110
6419
|
|
|
5111
6420
|
def mvlgamma(input, p):
|
|
5112
6421
|
r"""
|
|
5113
|
-
|
|
6422
|
+
Returns the results of the multivariate log-gamma function with dimension `p` element-wise.
|
|
5114
6423
|
|
|
5115
6424
|
The mathematical calculation process of Mvlgamma is shown as follows:
|
|
5116
6425
|
|
|
5117
6426
|
.. math::
|
|
5118
6427
|
|
|
5119
|
-
\log (\Gamma_{p}(
|
|
6428
|
+
\log (\Gamma_{p}(input))=C+\sum_{i=1}^{p} \log (\Gamma(input-\frac{i-1}{2}))
|
|
5120
6429
|
|
|
5121
6430
|
where :math:`C = \log(\pi) \times \frac{p(p-1)}{4}` and :math:`\Gamma(\cdot)` is the Gamma function.
|
|
5122
6431
|
|
|
5123
6432
|
Args:
|
|
5124
|
-
input (Tensor): The tensor
|
|
6433
|
+
input (Tensor): The input tensor of the multivariate log-gamma function,
|
|
5125
6434
|
which must be one of the following types: float32, float64.
|
|
5126
6435
|
The shape is :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
|
|
5127
6436
|
And the value of any element in `input` must be greater than :math:`(p - 1) / 2`.
|
|
@@ -5137,7 +6446,7 @@ def mvlgamma(input, p):
|
|
|
5137
6446
|
ValueError: If not all elements of `input` are greater than :math:`(p - 1) / 2`.
|
|
5138
6447
|
|
|
5139
6448
|
Supported Platforms:
|
|
5140
|
-
``
|
|
6449
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
5141
6450
|
|
|
5142
6451
|
Examples:
|
|
5143
6452
|
>>> x = Tensor(np.array([[3, 4, 5], [4, 2, 6]]), mindspore.float32)
|
|
@@ -5150,6 +6459,543 @@ def mvlgamma(input, p):
|
|
|
5150
6459
|
return mvlgamma_op(input)
|
|
5151
6460
|
|
|
5152
6461
|
|
|
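A sanity check that follows from the formula above: with p = 1, C = 0 and mvlgamma reduces to log(Gamma(input)), so mvlgamma(3, 1) should print log(2) (value shown to float32 print precision):

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([3.0]), mindspore.float32)
>>> print(ops.mvlgamma(x, p=1))
[0.6931472]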
6462
|
+
def argwhere(input):
|
|
6463
|
+
"""
|
|
6464
|
+
Returns a Tensor containing the positions of all non-zero values.
|
|
6465
|
+
|
|
6466
|
+
Args:
|
|
6467
|
+
input (Tensor): The input tensor. The data type is Number or Bool.
|
|
6468
|
+
|
|
6469
|
+
Returns:
|
|
6470
|
+
Tensor, a 2-D Tensor whose data type is int64, containing the positions of all non-zero values of the input.
|
|
6471
|
+
|
|
6472
|
+
Raises:
|
|
6473
|
+
TypeError: If `input` is not Tensor.
|
|
6474
|
+
ValueError: If the dimension of `input` equals 0.
|
|
6475
|
+
|
|
6476
|
+
Supported Platforms:
|
|
6477
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
6478
|
+
|
|
6479
|
+
Examples:
|
|
6480
|
+
>>> import mindspore
|
|
6481
|
+
>>> from mindspore import Tensor, ops
|
|
6482
|
+
>>> import numpy as np
|
|
6483
|
+
>>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
|
|
6484
|
+
>>> output = ops.argwhere(x)
|
|
6485
|
+
>>> print(output)
|
|
6486
|
+
[[0 0 0]
|
|
6487
|
+
[0 1 0]]
|
|
6488
|
+
"""
|
|
6489
|
+
return nonzero_(input)
|
|
6490
|
+
|
|
6491
|
+
|
|
6492
|
+
def column_stack(tensors):
|
|
6493
|
+
"""
|
|
6494
|
+
Stacks 1-D tensors as columns into a 2-D tensor. 2-D tensors are stacked as-is,
|
|
6495
|
+
like ops.hstack.
|
|
6496
|
+
|
|
6497
|
+
Args:
|
|
6498
|
+
tensors (Union[Tensor, tuple, list]): A sequence of 1-D or 2-D tensors. All
|
|
6499
|
+
of them must have the same shape, except along the axis to be concatenated.
|
|
6500
|
+
|
|
6501
|
+
Returns:
|
|
6502
|
+
2-D Tensor, formed by stacking the given tensors.
|
|
6503
|
+
|
|
6504
|
+
Raises:
|
|
6505
|
+
TypeError: If `tensors` is not Tensor, list or tuple.
|
|
6506
|
+
ValueError: If `tensors` is empty.
|
|
6507
|
+
|
|
6508
|
+
Supported Platforms:
|
|
6509
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
6510
|
+
|
|
6511
|
+
Examples:
|
|
6512
|
+
>>> from mindspore import Tensor, ops
|
|
6513
|
+
>>> x1 = Tensor([1, 1, 1])
|
|
6514
|
+
>>> x2 = Tensor([2, 2, 2])
|
|
6515
|
+
>>> output = ops.column_stack((x1, x2))
|
|
6516
|
+
>>> print(output)
|
|
6517
|
+
[[1 2]
|
|
6518
|
+
[1 2]
|
|
6519
|
+
[1 2]]
|
|
6520
|
+
"""
|
|
6521
|
+
if not isinstance(tensors, (list, tuple)):
|
|
6522
|
+
raise TypeError(f"For column_stack, the input must be list or tuple or tensor, but got {type(tensors)}.")
|
|
6523
|
+
|
|
6524
|
+
trans_x = ()
|
|
6525
|
+
_expand_dims = _get_cache_prim(P.ExpandDims)()
|
|
6526
|
+
for tensor in tensors:
|
|
6527
|
+
if tensor.ndim < 1:
|
|
6528
|
+
tensor = _expand_dims(tensor, 0)
|
|
6529
|
+
if tensor.ndim == 1:
|
|
6530
|
+
tensor = _expand_dims(tensor, 1)
|
|
6531
|
+
trans_x += (tensor,)
|
|
6532
|
+
if not trans_x:
|
|
6533
|
+
raise ValueError(f"For column_stack, the input must have at least 1 tensor, but got 0.")
|
|
6534
|
+
_concat = _get_cache_prim(P.Concat)(-1)
|
|
6535
|
+
return _concat(trans_x)
|
|
6536
|
+
|
|
6537
|
+
|
|
6538
|
+
def hstack(tensors):
|
|
6539
|
+
"""
|
|
6540
|
+
Stacks tensors in sequence horizontally.
|
|
6541
|
+
This is equivalent to concatenation along the second axis, except for 1-D tensors
|
|
6542
|
+
where it concatenates along the first axis.
|
|
6543
|
+
|
|
6544
|
+
Args:
|
|
6545
|
+
tensors (Union[Tensor, tuple, list]): A sequence of 1-D or 2-D tensors. The
|
|
6546
|
+
tensors must have the same shape along all but the second axis, except
|
|
6547
|
+
1-D tensors which can be any length.
|
|
6548
|
+
|
|
6549
|
+
Returns:
|
|
6550
|
+
Stacked Tensor, formed by stacking the given tensors.
|
|
6551
|
+
|
|
6552
|
+
Raises:
|
|
6553
|
+
TypeError: If `tensors` is not Tensor, list or tuple.
|
|
6554
|
+
ValueError: If `tensors` is empty.
|
|
6555
|
+
|
|
6556
|
+
Supported Platforms:
|
|
6557
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
6558
|
+
|
|
6559
|
+
Examples:
|
|
6560
|
+
>>> from mindspore import Tensor, ops
|
|
6561
|
+
>>> x1 = Tensor([1, 1, 1])
|
|
6562
|
+
>>> x2 = Tensor([2, 2, 2])
|
|
6563
|
+
>>> output = ops.hstack((x1, x2))
|
|
6564
|
+
>>> print(output)
|
|
6565
|
+
[1 1 1 2 2 2]
|
|
6566
|
+
"""
|
|
6567
|
+
if not isinstance(tensors, (list, tuple)):
|
|
6568
|
+
raise TypeError(f"For hstack, the input must be list or tuple, but got {type(tensors)}.")
|
|
6569
|
+
|
|
6570
|
+
tuple_of_tensor = ()
|
|
6571
|
+
for tensor in tensors:
|
|
6572
|
+
if tensor.ndim < 1:
|
|
6573
|
+
tensor = expand_dims_(tensor, 0)
|
|
6574
|
+
tuple_of_tensor += (tensor,)
|
|
6575
|
+
if not tuple_of_tensor:
|
|
6576
|
+
raise ValueError("For hstack, the input must have at least 1 tensor, but got 0.")
|
|
6577
|
+
if tuple_of_tensor[0].ndim <= 1:
|
|
6578
|
+
_concat = _get_cache_prim(P.Concat)(0)
|
|
6579
|
+
return _concat(tuple_of_tensor)
|
|
6580
|
+
_concat = _get_cache_prim(P.Concat)(1)
|
|
6581
|
+
return _concat(tuple_of_tensor)
|
|
6582
|
+
|
|
6583
|
+
|
|
6584
|
+
@constexpr
|
|
6585
|
+
def _check_axis_valid(axis, ndim):
|
|
6586
|
+
"""
|
|
6587
|
+
Checks that the axes are valid given ndim, and returns axes that can be passed
|
|
6588
|
+
to the built-in operator (non-negative, int or tuple).
|
|
6589
|
+
"""
|
|
6590
|
+
if axis is None:
|
|
6591
|
+
axis = F.make_range(ndim)
|
|
6592
|
+
return axis
|
|
6593
|
+
if isinstance(axis, (tuple, list)):
|
|
6594
|
+
axis = tuple(map(lambda x: _check_check_axis_in_range(x, ndim), axis))
|
|
6595
|
+
return axis
|
|
6596
|
+
return (_check_check_axis_in_range(axis, ndim),)
|
|
6597
|
+
|
|
6598
|
+
|
|
6599
|
+
@constexpr
|
|
6600
|
+
def _get_moved_perm(ndim, source, destination):
|
|
6601
|
+
"""
|
|
6602
|
+
Helper function for movedim, returns permutation after moving axis
|
|
6603
|
+
from source to destination.
|
|
6604
|
+
"""
|
|
6605
|
+
dest_sorted_idx = [i for i, _ in sorted(enumerate(destination), key=operator.itemgetter(1))]
|
|
6606
|
+
axis_orig = [i for i in builtins.range(0, ndim) if i not in source]
|
|
6607
|
+
|
|
6608
|
+
k = 0
|
|
6609
|
+
m = 0
|
|
6610
|
+
perm = []
|
|
6611
|
+
for i in dest_sorted_idx:
|
|
6612
|
+
# inserts an axis that has been moved, denoted by n, and axes that remain
|
|
6613
|
+
# in their original position, indexed from k to k + n - m, into index m in
|
|
6614
|
+
# the list of permuted axes
|
|
6615
|
+
n = destination[i]
|
|
6616
|
+
j = k + n - m
|
|
6617
|
+
perm += axis_orig[k:j]
|
|
6618
|
+
perm.append(source[i])
|
|
6619
|
+
k += n - m
|
|
6620
|
+
m = n + 1
|
|
6621
|
+
perm += axis_orig[k:]
|
|
6622
|
+
return tuple(perm)
|
|
6623
|
+
|
|
6624
|
+
|
|
6625
|
+
def movedim(x, source, destination):
|
|
6626
|
+
"""
|
|
6627
|
+
Moves axes of an array from source positions to destination positions.
|
|
6628
|
+
|
|
6629
|
+
Other axes remain in their original order.
|
|
6630
|
+
|
|
6631
|
+
Args:
|
|
6632
|
+
x (Tensor): The tensor array whose axis should be reordered.
|
|
6633
|
+
source (Union[int, sequence[int]]): Original positions of the
|
|
6634
|
+
axes to move. These must be unique.
|
|
6635
|
+
destination (Union[int, sequence[int]]): Destination positions
|
|
6636
|
+
for each of the original axes. These must also be unique.
|
|
6637
|
+
|
|
6638
|
+
Returns:
|
|
6639
|
+
Tensor, array with moved axis.
|
|
6640
|
+
|
|
6641
|
+
Raises:
|
|
6642
|
+
ValueError: If axes are out of the range of `[-a.ndim, a.ndim)`, or
|
|
6643
|
+
if the axes contain duplicates.
|
|
6644
|
+
|
|
6645
|
+
Supported Platforms:
|
|
6646
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
6647
|
+
|
|
6648
|
+
Examples:
|
|
6649
|
+
>>> # case 1 : moving single axis
|
|
6650
|
+
>>> from mindspore import ops, Tensor
|
|
6651
|
+
>>> import numpy as np
|
|
6652
|
+
>>> x = Tensor(np.zeros((3, 4, 5)))
|
|
6653
|
+
>>> output = ops.movedim(x, 0, -1)
|
|
6654
|
+
>>> print(output.shape)
|
|
6655
|
+
(4, 5, 3)
|
|
6656
|
+
>>> # case 2 : moving multiple axes
|
|
6657
|
+
>>> from mindspore import ops, Tensor
|
|
6658
|
+
>>> import numpy as np
|
|
6659
|
+
>>> x = Tensor(np.zeros((3, 4, 5)))
|
|
6660
|
+
>>> output = ops.movedim(x, (0, 2), (1, 2))
|
|
6661
|
+
>>> print(output.shape)
|
|
6662
|
+
(4, 3, 5)
|
|
6663
|
+
"""
|
|
6664
|
+
ndim = F.rank(x)
|
|
6665
|
+
source = _check_axis_valid(source, ndim)
|
|
6666
|
+
destination = _check_axis_valid(destination, ndim)
|
|
6667
|
+
if len(source) != len(destination):
|
|
6668
|
+
raise ValueError(
|
|
6669
|
+
f"For `source` and `destination` arguments, the number of elements must be the same, but got 'source':"
|
|
6670
|
+
f" {len(source)} and 'destination': {len(destination)}.")
|
|
6671
|
+
perm = _get_moved_perm(ndim, source, destination)
|
|
6672
|
+
return _get_cache_prim(P.Transpose)()(x, perm)
|
|
6673
|
+
|
|
6674
|
+
|
|
6675
|
+
def moveaxis(x, source, destination):
|
|
6676
|
+
"""
|
|
6677
|
+
Alias for `ops.movedim`. Moves axes of an array from source positions to destination positions.
|
|
6678
|
+
|
|
6679
|
+
Refer to :func:`mindspore.ops.movedim` for more detail.
|
|
6680
|
+
|
|
6681
|
+
Supported Platforms:
|
|
6682
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
6683
|
+
|
|
6684
|
+
Examples:
|
|
6685
|
+
>>> from mindspore import ops, Tensor
|
|
6686
|
+
>>> import numpy as np
|
|
6687
|
+
>>> x = Tensor(np.zeros((3, 4, 5)))
|
|
6688
|
+
>>> output = ops.moveaxis(x, 0, -1)
|
|
6689
|
+
>>> print(output.shape)
|
|
6690
|
+
(4, 5, 3)
|
|
6691
|
+
"""
|
|
6692
|
+
|
|
6693
|
+
return movedim(x, source, destination)
|
|
6694
|
+
|
|
6695
|
+
|
|
6696
|
+
@constexpr
|
|
6697
|
+
def _check_swapaxes_axis(axes, ndim):
|
|
6698
|
+
return validator.check_swapaxes_axis(axes, ndim)
|
|
6699
|
+
|
|
6700
|
+
|
|
6701
|
+
def swapaxes(input, axis0, axis1):
|
|
6702
|
+
'''
|
|
6703
|
+
Interchange two axes of a tensor.
|
|
6704
|
+
|
|
6705
|
+
Args:
|
|
6706
|
+
input (Tensor): Input tensor.
|
|
6707
|
+
axis0 (int): First axis.
|
|
6708
|
+
axis1 (int): Second axis.
|
|
6709
|
+
|
|
6710
|
+
Returns:
|
|
6711
|
+
Transposed tensor, with the same data type as `input`.
|
|
6712
|
+
|
|
6713
|
+
Raises:
|
|
6714
|
+
TypeError: If argument `input` is not Tensor.
|
|
6715
|
+
TypeError: If `axis0` or `axis1` is not an integer.
|
|
6716
|
+
ValueError: If `axis0` or `axis1` is not in the range of :math:`[-ndim, ndim-1]`.
|
|
6717
|
+
|
|
6718
|
+
Supported Platforms:
|
|
6719
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
6720
|
+
|
|
6721
|
+
Examples:
|
|
6722
|
+
>>> import numpy as np
|
|
6723
|
+
>>> import mindspore.ops as ops
|
|
6724
|
+
>>> from mindspore import Tensor
|
|
6725
|
+
>>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
|
|
6726
|
+
>>> output = ops.swapaxes(input, 0, 2)
|
|
6727
|
+
>>> print(output.shape)
|
|
6728
|
+
(4, 3, 2)
|
|
6729
|
+
'''
|
|
6730
|
+
if not isinstance(input, Tensor):
|
|
6731
|
+
raise TypeError(f'For ops.swapaxes, parameter `input` must be Tensor, but got {type(input)}')
|
|
6732
|
+
|
|
6733
|
+
axis0, axis1 = _check_swapaxes_axis((axis0, axis1), input.ndim)
|
|
6734
|
+
if axis0 == axis1:
|
|
6735
|
+
return input
|
|
6736
|
+
if axis0 > axis1:
|
|
6737
|
+
axis0, axis1 = axis1, axis0
|
|
6738
|
+
|
|
6739
|
+
perm = F.make_range(0, input.ndim)
|
|
6740
|
+
if axis1 + 1 < input.ndim:
|
|
6741
|
+
new_perm = perm[0:axis0] + perm[axis1:axis1 + 1] + \
|
|
6742
|
+
perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1] + perm[axis1 + 1:]
|
|
6743
|
+
else:
|
|
6744
|
+
new_perm = perm[0:axis0] + perm[axis1:axis1 + 1] + \
|
|
6745
|
+
perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1]
|
|
6746
|
+
|
|
6747
|
+
return _get_cache_prim(P.Transpose)()(input, new_perm)
|
|
6748
|
+
|
|
6749
|
+
|
|
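The tuple splice that builds `new_perm` above can be replayed in plain Python to confirm it really is an axis swap (here ndim = 3, axis0 = 0, axis1 = 2, which takes the `else` branch):

>>> perm = (0, 1, 2)
>>> axis0, axis1 = 0, 2
>>> perm[0:axis0] + perm[axis1:axis1 + 1] + perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1]
(2, 1, 0)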
6750
|
+
def swapdims(input, dim0, dim1):
|
|
6751
|
+
'''
|
|
6752
|
+
Interchange two dims of a tensor.
|
|
6753
|
+
This function is equivalent to :func:`mindspore.ops.swapaxes`.
|
|
6754
|
+
|
|
6755
|
+
Args:
|
|
6756
|
+
input (Tensor): Input tensor.
|
|
6757
|
+
dim0 (int): First dim.
|
|
6758
|
+
dim1 (int): Second dim.
|
|
6759
|
+
|
|
6760
|
+
Returns:
|
|
6761
|
+
Transposed tensor, with the same data type as `input`.
|
|
6762
|
+
|
|
6763
|
+
Raises:
|
|
6764
|
+
TypeError: If argument `input` is not Tensor.
|
|
6765
|
+
TypeError: If `dim0` or `dim1` is not an integer.
|
|
6766
|
+
ValueError: If `dim0` or `dim1` is not in the range of :math:`[-ndim, ndim-1]`.
|
|
6767
|
+
|
|
6768
|
+
Supported Platforms:
|
|
6769
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
6770
|
+
|
|
6771
|
+
Examples:
|
|
6772
|
+
>>> import numpy as np
|
|
6773
|
+
>>> import mindspore.ops as ops
|
|
6774
|
+
>>> from mindspore import Tensor
|
|
6775
|
+
>>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
|
|
6776
|
+
>>> output = ops.swapdims(input, 0, 2)
|
|
6777
|
+
>>> print(output.shape)
|
|
6778
|
+
(4, 3, 2)
|
|
6779
|
+
'''
|
|
6780
|
+
return F.swapaxes(input, dim0, dim1)
|
|
6781
|
+
|
|
6782
|
+
|
|
6783
|
+
@constexpr
|
|
6784
|
+
def _check_is_int(arg_value, arg_name, op_name):
|
|
6785
|
+
arg_value = validator.check_is_int(arg_value, arg_name, op_name)
|
|
6786
|
+
return arg_value
|
|
6787
|
+
|
|
6788
|
+
|
|
6789
|
+
@constexpr
|
|
6790
|
+
def _check_positive_int(arg_value, arg_name, op_name):
|
|
6791
|
+
arg_value = validator.check_positive_int(arg_value, arg_name, op_name)
|
|
6792
|
+
return arg_value
|
|
6793
|
+
|
|
6794
|
+
|
|
6795
|
+
@constexpr
|
|
6796
|
+
def _check_axis_range(arg_value, limit, arg_name, op_name):
|
|
6797
|
+
arg_value = validator.check_int_range(arg_value, -limit, limit, validator.INC_LEFT, arg_name, op_name)
|
|
6798
|
+
return arg_value
|
|
6799
|
+
|
|
6800
|
+
|
|
6801
|
+
@_primexpr
|
|
6802
|
+
def _cal_repeat_dims(x_rank, rep, expand_axis):
|
|
6803
|
+
rep_dims = [1] * (x_rank + 1)
|
|
6804
|
+
rep_dims[expand_axis] = rep
|
|
6805
|
+
return tuple(rep_dims)
|
|
6806
|
+
|
|
6807
|
+
|
|
6808
|
+
@constexpr
|
|
6809
|
+
def _cal_reshape(x_shape, rep, axis):
|
|
6810
|
+
x_reshape = list(x_shape)
|
|
6811
|
+
x_reshape[axis] *= rep
|
|
6812
|
+
return tuple(x_reshape)
|
|
6813
|
+
|
|
6814
|
+
|
|
6815
|
+
def repeat_interleave(input, repeats, axis=None):
|
|
6816
|
+
"""
|
|
6817
|
+
Repeat elements of a tensor along an axis, like `numpy.repeat`.
|
|
6818
|
+
|
|
6819
|
+
Args:
|
|
6820
|
+
input (Tensor): The tensor to repeat values for. Must be of type: float16,
|
|
6821
|
+
float32, int8, uint8, int16, int32, or int64.
|
|
6822
|
+
repeats (int): The number of times to repeat, must be positive.
|
|
6823
|
+
axis (int, optional): The axis along which to repeat. Default: None. If `axis` is None, the input Tensor will be
|
|
6824
|
+
flattened and the output will also be flattened.
|
|
6825
|
+
|
|
6826
|
+
Returns:
|
|
6827
|
+
One tensor with values repeated along the specified axis. If input has shape
|
|
6828
|
+
:math:`(s1, s2, ..., sn)` and axis is i, the output will have shape :math:`(s1, s2, ...,
|
|
6829
|
+
si * repeats, ..., sn)`. The output type will be the same as the type of `input`.
|
|
6830
|
+
|
|
6831
|
+
Supported Platforms:
|
|
6832
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
6833
|
+
|
|
6834
|
+
Examples:
|
|
6835
|
+
>>> input = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
|
|
6836
|
+
>>> output = ops.repeat_interleave(input, repeats=2, axis=0)
|
|
6837
|
+
>>> print(output)
|
|
6838
|
+
[[0 1 2]
|
|
6839
|
+
[0 1 2]
|
|
6840
|
+
[3 4 5]
|
|
6841
|
+
[3 4 5]]
|
|
6842
|
+
"""
|
|
6843
|
+
if axis is None:
|
|
6844
|
+
input = input.reshape(-1)
|
|
6845
|
+
axis = 0
|
|
6846
|
+
return repeat_elements(input, repeats, axis)
|
|
6847
|
+
|
|
6848
|
+
|
|
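The axis=None flattening rule stated in the Args has no example above; a minimal sketch (the expected output follows from flatten-then-repeat):

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[0, 1], [2, 3]]), mindspore.int32)
>>> print(ops.repeat_interleave(input, repeats=2, axis=None))
[0 0 1 1 2 2 3 3]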
6849
|
+
def repeat_elements(x, rep, axis=0):
|
|
6850
|
+
"""
|
|
6851
|
+
Repeat elements of a tensor along an axis, like `np.repeat` .
|
|
6852
|
+
|
|
6853
|
+
Args:
|
|
6854
|
+
x (Tensor): The tensor to repeat values for. Must be of type: float16,
|
|
6855
|
+
float32, int8, uint8, int16, int32, or int64.
|
|
6856
|
+
rep (int): The number of times to repeat, must be positive.
|
|
6857
|
+
axis (int): The axis along which to repeat. Default: 0.
|
|
6858
|
+
|
|
6859
|
+
Returns:
|
|
6860
|
+
One tensor with values repeated along the specified axis. If x has shape
|
|
6861
|
+
:math:`(s1, s2, ..., sn)` and axis is i, the output will have shape :math:`(s1, s2, ..., si * rep, ..., sn)`.
|
|
6862
|
+
The output type will be the same as the type of `x`.
|
|
6863
|
+
|
|
6864
|
+
Supported Platforms:
|
|
6865
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
6866
|
+
|
|
6867
|
+
Examples:
|
|
6868
|
+
>>> # case 1 : repeat on axis 0
|
|
6869
|
+
>>> x = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
|
|
6870
|
+
>>> output = ops.repeat_elements(x, rep = 2, axis = 0)
|
|
6871
|
+
>>> print(output)
|
|
6872
|
+
[[0 1 2]
|
|
6873
|
+
[0 1 2]
|
|
6874
|
+
[3 4 5]
|
|
6875
|
+
[3 4 5]]
|
|
6876
|
+
>>> # case 2 : repeat on axis 1
|
|
6877
|
+
>>> x = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
|
|
6878
|
+
>>> output = ops.repeat_elements(x, rep = 2, axis = 1)
|
|
6879
|
+
>>> print(output)
|
|
6880
|
+
[[0 0 1 1 2 2]
|
|
6881
|
+
[3 3 4 4 5 5]]
|
|
6882
|
+
"""
|
|
6883
|
+
const_utils.check_type_valid(F.dtype(x), mstype.number_type, 'input x')
|
|
6884
|
+
rep = _check_positive_int(rep, "rep", "repeat_elements")
|
|
6885
|
+
axis = _check_is_int(axis, "axis", "repeat_elements")
|
|
6886
|
+
shape_op = P.Shape()
|
|
6887
|
+
rank_op = P.Rank()
|
|
6888
|
+
tile_op = P.Tile()
|
|
6889
|
+
expand_dims_op = P.ExpandDims()
|
|
6890
|
+
reshape_op = P.Reshape()
|
|
6891
|
+
x_rank = rank_op(x)
|
|
6892
|
+
axis = _check_axis_range(axis, x_rank, "axis", "repeat_elements")
|
|
6893
|
+
expand_axis = axis + 1
|
|
6894
|
+
x_expand = expand_dims_op(x, expand_axis)
|
|
6895
|
+
rep_dims = _cal_repeat_dims(x_rank, rep, expand_axis)
|
|
6896
|
+
x_expand = tile_op(x_expand, rep_dims)
|
|
6897
|
+
x_shape = shape_op(x)
|
|
6898
|
+
x_reshape = _cal_reshape(x_shape, rep, axis)
|
|
6899
|
+
x_rep = reshape_op(x_expand, x_reshape)
|
|
6900
|
+
return x_rep
|
|
6901
|
+
|
|
6902
|
+
|
|
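The expand/tile/reshape trick used above, replayed in NumPy for clarity (axis = 1, rep = 2; NumPy is only a stand-in here to show the technique, not part of the implementation):

>>> import numpy as np
>>> x = np.array([[0, 1, 2], [3, 4, 5]])
>>> np.tile(x[:, :, None], (1, 1, 2)).reshape(2, 6)
array([[0, 0, 1, 1, 2, 2],
       [3, 3, 4, 4, 5, 5]])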
6903
|
+
@_primexpr
|
|
6904
|
+
def _check_sequence_mask_input_len(input_shape, prim_name=None):
|
|
6905
|
+
msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
|
|
6906
|
+
if not input_shape:
|
|
6907
|
+
raise ValueError(f"{msg_prefix} input_shape must be greater than 0, but got {input_shape}.")
|
|
6908
|
+
# broadcast only supports 7d shape
|
|
6909
|
+
shape_size = len(input_shape)
|
|
6910
|
+
if shape_size >= 7:
|
|
6911
|
+
raise ValueError(f"{msg_prefix} dimension of input_shape must be less than 7, but got {shape_size}d.")
|
|
6912
|
+
|
|
6913
|
+
|
|
6914
|
+
def sequence_mask(lengths, maxlen=None):
|
|
6915
|
+
"""
|
|
6916
|
+
Returns a mask tensor representing the first N positions of each cell.
|
|
6917
|
+
|
|
6918
|
+
If `lengths` has shape :math:`(d_1, d_2, ..., d_n)`, then the resulting tensor mask has type and shape
|
|
6919
|
+
:math:`(d_1, d_2, ..., d_n, maxlen)`, with mask :math:`[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])`.
|
|
6920
|
+
|
|
6921
|
+
Args:
|
|
6922
|
+
lengths (Tensor): Tensor to calculate the mask for. All values in this tensor should be
|
|
6923
|
+
less than or equal to `maxlen`. Values greater than `maxlen` will be treated as `maxlen`.
|
|
6924
|
+
maxlen (int): size of the last dimension of the returned tensor. Must be positive and of the same
|
|
6925
|
+
type as the elements in `lengths`. Default: None.
|
|
6926
|
+
|
|
6927
|
+
Returns:
|
|
6928
|
+
One mask tensor of shape `lengths.shape + (maxlen,)`.
|
|
6929
|
+
|
|
6930
|
+
Raises:
|
|
6931
|
+
TypeError: If `lengths` is not a Tensor.
|
|
6932
|
+
TypeError: If `maxlen` is not an int.
|
|
6933
|
+
TypeError: If dtype of `lengths` is neither int32 nor int64.
|
|
6934
|
+
|
|
6935
|
+
Supported Platforms:
|
|
6936
|
+
``GPU`` ``CPU``
|
|
6937
|
+
|
|
6938
|
+
Examples:
|
|
6939
|
+
>>> # case 1: When maxlen is assigned
|
|
6940
|
+
>>> x = Tensor(np.array([1, 2, 3, 4]))
|
|
6941
|
+
>>> output = ops.sequence_mask(x, 5)
|
|
6942
|
+
>>> print(output)
|
|
6943
|
+
[[ True False False False False]
|
|
6944
|
+
[ True True False False False]
|
|
6945
|
+
[ True True True False False]
|
|
6946
|
+
[ True True True True False]]
|
|
6947
|
+
>>> # case 2: When there is 0 in x
|
|
6948
|
+
>>> x = Tensor(np.array([[1, 3], [2, 0]]))
|
|
6949
|
+
>>> output = ops.sequence_mask(x, 5)
|
|
6950
|
+
>>> print(output)
|
|
6951
|
+
[[[ True False False False False]
|
|
6952
|
+
[ True True True False False]]
|
|
6953
|
+
[[ True True False False False]
|
|
6954
|
+
[False False False False False]]]
|
|
6955
|
+
>>> # case 3: when the maxlen is not assigned
|
|
6956
|
+
>>> x = Tensor(np.array([[1, 3], [2, 4]]))
|
|
6957
|
+
>>> output = ops.sequence_mask(x)
|
|
6958
|
+
>>> print(output)
|
|
6959
|
+
[[[ True False False False]
|
|
6960
|
+
[ True True True False]]
|
|
6961
|
+
[[ True True False False]
|
|
6962
|
+
[ True True True True]]]
|
|
6963
|
+
"""
|
|
6964
|
+
|
|
6965
|
+
argmax_op = P.ArgMaxWithValue()
|
|
6966
|
+
reshape_op = P.Reshape()
|
|
6967
|
+
range_op = P.Range()
|
|
6968
|
+
expand_op = P.ExpandDims()
|
|
6969
|
+
cast_op = P.Cast()
|
|
6970
|
+
to_tensor_op = P.ScalarToTensor()
|
|
6971
|
+
shape_op = P.Shape()
|
|
6972
|
+
|
|
6973
|
+
const_utils.check_type_valid(F.dtype(lengths), [mstype.int64, mstype.int32], 'lengths')
|
|
6974
|
+
_check_sequence_mask_input_len(shape_op(lengths), "sequence_mask")
|
|
6975
|
+
|
|
6976
|
+
if maxlen is None:
|
|
6977
|
+
flatten_data = reshape_op(lengths, (-1,))
|
|
6978
|
+
flatten_data = cast_op(flatten_data, mstype.float32)
|
|
6979
|
+
_, value = argmax_op(flatten_data)
|
|
6980
|
+
maxlen = cast_op(value, mstype.int32)
|
|
6981
|
+
else:
|
|
6982
|
+
maxlen = _check_positive_int(maxlen, "maxlen", "sequence_mask")
|
|
6983
|
+
maxlen = to_tensor_op(maxlen, mstype.int32)
|
|
6984
|
+
|
|
6985
|
+
range_vector = range_op(to_tensor_op(0, mstype.int32), maxlen, to_tensor_op(1, mstype.int32))
|
|
6986
|
+
mask = expand_op(lengths, -1)
|
|
6987
|
+
result = range_vector < mask
|
|
6988
|
+
return result
|
|
6989
|
+
|
|
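The broadcast-compare at the end of sequence_mask (`range_vector < mask`), replayed in NumPy (again just a stand-in to show the technique):

>>> import numpy as np
>>> lengths = np.array([1, 2, 3, 4])
>>> np.arange(5) < lengths[:, None]
array([[ True, False, False, False, False],
       [ True,  True, False, False, False],
       [ True,  True,  True, False, False],
       [ True,  True,  True,  True, False]])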
6990
|
+
|
|
6991
|
+
def top_k(input_x, k, sorted=True):
|
|
6992
|
+
r"""
|
|
6993
|
+
`top_k` is deprecated, please use `ops.topk` instead.
|
|
6994
|
+
"""
|
|
6995
|
+
top_k_ = _get_cache_prim(P.TopK)(sorted)
|
|
6996
|
+
return top_k_(input_x, k)
|
|
6997
|
+
|
|
6998
|
+
|
|
5153
6999
|
__all__ = [
|
|
5154
7000
|
'unique',
|
|
5155
7001
|
'unique_with_pad',
|
|
@@ -5165,10 +7011,16 @@ __all__ = [
|
|
|
5165
7011
|
'ger',
|
|
5166
7012
|
'ones',
|
|
5167
7013
|
'ones_like',
|
|
7014
|
+
'zeros',
|
|
7015
|
+
'zeros_like',
|
|
5168
7016
|
'shape',
|
|
5169
7017
|
'shape_',
|
|
5170
7018
|
'reverse',
|
|
5171
7019
|
'reverse_sequence',
|
|
7020
|
+
'hamming_window',
|
|
7021
|
+
'chunk',
|
|
7022
|
+
'full',
|
|
7023
|
+
'full_like',
|
|
5172
7024
|
'dyn_shape',
|
|
5173
7025
|
'rank',
|
|
5174
7026
|
'range',
|
|
@@ -5179,11 +7031,14 @@ __all__ = [
|
|
|
5179
7031
|
'tensor_slice',
|
|
5180
7032
|
'strided_slice',
|
|
5181
7033
|
'slice',
|
|
7034
|
+
'cat',
|
|
5182
7035
|
'concat',
|
|
5183
7036
|
'stack',
|
|
5184
7037
|
'unbind',
|
|
5185
7038
|
'unstack',
|
|
7039
|
+
'is_tensor',
|
|
5186
7040
|
'scalar_cast',
|
|
7041
|
+
'scalar_to_array',
|
|
5187
7042
|
'scalar_to_tensor',
|
|
5188
7043
|
'space_to_batch_nd',
|
|
5189
7044
|
'batch_to_space_nd',
|
|
@@ -5206,6 +7061,7 @@ __all__ = [
|
|
|
5206
7061
|
'tensor_scatter_max',
|
|
5207
7062
|
'tensor_scatter_min',
|
|
5208
7063
|
'tensor_scatter_elements',
|
|
7064
|
+
'scatter',
|
|
5209
7065
|
'unsorted_segment_min',
|
|
5210
7066
|
'unsorted_segment_max',
|
|
5211
7067
|
'unsorted_segment_prod',
|
|
@@ -5216,7 +7072,9 @@ __all__ = [
|
|
|
5216
7072
|
'one_hot',
|
|
5217
7073
|
'masked_fill',
|
|
5218
7074
|
'masked_select',
|
|
7075
|
+
'where',
|
|
5219
7076
|
'narrow',
|
|
7077
|
+
'ravel',
|
|
5220
7078
|
'scatter_add',
|
|
5221
7079
|
'scatter_mul',
|
|
5222
7080
|
'scatter_max',
|
|
@@ -5224,29 +7082,52 @@ __all__ = [
|
|
|
5224
7082
|
'scatter_div',
|
|
5225
7083
|
'scatter_update',
|
|
5226
7084
|
'select',
|
|
7085
|
+
'tril',
|
|
7086
|
+
'triu',
|
|
5227
7087
|
'nonzero',
|
|
5228
7088
|
'matrix_diag',
|
|
5229
7089
|
'matrix_diag_part',
|
|
5230
7090
|
'matrix_set_diag',
|
|
5231
7091
|
'diag',
|
|
7092
|
+
'diagflat',
|
|
5232
7093
|
'meshgrid',
|
|
5233
7094
|
'affine_grid',
|
|
5234
7095
|
'meshgrid',
|
|
5235
7096
|
'broadcast_to',
|
|
5236
7097
|
'col2im',
|
|
5237
7098
|
'split',
|
|
5238
|
-
|
|
7099
|
+
'tensor_split',
|
|
7100
|
+
'vsplit',
|
|
7101
|
+
'hsplit',
|
|
7102
|
+
'dsplit',
|
|
7103
|
+
'index_fill',
|
|
7104
|
+
'index_select',
|
|
5239
7105
|
'max',
|
|
5240
7106
|
'argmax',
|
|
5241
7107
|
'min',
|
|
5242
7108
|
'unsorted_segment_sum',
|
|
5243
7109
|
'population_count',
|
|
5244
|
-
'
|
|
7110
|
+
'topk',
|
|
5245
7111
|
'expand',
|
|
5246
7112
|
'fold',
|
|
5247
7113
|
'unfold',
|
|
5248
7114
|
'diagonal',
|
|
5249
7115
|
'lstsq',
|
|
5250
7116
|
'mvlgamma',
|
|
7117
|
+
'swapaxes',
|
|
7118
|
+
'swapdims',
|
|
7119
|
+
'searchsorted',
|
|
7120
|
+
'argsort',
|
|
7121
|
+
'sequence_mask',
|
|
7122
|
+
'repeat_elements',
|
|
7123
|
+
'repeat_interleave',
|
|
7124
|
+
'argwhere',
|
|
7125
|
+
'column_stack',
|
|
7126
|
+
'hstack',
|
|
7127
|
+
'movedim',
|
|
7128
|
+
'moveaxis',
|
|
7129
|
+
'aminmax',
|
|
7130
|
+
'sort',
|
|
7131
|
+
'top_k'
|
|
5251
7132
|
]
|
|
5252
7133
|
__all__.sort()
|