mindspore 2.0.0a0__cp37-cp37m-win_amd64.whl → 2.0.0rc1__cp37-cp37m-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore has been flagged as potentially problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -2
- mindspore/_c_dataengine.cp37-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp37-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp37-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +16 -1
- mindspore/_extends/parse/parser.py +107 -22
- mindspore/_extends/parse/resources.py +0 -7
- mindspore/_extends/parse/standard_method.py +885 -413
- mindspore/amp.py +52 -57
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +38 -20
- mindspore/boost/dim_reduce.py +3 -3
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +4 -6
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +41 -7
- mindspore/common/api.py +215 -141
- mindspore/common/dtype.py +8 -1
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +4 -2
- mindspore/common/jit_config.py +17 -13
- mindspore/common/mutable.py +33 -13
- mindspore/common/parameter.py +23 -21
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +62 -41
- mindspore/common/tensor.py +852 -1154
- mindspore/communication/__init__.py +2 -2
- mindspore/communication/_comm_helper.py +11 -4
- mindspore/communication/management.py +22 -21
- mindspore/config/op_info.config +501 -1008
- mindspore/context.py +201 -23
- mindspore/dataset/__init__.py +6 -6
- mindspore/dataset/audio/__init__.py +7 -7
- mindspore/dataset/audio/transforms.py +670 -30
- mindspore/dataset/audio/utils.py +47 -4
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/core/config.py +210 -14
- mindspore/dataset/core/validator_helpers.py +2 -2
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +322 -66
- mindspore/dataset/engine/datasets_audio.py +80 -76
- mindspore/dataset/engine/datasets_standard_format.py +51 -38
- mindspore/dataset/engine/datasets_text.py +232 -118
- mindspore/dataset/engine/datasets_user_defined.py +41 -17
- mindspore/dataset/engine/datasets_vision.py +746 -225
- mindspore/dataset/engine/graphdata.py +75 -10
- mindspore/dataset/engine/iterators.py +45 -5
- mindspore/dataset/engine/offload.py +48 -28
- mindspore/dataset/engine/validators.py +117 -8
- mindspore/dataset/text/__init__.py +6 -5
- mindspore/dataset/text/transforms.py +86 -3
- mindspore/dataset/text/utils.py +6 -4
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +3 -2
- mindspore/dataset/transforms/c_transforms.py +1 -1
- mindspore/dataset/transforms/transforms.py +2 -2
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +2 -3
- mindspore/dataset/vision/c_transforms.py +9 -9
- mindspore/dataset/vision/py_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +2 -0
- mindspore/dataset/vision/transforms.py +160 -161
- mindspore/dataset/vision/utils.py +3 -3
- mindspore/experimental/map_parameter.py +38 -26
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +44 -9
- mindspore/include/api/delegate.h +1 -1
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_parallel_runner.h +2 -2
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +19 -3
- mindspore/include/api/types.h +3 -3
- mindspore/include/dataset/constants.h +7 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +1 -1
- mindspore/mindrecord/filereader.py +18 -0
- mindspore/mindrecord/filewriter.py +197 -34
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
- mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
- mindspore/mindrecord/tools/csv_to_mr.py +3 -3
- mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
- mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/__init__.py +0 -4
- mindspore/nn/cell.py +204 -132
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +7 -6
- mindspore/nn/layer/__init__.py +5 -4
- mindspore/nn/layer/activation.py +40 -89
- mindspore/nn/layer/basic.py +255 -624
- mindspore/nn/layer/channel_shuffle.py +7 -6
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +41 -4
- mindspore/nn/layer/conv.py +64 -28
- mindspore/nn/layer/dense.py +9 -8
- mindspore/nn/layer/embedding.py +27 -25
- mindspore/nn/layer/image.py +53 -46
- mindspore/nn/layer/math.py +97 -105
- mindspore/nn/layer/normalization.py +117 -86
- mindspore/nn/layer/padding.py +185 -95
- mindspore/nn/layer/pooling.py +817 -414
- mindspore/nn/layer/rnn_cells.py +10 -15
- mindspore/nn/layer/rnns.py +37 -38
- mindspore/nn/layer/thor_layer.py +11 -12
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +5 -4
- mindspore/nn/loss/loss.py +334 -199
- mindspore/nn/optim/ada_grad.py +6 -6
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +4 -5
- mindspore/nn/optim/adam.py +126 -62
- mindspore/nn/optim/adamax.py +3 -4
- mindspore/nn/optim/adasum.py +6 -6
- mindspore/nn/optim/asgd.py +2 -2
- mindspore/nn/optim/ftrl.py +67 -38
- mindspore/nn/optim/lamb.py +4 -5
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +43 -4
- mindspore/nn/optim/momentum.py +6 -5
- mindspore/nn/optim/optimizer.py +3 -1
- mindspore/nn/optim/proximal_ada_grad.py +2 -2
- mindspore/nn/optim/rmsprop.py +1 -1
- mindspore/nn/optim/rprop.py +8 -9
- mindspore/nn/optim/sgd.py +19 -13
- mindspore/nn/optim/thor.py +10 -15
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +4 -4
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +9 -15
- mindspore/nn/probability/distribution/bernoulli.py +3 -3
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +5 -7
- mindspore/nn/probability/distribution/cauchy.py +3 -3
- mindspore/nn/probability/distribution/distribution.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +3 -3
- mindspore/nn/probability/distribution/half_normal.py +15 -11
- mindspore/nn/probability/distribution/laplace.py +16 -13
- mindspore/nn/probability/distribution/logistic.py +2 -2
- mindspore/nn/probability/distribution/normal.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/student_t.py +20 -15
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +27 -10
- mindspore/nn/wrap/grad_reducer.py +2 -2
- mindspore/nn/wrap/loss_scale.py +40 -24
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +35 -30
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +22 -19
- mindspore/numpy/utils.py +1 -1
- mindspore/numpy/utils_const.py +108 -58
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +86 -117
- mindspore/ops/_grad/grad_base.py +23 -1
- mindspore/ops/_grad/grad_clip_ops.py +2 -3
- mindspore/ops/_grad/grad_comm_ops.py +34 -24
- mindspore/ops/_grad/grad_implementations.py +9 -45
- mindspore/ops/_grad/grad_inner_ops.py +47 -4
- mindspore/ops/_grad/grad_math_ops.py +142 -117
- mindspore/ops/_grad/grad_nn_ops.py +71 -165
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +7 -6
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
- mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
- mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
- mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -611
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_register_for_op.py +1 -0
- mindspore/ops/_utils/__init__.py +1 -2
- mindspore/ops/_utils/utils.py +19 -40
- mindspore/ops/_vmap/vmap_array_ops.py +116 -38
- mindspore/ops/_vmap/vmap_base.py +16 -9
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
- mindspore/ops/_vmap/vmap_image_ops.py +12 -5
- mindspore/ops/_vmap/vmap_math_ops.py +46 -5
- mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
- mindspore/ops/_vmap/vmap_random_ops.py +1 -1
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
- mindspore/ops/composite/__init__.py +7 -8
- mindspore/ops/composite/base.py +101 -47
- mindspore/ops/composite/math_ops.py +188 -158
- mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
- mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
- mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
- mindspore/ops/function/__init__.py +152 -8
- mindspore/ops/function/array_func.py +2555 -674
- mindspore/ops/function/clip_func.py +209 -13
- mindspore/ops/function/debug_func.py +2 -2
- mindspore/ops/function/grad/__init__.py +2 -1
- mindspore/ops/function/grad/grad_func.py +147 -62
- mindspore/ops/function/image_func.py +54 -38
- mindspore/ops/function/linalg_func.py +167 -16
- mindspore/ops/function/math_func.py +4849 -1492
- mindspore/ops/function/nn_func.py +2573 -988
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +3 -3
- mindspore/ops/function/random_func.py +790 -73
- mindspore/ops/function/sparse_func.py +98 -78
- mindspore/ops/function/sparse_unary_func.py +54 -53
- mindspore/ops/function/spectral_func.py +27 -24
- mindspore/ops/function/vmap_func.py +22 -2
- mindspore/ops/functional.py +97 -37
- mindspore/ops/op_info_register.py +70 -28
- mindspore/ops/operations/__init__.py +47 -14
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +5 -5
- mindspore/ops/operations/_grad_ops.py +276 -187
- mindspore/ops/operations/_inner_ops.py +319 -113
- mindspore/ops/operations/_ms_kernel.py +10 -8
- mindspore/ops/operations/_ocr_ops.py +9 -9
- mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
- mindspore/ops/operations/_quant_ops.py +137 -102
- mindspore/ops/operations/_rl_inner_ops.py +121 -60
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1004 -2
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +801 -466
- mindspore/ops/operations/comm_ops.py +51 -49
- mindspore/ops/operations/control_ops.py +2 -2
- mindspore/ops/operations/custom_ops.py +123 -44
- mindspore/ops/operations/debug_ops.py +24 -24
- mindspore/ops/operations/image_ops.py +240 -153
- mindspore/ops/operations/inner_ops.py +34 -50
- mindspore/ops/operations/linalg_ops.py +31 -9
- mindspore/ops/operations/math_ops.py +988 -757
- mindspore/ops/operations/nn_ops.py +965 -819
- mindspore/ops/operations/other_ops.py +51 -40
- mindspore/ops/operations/random_ops.py +204 -122
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +254 -93
- mindspore/ops/operations/spectral_ops.py +35 -3
- mindspore/ops/primitive.py +111 -9
- mindspore/parallel/_auto_parallel_context.py +189 -83
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +99 -7
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +7 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
- mindspore/parallel/_utils.py +1 -2
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +37 -34
- mindspore/parallel/shard.py +17 -18
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +69 -47
- mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
- mindspore/profiler/parser/base_timeline_generator.py +49 -56
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
- mindspore/profiler/parser/hwts_log_parser.py +1 -1
- mindspore/profiler/parser/integrator.py +15 -14
- mindspore/profiler/parser/minddata_analyzer.py +2 -2
- mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +2 -1
- mindspore/profiler/profiling.py +218 -186
- mindspore/rewrite/__init__.py +3 -1
- mindspore/rewrite/api/node.py +1 -114
- mindspore/rewrite/api/node_type.py +3 -0
- mindspore/rewrite/api/pattern_engine.py +31 -1
- mindspore/rewrite/api/scoped_value.py +4 -4
- mindspore/rewrite/api/symbol_tree.py +3 -78
- mindspore/rewrite/api/tree_node_helper.py +1 -1
- mindspore/rewrite/ast_creator_register.py +1 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -2
- mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
- mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
- mindspore/rewrite/namespace.py +0 -2
- mindspore/rewrite/node.py +157 -11
- mindspore/rewrite/parsers/assign_parser.py +231 -53
- mindspore/rewrite/parsers/class_def_parser.py +187 -109
- mindspore/rewrite/parsers/for_parser.py +24 -14
- mindspore/rewrite/parsers/function_def_parser.py +21 -4
- mindspore/rewrite/parsers/if_parser.py +6 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +256 -133
- mindspore/rewrite/symbol_tree_builder.py +38 -1
- mindspore/run_check/_check_version.py +69 -63
- mindspore/run_check/run_check.py +2 -1
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +1 -1
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +273 -102
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +2 -2
- mindspore/train/callback/_checkpoint.py +3 -3
- mindspore/train/callback/_early_stop.py +3 -3
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +29 -31
- mindspore/train/callback/_loss_monitor.py +3 -3
- mindspore/train/callback/_on_request_exit.py +3 -3
- mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
- mindspore/train/callback/_summary_collector.py +23 -16
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +15 -3
- mindspore/train/dataset_helper.py +10 -15
- mindspore/train/loss_scale_manager.py +8 -11
- mindspore/train/metrics/__init__.py +1 -1
- mindspore/train/metrics/bleu_score.py +1 -1
- mindspore/train/metrics/confusion_matrix.py +1 -1
- mindspore/train/metrics/cosine_similarity.py +1 -1
- mindspore/train/metrics/dice.py +2 -2
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +4 -3
- mindspore/train/metrics/mean_surface_distance.py +2 -2
- mindspore/train/metrics/occlusion_sensitivity.py +1 -1
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +1 -1
- mindspore/train/metrics/recall.py +1 -1
- mindspore/train/metrics/roc.py +2 -2
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +116 -37
- mindspore/train/model.py +45 -28
- mindspore/train/serialization.py +295 -188
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -13
- mindspore/train/train_thor/convert_utils.py +2 -2
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +610 -541
- mindspore/compression/__init__.py +0 -19
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -515
- mindspore/compression/quant/__init__.py +0 -28
- mindspore/compression/quant/qat.py +0 -634
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -140
- mindspore/nn/probability/dpn/vae/vae.py +0 -124
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
- mindspore/ops/composite/array_ops.py +0 -241
- mindspore/ops/composite/clip_ops.py +0 -134
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
mindspore/_extends/parse/standard_method.py
@@ -1,6 +1,6 @@
 # This is the Python adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
 #
-# Copyright 2020-
+# Copyright 2020-2023 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,16 +20,18 @@ from __future__ import absolute_import
 from mindspore import Tensor, CSRTensor, COOTensor
 from mindspore import dtype as mstype
 from mindspore._c_expression import Tensor as Tensor_
-from mindspore.
+from mindspore.common import mutable
 import mindspore.common._monad as monad
 from mindspore.common.sparse_tensor import RowTensorInner
 from mindspore.ops.composite.base import _append, _insert, _pop, _list_clear, _reverse, \
-
+    _extend, _dict_clear, _haskey, _update, _fromkeys
 
-from ...
-from ..._checkparam import check_is_number
+from ... import _checkparam as validator
+from ..._checkparam import check_is_number, check_reshape_shp, prepare_shape_for_squeeze, \
+    check_axis_in_range, check_axis_valid, check_and_canonicalize_axes
 from ...ops import functional as F
 from ...ops import operations as P
+from ...ops import composite
 from ...ops.composite import tail, MultitypeFuncGraph, env_get, hyper_add, \
     zeros_like, ones_like, repeat_elements
 from ...ops.composite.multitype_ops import _constexpr_utils as const_utils
@@ -38,11 +40,13 @@ from ...ops.operations.math_ops import Median
 from ...ops.operations._inner_ops import Format, issubclass_
 from ...ops.operations import _csr_ops
 from ...ops.operations import _map_tensor_ops
-from ...ops.primitive import constexpr
+from ...ops.primitive import constexpr, _primexpr
 from ...common import dtype as mstype
-from ...ops.operations._sequence_ops import ListAppend
+from ...ops.operations._sequence_ops import ListAppend, ListInsert, SequenceMax, SequenceMin, \
+    SequenceIndex
 
-__all__ = ['MultitypeFuncGraph', 'env_get',
+__all__ = ['MultitypeFuncGraph', 'env_get',
+           'hyper_add', 'zeros_like', 'ones_like']
 
 shape_ = P.Shape()
 dtype_ = P.DType()
@@ -55,8 +59,6 @@ _reduce_sum_default = P.ReduceSum()
 _reduce_sum_keepdims = P.ReduceSum(True)
 _mean_keepdims = P.ReduceMean(True)
 _csr_mm = _csr_ops.CSRMM()
-_addcdiv = P.Addcdiv()
-_addcmul = P.Addcmul()
 
 itemsize_map = {mstype.bool_: 1, mstype.int8: 1, mstype.uint8: 1,
                 mstype.float16: 2, mstype.int16: 2, mstype.uint16: 2,
@@ -66,7 +68,7 @@ itemsize_map = {mstype.bool_: 1, mstype.int8: 1, mstype.uint8: 1,
 nan_tensor = Tensor(float('nan'), dtype=mstype.float32)
 
 
-def mean(x, axis=
+def mean(x, axis=None, keep_dims=False):
     """
     Reduces a dimension of a tensor by averaging all elements in the dimension.
 
@@ -100,14 +102,14 @@ def ndimension(x):
     return len(x.shape)
 
 
-def prod(
+def prod(input, axis=None, keep_dims=False):
     """
     Reduces a dimension of a tensor by product all elements in the dimension.
 
     Args:
-
+        input (Tensor): Input Tensor.
         axis (Union[None, int, tuple(int), list(int)]): Dimensions of reduction,
-            when axis is None or empty tuple, reduce all dimensions. Default:
+            when axis is None or empty tuple, reduce all dimensions. Default: None.
         keep_dims (bool): Whether to keep the reduced dimensions. Default: False.
 
     Returns:
@@ -124,41 +126,41 @@ def prod(x, axis=(), keep_dims=False):
         >>> print(output)
         6.0
     """
-    return F.prod(
+    return F.prod(input, axis, keep_dims)
 
 
-def addcdiv(
+def addcdiv(input, tensor1, tensor2, value=1):
     """
-    Performs the element-wise division of tensor
+    Performs the element-wise division of tensor tensor1 by tensor tensor2,
     multiply the result by the scalar value and add it to input_data.
 
     Args:
-
-
-
-        value (Tensor): The multiplier for
+        input (Tensor): The tensor to be added.
+        tensor1 (Tensor): The numerator tensor.
+        tensor2 (Tensor): The denominator tensor.
+        value (Union[Tensor, Number]): The multiplier for tensor1/tensor2. Default: 1.
 
     Returns:
-        Tensor, has the same shape and dtype as
+        Tensor, has the same shape and dtype as tensor1 / tensor2.
     """
-    return
+    return F.addcdiv(input, tensor1, tensor2, value)
 
 
-def addcmul(
+def addcmul(input, tensor1, tensor2, value=1):
     """
-    Performs the element-wise product of tensor
+    Performs the element-wise product of tensor tensor1 and tensor tensor2,
     multiply the result by the scalar value and add it to input_data.
 
     Args:
-
-
-
-        value (Tensor): The multiplier for
+        input (Tensor): The tensor to be added.
+        tensor1 (Tensor): The tensor to be multiplied.
+        tensor2 (Tensor): The tensor to be multiplied.
+        value (Union[Tensor, Number]): The multiplier for tensor1*tensor2. Default: 1.
 
     Returns:
-        Tensor, has the same shape and dtype as
+        Tensor, has the same shape and dtype as tensor1 * tensor2.
     """
-    return
+    return F.addcmul(input, tensor1, tensor2, value)
 
 
 def all_(x, axis=(), keep_dims=False):
@@ -205,12 +207,41 @@ def any_(x, axis=(), keep_dims=False):
     return reduce_any(x, axis)
 
 
-def atan2(
+def atan2(input, other):
     r"""
     Computes the first input tensor multiplied by the logarithm of second input tensor element-wise.
     Refer to :func:`mindspore.ops.atan2` for more details.
     """
-    return F.atan2(
+    return F.atan2(input, other)
+
+
+def bincount(x, weights=None, minlength=0):
+    r"""
+    For details, please refer to :func:`mindspore.ops.bincount`.
+    """
+    return F.bincount(x, weights, minlength)
+
+
+def H(x):
+    """Returns a view of a matrix (2-D tensor) conjugated and transposed."""
+    output = x.swapaxes(0, 1)
+    if x.dtype in (mstype.complex64, mstype.complex128):
+        return output.conj()
+    return output
+
+
+def histc(x, bins=100, min=0., max=0.):
+    """
+    For details, please refer to :func:`mindspore.ops.histc`.
+    """
+    return F.histc(x, bins, min, max)
+
+
+def geqrf(x):
+    """
+    For details, please refer to :func:`mindspore.ops.geqrf`.
+    """
+    return F.geqrf(x)
 
 
 def size_(x):
@@ -226,8 +257,6 @@ def size_(x):
     Returns:
         size(int).
     """
-    if not shape_(x):
-        return size_op_(x) + 1
     return size_op_(x)
 
 
@@ -278,6 +307,27 @@ def strides_(x):
     return strides
 
 
+def slogdet(x):
+    r"""
+    For details, please refer to :func:`mindspore.ops.slogdet`.
+    """
+    return F.slogdet(x)
+
+
+def chunk(x, chunks, axis=0):
+    r"""
+    For details, please refer to :func:`mindspore.ops.chunk`.
+    """
+    return F.chunk(x, chunks, axis)
+
+
+def tril(x, diagonal=0):
+    r"""
+    For details, please refer to :func:`mindspore.ops.tril`.
+    """
+    return F.tril(x, diagonal)
+
+
 def hasattr(x, attr): # pylint: disable=redefined-builtin
     """
     Return whether an object has the attribute.
@@ -337,6 +387,16 @@ def minimum(x, y):
     return F.minimum(x, y)
 
 
+def multinomial(input, num_samples, replacement=True, seed=None):
+    r"""
+    Returns a tensor sampled from the multinomial probability distribution located in the corresponding
+    row of the input tensor.
+
+    Refer to :func:`mindspore.ops.multinomial` for more detail.
+    """
+    return F.multinomial(input, num_samples, replacement, seed)
+
+
 def tile(x, multiples):
     r"""
     Replicates an input tensor with given multiples times.
@@ -484,7 +544,7 @@ def reshape(x, *shape):
         [ 3.6  0.4]
         [ 0.5 -3.2]]
     """
-    new_shape =
+    new_shape = check_reshape_shp(shape)
     return F.reshape(x, new_shape)
 
 
@@ -612,17 +672,21 @@ def ravel(x):
     return reshape(x, (-1,))
 
 
-def flatten(x, order='C'):
+def flatten(x, order='C', *, start_dim=0, end_dim=-1):
     r"""
-
+    Flatten a tensor along dimensions from `start_dim` to `end_dim`.
 
     Args:
-
-
-        (Fortran-style) order.
+        x (Tensor): Input tensor.
+        order (str, optional): Only 'C' and 'F' are supported. 'C' means to flatten in row-major (C-style) order.
+            'F' means to flatten in column-major (Fortran-style) order. Default: 'C'.
+
+    Keyword Args:
+        start_dim (int, optional): The first dimension to flatten. Default: 0.
+        end_dim (int, optional): The last dimension to flatten. Default: -1.
 
     Returns:
-        Tensor,
+        Tensor. If `x` is a 0-dimensional Tensor, a 1-dimensional Tensor will be returned.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -630,6 +694,9 @@ def flatten(x, order='C'):
     Raises:
         TypeError: If `order` is not string type.
         ValueError: If `order` is string type, but not 'C' or 'F'.
+        TypeError: If `start_dim` or `end_dim` is not int.
+        ValueError: If `start_dim` is greater than `end_dim` after canonicalization.
+        ValueError: If `start_dim` or `end_dim` is not in range of [-x.dim, x.dim-1].
 
     Examples:
         >>> import numpy as np
@@ -639,58 +706,28 @@ def flatten(x, order='C'):
         >>> print(output.shape)
         (24,)
     """
-    order =
-    if order == 'C':
-        return F.reshape(x, (-1,))
-
-    perm = F.make_range(0, F.rank(x))
-    new_order = F.tuple_reversed(perm)
-    return F.reshape(F.transpose(x, new_order), (-1,))
+    return F.flatten(x, order, start_dim=start_dim, end_dim=end_dim)
 
 
-def
+def scatter(self, axis, index, src):
     """
-
-
-
-        axis1 (int): First axis.
-        axis2 (int): Second axis.
-
-    Returns:
-        Transposed tensor, has the same data type as the input.
-
-    Raises:
-        TypeError: If `axis1` or `axis2` is not integer.
-        ValueError: If `axis1` or `axis2` is not in the range of :math:`[-ndim, ndim-1]`.
+    Update the value in `src` to tensor according to the specified index.
+    """
+    return F.scatter(self, axis, index, src)
 
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
 
-
-
-
-        >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
-        >>> output = x.swapaxes(0, 2)
-        >>> print(output.shape)
-        (4,3,2)
+def swapaxes(input, axis0, axis1):
+    """
+    Interchange two axes of a tensor.
     """
-
+    return F.swapaxes(input, axis0, axis1)
 
-    if axis1 == axis2:
-        return x
-    if axis1 > axis2:
-        axis1, axis2 = axis2, axis1
-
-    perm = F.make_range(0, x.ndim)
-    new_perm = None
-    if axis2 + 1 < x.ndim:
-        new_perm = perm[0:axis1] + perm[axis2:axis2 + 1] + \
-            perm[axis1 + 1:axis2] + perm[axis1:axis1 + 1] + perm[axis2 + 1:]
-    else:
-        new_perm = perm[0:axis1] + perm[axis2:axis2 + 1] + \
-            perm[axis1 + 1:axis2] + perm[axis1:axis1 + 1]
 
-
+def swapdims(x, dim0, dim1):
+    """
+    Interchange two dims of a tensor.
+    """
+    return F.swapdims(x, dim0, dim1)
 
 
 def squeeze(x, axis=None):
@@ -722,41 +759,13 @@ def squeeze(x, axis=None):
     if axis is None:
         return F.squeeze(x)
     # yield squeezed shape based on the axes
-    new_shape =
+    new_shape = prepare_shape_for_squeeze(shape, axis)
     return F.reshape(x, new_shape)
 
 
-def unbind(
-
-
-
-    Unstacks a tensor of rank `R` along axis dimension, and output tensors will have rank `(R-1)`.
-
-    Given a tensor of shape :math:`(x_1, x_2, ..., x_R)`. If :math:`0 \le axis`,
-    the shape of tensor in output is :math:`(x_1, x_2, ..., x_{axis}, x_{axis+2}, ..., x_R)`.
-
-    Args:
-        x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`.
-            A tensor to be unstacked and the rank of the tensor must be greater than 0.
-        dim (int): Dimension along which to unpack. Negative values wrap around. The range is [-R, R). Default: 0.
-
-    Returns:
-        A tuple of tensors, the shape of each objects is the same.
-
-    Raises:
-        ValueError: If axis is out of the range [-R, R).
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
-        >>> output = x.unbind()
-        >>> print(output)
-        (Tensor(shape=[3], dtype=Int64, value=[1, 2, 3]), Tensor(shape=[3], dtype=Int64, value=[4, 5, 6]),
-        Tensor(shape=[3], dtype=Int64, value=[7, 8, 9]))
-    """
-    return P.Unstack(axis=dim)(x)
+def unbind(input, dim=0):
+    """For details, please refer to :func:`mindspore.ops.unbind`."""
+    return P.Unstack(axis=dim)(input)
 
 
 def argmax(x, axis=None, keepdims=False):
@@ -834,7 +843,7 @@ def argmin(x, axis=None, keepdims=False):
         axis = 0
         is_axis_none = True
     else:
-        axis =
+        axis = check_axis_in_range(axis, F.rank(x))
     out = P.Argmin(axis)(x)
     if keepdims and not is_axis_none:
         out = expand_dims(out, axis)
@@ -851,7 +860,7 @@ def argmin_with_value(x, axis=0, keep_dims=False):
     return F.min(x, axis, keep_dims)
 
 
-def median(
+def median(input, global_median, axis=0, keep_dims=False):
     r"""
     Computes the median of input tensor.
 
@@ -859,9 +868,38 @@ def median(x, global_median, axis=0, keep_dims=False):
     When attr `global_median` is True, the second output Tensor value is meaningless.
 
     """
-
+    check_axis_in_range(axis, input.ndim)
     median_ = Median(global_median, axis, keep_dims)
-    return median_(
+    return median_(input)
+
+
+def msort(x):
+    """
+    For details, please refer to :func:`mindspore.ops.msort`.
+    """
+    return F.msort(x)
+
+
+def mm(mat1, mat2):
+    """
+    For details, please refer to :func:`mindspore.ops.mm`.
+    """
+    return F.mm(mat1, mat2)
+
+
+def mT(x):
+    """
+    Returns a view of this tensor with the last two dimensions transposed.
+    x.mT is equivalent to x.transpose(-2, -1).
+    """
+    return swapaxes(x, -2, -1)
+
+
+def nan_to_num(x, nan=0.0, posinf=None, neginf=None):
+    """
+    For details, please refer to :func:`mindspore.ops.nan_to_num`.
+    """
+    return F.nan_to_num(x, nan, posinf, neginf)
 
 
 def cumsum(x, axis=None, dtype=None):
@@ -904,7 +942,7 @@ def cumsum(x, axis=None, dtype=None):
     if axis is None:
         x = x.ravel()
         axis = 0
-
+    check_axis_in_range(axis, x.ndim)
     if dtype is not None:
         dtype = check_astype_dtype_const(dtype)
         if original_dtype != dtype:
@@ -926,12 +964,19 @@ def cummax(x, axis):
     return F.cummax(x, axis)
 
 
-def index_fill(x,
+def index_fill(x, axis, index, value):
     """
-    Fills the elements under the
+    Fills the elements under the axis dimension of the input Tensor with the input value
     by selecting the indices in the order given in index.
     """
-    return F.index_fill(x,
+    return F.index_fill(x, axis, index, value)
+
+
+def index_select(x, axis, index):
+    """
+    Returns a new tensor which indexes the `x` tensor along dimension `axis` using the entries in `index`.
+    """
+    return F.index_select(x, axis, index)
 
 
 def copy(x):
@@ -972,7 +1017,8 @@ def copy(x):
     return x
 
 
-def max(
+def max(input, axis=None, keepdims=False, *, initial=None,  # pylint: disable=redefined-builtin
+        where=True, return_indices=False):  # pylint: disable=redefined-outer-name
     """
     Returns the maximum of a tensor or maximum along an axis.
 
@@ -986,6 +1032,8 @@ def max(x, axis=None, keepdims=False, initial=None, where=True): # pylint: disa
         If this is set to True, the axes which are reduced are left in the
         result as dimensions with size one. With this option, the result will
         broadcast correctly against the input array.
+
+    Keyword Args:
         initial (scalar, optional):
             The minimum value of an output element. Must be present to allow
             computation on empty slice.
@@ -993,6 +1041,8 @@ def max(x, axis=None, keepdims=False, initial=None, where=True): # pylint: disa
             A boolean array which is broadcasted to match the dimensions of array,
             and selects elements to include in the reduction. If non-default value
             is passed, initial must also be provided.
+        return_indices (bool, optional): Whether to return the index of the maximum value. Default: False.
+            If `axis` is a list or tuple of ints, it must be False.
 
     Returns:
         Tensor or scalar, maximum of input tensor. If `axis` is None, the result is a scalar
@@ -1002,7 +1052,7 @@ def max(x, axis=None, keepdims=False, initial=None, where=True): # pylint: disa
         TypeError: if the input is not a tensor.
 
     Supported Platforms:
-
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> import numpy as np
@@ -1013,11 +1063,17 @@ def max(x, axis=None, keepdims=False, initial=None, where=True): # pylint: disa
         >>> print(output)
         3.0
     """
-
-
+    if isinstance(axis, (list, tuple)):
+        return compile_utils.reduce_(input, P.ReduceMax(keepdims), cmp_fn=F.maximum,
+                                     axis=axis, keepdims=keepdims, initial=initial, where=where)
+    values, indices = F.max(input, axis, keepdims, initial=initial, where=where)
+    if not return_indices:
+        return values
+    return values, indices
 
 
-def min(
+def min(input, axis=None, keepdims=False, *, initial=None,  # pylint: disable=redefined-builtin
+        where=True, return_indices=False):  # pylint: disable=redefined-outer-name
     """
     Returns the minimum of a tensor or minimum along an axis.
 
@@ -1031,6 +1087,8 @@ def min(x, axis=None, keepdims=False, initial=None, where=True): # pylint: disa
|
|
|
1031
1087
|
If this is set to True, the axes which are reduced are left in the
|
|
1032
1088
|
result as dimensions with size one. With this option, the result will
|
|
1033
1089
|
broadcast correctly against the input array.
|
|
1090
|
+
|
|
1091
|
+
Keyword Args:
|
|
1034
1092
|
initial (scalar, optional):
|
|
1035
1093
|
The maximum value of an output element. Must be present to allow
|
|
1036
1094
|
computation on empty slice.
|
|
@@ -1038,6 +1096,8 @@ def min(x, axis=None, keepdims=False, initial=None, where=True): # pylint: disa
|
|
|
1038
1096
|
A boolean array which is broadcasted to match the dimensions of array,
|
|
1039
1097
|
and selects elements to include in the reduction. If non-default value
|
|
1040
1098
|
is passed, initial must also be provided.
|
|
1099
|
+
return_indices (bool, optional): Whether to return the index of the minimum value. Default: False.
|
|
1100
|
+
If `axis` is a list or tuple of ints, it must be False.
|
|
1041
1101
|
|
|
1042
1102
|
Returns:
|
|
1043
1103
|
Tensor or scalar, minimum of input tensor. If `axis` is None, the result is a scalar
|
|
@@ -1058,8 +1118,13 @@ def min(x, axis=None, keepdims=False, initial=None, where=True): # pylint: disa
|
|
|
1058
1118
|
>>> print(output)
|
|
1059
1119
|
0.0
|
|
1060
1120
|
"""
|
|
1061
|
-
|
|
1062
|
-
|
|
1121
|
+
if isinstance(axis, (list, tuple)):
|
|
1122
|
+
return compile_utils.reduce_(input, P.ReduceMin(keepdims), cmp_fn=F.minimum,
|
|
1123
|
+
axis=axis, keepdims=keepdims, initial=initial, where=where)
|
|
1124
|
+
values, indices = F.min(input, axis, keepdims, initial=initial, where=where)
|
|
1125
|
+
if not return_indices:
|
|
1126
|
+
return values
|
|
1127
|
+
return values, indices
|
|
1063
1128
|
|
|
1064
1129
|
|
|
1065
1130
|
def pow(x, y): # pylint: disable=redefined-builtin
|
|
@@ -1076,40 +1141,40 @@ def log(x):
|
|
|
1076
1141
|
return F.log(x)
|
|
1077
1142
|
|
|
1078
1143
|
|
|
1079
|
-
def log10(
|
|
1144
|
+
def log10(input):
|
|
1080
1145
|
"""
|
|
1081
1146
|
Calculate the base-10 logarithm of Tensor.
|
|
1082
1147
|
"""
|
|
1083
|
-
return F.log10(
|
|
1148
|
+
return F.log10(input)
|
|
1084
1149
|
|
|
1085
1150
|
|
|
1086
|
-
def log2(
|
|
1151
|
+
def log2(input):
|
|
1087
1152
|
"""
|
|
1088
1153
|
Calculate the base-2 logarithm of Tensor.
|
|
1089
1154
|
"""
|
|
1090
|
-
return F.log2(
|
|
1155
|
+
return F.log2(input)
|
|
1091
1156
|
|
|
1092
1157
|
|
|
1093
|
-
def logaddexp(
|
|
1158
|
+
def logaddexp(input, other):
|
|
1094
1159
|
"""
|
|
1095
1160
|
Computes the logarithm of the sum of exponentiations of the inputs.
|
|
1096
1161
|
"""
|
|
1097
|
-
return F.logaddexp(
|
|
1162
|
+
return F.logaddexp(input, other)
|
|
1098
1163
|
|
|
1099
1164
|
|
|
1100
|
-
def logaddexp2(
|
|
1165
|
+
def logaddexp2(input, other):
|
|
1101
1166
|
"""
|
|
1102
1167
|
Computes the logarithm of the sum of exponentiations in base of 2 of the inputs.
|
|
1103
1168
|
"""
|
|
1104
|
-
return F.logaddexp2(
|
|
1169
|
+
return F.logaddexp2(input, other)
|
|
1105
1170
|
|
|
1106
1171
|
|
|
1107
|
-
def logsumexp(
|
|
1172
|
+
def logsumexp(input, axis, keepdims=False):
|
|
1108
1173
|
"""
|
|
1109
1174
|
Reduces a dimension of a tensor by calculating exponential for all elements in the dimension,
|
|
1110
1175
|
then calculate logarithm of the sum.
|
|
1111
1176
|
"""
|
|
1112
|
-
return F.logsumexp(
|
|
1177
|
+
return F.logsumexp(input, axis, keepdims)
|
|
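`logsumexp` exists because the naive `log(sum(exp(x)))` overflows for large inputs; a sketch of the max-shifted identity it is based on (plain NumPy, used only to illustrate the math):
>>> import numpy as np
>>> x = np.array([1000., 1000.])
>>> np.log(np.sum(np.exp(x)))  # naive form overflows to inf
inf
>>> m = x.max()
>>> m + np.log(np.sum(np.exp(x - m)))  # stable form: 1000 + log(2)
1000.6931471805599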
1113
1178
|
|
|
1114
1179
|
|
|
1115
1180
|
def round_(x):
|
|
@@ -1148,6 +1213,13 @@ def deg2rad(x):
|
|
|
1148
1213
|
return F.deg2rad(x)
|
|
1149
1214
|
|
|
1150
1215
|
|
|
1216
|
+
def dot(input, other):
|
|
1217
|
+
r"""
|
|
1218
|
+
For details, please refer to :func:`mindspore.ops.dot`.
|
|
1219
|
+
"""
|
|
1220
|
+
return composite.dot(input, other)
|
|
1221
|
+
|
|
1222
|
+
|
|
1151
1223
|
def copysign(x, other):
|
|
1152
1224
|
"""
|
|
1153
1225
|
Create a new floating-point tensor with the magnitude of `x` and the sign of `other`, element-wise.
|
|
@@ -1155,43 +1227,41 @@ def copysign(x, other):
|
|
|
1155
1227
|
return F.copysign(x, other)
|
|
1156
1228
|
|
|
1157
1229
|
|
|
1158
|
-
def numel(
|
|
1230
|
+
def numel(input):
|
|
1159
1231
|
"""
|
|
1160
1232
|
Returns a Scalar of type int that represents the total number of elements in the Tensor.
|
|
1161
1233
|
"""
|
|
1162
|
-
return F.numel(
|
|
1234
|
+
return F.numel(input)
|
|
1163
1235
|
|
|
1164
1236
|
|
|
1165
|
-
def permute(
|
|
1237
|
+
def permute(input, *axis):
|
|
1166
1238
|
"""
|
|
1167
1239
|
Permutes the dimensions of the input tensor according to input permutation.
|
|
1168
1240
|
"""
|
|
1169
|
-
|
|
1170
|
-
|
|
1171
|
-
|
|
1172
|
-
return F.permute(x, *dims)
|
|
1173
|
-
return F.permute(x, dims)
|
|
1241
|
+
ndim = F.rank(input)
|
|
1242
|
+
perm = check_transpose_axis_const(axis, ndim)
|
|
1243
|
+
return F.permute(input, perm)
|
|
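Since the rewritten `permute` canonicalizes `*axis` with `check_transpose_axis_const`, both calling styles below should be accepted (sketch, shapes illustrative):
>>> import numpy as np
>>> import mindspore as ms
>>> x = ms.Tensor(np.zeros((2, 3, 4), np.float32))
>>> print(x.permute(2, 0, 1).shape)
(4, 2, 3)
>>> print(x.permute((2, 0, 1)).shape)  # a single tuple is unpacked the same way
(4, 2, 3)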
1174
1244
|
|
|
1175
1245
|
|
|
1176
|
-
def positive(
|
|
1246
|
+
def positive(input):
|
|
1177
1247
|
"""
|
|
1178
1248
|
Return self Tensor.
|
|
1179
1249
|
"""
|
|
1180
|
-
return F.positive(
|
|
1250
|
+
return F.positive(input)
|
|
1181
1251
|
|
|
1182
1252
|
|
|
1183
|
-
def remainder(
|
|
1253
|
+
def remainder(input, divisor):
|
|
1184
1254
|
"""
|
|
1185
1255
|
Returns element-wise remainder of division.
|
|
1186
1256
|
"""
|
|
1187
|
-
return F.remainder(
|
|
1257
|
+
return F.remainder(input, divisor)
|
|
1188
1258
|
|
|
1189
1259
|
|
|
1190
|
-
def unique_consecutive(
|
|
1260
|
+
def unique_consecutive(input, return_idx=False, return_counts=False, axis=None):
|
|
1191
1261
|
"""
|
|
1192
1262
|
Returns the elements that are unique in each consecutive group of equivalent elements in the input tensor.
|
|
1193
1263
|
"""
|
|
1194
|
-
return F.unique_consecutive(
|
|
1264
|
+
return F.unique_consecutive(input, return_idx, return_counts, axis)
|
|
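A sketch of the `unique_consecutive` semantics above: only adjacent duplicates collapse, unlike `unique` (illustrative, assuming the flag order of the signature shown):
>>> import numpy as np
>>> import mindspore as ms
>>> x = ms.Tensor(np.array([1, 1, 2, 2, 3, 1, 1, 2]), ms.int32)
>>> out, idx, counts = x.unique_consecutive(return_idx=True, return_counts=True, axis=None)
>>> print(out)
[1 2 3 1 2]
>>> print(counts)
[2 2 1 2 1]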
1195
1265
|
|
|
1196
1266
|
|
|
1197
1267
|
def unique_with_pad(x, pad_num):
|
|
@@ -1244,9 +1314,11 @@ def resize(x, *new_shape):
|
|
|
1244
1314
|
return res.reshape(new_shape)
|
|
1245
1315
|
|
|
1246
1316
|
|
|
1247
|
-
def det(
|
|
1248
|
-
"""
|
|
1249
|
-
|
|
1317
|
+
def det(input):
|
|
1318
|
+
"""
|
|
1319
|
+
Computes the determinant of one or more square matrices.
|
|
1320
|
+
"""
|
|
1321
|
+
return F.det(input)
|
|
1250
1322
|
|
|
1251
1323
|
|
|
1252
1324
|
def diagonal(x, offset=0, axis1=0, axis2=1):
|
|
@@ -1284,7 +1356,8 @@ def diagonal(x, offset=0, axis1=0, axis2=1):
|
|
|
1284
1356
|
"""
|
|
1285
1357
|
ndim = x.ndim
|
|
1286
1358
|
if ndim < 2:
|
|
1287
|
-
const_utils.raise_value_error(
|
|
1359
|
+
const_utils.raise_value_error(
|
|
1360
|
+
'diagonal requires an array of at least two dimensions')
|
|
1288
1361
|
dtype = x.dtype
|
|
1289
1362
|
|
|
1290
1363
|
axes = check_axis_valid((axis1, axis2), ndim)
|
|
@@ -1311,7 +1384,7 @@ def diagonal(x, offset=0, axis1=0, axis2=1):
|
|
|
1311
1384
|
e_upper = F.fill(dtype, (-offset, m), 0)
|
|
1312
1385
|
e_lower = e[0:n + offset:1, ...]
|
|
1313
1386
|
e = P.Concat(0)((e_upper, e_lower)).astype(dtype)
|
|
1314
|
-
e =
|
|
1387
|
+
e = F.broadcast_to(e, shape)
|
|
1315
1388
|
|
|
1316
1389
|
prod_val = F.tensor_mul(x, e)
|
|
1317
1390
|
res = F.reduce_sum(prod_val.astype(mstype.float32), -1)
|
|
@@ -1331,6 +1404,20 @@ def diagonal(x, offset=0, axis1=0, axis2=1):
|
|
|
1331
1404
|
return res.astype(dtype)
|
|
1332
1405
|
|
|
1333
1406
|
|
|
1407
|
+
def digamma(input):
|
|
1408
|
+
"""
|
|
1409
|
+
Computes the logarithmic derivative of the gamma function on input.
|
|
1410
|
+
"""
|
|
1411
|
+
return F.digamma(input)
|
|
1412
|
+
|
|
1413
|
+
|
|
1414
|
+
def lgamma(input):
|
|
1415
|
+
"""
|
|
1416
|
+
Computes the natural logarithm of the absolute value of the gamma function on input.
|
|
1417
|
+
"""
|
|
1418
|
+
return F.lgamma(input)
|
|
1419
|
+
|
|
1420
|
+
|
|
1334
1421
|
def i0(x):
|
|
1335
1422
|
"""
|
|
1336
1423
|
For details, please refer to :func:`mindspore.ops.i0`.
|
|
@@ -1345,6 +1432,27 @@ def isclose(x1, x2, rtol=1e-05, atol=1e-08, equal_nan=False):
|
|
|
1345
1432
|
return F.isclose(x1, x2, rtol, atol, equal_nan)
|
|
1346
1433
|
|
|
1347
1434
|
|
|
1435
|
+
def isneginf(input):
|
|
1436
|
+
"""
|
|
1437
|
+
Tests element-wise for negative infinity, returns result as bool array.
|
|
1438
|
+
"""
|
|
1439
|
+
return F.isneginf(input)
|
|
1440
|
+
|
|
1441
|
+
|
|
1442
|
+
def isposinf(input):
|
|
1443
|
+
"""
|
|
1444
|
+
Tests element-wise for positive infinity, returns result as bool array.
|
|
1445
|
+
"""
|
|
1446
|
+
return F.isposinf(input)
|
|
1447
|
+
|
|
1448
|
+
|
|
1449
|
+
def isreal(input):
|
|
1450
|
+
"""
|
|
1451
|
+
Tests element-wise for real number.
|
|
1452
|
+
"""
|
|
1453
|
+
return F.isreal(input)
|
|
1454
|
+
|
|
1455
|
+
|
|
1348
1456
|
def flip(x, dims):
|
|
1349
1457
|
"""
|
|
1350
1458
|
For details, please refer to :func:`mindspore.ops.flip`.
|
|
@@ -1366,6 +1474,20 @@ def flipud(x):
|
|
|
1366
1474
|
return F.flipud(x)
|
|
1367
1475
|
|
|
1368
1476
|
|
|
1477
|
+
def float_power(x, exponent):
|
|
1478
|
+
"""
|
|
1479
|
+
For details, please refer to :func:`mindspore.ops.float_power`.
|
|
1480
|
+
"""
|
|
1481
|
+
return F.float_power(x, exponent)
|
|
1482
|
+
|
|
1483
|
+
|
|
1484
|
+
def fmod(x, other):
|
|
1485
|
+
"""
|
|
1486
|
+
For details, please refer to :func:`mindspore.ops.fmod`.
|
|
1487
|
+
"""
|
|
1488
|
+
return F.fmod(x, other)
|
|
1489
|
+
|
|
1490
|
+
|
|
1369
1491
|
def is_floating_point(x):
|
|
1370
1492
|
"""
|
|
1371
1493
|
For details, please refer to :func:`mindspore.ops.is_floating_point`.
|
|
@@ -1380,6 +1502,13 @@ def is_signed(x):
|
|
|
1380
1502
|
return x.dtype in mstype.signed_type
|
|
1381
1503
|
|
|
1382
1504
|
|
|
1505
|
+
def is_complex(x):
|
|
1506
|
+
"""
|
|
1507
|
+
For details, please refer to :func:`mindspore.ops.is_complex`.
|
|
1508
|
+
"""
|
|
1509
|
+
return F.is_complex(x)
|
|
1510
|
+
|
|
1511
|
+
|
|
1383
1512
|
def inv(x):
|
|
1384
1513
|
"""
|
|
1385
1514
|
Computes Reciprocal of input tensor element-wise.
|
|
@@ -1387,6 +1516,13 @@ def inv(x):
|
|
|
1387
1516
|
return F.inv(x)
|
|
1388
1517
|
|
|
1389
1518
|
|
|
1519
|
+
def inverse(input):
|
|
1520
|
+
"""
|
|
1521
|
+
Computes the inverse of a square matrix.
|
|
1522
|
+
"""
|
|
1523
|
+
return F.inverse(input)
|
|
1524
|
+
|
|
1525
|
+
|
|
1390
1526
|
def invert(x):
|
|
1391
1527
|
"""
|
|
1392
1528
|
Flips all bits of input tensor element-wise.
|
|
@@ -1425,6 +1561,8 @@ def trace(x, offset=0, axis1=0, axis2=1, dtype=None):
|
|
|
1425
1561
|
>>> print(x.trace())
|
|
1426
1562
|
3.0
|
|
1427
1563
|
"""
|
|
1564
|
+
if offset == 0 and axis1 == 0 and axis2 == 1 and dtype is None:
|
|
1565
|
+
return F.trace(x)
|
|
1428
1566
|
d = x.diagonal(offset, axis1=axis1, axis2=axis2)
|
|
1429
1567
|
shape = d.shape
|
|
1430
1568
|
if dtype is None:
|
|
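The new early return routes the default case (`offset=0`, `axis1=0`, `axis2=1`, no dtype override) to the fused `F.trace` kernel; other argument combinations keep the diagonal-then-reduce fallback below. Both paths agree on the result (sketch, values illustrative):
>>> import numpy as np
>>> import mindspore as ms
>>> x = ms.Tensor(np.eye(3, dtype=np.float32))
>>> print(x.trace())           # fast path via F.trace
3.0
>>> print(x.trace(offset=1))   # fallback path via diagonal()
0.0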
@@ -1473,14 +1611,15 @@ def take(x, indices, axis=None, mode='clip'):
|
|
|
1473
1611
|
[4 3 6]
|
|
1474
1612
|
"""
|
|
1475
1613
|
if mode not in ('raise', 'wrap', 'clip'):
|
|
1476
|
-
const_utils.raise_value_error(
|
|
1614
|
+
const_utils.raise_value_error(
|
|
1615
|
+
'raise should be one of "raise", "wrap", or "clip"')
|
|
1477
1616
|
if axis is None:
|
|
1478
1617
|
a = x.ravel()
|
|
1479
1618
|
axis = 0
|
|
1480
1619
|
else:
|
|
1481
1620
|
a = x
|
|
1482
1621
|
ndim = a.ndim
|
|
1483
|
-
axis =
|
|
1622
|
+
axis = check_axis_in_range(axis, ndim)
|
|
1484
1623
|
|
|
1485
1624
|
shape_a = a.shape
|
|
1486
1625
|
shape_indices = indices.shape
|
|
@@ -1494,12 +1633,26 @@ def take(x, indices, axis=None, mode='clip'):
|
|
|
1494
1633
|
shape_indices = expanded_shape(ndim, size_indices, axis)
|
|
1495
1634
|
indices = indices.reshape(shape_indices)
|
|
1496
1635
|
shape_indices = shape_ni + (indices.size,) + shape_nk
|
|
1497
|
-
indices =
|
|
1636
|
+
indices = F.broadcast_to(indices, shape_indices)
|
|
1498
1637
|
|
|
1499
1638
|
res = F.gather_d(a, axis, indices)
|
|
1500
1639
|
return res.reshape(shape_out)
|
|
1501
1640
|
|
|
1502
1641
|
|
|
1642
|
+
def _infer_out_shape(*shapes):
|
|
1643
|
+
"""
|
|
1644
|
+
Returns shape of output after broadcasting. Raises ValueError if shapes cannot be broadcast.
|
|
1645
|
+
"""
|
|
1646
|
+
shape_out = list()
|
|
1647
|
+
max_len = ms_max([len(it) for it in shapes])
|
|
1648
|
+
for i in range(max_len):
|
|
1649
|
+
items = [it[i-(max_len-len(it))] if i - (max_len - len(it))
|
|
1650
|
+
>= 0 else 1 for it in shapes]
|
|
1651
|
+
max_size = 0 if 0 in items else ms_max(items)
|
|
1652
|
+
shape_out.append(max_size)
|
|
1653
|
+
return tuple(shape_out)
|
|
1654
|
+
|
|
1655
|
+
|
|
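`_infer_out_shape` mirrors NumPy's right-aligned broadcasting rule: shapes are compared from the trailing dimension, missing dimensions count as 1, and a zero-sized dimension wins outright. A quick check of the expected answer (NumPy used only to illustrate the rule):
>>> import numpy as np
>>> np.broadcast_shapes((2, 1, 3), (4, 3))
(2, 4, 3)
Note that, as written above, the helper takes elementwise maxima and never raises itself; incompatible sizes would only surface in the downstream broadcasting ops.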
1503
1656
|
def choose(x, choices, mode='clip'):
|
|
1504
1657
|
"""
|
|
1505
1658
|
Construct an array from an index array and a list of arrays to choose from.
|
|
@@ -1537,8 +1690,8 @@ def choose(x, choices, mode='clip'):
|
|
|
1537
1690
|
[20 31 12 3]
|
|
1538
1691
|
"""
|
|
1539
1692
|
if check_is_tensor(F.typeof(choices)):
|
|
1540
|
-
shape_choice =
|
|
1541
|
-
choices =
|
|
1693
|
+
shape_choice = _infer_out_shape(x.shape, choices.shape[1:])
|
|
1694
|
+
choices = F.broadcast_to(choices, (choices.shape[0],) + shape_choice)
|
|
1542
1695
|
else:
|
|
1543
1696
|
# broadcasts choices to the same shape if choices is a sequence
|
|
1544
1697
|
choicelist = []
|
|
@@ -1548,27 +1701,29 @@ def choose(x, choices, mode='clip'):
|
|
|
1548
1701
|
choice = const_utils.make_tensor(choice)
|
|
1549
1702
|
shapes += (choice.shape,)
|
|
1550
1703
|
choicelist.append(choice)
|
|
1551
|
-
shape_choice =
|
|
1704
|
+
shape_choice = _infer_out_shape(x.shape, *shapes)
|
|
1552
1705
|
tmp = []
|
|
1553
1706
|
for choice in choicelist:
|
|
1554
|
-
tmp.append(
|
|
1707
|
+
tmp.append(F.broadcast_to(choice, shape_choice))
|
|
1555
1708
|
choices = F.stack(tmp)
|
|
1556
1709
|
|
|
1557
1710
|
if x.ndim == 0 or choices.ndim == 0:
|
|
1558
1711
|
const_utils.raise_value_error('input cannot be scalars')
|
|
1559
|
-
a =
|
|
1712
|
+
a = F.broadcast_to(x, shape_choice)
|
|
1560
1713
|
dtype = choices.dtype
|
|
1561
1714
|
# adjusts dtype for F.tensor_mul and F.gather_nd
|
|
1562
1715
|
a = a.astype(mstype.int32)
|
|
1563
1716
|
choices = choices.astype(mstype.int32)
|
|
1564
|
-
a = compile_utils.check_indices(
|
|
1717
|
+
a = compile_utils.check_indices(
|
|
1718
|
+
choices.shape[0], a, mode, allow_negative_index=False)
|
|
1565
1719
|
|
|
1566
1720
|
grids = []
|
|
1567
1721
|
ndim = len(a.shape)
|
|
1568
1722
|
for i in range(ndim):
|
|
1569
|
-
dim_grid = const_utils.make_tensor(
|
|
1723
|
+
dim_grid = const_utils.make_tensor(
|
|
1724
|
+
F.make_range(a.shape[i]), mstype.int32)
|
|
1570
1725
|
dim_shape = expanded_shape(ndim, a.shape[i], i)
|
|
1571
|
-
dim_grid =
|
|
1726
|
+
dim_grid = F.broadcast_to(dim_grid.reshape(dim_shape), a.shape)
|
|
1572
1727
|
grids.append(dim_grid)
|
|
1573
1728
|
grid = P.Stack(-1)(grids)
|
|
1574
1729
|
indices = P.Concat(-1)((a.reshape(a.shape + (1,)), grid))
|
|
@@ -1603,6 +1758,12 @@ def searchsorted(x, v, side='left', sorter=None):
|
|
|
1603
1758
|
>>> print(x.searchsorted(3))
|
|
1604
1759
|
2
|
|
1605
1760
|
"""
|
|
1761
|
+
def get_log2_size(size):
|
|
1762
|
+
"""Get log2 size"""
|
|
1763
|
+
log2_res = F.log2(F.cast(Tensor(size), mstype.float32))
|
|
1764
|
+
ceil_res = F.ceil(log2_res)
|
|
1765
|
+
cast_res = F.cast(ceil_res, mstype.int64)
|
|
1766
|
+
return cast_res
|
|
1606
1767
|
if side not in ('left', 'right'):
|
|
1607
1768
|
const_utils.raise_value_error('invalid value for keyword "side"')
|
|
1608
1769
|
a = x.astype(mstype.float32)
|
|
@@ -1611,7 +1772,8 @@ def searchsorted(x, v, side='left', sorter=None):
|
|
|
1611
1772
|
shape = v.shape
|
|
1612
1773
|
if sorter is not None:
|
|
1613
1774
|
if sorter.ndim != 1 or sorter.size != a.size:
|
|
1614
|
-
const_utils.raise_value_error(
|
|
1775
|
+
const_utils.raise_value_error(
|
|
1776
|
+
'sorter must be 1-D array with the same size as `a`')
|
|
1615
1777
|
sorter = const_utils.make_tensor(sorter)
|
|
1616
1778
|
sorter = sorter.reshape(sorter.shape + (1,))
|
|
1617
1779
|
a = F.gather_nd(a, sorter)
|
|
@@ -1619,43 +1781,20 @@ def searchsorted(x, v, side='left', sorter=None):
|
|
|
1619
1781
|
i = F.fill(mstype.int32, shape, 0)
|
|
1620
1782
|
j = F.fill(mstype.int32, shape, a.size)
|
|
1621
1783
|
|
|
1622
|
-
|
|
1623
|
-
|
|
1784
|
+
loop_num = get_log2_size(F.shape_mul(a.shape) + 1)
|
|
1785
|
+
index = Tensor([0])
|
|
1786
|
+
while index < loop_num:
|
|
1624
1787
|
mid = (i - F.neg_tensor(j)) // 2
|
|
1625
1788
|
mask = less_op(v, F.gather_nd(a, mid.reshape(mid.shape + (1,))))
|
|
1626
1789
|
i = F.select(mask, i, mid)
|
|
1627
1790
|
j = F.select(mask, mid, j)
|
|
1791
|
+
index += 1
|
|
1628
1792
|
return j
|
|
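The rewritten loop is a fixed-trip-count binary search over the `[i, j)` index interval: `get_log2_size` bounds the trips at ceil(log2(n + 1)), which is exactly enough halvings for the interval to converge. A sketch of the bound (plain Python):
>>> import math
>>> n = 1000                      # elements along the sorted axis
>>> math.ceil(math.log2(n + 1))   # loop trips needed
10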
1629
1793
|
|
|
1630
1794
|
|
|
1631
1795
|
def fill(x, value):
|
|
1632
1796
|
"""
|
|
1633
|
-
|
|
1634
|
-
|
|
1635
|
-
Note:
|
|
1636
|
-
Unlike Numpy, tensor.fill() will always returns a new tensor, instead of
|
|
1637
|
-
filling the original tensor.
|
|
1638
|
-
|
|
1639
|
-
Args:
|
|
1640
|
-
value (Union[None, int, float, bool]): All elements of a will be assigned this value.
|
|
1641
|
-
|
|
1642
|
-
Returns:
|
|
1643
|
-
Tensor, with the original dtype and shape as input tensor.
|
|
1644
|
-
|
|
1645
|
-
Raises:
|
|
1646
|
-
TypeError: If input arguments have types not specified above.
|
|
1647
|
-
ValueError: If `shape` has entries < 0.
|
|
1648
|
-
|
|
1649
|
-
Supported Platforms:
|
|
1650
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
1651
|
-
|
|
1652
|
-
Examples:
|
|
1653
|
-
>>> import numpy as np
|
|
1654
|
-
>>> from mindspore import Tensor
|
|
1655
|
-
>>> a = Tensor(np.arange(4).reshape((2,2)).astype('float32'))
|
|
1656
|
-
>>> print(a.fill(1.0))
|
|
1657
|
-
[[1. 1.]
|
|
1658
|
-
[1. 1.]]
|
|
1797
|
+
`Tensor.fill` is deprecated, please use `ops.fill` instead.
|
|
1659
1798
|
"""
|
|
1660
1799
|
if value is None:
|
|
1661
1800
|
if x.dtype not in (mstype.float16, mstype.float32, mstype.float64):
|
|
@@ -1669,7 +1808,7 @@ def fill(x, value):
|
|
|
1669
1808
|
|
|
1670
1809
|
def fills(x, value):
|
|
1671
1810
|
"""
|
|
1672
|
-
|
|
1811
|
+
`Tensor.fills` is deprecated, please use `ops.fill` instead.
|
|
1673
1812
|
"""
|
|
1674
1813
|
return F.fills(x, value)
|
|
1675
1814
|
|
|
@@ -1709,70 +1848,24 @@ def ptp(x, axis=None, keepdims=False):
|
|
|
1709
1848
|
if axis is None:
|
|
1710
1849
|
axis = ()
|
|
1711
1850
|
else:
|
|
1712
|
-
check_axis_type(axis, True, True, False)
|
|
1851
|
+
validator.check_axis_type(axis, True, True, False)
|
|
1713
1852
|
axis = check_axis_valid(axis, x.ndim)
|
|
1714
1853
|
|
|
1715
1854
|
return x.max(axis, keepdims) - x.min(axis, keepdims)
|
|
1716
1855
|
|
|
1717
1856
|
|
|
1718
|
-
def
|
|
1857
|
+
def clamp(x, min=None, max=None):
|
|
1719
1858
|
"""
|
|
1720
|
-
|
|
1721
|
-
|
|
1722
|
-
|
|
1723
|
-
For example, if an interval of :math:`[0, 1]` is specified, values smaller than 0 become 0,
|
|
1724
|
-
and values larger than 1 become 1.
|
|
1725
|
-
|
|
1726
|
-
Note:
|
|
1727
|
-
Currently, clip with `nan` is not supported.
|
|
1728
|
-
|
|
1729
|
-
Args:
|
|
1730
|
-
x (Tensor): Tensor containing elements to clip.
|
|
1731
|
-
xmin (Tensor, scalar, None): Minimum value. If None, clipping is not performed
|
|
1732
|
-
on lower interval edge. Not more than one of `xmin` and `xmax` may be None.
|
|
1733
|
-
xmax (Tensor, scalar, None): Maximum value. If None, clipping is not performed
|
|
1734
|
-
on upper interval edge. Not more than one of `xmin` and `xmax` may be None.
|
|
1735
|
-
If `xmin` or `xmax` are tensors, then the three tensors will be broadcasted
|
|
1736
|
-
to match their shapes.
|
|
1737
|
-
dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
|
|
1738
|
-
output Tensor.
|
|
1739
|
-
|
|
1740
|
-
Returns:
|
|
1741
|
-
Tensor, a tensor with the elements of `x`, but where values
|
|
1742
|
-
< `xmin` are replaced with `xmin`, and those > `xmax` with `xmax`.
|
|
1859
|
+
Clamps all elements in `x` into the range `[min, max]`.
|
|
1860
|
+
"""
|
|
1861
|
+
return F.clamp(x, min, max)
|
|
1743
1862
|
|
|
1744
|
-
Supported Platforms:
|
|
1745
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
1746
1863
|
|
|
1747
|
-
|
|
1748
|
-
|
|
1749
|
-
|
|
1750
|
-
|
|
1751
|
-
|
|
1752
|
-
[1 2 2 0 0 2 2 0]
|
|
1753
|
-
"""
|
|
1754
|
-
if xmin is None and xmax is None:
|
|
1755
|
-
const_utils.raise_value_error("One of max or min must be given.")
|
|
1756
|
-
is_scalar = False
|
|
1757
|
-
if xmin is not None:
|
|
1758
|
-
xmin = const_utils.make_tensor(xmin, x.dtype)
|
|
1759
|
-
if x.ndim == 0 and xmin.ndim == 0:
|
|
1760
|
-
x = F.maximum(x.reshape((1,)), xmin).squeeze()
|
|
1761
|
-
else:
|
|
1762
|
-
x = F.maximum(x, xmin)
|
|
1763
|
-
if xmax is not None:
|
|
1764
|
-
xmax = const_utils.make_tensor(xmax, x.dtype)
|
|
1765
|
-
if x.ndim == 0 and xmax.ndim == 0:
|
|
1766
|
-
x = F.minimum(x.reshape((1,)), xmax).squeeze()
|
|
1767
|
-
else:
|
|
1768
|
-
x = F.minimum(x, xmax)
|
|
1769
|
-
if is_scalar:
|
|
1770
|
-
return x.squeeze()
|
|
1771
|
-
if dtype is not None:
|
|
1772
|
-
dtype = check_astype_dtype_const(dtype)
|
|
1773
|
-
if dtype != x.dtype:
|
|
1774
|
-
return x.astype(dtype)
|
|
1775
|
-
return x
|
|
1864
|
+
def clip(x, min=None, max=None):
|
|
1865
|
+
"""
|
|
1866
|
+
Clamps all elements in `x` into the range `[min, max]`.
|
|
1867
|
+
"""
|
|
1868
|
+
return F.clamp(x, min, max)
|
|
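With the old NumPy-style body replaced by a direct `F.clamp` call, `clamp` and `clip` are now thin aliases of each other; usage sketch (values illustrative):
>>> import numpy as np
>>> import mindspore as ms
>>> x = ms.Tensor(np.array([-1.0, 0.5, 2.0], np.float32))
>>> print(x.clip(0.0, 1.0))
[0.  0.5 1. ]
>>> print(x.clamp(min=0.0))  # either bound may be omitted
[0.  0.5 2. ]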
1776
1869
|
|
|
1777
1870
|
|
|
1778
1871
|
def var(x, axis=None, ddof=0, keepdims=False):
|
|
@@ -1870,16 +1963,16 @@ def std(x, axis=None, ddof=0, keepdims=False):
|
|
|
1870
1963
|
return F.tensor_pow(x_var, 0.5)
|
|
1871
1964
|
|
|
1872
1965
|
|
|
1873
|
-
def gather_elements(
|
|
1966
|
+
def gather_elements(input, dim, index):
|
|
1874
1967
|
r"""
|
|
1875
1968
|
Gathers elements along an axis specified by dim.
|
|
1876
1969
|
|
|
1877
1970
|
Refer to :func:`mindspore.ops.gather_elements` for more detail.
|
|
1878
1971
|
"""
|
|
1879
|
-
return F.gather_elements(
|
|
1972
|
+
return F.gather_elements(input, dim, index)
|
|
1880
1973
|
|
|
1881
1974
|
|
|
1882
|
-
def sum(
|
|
1975
|
+
def sum(input, axis=None, dtype=None, keepdims=False, initial=None): # pylint: disable=redefined-builtin
|
|
1883
1976
|
"""
|
|
1884
1977
|
Return sum of array elements over a given axis.
|
|
1885
1978
|
|
|
@@ -1888,7 +1981,7 @@ def sum(x, axis=None, dtype=None, keepdims=False, initial=None): # pylint: disa
|
|
|
1888
1981
|
`extobj` are not supported.
|
|
1889
1982
|
|
|
1890
1983
|
Args:
|
|
1891
|
-
|
|
1984
|
+
input (Union[int, float, bool, list, tuple, Tensor]): Elements to sum.
|
|
1892
1985
|
axis (Union[None, int, tuple(int)]): Axis or axes along which a sum is performed. Default: None.
|
|
1893
1986
|
If None, sum all of the elements of the input array.
|
|
1894
1987
|
If axis is negative it counts from the last to the first axis.
|
|
@@ -1924,7 +2017,7 @@ def sum(x, axis=None, dtype=None, keepdims=False, initial=None): # pylint: disa
|
|
|
1924
2017
|
>>> print(input_x.sum(axis=1))
|
|
1925
2018
|
[10. 35.]
|
|
1926
2019
|
"""
|
|
1927
|
-
input_x =
|
|
2020
|
+
input_x = input.astype(mstype.int32) if input.dtype == mstype.bool_ else input
|
|
1928
2021
|
dtype = input_x.dtype if dtype is None else dtype
|
|
1929
2022
|
dtype = check_astype_dtype_const(dtype)
|
|
1930
2023
|
if not isinstance(keepdims, int):
|
|
@@ -1934,12 +2027,10 @@ def sum(x, axis=None, dtype=None, keepdims=False, initial=None): # pylint: disa
|
|
|
1934
2027
|
if axis is None:
|
|
1935
2028
|
axis = ()
|
|
1936
2029
|
else:
|
|
1937
|
-
axis = check_and_canonicalize_axes(axis,
|
|
2030
|
+
axis = check_and_canonicalize_axes(axis, input.ndim)
|
|
1938
2031
|
|
|
1939
2032
|
if not check_type_support(input_x.dtype, 'GPU', (mstype.float64, mstype.float32, mstype.float16)):
|
|
1940
2033
|
input_x = input_x.astype(mstype.float32)
|
|
1941
|
-
if 0 in x.shape:
|
|
1942
|
-
x = const_utils.make_tensor([0], x.dtype)
|
|
1943
2034
|
if keepdims:
|
|
1944
2035
|
res = _reduce_sum_keepdims(input_x, axis)
|
|
1945
2036
|
else:
|
|
@@ -1949,6 +2040,36 @@ def sum(x, axis=None, dtype=None, keepdims=False, initial=None): # pylint: disa
|
|
|
1949
2040
|
return res.astype(dtype)
|
|
1950
2041
|
|
|
1951
2042
|
|
|
2043
|
+
def sum_to_size(input, *size):
|
|
2044
|
+
"""
|
|
2045
|
+
Sum `input` to the `size`. `size` must be expandable to the Tensor size.
|
|
2046
|
+
"""
|
|
2047
|
+
if len(size) == 1 and isinstance(size[0], tuple):
|
|
2048
|
+
size = size[0]
|
|
2049
|
+
shape_input = input.shape
|
|
2050
|
+
if len(size) > input.ndim:
|
|
2051
|
+
raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_input}.")
|
|
2052
|
+
if len(size) < input.ndim:
|
|
2053
|
+
pre_axis = tuple(axis for axis in range(input.ndim - len(size)))
|
|
2054
|
+
input = input.sum(pre_axis)
|
|
2055
|
+
axes = []
|
|
2056
|
+
for i, element in enumerate(size):
|
|
2057
|
+
if element != input.shape[i] and element == 1:
|
|
2058
|
+
axes.append(i)
|
|
2059
|
+
elif element != input.shape[i]:
|
|
2060
|
+
raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_input}.")
|
|
2061
|
+
if axes:
|
|
2062
|
+
return input.sum(tuple(axes), keepdims=True)
|
|
2063
|
+
return input
|
|
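A walk-through of the `sum_to_size` logic above: leading extra dimensions are summed away first, then any target dimension of 1 is reduced with `keepdims=True`, and anything else must match exactly (sketch, shapes illustrative):
>>> import numpy as np
>>> import mindspore as ms
>>> x = ms.Tensor(np.ones((2, 3, 4), np.float32))
>>> print(x.sum_to_size((3, 1)).shape)   # sum axis 0 away, reduce last axis to 1
(3, 1)
>>> print(x.sum_to_size(1, 3, 1).shape)  # varargs form is unpacked the same way
(1, 3, 1)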
2064
|
+
|
|
2065
|
+
|
|
2066
|
+
def nansum(input, axis=None, keepdims=False, *, dtype=None):
|
|
2067
|
+
"""
|
|
2068
|
+
Computes sum of all elements, treating NaNs as zero.
|
|
2069
|
+
"""
|
|
2070
|
+
return F.nansum(input, axis=axis, keepdims=keepdims, dtype=dtype)
|
|
2071
|
+
|
|
2072
|
+
|
|
1952
2073
|
def repeat(x, repeats, axis=None):
|
|
1953
2074
|
"""
|
|
1954
2075
|
Repeat elements of an array.
|
|
@@ -1996,7 +2117,7 @@ def repeat(x, repeats, axis=None):
|
|
|
1996
2117
|
axis = 0
|
|
1997
2118
|
if not isinstance(axis, int):
|
|
1998
2119
|
const_utils.raise_type_error('axes should be integers')
|
|
1999
|
-
|
|
2120
|
+
check_axis_in_range(axis, x.ndim)
|
|
2000
2121
|
axis = axis + x.ndim if axis < 0 else axis
|
|
2001
2122
|
|
|
2002
2123
|
if len(repeats) == 1:
|
|
@@ -2006,7 +2127,8 @@ def repeat(x, repeats, axis=None):
|
|
|
2006
2127
|
return repeat_elements(x, repeats, axis)
|
|
2007
2128
|
size = x.shape[axis]
|
|
2008
2129
|
if len(repeats) != size:
|
|
2009
|
-
const_utils.raise_value_error(
|
|
2130
|
+
const_utils.raise_value_error(
|
|
2131
|
+
'operands could not be broadcast together')
|
|
2010
2132
|
subs = P.Split(axis, size)(x)
|
|
2011
2133
|
repeated_subs = []
|
|
2012
2134
|
for sub_item, rep in zip(subs, repeats):
|
|
@@ -2019,7 +2141,6 @@ def repeat_interleave(x, repeats, dim=None):
|
|
|
2019
2141
|
"""
|
|
2020
2142
|
For details, please refer to :func:`mindspore.ops.repeat_interleave`.
|
|
2021
2143
|
"""
|
|
2022
|
-
dim = dim if dim is not None else 0
|
|
2023
2144
|
return F.repeat_interleave(x, repeats, dim)
|
|
2024
2145
|
|
|
2025
2146
|
|
|
@@ -2075,9 +2196,19 @@ def hypot(x, other):
|
|
|
2075
2196
|
return F.hypot(x, other)
|
|
2076
2197
|
|
|
2077
2198
|
|
|
2078
|
-
def soft_shrink(
|
|
2199
|
+
def soft_shrink(input, lambd=0.5):
|
|
2079
2200
|
"""Apply the soft shrink function for a tensor. Calculates the output according to the input elements."""
|
|
2080
|
-
return F.
|
|
2201
|
+
return F.soft_shrink(input, lambd)
|
|
2202
|
+
|
|
2203
|
+
|
|
2204
|
+
def matrix_determinant(input):
|
|
2205
|
+
"""Computes the determinant of one or more square matrices."""
|
|
2206
|
+
return F.matrix_determinant(input)
|
|
2207
|
+
|
|
2208
|
+
|
|
2209
|
+
def log_matrix_determinant(input):
|
|
2210
|
+
"""Computes the sign and the log of the absolute value of the determinant of one or more square matrices."""
|
|
2211
|
+
return F.log_matrix_determinant(input)
|
|
2081
2212
|
|
|
2082
2213
|
|
|
2083
2214
|
def getitem(data, index):
|
|
@@ -2135,7 +2266,8 @@ def constant_round(*data):
|
|
|
2135
2266
|
"""Returns the rounded value of the constant."""
|
|
2136
2267
|
for x in data:
|
|
2137
2268
|
if x is None:
|
|
2138
|
-
raise ValueError(
|
|
2269
|
+
raise ValueError(
|
|
2270
|
+
"For round(), the input should be a Tensor or 1-2 constants.")
|
|
2139
2271
|
return round(*data)
|
|
2140
2272
|
|
|
2141
2273
|
|
|
@@ -2150,7 +2282,8 @@ def ms_round(*data):
|
|
|
2150
2282
|
return round_(x)
|
|
2151
2283
|
return constant_round(x)
|
|
2152
2284
|
if isinstance(data[0], Tensor) or isinstance(data[1], Tensor):
|
|
2153
|
-
const_utils.raise_type_error(
|
|
2285
|
+
const_utils.raise_type_error(
|
|
2286
|
+
"When applying round() to tensor, only one tensor is supported as input.")
|
|
2154
2287
|
return constant_round(*data)
|
|
2155
2288
|
|
|
2156
2289
|
|
|
@@ -2168,9 +2301,11 @@ def str_func(*data):
|
|
|
2168
2301
|
return ''
|
|
2169
2302
|
data = data[0]
|
|
2170
2303
|
if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
|
|
2171
|
-
const_utils.raise_type_error(
|
|
2304
|
+
const_utils.raise_type_error(
|
|
2305
|
+
"str() does not support sparse tensor input.")
|
|
2172
2306
|
if not F.isconstant(data):
|
|
2173
|
-
const_utils.raise_type_error(
|
|
2307
|
+
const_utils.raise_type_error(
|
|
2308
|
+
"str() does not support non-constant input.")
|
|
2174
2309
|
return cast_to_str(data)
|
|
2175
2310
|
|
|
2176
2311
|
|
|
@@ -2188,22 +2323,28 @@ def bool_func(*data):
|
|
|
2188
2323
|
return False
|
|
2189
2324
|
data = data[0]
|
|
2190
2325
|
if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
|
|
2191
|
-
const_utils.raise_type_error(
|
|
2326
|
+
const_utils.raise_type_error(
|
|
2327
|
+
"bool() does not support sparse tensor input.")
|
|
2192
2328
|
if isinstance(data, (Tensor, Tensor_)):
|
|
2193
2329
|
tensor_shape = F.shape(data)
|
|
2194
2330
|
tensor_shape_len = len(tensor_shape)
|
|
2195
2331
|
if tensor_shape_len == 0 or (tensor_shape_len == 1 and tensor_shape[0] == 1):
|
|
2196
2332
|
return data != 0
|
|
2197
|
-
const_utils.raise_value_error(
|
|
2333
|
+
const_utils.raise_value_error(
|
|
2334
|
+
"The truth value of an array with more than one element is ambiguous.")
|
|
2198
2335
|
if not F.isconstant(data):
|
|
2199
|
-
|
|
2336
|
+
if hasattr(data, "__bool__"):
|
|
2337
|
+
return data.__bool__()
|
|
2338
|
+
if hasattr(data, "__len__"):
|
|
2339
|
+
return len(data) != 0
|
|
2340
|
+
return True
|
|
2200
2341
|
return cast_to_bool(data)
|
|
2201
2342
|
|
|
2202
2343
|
|
|
2203
2344
|
@constexpr
|
|
2204
2345
|
def cast_to_int(*data):
|
|
2205
2346
|
target = data[0]
|
|
2206
|
-
if isinstance(target, Tensor_):
|
|
2347
|
+
if isinstance(target, (Tensor, Tensor_)):
|
|
2207
2348
|
target = Tensor(target, internal=True)
|
|
2208
2349
|
if len(data) == 1:
|
|
2209
2350
|
return int(target)
|
|
@@ -2218,16 +2359,23 @@ def int_func(*data):
|
|
|
2218
2359
|
if data_len == 0:
|
|
2219
2360
|
return 0
|
|
2220
2361
|
target = data[0]
|
|
2362
|
+
base = 10
|
|
2363
|
+
if data_len == 2:
|
|
2364
|
+
base = data[1]
|
|
2365
|
+
if isinstance(target, (Tensor, Tensor_, int, float, bool)) and base == 10 and not F.isconstant(target):
|
|
2366
|
+
return F.scalar_cast(target, mstype.int64)
|
|
2221
2367
|
if not F.isconstant(target):
|
|
2222
|
-
const_utils.raise_type_error(
|
|
2368
|
+
const_utils.raise_type_error(
|
|
2369
|
+
"int() does not support non-constant input.")
|
|
2223
2370
|
if isinstance(target, (CSRTensor, COOTensor, RowTensorInner)):
|
|
2224
|
-
const_utils.raise_type_error(
|
|
2371
|
+
const_utils.raise_type_error(
|
|
2372
|
+
"int() does not support sparse tensor input.")
|
|
2225
2373
|
return cast_to_int(*data)
|
|
2226
2374
|
|
|
2227
2375
|
|
|
2228
2376
|
@constexpr
|
|
2229
2377
|
def cast_to_float(data):
|
|
2230
|
-
if isinstance(data, Tensor_):
|
|
2378
|
+
if isinstance(data, (Tensor, Tensor_)):
|
|
2231
2379
|
data = Tensor(data, internal=True)
|
|
2232
2380
|
return float(data)
|
|
2233
2381
|
|
|
@@ -2240,10 +2388,14 @@ def float_func(*data):
|
|
|
2240
2388
|
if data_len == 0:
|
|
2241
2389
|
return 0.0
|
|
2242
2390
|
data = data[0]
|
|
2391
|
+
if isinstance(data, (Tensor, Tensor_, int, float, bool)) and not F.isconstant(data):
|
|
2392
|
+
return F.scalar_cast(data, mstype.float32)
|
|
2243
2393
|
if not F.isconstant(data):
|
|
2244
|
-
const_utils.raise_type_error(
|
|
2394
|
+
const_utils.raise_type_error(
|
|
2395
|
+
"float() does not support non-constant input.")
|
|
2245
2396
|
if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
|
|
2246
|
-
const_utils.raise_type_error(
|
|
2397
|
+
const_utils.raise_type_error(
|
|
2398
|
+
"float() does not support sparse tensor input.")
|
|
2247
2399
|
return cast_to_float(data)
|
|
2248
2400
|
|
|
2249
2401
|
|
|
@@ -2256,13 +2408,20 @@ def list_func(*data):
|
|
|
2256
2408
|
return F.make_list()
|
|
2257
2409
|
data = data[0]
|
|
2258
2410
|
if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
|
|
2259
|
-
const_utils.raise_type_error(
|
|
2411
|
+
const_utils.raise_type_error(
|
|
2412
|
+
"list() does not support single sparse tensor input.")
|
|
2260
2413
|
if not isinstance(data, Tensor) and not hasattr(data, "__ms_iter__"):
|
|
2261
2414
|
data_type = F.typeof(data)
|
|
2262
|
-
const_utils.raise_type_error(
|
|
2415
|
+
const_utils.raise_type_error(
|
|
2416
|
+
str(data_type) + " object is not iterable.")
|
|
2263
2417
|
if isinstance(data, dict):
|
|
2264
2418
|
data = data.keys()
|
|
2265
|
-
|
|
2419
|
+
if isinstance(data, (tuple, list)) and F.is_sequence_shape_unknown(data):
|
|
2420
|
+
ret = mutable([], True)
|
|
2421
|
+
if F.is_dynamic_sequence_element_unknown(data):
|
|
2422
|
+
return ret
|
|
2423
|
+
else:
|
|
2424
|
+
ret = F.make_list()
|
|
2266
2425
|
for i in range(len(data)):
|
|
2267
2426
|
ret = ret + F.make_list(data[i])
|
|
2268
2427
|
return ret
|
|
@@ -2277,13 +2436,20 @@ def tuple_func(*data):
|
|
|
2277
2436
|
return F.make_tuple()
|
|
2278
2437
|
data = data[0]
|
|
2279
2438
|
if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
|
|
2280
|
-
const_utils.raise_type_error(
|
|
2439
|
+
const_utils.raise_type_error(
|
|
2440
|
+
"tuple() does not support single sparse tensor input.")
|
|
2281
2441
|
if not isinstance(data, Tensor) and not hasattr(data, "__ms_iter__"):
|
|
2282
2442
|
data_type = F.typeof(data)
|
|
2283
|
-
const_utils.raise_type_error(
|
|
2443
|
+
const_utils.raise_type_error(
|
|
2444
|
+
str(data_type) + " object is not iterable.")
|
|
2284
2445
|
if isinstance(data, dict):
|
|
2285
2446
|
data = data.keys()
|
|
2286
|
-
|
|
2447
|
+
if isinstance(data, (tuple, list)) and F.is_sequence_shape_unknown(data):
|
|
2448
|
+
ret = mutable((), True)
|
|
2449
|
+
if F.is_dynamic_sequence_element_unknown(data):
|
|
2450
|
+
return ret
|
|
2451
|
+
else:
|
|
2452
|
+
ret = F.make_tuple()
|
|
2287
2453
|
for i in range(len(data)):
|
|
2288
2454
|
ret = ret + F.make_tuple(data[i])
|
|
2289
2455
|
return ret
|
|
@@ -2309,7 +2475,8 @@ def get_max_min_data_len(*data):
|
|
|
2309
2475
|
if isinstance(data, (dict, list, tuple)):
|
|
2310
2476
|
len_data = len(data)
|
|
2311
2477
|
else:
|
|
2312
|
-
const_utils.raise_type_error(
|
|
2478
|
+
const_utils.raise_type_error(
|
|
2479
|
+
"max() or min() does not support the data type.")
|
|
2313
2480
|
return len_data
|
|
2314
2481
|
|
|
2315
2482
|
|
|
@@ -2321,7 +2488,8 @@ def get_tensor_num(data):
|
|
|
2321
2488
|
tensor_shape = F.shape(input_data)
|
|
2322
2489
|
tensor_shape_len = len(tensor_shape)
|
|
2323
2490
|
if tensor_shape_len != 0 and not (tensor_shape_len == 1 and tensor_shape[0] == 1):
|
|
2324
|
-
const_utils.raise_value_error(
|
|
2491
|
+
const_utils.raise_value_error(
|
|
2492
|
+
"The truth value of an array with more than one element is ambiguous.")
|
|
2325
2493
|
tensor_num = tensor_num + 1
|
|
2326
2494
|
return tensor_num
|
|
2327
2495
|
|
|
@@ -2337,27 +2505,66 @@ def exist_tensor(data):
|
|
|
2337
2505
|
return False
|
|
2338
2506
|
|
|
2339
2507
|
|
|
2508
|
+
def check_sequence_all_variable_scalar(x, str_info):
|
|
2509
|
+
"""Check whether x can be used in SequenceMax and SequenceMin"""
|
|
2510
|
+
if F.is_sequence_shape_unknown(x):
|
|
2511
|
+
if F.is_dynamic_sequence_element_unknown(x):
|
|
2512
|
+
const_utils.raise_value_error(str_info + "() arg is an empty sequence.")
|
|
2513
|
+
if not isinstance(x[0], (int, float)):
|
|
2514
|
+
const_utils.raise_value_error(
|
|
2515
|
+
"When the input to " + str_info + "() is dynamic length sequence, only support scalar type input")
|
|
2516
|
+
return True
|
|
2517
|
+
contain_variable_scalar = False
|
|
2518
|
+
for i in x:
|
|
2519
|
+
if not isinstance(i, (int, float)):
|
|
2520
|
+
return False
|
|
2521
|
+
if not contain_variable_scalar and not F.isconstant(i):
|
|
2522
|
+
contain_variable_scalar = True
|
|
2523
|
+
return contain_variable_scalar
|
|
2524
|
+
|
|
2525
|
+
|
|
2526
|
+
def get_data_type_str(input_data):
|
|
2527
|
+
"""Get the type of input."""
|
|
2528
|
+
if isinstance(input_data, (int, float, bool)):
|
|
2529
|
+
return "variable " + str(F.typeof(input_data))
|
|
2530
|
+
return str(F.typeof(input_data))
|
|
2531
|
+
|
|
2532
|
+
|
|
2533
|
+
def check_isconstant(input_data, func_name):
|
|
2534
|
+
"""Check the input data of func is constant."""
|
|
2535
|
+
if not F.isconstant(input_data):
|
|
2536
|
+
const_utils.raise_type_error("The input of " + func_name + " only support Tensor, List, Tuple, constant Scalar,"
|
|
2537
|
+
" but got " + get_data_type_str(input_data))
|
|
2538
|
+
|
|
2539
|
+
|
|
2340
2540
|
def ms_max_one_element(x):
|
|
2341
2541
|
"""Implementation of `max` which inputs has only one element."""
|
|
2342
2542
|
if isinstance(x, Tensor):
|
|
2343
2543
|
tensor_shape = F.shape(x)
|
|
2344
2544
|
tensor_shape_len = len(tensor_shape)
|
|
2345
2545
|
if tensor_shape_len == 0:
|
|
2346
|
-
const_utils.raise_type_error(
|
|
2546
|
+
const_utils.raise_type_error(
|
|
2547
|
+
"Cannot iterate over a scalar tensor.")
|
|
2347
2548
|
if tensor_shape_len >= 2:
|
|
2348
|
-
const_utils.raise_value_error(
|
|
2549
|
+
const_utils.raise_value_error(
|
|
2550
|
+
"The truth value of an array with more than one element is ambiguous.")
|
|
2349
2551
|
return x.max()
|
|
2350
2552
|
# Deal with Tensor in tuple or list
|
|
2351
2553
|
if isinstance(x, (list, tuple)):
|
|
2554
|
+
if check_sequence_all_variable_scalar(x, "max"):
|
|
2555
|
+
return SequenceMax()(x)
|
|
2352
2556
|
if len(x) == 0:
|
|
2353
2557
|
const_utils.raise_value_error("max() arg is an empty sequence.")
|
|
2354
2558
|
tensor_num = get_tensor_num(x)
|
|
2355
2559
|
if tensor_num == len(x):
|
|
2356
2560
|
return max_tensor(x)
|
|
2357
2561
|
if tensor_num != 0:
|
|
2358
|
-
const_utils.raise_type_error(
|
|
2562
|
+
const_utils.raise_type_error(
|
|
2563
|
+
"max() cannot contain both tensor and non-tensor type.")
|
|
2359
2564
|
if exist_tensor(x):
|
|
2360
|
-
const_utils.raise_type_error(
|
|
2565
|
+
const_utils.raise_type_error(
|
|
2566
|
+
"max() cannot support tensor in list or tuple nested now.")
|
|
2567
|
+
check_isconstant(x, "max()")
|
|
2361
2568
|
return max_(x)
|
|
2362
2569
|
|
|
2363
2570
|
|
|
@@ -2375,10 +2582,14 @@ def ms_max(*data):
|
|
|
2375
2582
|
if tensor_num == len_data:
|
|
2376
2583
|
return max_tensor(*data)
|
|
2377
2584
|
if tensor_num != 0:
|
|
2378
|
-
const_utils.raise_type_error(
|
|
2585
|
+
const_utils.raise_type_error(
|
|
2586
|
+
"max() cannot contain both tensor and non-tensor type.")
|
|
2379
2587
|
# exist tensor in list/tuple
|
|
2380
2588
|
if exist_tensor(data):
|
|
2381
|
-
const_utils.raise_value_error(
|
|
2589
|
+
const_utils.raise_value_error(
|
|
2590
|
+
"The truth value of an array with more than one element is ambiguous.")
|
|
2591
|
+
for input_data in data:
|
|
2592
|
+
check_isconstant(input_data, "max()")
|
|
2382
2593
|
return max_(*data)
|
|
2383
2594
|
|
|
2384
2595
|
|
|
@@ -2415,21 +2626,28 @@ def ms_min_one_element(x):
|
|
|
2415
2626
|
tensor_shape = F.shape(x)
|
|
2416
2627
|
tensor_shape_len = len(tensor_shape)
|
|
2417
2628
|
if tensor_shape_len == 0:
|
|
2418
|
-
const_utils.raise_type_error(
|
|
2629
|
+
const_utils.raise_type_error(
|
|
2630
|
+
"Cannot iterate over a scalar tensor.")
|
|
2419
2631
|
if tensor_shape_len >= 2:
|
|
2420
|
-
const_utils.raise_value_error(
|
|
2632
|
+
const_utils.raise_value_error(
|
|
2633
|
+
"The truth value of an array with more than one element is ambiguous.")
|
|
2421
2634
|
return x.min()
|
|
2422
2635
|
# Deal with Tensor in tuple or list
|
|
2423
2636
|
if isinstance(x, (list, tuple)):
|
|
2637
|
+
if check_sequence_all_variable_scalar(x, "min"):
|
|
2638
|
+
return SequenceMin()(x)
|
|
2424
2639
|
if len(x) == 0:
|
|
2425
2640
|
const_utils.raise_value_error("min() arg is an empty sequence.")
|
|
2426
2641
|
tensor_num = get_tensor_num(x)
|
|
2427
2642
|
if tensor_num == len(x):
|
|
2428
2643
|
return min_tensor(x)
|
|
2429
2644
|
if tensor_num != 0:
|
|
2430
|
-
const_utils.raise_type_error(
|
|
2645
|
+
const_utils.raise_type_error(
|
|
2646
|
+
"min() cannot contain both tensor and non-tensor type.")
|
|
2431
2647
|
if exist_tensor(x):
|
|
2432
|
-
const_utils.raise_type_error(
|
|
2648
|
+
const_utils.raise_type_error(
|
|
2649
|
+
"min() cannot support tensor in list or tuple nested now.")
|
|
2650
|
+
check_isconstant(x, "min()")
|
|
2433
2651
|
return min_(x)
|
|
2434
2652
|
|
|
2435
2653
|
|
|
@@ -2447,10 +2665,14 @@ def ms_min(*data):
|
|
|
2447
2665
|
if tensor_num == len_data:
|
|
2448
2666
|
return min_tensor(*data)
|
|
2449
2667
|
if tensor_num != 0:
|
|
2450
|
-
const_utils.raise_type_error(
|
|
2668
|
+
const_utils.raise_type_error(
|
|
2669
|
+
"min() cannot contain both tensor and non-tensor type.")
|
|
2451
2670
|
# exist tensor in list/tuple
|
|
2452
2671
|
if exist_tensor(data):
|
|
2453
|
-
const_utils.raise_value_error(
|
|
2672
|
+
const_utils.raise_value_error(
|
|
2673
|
+
"The truth value of an array with more than one element is ambiguous.")
|
|
2674
|
+
for input_data in data:
|
|
2675
|
+
check_isconstant(input_data, "min()")
|
|
2454
2676
|
return min_(*data)
|
|
2455
2677
|
|
|
2456
2678
|
|
|
@@ -2462,11 +2684,13 @@ def ms_sum(*data):
|
|
|
2462
2684
|
x = data[0]
|
|
2463
2685
|
if not isinstance(x, Tensor) and not hasattr(x, "__ms_iter__"):
|
|
2464
2686
|
data_type = F.typeof(x)
|
|
2465
|
-
const_utils.raise_type_error(
|
|
2687
|
+
const_utils.raise_type_error(
|
|
2688
|
+
str(data_type) + " object is not iterable.")
|
|
2466
2689
|
if isinstance(x, Tensor):
|
|
2467
2690
|
tensor_shape = F.shape(x)
|
|
2468
2691
|
if len(tensor_shape) == 0:
|
|
2469
|
-
const_utils.raise_type_error(
|
|
2692
|
+
const_utils.raise_type_error(
|
|
2693
|
+
"Cannot iterate over a scalar tensor.")
|
|
2470
2694
|
if isinstance(x, dict):
|
|
2471
2695
|
x = x.keys()
|
|
2472
2696
|
result = 0
|
|
@@ -2497,7 +2721,8 @@ def ms_len(data):
|
|
|
2497
2721
|
def python_len_with_check(data):
|
|
2498
2722
|
"""Return the result of python built-in len function with iterable check"""
|
|
2499
2723
|
if not hasattr(data, "__iter__"):
|
|
2500
|
-
raise TypeError(str(type(data)) +
|
|
2724
|
+
raise TypeError(str(type(data)) +
|
|
2725
|
+
" object is not iterable in graph mode.")
|
|
2501
2726
|
return len(data)
|
|
2502
2727
|
|
|
2503
2728
|
|
|
@@ -2507,10 +2732,19 @@ def ms_len_with_iterable_check(data):
|
|
|
2507
2732
|
return python_len_with_check(data)
|
|
2508
2733
|
if not hasattr(data, "__len__"):
|
|
2509
2734
|
type_str = str(F.typeof(data))
|
|
2510
|
-
const_utils.raise_type_error(
|
|
2735
|
+
const_utils.raise_type_error(
|
|
2736
|
+
type_str + " object is not iterable in graph mode.")
|
|
2511
2737
|
return data.__len__()
|
|
2512
2738
|
|
|
2513
2739
|
|
|
2740
|
+
def ms_next_with_dyn_input_check(it):
|
|
2741
|
+
"""Implementation of `next` with daynamic input check."""
|
|
2742
|
+
if isinstance(it, (tuple, list)) and F.is_sequence_shape_unknown(it):
|
|
2743
|
+
raise ValueError(f"For 'ListComprehension' syntax [i for i in x], "
|
|
2744
|
+
f"input x can not be dynamic length list/tuple in graph mode")
|
|
2745
|
+
return it.__ms_hasnext__()
|
|
2746
|
+
|
|
2747
|
+
|
|
2514
2748
|
def floor(x):
|
|
2515
2749
|
"""Rounds a tensor down to the closest integer element-wise."""
|
|
2516
2750
|
return x.__floor__()
|
|
@@ -2551,6 +2785,9 @@ def enumerate_(x, start=0):
|
|
|
2551
2785
|
if check_is_tensor(x_type):
|
|
2552
2786
|
for i in range(x.shape[0]):
|
|
2553
2787
|
ret += ((start + i, x[i]),)
|
|
2788
|
+
elif F.is_sequence_shape_unknown(x):
|
|
2789
|
+
const_utils.raise_value_error(
|
|
2790
|
+
"For 'enumerate', the dynamic length input is unsupported in graph mode")
|
|
2554
2791
|
else:
|
|
2555
2792
|
ret = zip(range(start, start + len(x)), x)
|
|
2556
2793
|
return ret
|
|
@@ -2558,28 +2795,25 @@ def enumerate_(x, start=0):
|
|
|
2558
2795
|
|
|
2559
2796
|
def expand_tensor_as(x, y):
|
|
2560
2797
|
"""Expand tensor"""
|
|
2561
|
-
return
|
|
2798
|
+
return F.broadcast_to(x, shape_(y))
|
|
2562
2799
|
|
|
2563
2800
|
|
|
2564
2801
|
def broadcast_to(x, shape):
|
|
2565
2802
|
"""Broadcasts tensor to a given shape."""
|
|
2566
|
-
return
|
|
2803
|
+
return F.broadcast_to(x, shape)
|
|
2567
2804
|
|
|
2568
2805
|
|
|
2569
2806
|
def expand_dims(x, axis):
|
|
2570
2807
|
"""
|
|
2571
2808
|
Insert a dimension of shape 1 at the specified axis of Tensor.
|
|
2572
2809
|
"""
|
|
2573
|
-
check_is_int(axis, 'axis')
|
|
2810
|
+
validator.check_is_int(axis, 'axis')
|
|
2574
2811
|
return P.ExpandDims()(x, axis)
|
|
2575
2812
|
|
|
2576
2813
|
|
|
2577
|
-
def unsqueeze(
|
|
2578
|
-
"""
|
|
2579
|
-
|
|
2580
|
-
"""
|
|
2581
|
-
check_is_int(dim, 'dim')
|
|
2582
|
-
return P.ExpandDims()(x, dim)
|
|
2814
|
+
def unsqueeze(input, dim):
|
|
2815
|
+
"""For details, please refer to :func:`mindspore.ops.unsqueeze`."""
|
|
2816
|
+
return P.ExpandDims()(input, dim)
|
|
2583
2817
|
|
|
2584
2818
|
|
|
2585
2819
|
def masked_fill(x, mask, value):
|
|
@@ -2599,12 +2833,12 @@ def col2im(*inputs):
|
|
|
2599
2833
|
return F.col2im(*inputs)
|
|
2600
2834
|
|
|
2601
2835
|
|
|
2602
|
-
def narrow(
|
|
2836
|
+
def narrow(input, axis, start, length):
|
|
2603
2837
|
"""
|
|
2604
2838
|
Returns a narrowed tensor from input tensor.
|
|
2605
2839
|
The dimension axis is input from start to start + length.
|
|
2606
2840
|
"""
|
|
2607
|
-
return F.narrow(
|
|
2841
|
+
return F.narrow(input, axis, start, length)
|
|
2608
2842
|
|
|
2609
2843
|
|
|
2610
2844
|
def to_csr(x):
|
|
@@ -2630,7 +2864,8 @@ def check_select_condition(cond_type):
|
|
|
2630
2864
|
"""
|
|
2631
2865
|
if isinstance(cond_type, mstype.tensor_type):
|
|
2632
2866
|
return
|
|
2633
|
-
raise TypeError(
|
|
2867
|
+
raise TypeError(
|
|
2868
|
+
f"For select, the argument condition should be Tensor, but got {cond_type}.")
|
|
2634
2869
|
|
|
2635
2870
|
|
|
2636
2871
|
@constexpr
|
|
@@ -2670,6 +2905,13 @@ def view(x, *shape):
|
|
|
2670
2905
|
return F.reshape(x, shape)
|
|
2671
2906
|
|
|
2672
2907
|
|
|
2908
|
+
def view_as(input, other):
|
|
2909
|
+
"""View self Tensor as the same shape as `other` ."""
|
|
2910
|
+
if not isinstance(other, (Tensor, Tensor_)):
|
|
2911
|
+
raise TypeError(f"For view_as, the input other must be a Tensor, but got {type(other)}")
|
|
2912
|
+
return F.reshape(input, other.shape)
|
|
2913
|
+
|
|
2914
|
+
|
|
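`view_as` is reshape-by-example: only `other.shape` is consulted, so the element counts must already agree (sketch, illustrative):
>>> import numpy as np
>>> import mindspore as ms
>>> x = ms.Tensor(np.arange(6, dtype=np.float32))
>>> y = ms.Tensor(np.zeros((2, 3), np.float32))
>>> print(x.view_as(y).shape)
(2, 3)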
2673
2915
|
def bitwise_and(x, y):
|
|
2674
2916
|
"""Returns bitwise `and` of two tensors element-wise."""
|
|
2675
2917
|
return F.bitwise_and(x, y)
|
|
@@ -2685,11 +2927,42 @@ def bitwise_xor(x, y):
|
|
|
2685
2927
|
return F.bitwise_xor(x, y)
|
|
2686
2928
|
|
|
2687
2929
|
|
|
2930
|
+
def bitwise_left_shift(x, y):
|
|
2931
|
+
"""Returns bitwise left shift of `x` by `other` bits."""
|
|
2932
|
+
return F.bitwise_left_shift(x, y)
|
|
2933
|
+
|
|
2934
|
+
|
|
2935
|
+
def bitwise_right_shift(x, y):
|
|
2936
|
+
"""Returns bitwise right shift of `x` by `other` bits."""
|
|
2937
|
+
return F.bitwise_right_shift(x, y)
|
|
2938
|
+
|
|
2939
|
+
|
|
2688
2940
|
def exp(x):
|
|
2689
2941
|
"""Returns exponential of a tensor element-wise."""
|
|
2690
2942
|
return F.exp(x)
|
|
2691
2943
|
|
|
2692
2944
|
|
|
2945
|
+
def real(x):
|
|
2946
|
+
r"""
|
|
2947
|
+
For details, please refer to :func:`mindspore.ops.real`.
|
|
2948
|
+
"""
|
|
2949
|
+
return F.real(x)
|
|
2950
|
+
|
|
2951
|
+
|
|
2952
|
+
def rsqrt(x):
|
|
2953
|
+
r"""
|
|
2954
|
+
For details, please refer to :func:`mindspore.ops.rsqrt`.
|
|
2955
|
+
"""
|
|
2956
|
+
return F.rsqrt(x)
|
|
2957
|
+
|
|
2958
|
+
|
|
2959
|
+
def reciprocal(x):
|
|
2960
|
+
r"""
|
|
2961
|
+
For details, please refer to :func:`mindspore.ops.reciprocal`.
|
|
2962
|
+
"""
|
|
2963
|
+
return F.reciprocal(x)
|
|
2964
|
+
|
|
2965
|
+
|
|
2693
2966
|
def sqrt(x):
|
|
2694
2967
|
"""Returns sqrt of a tensor element-wise."""
|
|
2695
2968
|
return F.sqrt(x)
|
|
@@ -2705,6 +2978,11 @@ def sub(x, y):
|
|
|
2705
2978
|
return F.sub(x, y)
|
|
2706
2979
|
|
|
2707
2980
|
|
|
2981
|
+
def t(input):
|
|
2982
|
+
"""Transposes a 2-D tensor."""
|
|
2983
|
+
return F.t(input)
|
|
2984
|
+
|
|
2985
|
+
|
|
2708
2986
|
def tan(x):
|
|
2709
2987
|
"""Returns tangent of `x`."""
|
|
2710
2988
|
return F.tan(x)
|
|
@@ -2722,9 +3000,9 @@ def cosh(x):
|
|
|
2722
3000
|
return F.cosh(x)
|
|
2723
3001
|
|
|
2724
3002
|
|
|
2725
|
-
def ger(
|
|
2726
|
-
"""Ger product of `
|
|
2727
|
-
return F.ger(
|
|
3003
|
+
def ger(input, vec2):
|
|
3004
|
+
"""Ger product of `input` and `vec2`."""
|
|
3005
|
+
return F.ger(input, vec2)
|
|
2728
3006
|
|
|
2729
3007
|
|
|
2730
3008
|
def gt(x, y):
|
|
@@ -2813,18 +3091,34 @@ def unsorted_segment_prod(x, segment_ids, num_segments):
|
|
|
2813
3091
|
return F.unsorted_segment_prod(x, segment_ids, num_segments)
|
|
2814
3092
|
|
|
2815
3093
|
|
|
2816
|
-
def negative(
|
|
3094
|
+
def negative(input):
|
|
2817
3095
|
r"""
|
|
2818
3096
|
Return a new tensor with the negative of the elements of input.
|
|
2819
3097
|
"""
|
|
2820
|
-
return F.neg(
|
|
3098
|
+
return F.neg(input)
|
|
2821
3099
|
|
|
2822
3100
|
|
|
2823
|
-
def nonzero(
|
|
3101
|
+
def nonzero(input):
|
|
2824
3102
|
"""
|
|
2825
3103
|
Return a Tensor of the positions of all non-zero values.
|
|
2826
3104
|
"""
|
|
2827
|
-
return F.nonzero(
|
|
3105
|
+
return F.nonzero(input)
|
|
3106
|
+
|
|
3107
|
+
|
|
3108
|
+
def new_zeros(x, size, *, dtype=None):
|
|
3109
|
+
r"""
|
|
3110
|
+
Return a tensor of `size` filled with zeros. By default, the returned tensor has the same dtype as `x`.
|
|
3111
|
+
"""
|
|
3112
|
+
_dtype = x.dtype if dtype is None else dtype
|
|
3113
|
+
return F.zeros(size, dtype=_dtype)
|
|
3114
|
+
|
|
3115
|
+
|
|
3116
|
+
def new_ones(x, size, *, dtype=None):
|
|
3117
|
+
r"""
|
|
3118
|
+
Return a tensor of `size` filled with ones. By default, the returned tensor has the same dtype as `x`.
|
|
3119
|
+
"""
|
|
3120
|
+
_dtype = x.dtype if dtype is None else dtype
|
|
3121
|
+
return F.ones(size, dtype=_dtype)
|
|
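`new_zeros` and `new_ones` inherit the source tensor's dtype unless the keyword-only `dtype` overrides it (sketch; dtype reprs illustrative):
>>> import numpy as np
>>> import mindspore as ms
>>> x = ms.Tensor(np.array([1, 2], np.int32))
>>> print(x.new_zeros((2, 2)).dtype)
Int32
>>> print(x.new_ones((2, 2), dtype=ms.float32).dtype)
Float32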
2828
3122
|
|
|
2829
3123
|
|
|
2830
3124
|
def diag(x):
|
|
@@ -2834,11 +3128,18 @@ def diag(x):
|
|
|
2834
3128
|
return F.diag(x)
|
|
2835
3129
|
|
|
2836
3130
|
|
|
2837
|
-
def
|
|
3131
|
+
def diagflat(input, offset=0):
|
|
3132
|
+
"""
|
|
3133
|
+
Creates a two-dimensional Tensor with the flattened input as a diagonal.
|
|
3134
|
+
"""
|
|
3135
|
+
return F.diagflat(input, offset)
|
|
3136
|
+
|
|
3137
|
+
|
|
3138
|
+
def masked_select(input, mask):
|
|
2838
3139
|
"""
|
|
2839
3140
|
Returns a new 1-D Tensor which indexes the input tensor according to the boolean mask.
|
|
2840
3141
|
"""
|
|
2841
|
-
return F.masked_select(
|
|
3142
|
+
return F.masked_select(input, mask)
|
|
2842
3143
|
|
|
2843
3144
|
|
|
2844
3145
|
def inplace_update(x, v, indices):
|
|
@@ -2863,21 +3164,23 @@ def coo_to_csr(x):
|
|
|
2863
3164
|
|
|
2864
3165
|
def coo_to_dense(x):
|
|
2865
3166
|
"""convert coo to dense."""
|
|
2866
|
-
zeros_tensor = F.zeros(x.shape, x.values.dtype)
|
|
3167
|
+
zeros_tensor = F.zeros(x.shape, dtype=x.values.dtype)
|
|
2867
3168
|
return F.tensor_scatter_update(zeros_tensor, x.indices, x.values)
|
|
2868
3169
|
|
|
2869
3170
|
|
|
2870
3171
|
def coo_coalesce(x):
|
|
2871
3172
|
"""Returns the coalesced sparse tensor of the input."""
|
|
2872
3173
|
shape = const_utils.make_tensor(x.shape)
|
|
2873
|
-
res_indices, res_values, _ = P.Coalesce()(
|
|
3174
|
+
res_indices, res_values, _ = P.Coalesce()(
|
|
3175
|
+
x.indices.transpose(), x.values, shape)
|
|
2874
3176
|
return COOTensor(res_indices.transpose(), res_values, x.shape)
|
|
2875
3177
|
|
|
2876
3178
|
|
|
2877
3179
|
def csr_to_coo(x):
|
|
2878
3180
|
"""convert csr to coo."""
|
|
2879
3181
|
if x.ndim != 2:
|
|
2880
|
-
const_utils.raise_value_error(
|
|
3182
|
+
const_utils.raise_value_error(
|
|
3183
|
+
"Currently only support 2-D CSRTensor when converting to COOTensor.")
|
|
2881
3184
|
row_indices = F.csr2coo(x.indptr, x.values.shape[0])
|
|
2882
3185
|
coo_indices = P.Stack(1)((row_indices, x.indices))
|
|
2883
3186
|
return COOTensor(coo_indices, x.values, x.shape)
|
|
@@ -2888,7 +3191,7 @@ def csr_to_dense(x):
|
|
|
2888
3191
|
return F.csr_to_dense(x)
|
|
2889
3192
|
|
|
2890
3193
|
|
|
2891
|
-
def
|
|
3194
|
+
def random_categorical(x, num_sample, seed=0, dtype=mstype.int64):
|
|
2892
3195
|
r"""
|
|
2893
3196
|
Generates random samples from a given categorical distribution tensor.
|
|
2894
3197
|
Refer to :func:`mindspore.ops.random_categorical` for more detail.
|
|
@@ -2923,25 +3226,28 @@ def check_is_tuple_or_list_or_tensor(x, op_name, arg_name):
|
|
|
2923
3226
|
"""check whether x is list or tuple or tensor."""
|
|
2924
3227
|
if isinstance(x, (mstype.List, mstype.Tuple, mstype.tensor_type)):
|
|
2925
3228
|
return True
|
|
2926
|
-
raise TypeError(
|
|
3229
|
+
raise TypeError(
|
|
3230
|
+
f"For '{op_name}', the '{arg_name}' should be tuple or list or tensor, but got {x}.")
|
|
2927
3231
|
|
|
2928
3232
|
|
|
2929
3233
|
@constexpr
|
|
2930
3234
|
def check_is_const_int(x, op_name, arg_name):
|
|
2931
3235
|
"""check whether x is const int."""
|
|
2932
3236
|
if x is None:
|
|
2933
|
-
raise TypeError(
|
|
3237
|
+
raise TypeError(
|
|
3238
|
+
f"For '{op_name}', the '{arg_name}' should be a const int number, but got not const.")
|
|
2934
3239
|
if not isinstance(x, int):
|
|
2935
|
-
raise TypeError(
|
|
3240
|
+
raise TypeError(
|
|
3241
|
+
f"For '{op_name}', the '{arg_name}' should be a const int number, but got {x}.")
|
|
2936
3242
|
return True
|
|
2937
3243
|
|
|
2938
3244
|
|
|
2939
|
-
@
|
|
3245
|
+
@_primexpr
|
|
2940
3246
|
def check_is_tensor_bool_cond(shp):
|
|
2941
3247
|
"""check if tensor is a bool condition"""
|
|
2942
|
-
if shp
|
|
3248
|
+
if not shp or (len(shp) == 1 and shp[0] == 1):
|
|
2943
3249
|
return True
|
|
2944
|
-
if
|
|
3250
|
+
if None in shp:
|
|
2945
3251
|
raise ValueError(f"Only tensor which shape is () or (1,) can be converted to bool, but got tensor shape is "
|
|
2946
3252
|
f"None")
|
|
2947
3253
|
raise ValueError(f"Only tensor which shape is () or (1,) can be converted to bool, but got tensor shape is {shp}")
|
|
@@ -2949,7 +3255,13 @@ def check_is_tensor_bool_cond(shp):
|
|
|
2949
3255
|
|
|
2950
3256
|
@constexpr
|
|
2951
3257
|
def const_tensor_to_bool(x):
|
|
2952
|
-
"""convert bool tensor to bool condition
|
|
3258
|
+
"""convert bool tensor to bool condition
|
|
3259
|
+
def const_tensor_to_bool(x):
|
|
3260
|
+
convert bool tensor to bool condition
|
|
3261
|
+
if x.shape == (1,):
|
|
3262
|
+
return bool(x[0])
|
|
3263
|
+
return bool(x)
|
|
3264
|
+
"""
|
|
2953
3265
|
if x is None:
|
|
2954
3266
|
raise ValueError("Only tensor which shape is () or (1,) can be converted to bool, but got None")
|
|
2955
3267
|
x = x.asnumpy()
|
|
@@ -2961,7 +3273,7 @@ def const_tensor_to_bool(x):
|
|
|
2961
3273
|
f"Only tensor which shape is () or (1,) can be converted to bool, but got tensor shape is {x.shape}")
|
|
2962
3274
|
|
|
2963
3275
|
|
|
2964
|
-
@
|
|
3276
|
+
@_primexpr
|
|
2965
3277
|
def check_view_shape(x):
|
|
2966
3278
|
"""Check view function input shape"""
|
|
2967
3279
|
if not x:
|
|
@@ -2973,32 +3285,27 @@ def check_view_shape(x):
     return x
 
 
-# convert normal param_check functions to constexpr functions
 check_astype_dtype_const = constexpr(validator.check_astype_dtype)
 check_transpose_axis_const = constexpr(validator.check_transpose_axis)
-check_reshape_shp_const = constexpr(validator.check_reshape_shp)
-check_flatten_order_const = constexpr(validator.check_flatten_order)
-check_swapaxes_axis_const = constexpr(validator.check_swapaxes_axis)
-prepare_shape_for_squeeze_const = constexpr(validator.prepare_shape_for_squeeze)
-check_axis_in_range_const = constexpr(validator.check_axis_in_range)
-check_axis_valid = constexpr(validator.check_axis_valid)
 max_ = constexpr(validator.max_)
 min_ = constexpr(validator.min_)
-expanded_shape = constexpr(validator.expanded_shape)
-tuple_slice = constexpr(validator.tuple_slice)
-infer_out_shape = constexpr(validator.infer_out_shape)
-get_log2_size = constexpr(validator.get_log2_size)
-check_axis_type = constexpr(validator.check_axis_type)
-check_and_canonicalize_axes = constexpr(validator.check_and_canonicalize_axes)
-empty_compile = constexpr(validator.empty_compile)
+expanded_shape = validator.expanded_shape
+tuple_slice = validator.tuple_slice
 check_type_support = constexpr(validator.check_type_support)
-check_is_int = constexpr(validator.check_is_int)
 check_type_name = constexpr(validator.check_type_name)
 check_value_type = constexpr(validator.check_value_type)
-
+check_is_int = constexpr(validator.check_is_int)
+check_bool_type = constexpr(validator.check_bool)
+check_is_int = constexpr(validator.check_is_int)
 check_bool = constexpr(validator.check_bool)
 
 
+@constexpr
+def empty_compile(dtype, shape):
+    """Returns an empty Tensor."""
+    return Tensor_(dtype, shape)
+
+
 def tensor_bool(x):
     """tensor as condition, if is constant, return immediate bool value"""
     is_cond = check_is_tensor_bool_cond(F.shape(x))
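
Several `constexpr(validator.X)` wrappers are dropped or replaced with plain references here, and `empty_compile` becomes a local `@constexpr` helper. For orientation, a sketch of how `constexpr` turns an ordinary checker into a compile-time function, using the public `mindspore.ops.constexpr` decorator; the checker itself is a made-up example:

    from mindspore.ops import constexpr

    @constexpr
    def check_positive(x):
        # Evaluated at graph-compile time; x must be a compile-time constant there.
        if x <= 0:
            raise ValueError(f"expected a positive value, got {x}")
        return True

    check_positive(3)  # also callable eagerly, like any Python function
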
@@ -3022,6 +3329,11 @@ def matmul(x, y):
     return F.matmul(x, y)
 
 
+def inner(x, other):
+    """Computes the inner product of 2 tensors."""
+    return F.inner(x, other)
+
+
 def float_bool(x):
     """Implementation of `float_bool`."""
     return x != 0.0
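
The new `inner` standard method simply forwards to `ops.inner`. A usage sketch, assuming it is bound as `Tensor.inner` (for 1-D tensors this reduces to the ordinary dot product):

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.array([1.0, 2.0, 3.0]), ms.float32)
    y = ms.Tensor(np.array([4.0, 5.0, 6.0]), ms.float32)
    print(x.inner(y))  # 32.0 == 1*4 + 2*5 + 3*6
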
@@ -3046,9 +3358,11 @@ def str_bool(x):
     return True
 
 
-def
-    """
-
+def matrix_power(input, n):
+    """
+    Raises a square matrix to the (integer) power `n` .
+    """
+    return F.matrix_power(input, n)
 
 
 def log1p(x):
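
`matrix_power` is likewise a thin forwarder to `ops.matrix_power`. A sketch, assuming the `Tensor.matrix_power` binding:

    import numpy as np
    import mindspore as ms

    m = ms.Tensor(np.array([[0.0, 1.0], [-1.0, 0.0]]), ms.float32)
    print(m.matrix_power(2))  # [[-1.  0.] [ 0. -1.]], i.e. m @ m
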
@@ -3084,9 +3398,9 @@ def logit(x, eps=None):
     return F.logit(x, eps)
 
 
-def
-    """
-    return F.
+def logdet(x):
+    """Returns the log determinant of one or batches of square matrices."""
+    return F.logdet(x)
 
 
 def lerp(start, end, weight):
@@ -3094,9 +3408,10 @@ def lerp(start, end, weight):
     return F.lerp(start, end, weight)
 
 
-
+# pylint: disable=redefined-builtin
+def norm(A, ord=None, dim=None, keepdim=False, *, dtype=None):
     """Returns the matrix norm or vector norm of a given tensor."""
-    return F.norm(
+    return F.norm(A, ord, dim, keepdim, dtype=dtype)
 
 
 def renorm(input_x, p, dim, maxnorm):
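
The rebuilt `norm` adopts a `linalg`-style signature (`A`, `ord`, `dim`, `keepdim`, keyword-only `dtype`) in place of the old axis/p form. A usage sketch under that signature:

    import numpy as np
    import mindspore as ms

    a = ms.Tensor(np.array([[3.0, 4.0], [0.0, 12.0]]), ms.float32)
    print(a.norm())       # Frobenius norm over the whole matrix: 13.0
    print(a.norm(dim=1))  # per-row 2-norms: [ 5. 12.]
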
@@ -3109,6 +3424,15 @@ def renorm(input_x, p, dim, maxnorm):
     return F.renorm(input_x, p, dim, maxnorm)
 
 
+def sequence_index(sequence, target, start=None, end=None):
+    """Implementation of `tuple_index`."""
+    if start is None:
+        start = 0
+    if end is None:
+        end = len(sequence)
+    return SequenceIndex()(sequence, target, start, end)
+
+
 def list_bool(x):
     """Implementation of `tuple_bool`."""
     return len(x) != 0
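
`sequence_index` mirrors Python's `sequence.index` with an optional search window, dispatching to the `SequenceIndex` primitive in graph mode. Plain-Python semantics, for illustration only:

    def sequence_index_py(seq, target, start=None, end=None):
        # Same defaulting as above: search the full range unless a window is given.
        start = 0 if start is None else start
        end = len(seq) if end is None else end
        return seq.index(target, start, end)

    assert sequence_index_py((3, 1, 4, 1), 1) == 1
    assert sequence_index_py((3, 1, 4, 1), 1, 2) == 3
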
@@ -3148,13 +3472,22 @@ def ceil(x):
 
 def top_k(input_x, k, sorted=True):
     """
-
+    `Tensor.top_k` is deprecated, please use `Tensor.topk` instead.
     """
     check_is_int(k, 'k')
     check_bool(sorted, 'sorted')
     return F.top_k(input_x, k, sorted)
 
 
+def topk(input_x, k, dim=None, largest=True, sorted=True):
+    r"""
+    For details, please refer to :func:`mindspore.ops.topk`.
+    """
+    check_is_int(k, 'k')
+    check_bool_type(sorted, 'sorted')
+    return F.topk(input_x, k, dim, largest=largest, sorted=sorted)
+
+
 def subtract(x, other, *, alpha=1):
     r"""
     Computes the element-wise subtraction of input tensors.
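
`top_k` survives only as a deprecated alias; the new `topk` adds `dim` and `largest`. A usage sketch, assuming the `Tensor.topk` binding:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.array([1.0, 5.0, 3.0]), ms.float32)
    values, indices = x.topk(2)                 # ([5. 3.], [1 2])
    values, indices = x.topk(2, largest=False)  # ([1. 3.], [0 2])
    print(values, indices)
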
@@ -3166,15 +3499,15 @@ def true_divide(divident, divisor):
     r"""
     Computes the element-wise division of input tensors.
     """
-    return F.div(divident, divisor, None)
+    return F.div(divident, divisor, rounding_mode=None)
 
 
 # pylint: disable=redefined-outer-name
-def triu(
+def triu(input, diagonal=0):
     r"""
     Returns the triangular matrix based on the diagonal.
     """
-    return F.
+    return F.triu(input, diagonal)
 
 
 #############
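
`triu` gains an explicit `diagonal` offset. A sketch, assuming the `Tensor.triu` binding:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.arange(1, 10).reshape(3, 3), ms.float32)
    print(x.triu())            # zeros strictly below the main diagonal
    print(x.triu(diagonal=1))  # zeros on and below the main diagonal
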
@@ -3238,6 +3571,8 @@ def list_append(self_, list_item):
 
 def list_insert(self_, index, obj):
     """Insert into list"""
+    if F.is_sequence_shape_unknown(self_) or not F.isconstant(index) or not F.isconstant(obj):
+        return ListInsert()(self_, index, obj)
     return _insert(self_, index, obj)
 
 
@@ -3262,11 +3597,6 @@ def list_extend(self_, obj):
     return _extend(self_, obj)
 
 
-def list_count(self_, value):
-    """Count the num of value in list"""
-    return _count(self_, value)
-
-
 def dict_get(self_, key_index, default_value=None):
     """Get value by key from dict"""
     if not _haskey(self_, key_index):
@@ -3377,7 +3707,7 @@ def coo_abs(x):
 
 def coo_add(x, y, thresh):
     """Implementation of `add` for COOTensor."""
-    return
+    return F.coo_add(x, y, thresh)
 
 
 ################
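
`coo_add` now forwards to `F.coo_add`, which merges two COO tensors and drops result values whose magnitude falls below `thresh`. A sketch, assuming the `COOTensor.add` binding:

    import numpy as np
    import mindspore as ms
    from mindspore import COOTensor, Tensor

    indices = Tensor(np.array([[0, 1], [1, 2]]), ms.int32)
    values = Tensor(np.array([1.0, 2.0]), ms.float32)
    a = COOTensor(indices, values, (3, 4))
    b = COOTensor(indices, values, (3, 4))
    thresh = Tensor(0.0, ms.float32)
    print(a.add(b, thresh).values)  # [2. 4.]
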
@@ -3399,12 +3729,12 @@ def sparse_ndim_(x):
     return F.tuple_len(x.shape)
 
 
-def bernoulli(
+def bernoulli(input, p=0.5, seed=None):
     """
     Randomly draws binary numbers from a Bernoulli distribution.
     """
     check_is_int(seed, 'bernoulli', 'seed')
-    return F.bernoulli(
+    return F.bernoulli(input, p, seed)
 
 
 def gather_nd(input_x, indices):
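
`bernoulli` now exposes `p` and `seed` explicitly; `p` may be a float or a tensor of per-element probabilities. A sketch, assuming the `Tensor.bernoulli` binding:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.ones((2, 3)), ms.float32)
    print(x.bernoulli(p=0.5, seed=11))  # entries drawn as 0.0 or 1.0 with probability p
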
@@ -3415,20 +3745,53 @@ def gather_nd(input_x, indices):
     return F.gather_nd(input_x, indices)
 
 
-def gather(input_x, input_indices, axis):
+def gather(input_x, input_indices, axis, batch_dims=0):
     r"""
     Returns the slice of the input tensor corresponding to the elements of `input_indices` on the specified `axis`.
     Refer to :func:`mindspore.ops.gather` for more detail.
     """
-    return F.gather(input_x, input_indices, axis)
+    return F.gather(input_x, input_indices, axis, batch_dims)
 
 
-def split(
+def split(tensor, split_size_or_sections, axis=0):
     """
-    Splits the
+    Splits the Tensor into chunks along the given axis.
     Refer to :func:`mindspore.ops.split` for more detail.
     """
-    return F.split(
+    return F.split(tensor, split_size_or_sections, axis)
+
+
+def tensor_split(input, indices_or_sections, axis=0):
+    """
+    Splits a tensor into multiple sub-tensors along the given axis.
+    Refer to :func:`mindspore.ops.tensor_split` for more detail.
+    """
+    return F.tensor_split(input, indices_or_sections, axis=axis)
+
+
+def vsplit(input, indices_or_sections):
+    """
+    Splits a tensor into multiple sub-tensors vertically. It is equivalent to `ops.tensor_split` with :math:`axis=0` .
+    Refer to :func:`mindspore.ops.vsplit` for more detail.
+    """
+    return F.vsplit(input, indices_or_sections)
+
+
+def hsplit(input, indices_or_sections):
+    """
+    Splits a tensor into multiple sub-tensors horizontally. It is equivalent to `ops.tensor_split` with :math:`axis=1` .
+    Refer to :func:`mindspore.ops.hsplit` for more detail.
+    """
+    return F.hsplit(input, indices_or_sections)
+
+
+def dsplit(input, indices_or_sections):
+    """
+    Splits a tensor into multiple sub-tensors along the 3rd axis.
+    It is equivalent to `ops.tensor_split` with :math:`axis=2` .
+    Refer to :func:`mindspore.ops.tensor_split` for more detail.
+    """
+    return F.dsplit(input, indices_or_sections)
 
 
 def xlogy(x, y):
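
Alongside the reworked `split` and the new `batch_dims` argument on `gather`, a whole family of NumPy/Torch-style splitters (`tensor_split`, `vsplit`, `hsplit`, `dsplit`) arrives, all thin forwarders. A usage sketch, assuming the corresponding `Tensor` bindings:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.arange(12).reshape(3, 4), ms.float32)
    halves = x.tensor_split(2, axis=1)  # two (3, 2) chunks
    rows = x.vsplit(3)                  # three (1, 4) chunks, axis 0
    cols = x.hsplit(2)                  # two (3, 2) chunks, axis 1
    print(len(halves), len(rows), len(cols))
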
@@ -3463,6 +3826,20 @@ def isfinite(x):
     return F.isfinite(x)
 
 
+def sin(x):
+    r"""
+    For details, please refer to :func:`mindspore.ops.sin`.
+    """
+    return F.sin(x)
+
+
+def sinc(x):
+    r"""
+    For details, please refer to :func:`mindspore.ops.sinc`.
+    """
+    return F.sinc(x)
+
+
 def cos(x):
     r"""
     Computes cosine of input element-wise.
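
`sin` and `sinc` join the trigonometric forwarders; note that `ops.sinc` computes the normalized sinc, sin(πx)/(πx), with value 1 at x = 0. A sketch:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.array([0.0, 0.5]), ms.float32)
    print(x.sin())   # [0.         0.47942555]
    print(x.sinc())  # [1.         0.63661975], since sinc(0.5) = 2/pi
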
@@ -3470,6 +3847,13 @@ def cos(x):
     return F.cos(x)
 
 
+def cov(x, *, correction=1, fweights=None, aweights=None):
+    r"""
+    For details, please refer to :func:`mindspore.ops.cov`.
+    """
+    return F.cov(x, correction=correction, fweights=fweights, aweights=aweights)
+
+
 def acos(x):
     r"""
     Computes arccosine of input tensors element-wise.
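
`cov` forwards with keyword-only `correction`, `fweights`, and `aweights`; rows are treated as variables and columns as observations, matching `ops.cov`. A sketch:

    import numpy as np
    import mindspore as ms

    # Two variables, three observations each.
    x = ms.Tensor(np.array([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]]), ms.float32)
    print(x.cov())              # sample covariance (correction=1): [[ 1. -1.] [-1.  1.]]
    print(x.cov(correction=0))  # population covariance
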
@@ -3484,18 +3868,18 @@ def asin(x):
     return F.asin(x)
 
 
-def acosh(
+def acosh(input):
     r"""
     Computes inverse hyperbolic cosine of the inputs element-wise.
     """
-    return F.acosh(
+    return F.acosh(input)
 
 
-def add(
+def add(input, other):
     r"""
     Computes the element-wise addition of input tensors.
     """
-    return F.add(
+    return F.add(input, other)
 
 
 def addr(x, vec1, vec2, beta=1, alpha=1):
@@ -3523,7 +3907,7 @@ def addmv(x, mat, vec, beta=1, alpha=1):
     r"""
     Multiplies matrix `mat` and vector `vec`. The vector `x` is added to the final result.
     """
-    return F.addmv(x, mat, vec, beta, alpha)
+    return F.addmv(x, mat, vec, beta=beta, alpha=alpha)
 
 
 def adjoint(x):
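
`addmv` now passes `beta` and `alpha` by keyword, which guards against positional mix-ups in `F.addmv`. The computation is `beta * x + alpha * (mat @ vec)`:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.array([1.0, 1.0]), ms.float32)
    mat = ms.Tensor(np.array([[1.0, 2.0], [3.0, 4.0]]), ms.float32)
    vec = ms.Tensor(np.array([1.0, 1.0]), ms.float32)
    print(x.addmv(mat, vec))  # beta*x + alpha*(mat @ vec) = [4. 8.]
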
@@ -3540,11 +3924,11 @@ def asinh(x):
     return F.asinh(x)
 
 
-def atan(
+def atan(input):
     r"""
     Computes inverse tangent of the input element-wise.
     """
-    return F.atan(
+    return F.atan(input)
 
 
 def atanh(x):
@@ -3744,32 +4128,15 @@ def cumprod(input, dim, dtype=None):
 
 
 def multiply(input, other):
-    """
-    Multiplies two tensors element-wise.
-
-    .. math::
-
-        out_{i} = x_{i} * y_{i}
-
-    Refer to :func:`mindspore.ops.mul` for more details.
-
-    Supported platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Example:
-        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
-        >>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
-        >>> output = x.multiply(y)
-        [4.0 10.0 18.0]
-    """
+    """For details, please refer to :func:`mindspore.ops.multiply`."""
     return F.multiply(input, other)
 
 
-def div(input,
+def div(input, value, *, rounding_mode=None):
     r"""
-    Divides the tensor `input` by the given input tensor `
+    Divides the tensor `input` by the given input tensor `value` in floating-point type element-wise.
     """
-    return F.div(input,
+    return F.div(input, value, rounding_mode=rounding_mode)
 
 
 def equal(x, y):
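
The rebuilt `div` takes a keyword-only `rounding_mode`, accepting `None` (true division), `'floor'`, or `'trunc'`, mirroring `ops.div`. A sketch:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.array([7.0, -7.0]), ms.float32)
    y = ms.Tensor(np.array([2.0, 2.0]), ms.float32)
    print(x.div(y))                         # [ 3.5 -3.5]
    print(x.div(y, rounding_mode='floor'))  # [ 3. -4.]
    print(x.div(y, rounding_mode='trunc'))  # [ 3. -3.]
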
@@ -3927,6 +4294,34 @@ def ne(input, other):
     return F.ne(input, other)
 
 
+def not_equal(x, other):
+    r"""
+    Computes the non-equivalence of two tensors element-wise.
+    """
+    return F.not_equal(x, other)
+
+
+def sign(x):
+    r"""
+    For details, please refer to :func:`mindspore.ops.sign`.
+    """
+    return F.sign(x)
+
+
+def signbit(x):
+    """
+    For details, please refer to :func:`mindspore.ops.signbit`.
+    """
+    return F.signbit(x)
+
+
+def sgn(x):
+    """
+    For details, please refer to :func:`mindspore.ops.sgn`.
+    """
+    return F.sgn(x)
+
+
 def sinh(input):
     r"""
     Computes hyperbolic sine of the input element-wise.
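
`sign`, `signbit`, and `sgn` are added together: `sign` gives -1/0/1, `signbit` a boolean mask of the sign bit, and `sgn` extends `sign` to complex input. A sketch of the real-valued pair:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.array([-2.0, 0.0, 3.0]), ms.float32)
    print(x.sign())     # [-1.  0.  1.]
    print(x.signbit())  # [ True False False]
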
@@ -3934,11 +4329,16 @@ def sinh(input):
     return F.sinh(input)
 
 
-def sort(input,
+def sort(input, axis=-1, descending=False):
     r"""
     Sorts the elements of the input tensor along a given dimension in ascending order by value.
     """
-    return
+    return F.sort(input, axis=axis, descending=descending)
+
+
+def argsort(input, axis=-1, descending=False):
+    """For details, please refer to :func:`mindspore.ops.argsort`."""
+    return F.argsort(input, axis, descending)
 
 
 def trunc(input):
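
`sort` now routes through `F.sort` with explicit keywords, and `argsort` is new; `sort` yields both the sorted values and their original indices. A sketch:

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.array([3.0, 1.0, 2.0]), ms.float32)
    values, indices = x.sort()          # ([1. 2. 3.], [1 2 0])
    order = x.argsort(descending=True)  # [0 2 1]
    print(values, indices, order)
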
@@ -3948,8 +4348,80 @@ def trunc(input):
     return F.trunc(input)
 
 
+def where(x, condition, y):
+    r"""
+    Returns a tensor whose elements are selected from either `x` or `y` depending on `condition`.
+    Please refer to :func:`mindspore.ops.where`.
+    """
+    return F.where(condition, x, y)
+
+
 def imag(input):
     r"""
     Returns a new tensor containing imaginary value of the input.
     """
     return F.imag(input)
+
+
+def diff(x, n=1, axis=-1, prepend=None, append=None):
+    r"""
+    For details, please refer to :func:`mindspore.ops.diff`.
+    """
+    return F.diff(x, n, axis, prepend, append)
+
+
+def frac(x):
+    r"""
+    For details, please refer to :func:`mindspore.ops.frac`.
+    """
+    return F.frac(x)
+
+
+def argwhere(input):
+    r"""
+    For details, please refer to :func:`mindspore.ops.argwhere`.
+    """
+    return F.argwhere(input)
+
+
+def moveaxis(input, source, destination):
+    r"""
+    For details, please refer to :func:`mindspore.ops.moveaxis`.
+    """
+    return F.moveaxis(input, source, destination)
+
+
+def movedim(input, source, destination):
+    r"""
+    For details, please refer to :func:`mindspore.ops.movedim`.
+    """
+    return F.movedim(input, source, destination)
+
+
+def nextafter(input, other):
+    r"""
+    For details, please refer to :func:`mindspore.ops.nextafter`.
+    """
+    return F.nextafter(input, other)
+
+
+def qr(input, some=True):
+    r"""
+    For details, please refer to :func:`mindspore.ops.qr`.
+    """
+    check_bool_type(some, 'some', 'Tensor.qr')
+    return F.qr(input, 'reduced' if some else 'complete')
+
+
+def amax(input, axis=None, keep_dims=False):
+    r"""
+    For details, please refer to :func:`mindspore.ops.amax`.
+    """
+    return F.amax(input, axis, keep_dims)
+
+
+def amin(input, axis=None, keep_dims=False):
+    r"""
+    For details, please refer to :func:`mindspore.ops.amin`.
+    """
+    return F.amin(input, axis, keep_dims)