mindspore 1.10.0__cp37-none-any.whl → 2.0.0rc1__cp37-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +9064 -0
- mindspore/__init__.py +9 -4
- mindspore/_akg/akg/composite/build_module.py +11 -0
- mindspore/_akg/akg/config/repository_cuda.json +11 -0
- mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -3
- mindspore/_c_dataengine.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/builtin_operations.py +32 -4
- mindspore/_extends/graph_kernel/model/graph_split.py +66 -222
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +12 -9
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +119 -26
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -50
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -6
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -25
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -27
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +17 -2
- mindspore/_extends/parse/parser.py +193 -34
- mindspore/_extends/parse/resources.py +7 -8
- mindspore/_extends/parse/standard_method.py +1780 -435
- mindspore/_extends/parse/trope.py +3 -1
- mindspore/_mindspore_offline_debug.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +53 -58
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/adasum.py +3 -2
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +46 -26
- mindspore/boost/dim_reduce.py +6 -5
- mindspore/boost/grad_accumulation.py +2 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +11 -10
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +57 -0
- mindspore/common/api.py +582 -297
- mindspore/common/dtype.py +66 -18
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +38 -1
- mindspore/common/jit_config.py +25 -13
- mindspore/common/mutable.py +53 -24
- mindspore/common/parameter.py +60 -37
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +927 -0
- mindspore/common/tensor.py +1627 -3900
- mindspore/communication/__init__.py +10 -5
- mindspore/communication/_comm_helper.py +78 -214
- mindspore/communication/_hccl_management.py +2 -1
- mindspore/communication/management.py +136 -47
- mindspore/config/op_info.config +501 -1008
- mindspore/config/super_bar_config.json +512 -0
- mindspore/context.py +291 -56
- mindspore/dataset/__init__.py +12 -8
- mindspore/dataset/audio/__init__.py +9 -9
- mindspore/dataset/audio/transforms.py +1090 -228
- mindspore/dataset/audio/utils.py +87 -39
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +17 -15
- mindspore/dataset/core/config.py +246 -17
- mindspore/dataset/core/py_util_helpers.py +4 -3
- mindspore/dataset/core/validator_helpers.py +10 -10
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +9 -9
- mindspore/dataset/engine/datasets.py +648 -477
- mindspore/dataset/engine/datasets_audio.py +165 -167
- mindspore/dataset/engine/datasets_standard_format.py +93 -67
- mindspore/dataset/engine/datasets_text.py +492 -342
- mindspore/dataset/engine/datasets_user_defined.py +85 -50
- mindspore/dataset/engine/datasets_vision.py +1224 -699
- mindspore/dataset/engine/graphdata.py +134 -69
- mindspore/dataset/engine/iterators.py +50 -9
- mindspore/dataset/engine/offload.py +52 -31
- mindspore/dataset/engine/samplers.py +27 -24
- mindspore/dataset/engine/serializer_deserializer.py +14 -15
- mindspore/dataset/engine/validators.py +213 -52
- mindspore/dataset/text/__init__.py +10 -8
- mindspore/dataset/text/transforms.py +152 -57
- mindspore/dataset/text/utils.py +98 -49
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +4 -2
- mindspore/dataset/transforms/c_transforms.py +11 -13
- mindspore/dataset/transforms/py_transforms.py +2 -2
- mindspore/dataset/transforms/py_transforms_util.py +10 -0
- mindspore/dataset/transforms/transforms.py +13 -15
- mindspore/dataset/transforms/validators.py +7 -7
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/browse_dataset.py +13 -13
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +8 -7
- mindspore/dataset/vision/c_transforms.py +125 -126
- mindspore/dataset/vision/py_transforms.py +37 -37
- mindspore/dataset/vision/py_transforms_util.py +23 -20
- mindspore/dataset/vision/transforms.py +316 -315
- mindspore/dataset/vision/utils.py +313 -17
- mindspore/dataset/vision/validators.py +6 -6
- mindspore/default_config.py +0 -1
- mindspore/{compression → experimental}/__init__.py +6 -5
- mindspore/experimental/map_parameter.py +275 -0
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +70 -9
- mindspore/include/api/delegate.h +8 -1
- mindspore/include/api/dual_abi_helper.h +8 -24
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_group.h +68 -0
- mindspore/include/api/model_parallel_runner.h +17 -17
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +20 -4
- mindspore/include/api/status.h +7 -1
- mindspore/include/api/types.h +25 -21
- mindspore/include/api/visible.h +4 -0
- mindspore/include/c_api/model_c.h +5 -0
- mindspore/include/c_api/status_c.h +1 -1
- mindspore/include/dataset/config.h +1 -1
- mindspore/include/dataset/constants.h +14 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/include/dataset/vision.h +56 -117
- mindspore/include/dataset/vision_lite.h +102 -0
- mindspore/include/mindapi/base/type_id.h +42 -3
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libicudata.so.69 +0 -0
- mindspore/lib/libicui18n.so.69 +0 -0
- mindspore/lib/libicuuc.so.69 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/{libakg.so → plugin/cpu/libakg.so} +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/log.py +28 -28
- mindspore/mindrecord/common/exceptions.py +2 -4
- mindspore/mindrecord/filereader.py +19 -1
- mindspore/mindrecord/filewriter.py +250 -88
- mindspore/mindrecord/mindpage.py +13 -13
- mindspore/mindrecord/shardheader.py +15 -15
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +29 -29
- mindspore/mindrecord/tools/cifar100_to_mr.py +9 -9
- mindspore/mindrecord/tools/cifar10_to_mr.py +9 -9
- mindspore/mindrecord/tools/csv_to_mr.py +4 -4
- mindspore/mindrecord/tools/imagenet_to_mr.py +70 -65
- mindspore/mindrecord/tools/mnist_to_mr.py +41 -41
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/nn/__init__.py +1 -5
- mindspore/nn/cell.py +297 -234
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +17 -42
- mindspore/nn/layer/__init__.py +7 -4
- mindspore/nn/layer/activation.py +131 -88
- mindspore/nn/layer/basic.py +313 -613
- mindspore/nn/layer/channel_shuffle.py +103 -0
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +52 -6
- mindspore/nn/layer/conv.py +112 -43
- mindspore/nn/layer/dense.py +10 -9
- mindspore/nn/layer/embedding.py +36 -34
- mindspore/nn/layer/image.py +123 -27
- mindspore/nn/layer/math.py +108 -107
- mindspore/nn/layer/normalization.py +212 -366
- mindspore/nn/layer/padding.py +370 -42
- mindspore/nn/layer/pooling.py +1443 -219
- mindspore/nn/layer/rnn_cells.py +11 -16
- mindspore/nn/layer/rnns.py +38 -39
- mindspore/nn/layer/thor_layer.py +24 -25
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +9 -6
- mindspore/nn/loss/loss.py +678 -142
- mindspore/nn/metrics.py +53 -0
- mindspore/nn/optim/_dist_optimizer_registry.py +2 -2
- mindspore/nn/optim/ada_grad.py +8 -8
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +18 -14
- mindspore/nn/optim/adam.py +429 -87
- mindspore/nn/optim/adamax.py +5 -6
- mindspore/nn/optim/adasum.py +10 -8
- mindspore/nn/optim/asgd.py +7 -7
- mindspore/nn/optim/ftrl.py +81 -11
- mindspore/nn/optim/lamb.py +7 -8
- mindspore/nn/optim/lars.py +4 -4
- mindspore/nn/optim/lazyadam.py +82 -7
- mindspore/nn/optim/momentum.py +8 -7
- mindspore/nn/optim/optimizer.py +19 -10
- mindspore/nn/optim/proximal_ada_grad.py +6 -5
- mindspore/nn/optim/rmsprop.py +3 -3
- mindspore/nn/optim/rprop.py +20 -16
- mindspore/nn/optim/sgd.py +21 -15
- mindspore/nn/optim/thor.py +23 -21
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +7 -6
- mindspore/nn/probability/bijector/invert.py +4 -2
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/__init__.py +6 -0
- mindspore/nn/probability/distribution/_utils/custom_ops.py +3 -2
- mindspore/nn/probability/distribution/_utils/utils.py +11 -17
- mindspore/nn/probability/distribution/bernoulli.py +6 -6
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +9 -9
- mindspore/nn/probability/distribution/cauchy.py +8 -8
- mindspore/nn/probability/distribution/distribution.py +12 -6
- mindspore/nn/probability/distribution/exponential.py +5 -5
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +6 -5
- mindspore/nn/probability/distribution/gumbel.py +5 -5
- mindspore/nn/probability/distribution/half_normal.py +133 -0
- mindspore/nn/probability/distribution/laplace.py +128 -0
- mindspore/nn/probability/distribution/log_normal.py +0 -1
- mindspore/nn/probability/distribution/logistic.py +4 -5
- mindspore/nn/probability/distribution/normal.py +11 -15
- mindspore/nn/probability/distribution/poisson.py +6 -2
- mindspore/nn/probability/distribution/student_t.py +150 -0
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +5 -5
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +8 -1
- mindspore/nn/wrap/cell_wrapper.py +55 -27
- mindspore/nn/wrap/grad_reducer.py +20 -11
- mindspore/nn/wrap/loss_scale.py +47 -30
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +46 -42
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +26 -19
- mindspore/numpy/utils.py +1 -8
- mindspore/numpy/utils_const.py +112 -62
- mindspore/ops/__init__.py +6 -3
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +209 -152
- mindspore/ops/_grad/grad_base.py +55 -17
- mindspore/ops/_grad/grad_clip_ops.py +11 -3
- mindspore/ops/_grad/grad_comm_ops.py +58 -47
- mindspore/ops/_grad/grad_implementations.py +21 -61
- mindspore/ops/_grad/grad_inner_ops.py +48 -6
- mindspore/ops/_grad/grad_math_ops.py +306 -161
- mindspore/ops/_grad/grad_nn_ops.py +192 -181
- mindspore/ops/_grad/grad_other_ops.py +1 -1
- mindspore/ops/_grad/grad_quant_ops.py +5 -5
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +15 -9
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +441 -55
- mindspore/ops/_grad_experimental/grad_image_ops.py +25 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +3 -44
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +16 -21
- mindspore/ops/_grad_experimental/grad_math_ops.py +979 -49
- mindspore/ops/_grad_experimental/grad_nn_ops.py +78 -8
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +197 -13
- mindspore/ops/_op_impl/__init__.py +3 -3
- mindspore/ops/_op_impl/_custom_op/__init__.py +0 -1
- mindspore/ops/_op_impl/_custom_op/_basic.py +0 -1
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +4 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +5 -5
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +3 -3
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +4 -8
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +0 -1
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -1
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +238 -3
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
- mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
- mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
- mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
- mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/cauchy.py} +17 -10
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/cholesky.py +1 -1
- mindspore/ops/_op_impl/{cpu/bias_add.py → aicpu/choleskygrad.py} +9 -7
- mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
- mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
- mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +2 -2
- mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
- mindspore/ops/_op_impl/aicpu/diag.py +36 -0
- mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
- mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
- mindspore/ops/_op_impl/{cpu/bias_add_grad.py → aicpu/digamma.py} +9 -7
- mindspore/ops/_op_impl/aicpu/eig.py +35 -0
- mindspore/ops/_op_impl/aicpu/fft_with_size.py +41 -0
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/glu.py +33 -0
- mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/{tbe/scatter_add_ds.py → aicpu/inplace_index_add.py} +17 -21
- mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
- mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/lgamma.py +32 -0
- mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
- mindspore/ops/_op_impl/aicpu/logit.py +33 -0
- mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
- mindspore/ops/_op_impl/aicpu/masked_scatter.py +39 -0
- mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
- mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
- mindspore/ops/_op_impl/aicpu/matrix_power.py +32 -0
- mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
- mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +2 -0
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
- mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/qr.py +36 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +3 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/range.py +36 -0
- mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
- mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/search_sorted.py +12 -6
- mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sort.py +39 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
- mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
- mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
- mindspore/ops/_op_impl/{tbe/slice_ds.py → aicpu/sparse_segment_sum.py} +16 -24
- mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
- mindspore/ops/_op_impl/aicpu/squared_difference.py +2 -0
- mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +93 -0
- mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +66 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/{tbe/gather_v2.py → aicpu/tile.py} +24 -24
- mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/__init__.py +1 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/maximum_grad.py +2 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/cpu/pyexecute.py} +13 -8
- mindspore/ops/_op_impl/cpu/reduce_sum.py +8 -0
- mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -608
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +42 -0
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -1
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +41 -0
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +1 -0
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +2 -0
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +40 -0
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -2
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -2
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +1 -1
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/greater.py +2 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -1
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -6
- mindspore/ops/_op_impl/tbe/{greater_ds.py → reduce_all_ds.py} +13 -16
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +39 -0
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/scatter_add.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +2 -2
- mindspore/ops/_op_impl/tbe/slice.py +26 -15
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +1 -0
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +1 -1
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +2 -0
- mindspore/ops/_primitive_cache.py +3 -2
- mindspore/ops/_register_for_op.py +11 -0
- mindspore/ops/_utils/__init__.py +1 -1
- mindspore/ops/_utils/utils.py +20 -41
- mindspore/ops/_vmap/__init__.py +2 -2
- mindspore/ops/_vmap/vmap_array_ops.py +170 -78
- mindspore/ops/_vmap/vmap_base.py +24 -10
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -9
- mindspore/ops/_vmap/vmap_image_ops.py +52 -0
- mindspore/ops/_vmap/vmap_math_ops.py +77 -6
- mindspore/ops/_vmap/vmap_nn_ops.py +78 -29
- mindspore/ops/_vmap/vmap_other_ops.py +3 -1
- mindspore/ops/_vmap/vmap_random_ops.py +55 -3
- mindspore/ops/_vmap/vmap_sparse_ops.py +1 -0
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Argmax_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/Argmin_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/Assign_bprop.mindir +16 -16
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +28 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +306 -0
- mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +12 -8
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DType_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/Depend_bprop.mindir +12 -13
- mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -24
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -14
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Equal_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +54 -0
- mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +18 -15
- mindspore/ops/bprop_mindir/GatherD_bprop.mindir +26 -0
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +57 -0
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/Greater_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/IOU_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/IsInf_bprop.mindir +13 -10
- mindspore/ops/bprop_mindir/IsNan_bprop.mindir +14 -11
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Less_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +22 -19
- mindspore/ops/bprop_mindir/Load_bprop.mindir +12 -13
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NonZero_bprop.mindir +14 -0
- mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +25 -23
- mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +13 -13
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/Range_bprop.mindir +21 -19
- mindspore/ops/bprop_mindir/Rank_bprop.mindir +11 -11
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +18 -17
- mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +18 -17
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +19 -23
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +60 -0
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +89 -0
- mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +52 -0
- mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/Round_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +24 -0
- mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/Select_bprop.mindir +30 -34
- mindspore/ops/bprop_mindir/Shape_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Sign_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/Slice_bprop.mindir +26 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +28 -0
- mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Split_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +54 -0
- mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +95 -0
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +98 -0
- mindspore/ops/bprop_mindir/Switch_bprop.mindir +28 -32
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +14 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TransShape_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +18 -15
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +11 -13
- mindspore/ops/bprop_mindir/Unique_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Unstack_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/__init__.py +1 -4
- mindspore/ops/bprop_mindir/generate_mindir.py +32 -20
- mindspore/ops/composite/__init__.py +12 -13
- mindspore/ops/composite/base.py +261 -254
- mindspore/ops/composite/env_ops.py +41 -0
- mindspore/ops/composite/math_ops.py +197 -156
- mindspore/ops/composite/multitype_ops/_compile_utils.py +428 -176
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +188 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +23 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +3 -3
- mindspore/ops/composite/multitype_ops/equal_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +52 -5
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +15 -3
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +33 -2
- mindspore/ops/composite/multitype_ops/less_impl.py +33 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -7
- mindspore/ops/composite/multitype_ops/not_in_impl.py +15 -3
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +62 -70
- mindspore/ops/composite/multitype_ops/sub_impl.py +3 -3
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +41 -4
- mindspore/ops/function/__init__.py +323 -8
- mindspore/ops/function/array_func.py +3511 -780
- mindspore/ops/function/clip_func.py +329 -0
- mindspore/ops/function/debug_func.py +6 -6
- mindspore/ops/function/grad/__init__.py +5 -1
- mindspore/ops/function/grad/grad_func.py +736 -65
- mindspore/ops/function/image_func.py +270 -0
- mindspore/ops/function/linalg_func.py +268 -8
- mindspore/ops/function/math_func.py +8032 -3164
- mindspore/ops/function/nn_func.py +5619 -1855
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +11 -10
- mindspore/ops/function/random_func.py +939 -77
- mindspore/ops/function/sparse_func.py +249 -84
- mindspore/ops/function/sparse_unary_func.py +2303 -0
- mindspore/ops/function/spectral_func.py +146 -0
- mindspore/ops/function/vmap_func.py +114 -0
- mindspore/ops/functional.py +182 -254
- mindspore/ops/op_info_register.py +79 -34
- mindspore/ops/operations/__init__.py +210 -118
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +25 -15
- mindspore/ops/operations/_grad_ops.py +447 -322
- mindspore/ops/operations/_inner_ops.py +547 -176
- mindspore/ops/operations/_map_tensor_ops.py +112 -0
- mindspore/ops/operations/_ms_kernel.py +29 -27
- mindspore/ops/operations/_ocr_ops.py +11 -11
- mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
- mindspore/ops/operations/_quant_ops.py +186 -101
- mindspore/ops/operations/_rl_inner_ops.py +122 -61
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1047 -0
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +4 -4
- mindspore/ops/operations/array_ops.py +1428 -1226
- mindspore/ops/operations/comm_ops.py +180 -117
- mindspore/ops/operations/control_ops.py +4 -2
- mindspore/ops/operations/custom_ops.py +185 -98
- mindspore/ops/operations/debug_ops.py +92 -54
- mindspore/ops/operations/image_ops.py +406 -211
- mindspore/ops/operations/inner_ops.py +42 -53
- mindspore/ops/operations/linalg_ops.py +32 -29
- mindspore/ops/operations/math_ops.py +2076 -897
- mindspore/ops/operations/nn_ops.py +1282 -1252
- mindspore/ops/operations/other_ops.py +124 -278
- mindspore/ops/operations/random_ops.py +345 -178
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +502 -157
- mindspore/ops/operations/spectral_ops.py +107 -0
- mindspore/ops/primitive.py +192 -15
- mindspore/ops/vm_impl_registry.py +23 -2
- mindspore/parallel/__init__.py +6 -1
- mindspore/parallel/_auto_parallel_context.py +199 -92
- mindspore/parallel/_cell_wrapper.py +4 -2
- mindspore/parallel/_cost_model_context.py +3 -0
- mindspore/parallel/_dp_allreduce_fusion.py +2 -1
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +167 -28
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +9 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +59 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +160 -35
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +235 -196
- mindspore/parallel/_utils.py +47 -7
- mindspore/parallel/algo_parameter_config.py +5 -1
- mindspore/parallel/checkpoint_transform.py +329 -0
- mindspore/parallel/shard.py +229 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/common/util.py +4 -3
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +249 -0
- mindspore/profiler/parser/aicpu_data_parser.py +38 -39
- mindspore/profiler/parser/ascend_timeline_generator.py +497 -0
- mindspore/profiler/parser/base_timeline_generator.py +471 -0
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +684 -0
- mindspore/profiler/parser/framework_parser.py +42 -16
- mindspore/profiler/parser/hccl_parser.py +158 -158
- mindspore/profiler/parser/hwts_log_parser.py +7 -6
- mindspore/profiler/parser/integrator.py +18 -1579
- mindspore/profiler/parser/minddata_analyzer.py +8 -8
- mindspore/profiler/parser/msadvisor_analyzer.py +14 -27
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +108 -0
- mindspore/profiler/parser/step_trace_parser.py +1 -1
- mindspore/profiler/profiling.py +396 -194
- mindspore/rewrite/__init__.py +6 -2
- mindspore/rewrite/api/node.py +51 -110
- mindspore/rewrite/api/node_type.py +10 -6
- mindspore/rewrite/api/pattern_engine.py +51 -7
- mindspore/rewrite/api/scoped_value.py +64 -53
- mindspore/rewrite/api/symbol_tree.py +108 -61
- mindspore/rewrite/api/tree_node_helper.py +2 -3
- mindspore/{compression/quant/__init__.py → rewrite/ast_creator_register.py} +20 -11
- mindspore/rewrite/ast_helpers/__init__.py +6 -3
- mindspore/rewrite/ast_helpers/ast_creator.py +115 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +99 -1
- mindspore/rewrite/ast_helpers/ast_modifier.py +17 -4
- mindspore/rewrite/ast_helpers/ast_replacer.py +1 -1
- mindspore/rewrite/ast_transformers/__init__.py +0 -1
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +46 -5
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +6 -3
- mindspore/rewrite/common/__init__.py +2 -0
- mindspore/rewrite/common/event.py +1 -1
- mindspore/rewrite/common/observable.py +1 -1
- mindspore/rewrite/common/observer.py +1 -1
- mindspore/rewrite/common/rewrite_elog.py +35 -0
- mindspore/rewrite/namer.py +2 -2
- mindspore/rewrite/namespace.py +14 -4
- mindspore/rewrite/node.py +161 -13
- mindspore/rewrite/parser.py +0 -1
- mindspore/rewrite/parser_register.py +0 -1
- mindspore/rewrite/parsers/arguments_parser.py +3 -2
- mindspore/rewrite/parsers/assign_parser.py +267 -67
- mindspore/rewrite/parsers/attribute_parser.py +56 -0
- mindspore/rewrite/parsers/class_def_parser.py +191 -108
- mindspore/rewrite/parsers/constant_parser.py +101 -0
- mindspore/rewrite/parsers/container_parser.py +88 -0
- mindspore/rewrite/parsers/for_parser.py +28 -15
- mindspore/rewrite/parsers/function_def_parser.py +21 -5
- mindspore/rewrite/parsers/if_parser.py +11 -28
- mindspore/rewrite/parsers/module_parser.py +9 -6
- mindspore/rewrite/parsers/return_parser.py +3 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +322 -109
- mindspore/rewrite/symbol_tree_builder.py +45 -8
- mindspore/rewrite/symbol_tree_dumper.py +0 -1
- mindspore/rewrite/topological_manager.py +1 -2
- mindspore/run_check/_check_version.py +209 -112
- mindspore/run_check/run_check.py +2 -1
- mindspore/scipy/linalg.py +13 -117
- mindspore/scipy/ops.py +5 -71
- mindspore/scipy/ops_grad.py +1 -25
- mindspore/scipy/ops_wrapper.py +1 -1
- mindspore/scipy/optimize/_bfgs.py +1 -1
- mindspore/scipy/optimize/_lagrange.py +200 -0
- mindspore/scipy/optimize/line_search.py +3 -2
- mindspore/scipy/optimize/minimize.py +43 -6
- mindspore/scipy/sparse/__init__.py +2 -2
- mindspore/scipy/sparse/linalg.py +5 -465
- mindspore/scipy/utils.py +2 -1
- mindspore/scipy/utils_const.py +7 -1
- mindspore/train/__init__.py +6 -4
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +321 -50
- mindspore/train/callback/__init__.py +3 -1
- mindspore/train/callback/_backup_and_restore.py +120 -0
- mindspore/train/callback/_callback.py +8 -8
- mindspore/train/callback/_checkpoint.py +12 -9
- mindspore/train/callback/_early_stop.py +13 -7
- mindspore/train/callback/_history.py +8 -8
- mindspore/train/callback/_lambda_callback.py +6 -6
- mindspore/train/callback/_landscape.py +36 -38
- mindspore/train/callback/_loss_monitor.py +12 -6
- mindspore/train/callback/_lr_scheduler_callback.py +2 -4
- mindspore/train/callback/_on_request_exit.py +212 -0
- mindspore/train/callback/_reduce_lr_on_plateau.py +13 -7
- mindspore/train/callback/_summary_collector.py +27 -19
- mindspore/train/callback/_time_monitor.py +13 -7
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +122 -33
- mindspore/train/dataset_helper.py +28 -87
- mindspore/train/loss_scale_manager.py +4 -7
- mindspore/{nn → train}/metrics/__init__.py +20 -20
- mindspore/{nn → train}/metrics/accuracy.py +12 -10
- mindspore/{nn → train}/metrics/auc.py +4 -4
- mindspore/{nn → train}/metrics/bleu_score.py +4 -4
- mindspore/{nn → train}/metrics/confusion_matrix.py +10 -8
- mindspore/{nn → train}/metrics/cosine_similarity.py +4 -4
- mindspore/{nn → train}/metrics/dice.py +6 -5
- mindspore/{nn → train}/metrics/error.py +7 -5
- mindspore/{nn → train}/metrics/fbeta.py +9 -7
- mindspore/{nn → train}/metrics/hausdorff_distance.py +8 -6
- mindspore/{nn → train}/metrics/loss.py +4 -3
- mindspore/{nn → train}/metrics/mean_surface_distance.py +6 -5
- mindspore/{nn → train}/metrics/metric.py +6 -5
- mindspore/{nn → train}/metrics/occlusion_sensitivity.py +4 -3
- mindspore/{nn → train}/metrics/perplexity.py +5 -4
- mindspore/{nn → train}/metrics/precision.py +5 -4
- mindspore/{nn → train}/metrics/recall.py +5 -4
- mindspore/{nn → train}/metrics/roc.py +7 -6
- mindspore/{nn → train}/metrics/root_mean_square_surface_distance.py +6 -5
- mindspore/{nn → train}/metrics/topk.py +7 -5
- mindspore/train/mind_ir_pb2.py +339 -32
- mindspore/train/model.py +113 -84
- mindspore/train/serialization.py +547 -167
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -12
- mindspore/train/train_thor/convert_utils.py +7 -1
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/train/train_thor/model_thor.py +0 -4
- mindspore/version.py +1 -1
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +4 -3
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +899 -675
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -514
- mindspore/compression/quant/qat.py +0 -636
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -138
- mindspore/nn/probability/dpn/vae/vae.py +0 -122
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -363
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/tbe/bias_add_grad_ds.py +0 -52
- mindspore/ops/_op_impl/tbe/scatter_nd_add_ds.py +0 -43
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Identity_bprop.mindir +0 -9
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/stop_gradient_bprop.mindir +0 -12
- mindspore/ops/composite/array_ops.py +0 -210
- mindspore/ops/composite/clip_ops.py +0 -238
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/ops/operations/sponge_ops.py +0 -3531
- mindspore/ops/operations/sponge_update_ops.py +0 -2546
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- mindspore/run_check/_check_deps_version.py +0 -84
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
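Notable among the rename entries above: the metrics package moved from mindspore.nn to mindspore.train (the `{nn → train}/metrics/*` lines), and mindspore.compression was removed outright. A minimal sketch of the import change the metrics move implies for downstream code, assuming the re-exports in 2.0.0rc1 follow the moved files:

    import numpy as np
    import mindspore as ms
    # mindspore 1.10.0: from mindspore.nn.metrics import Accuracy
    # mindspore 2.0.0rc1, per the {nn -> train}/metrics renames above:
    from mindspore.train.metrics import Accuracy

    metric = Accuracy()
    # two correct predictions out of two -> accuracy 1.0
    metric.update(ms.Tensor(np.array([[0.2, 0.8], [0.9, 0.1]])),
                  ms.Tensor(np.array([1, 0])))
    print(metric.eval())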
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# This is the Python adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
|
|
2
2
|
#
|
|
3
|
-
# Copyright 2020-
|
|
3
|
+
# Copyright 2020-2023 Huawei Technologies Co., Ltd
|
|
4
4
|
#
|
|
5
5
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
6
6
|
# you may not use this file except in compliance with the License.
|
|
@@ -17,28 +17,36 @@
|
|
|
17
17
|
"""standard_method"""
|
|
18
18
|
|
|
19
19
|
from __future__ import absolute_import
|
|
20
|
-
from mindspore import Tensor, CSRTensor, COOTensor
|
|
20
|
+
from mindspore import Tensor, CSRTensor, COOTensor
|
|
21
21
|
from mindspore import dtype as mstype
|
|
22
22
|
from mindspore._c_expression import Tensor as Tensor_
|
|
23
|
-
from mindspore.
|
|
24
|
-
from mindspore.ops.composite.base import _append, _insert, _pop, _list_clear, _reverse, \
|
|
25
|
-
_count, _extend
|
|
23
|
+
from mindspore.common import mutable
|
|
26
24
|
import mindspore.common._monad as monad
|
|
25
|
+
from mindspore.common.sparse_tensor import RowTensorInner
|
|
26
|
+
from mindspore.ops.composite.base import _append, _insert, _pop, _list_clear, _reverse, \
|
|
27
|
+
_extend, _dict_clear, _haskey, _update, _fromkeys
|
|
27
28
|
|
|
28
|
-
from ...
|
|
29
|
+
from ... import _checkparam as validator
|
|
30
|
+
from ..._checkparam import check_is_number, check_reshape_shp, prepare_shape_for_squeeze, \
|
|
31
|
+
check_axis_in_range, check_axis_valid, check_and_canonicalize_axes
|
|
29
32
|
from ...ops import functional as F
|
|
30
33
|
from ...ops import operations as P
|
|
31
|
-
from ...ops
|
|
34
|
+
from ...ops import composite
|
|
35
|
+
from ...ops.composite import tail, MultitypeFuncGraph, env_get, hyper_add, \
|
|
32
36
|
zeros_like, ones_like, repeat_elements
|
|
33
37
|
from ...ops.composite.multitype_ops import _constexpr_utils as const_utils
|
|
34
38
|
from ...ops.composite.multitype_ops import _compile_utils as compile_utils
|
|
35
39
|
from ...ops.operations.math_ops import Median
|
|
36
|
-
from ...ops.operations._inner_ops import Format
|
|
40
|
+
from ...ops.operations._inner_ops import Format, issubclass_
|
|
37
41
|
from ...ops.operations import _csr_ops
|
|
38
|
-
from ...ops.
|
|
42
|
+
from ...ops.operations import _map_tensor_ops
|
|
43
|
+
from ...ops.primitive import constexpr, _primexpr
|
|
39
44
|
from ...common import dtype as mstype
|
|
45
|
+
from ...ops.operations._sequence_ops import ListAppend, ListInsert, SequenceMax, SequenceMin, \
|
|
46
|
+
SequenceIndex
|
|
40
47
|
|
|
41
|
-
__all__ = ['MultitypeFuncGraph', 'env_get',
|
|
48
|
+
__all__ = ['MultitypeFuncGraph', 'env_get',
|
|
49
|
+
'hyper_add', 'zeros_like', 'ones_like']
|
|
42
50
|
|
|
43
51
|
shape_ = P.Shape()
|
|
44
52
|
dtype_ = P.DType()
|
|
@@ -51,8 +59,6 @@ _reduce_sum_default = P.ReduceSum()
|
|
|
51
59
|
_reduce_sum_keepdims = P.ReduceSum(True)
|
|
52
60
|
_mean_keepdims = P.ReduceMean(True)
|
|
53
61
|
_csr_mm = _csr_ops.CSRMM()
|
|
54
|
-
_addcdiv = P.Addcdiv()
|
|
55
|
-
_addcmul = P.Addcmul()
|
|
56
62
|
|
|
57
63
|
itemsize_map = {mstype.bool_: 1, mstype.int8: 1, mstype.uint8: 1,
|
|
58
64
|
mstype.float16: 2, mstype.int16: 2, mstype.uint16: 2,
|
|
@@ -62,7 +68,7 @@ itemsize_map = {mstype.bool_: 1, mstype.int8: 1, mstype.uint8: 1,
|
|
|
62
68
|
nan_tensor = Tensor(float('nan'), dtype=mstype.float32)
|
|
63
69
|
|
|
64
70
|
|
|
65
|
-
def mean(x, axis=
|
|
71
|
+
def mean(x, axis=None, keep_dims=False):
|
|
66
72
|
"""
|
|
67
73
|
Reduces a dimension of a tensor by averaging all elements in the dimension.
|
|
68
74
|
|
|
@@ -91,14 +97,19 @@ def mean(x, axis=(), keep_dims=False):
     return reduce_mean(x, axis)
 
 
-def prod(x, axis=(), keep_dims=False):
+def ndimension(x):
+    """Return the number of tensor dimensions."""
+    return len(x.shape)
+
+
+def prod(input, axis=None, keep_dims=False):
     """
     Reduces a dimension of a tensor by product all elements in the dimension.
 
     Args:
-        x (Tensor): Input Tensor.
+        input (Tensor): Input Tensor.
         axis (Union[None, int, tuple(int), list(int)]): Dimensions of reduction,
-            when axis is None or empty tuple, reduce all dimensions. Default: ().
+            when axis is None or empty tuple, reduce all dimensions. Default: None.
         keep_dims (bool): Whether to keep the reduced dimensions. Default: False.
 
     Returns:
@@ -115,41 +126,41 @@ def prod(x, axis=(), keep_dims=False):
         >>> print(output)
         6.0
     """
-    return F.prod(x, axis, keep_dims)
+    return F.prod(input, axis, keep_dims)
 
 
-def addcdiv(
+def addcdiv(input, tensor1, tensor2, value=1):
     """
-    Performs the element-wise division of tensor
+    Performs the element-wise division of tensor tensor1 by tensor tensor2,
     multiply the result by the scalar value and add it to input_data.
 
     Args:
-
-
-
-        value (Tensor): The multiplier for
+        input (Tensor): The tensor to be added.
+        tensor1 (Tensor): The numerator tensor.
+        tensor2 (Tensor): The denominator tensor.
+        value (Union[Tensor, Number]): The multiplier for tensor1/tensor2. Default: 1.
 
     Returns:
-        Tensor, has the same shape and dtype as
+        Tensor, has the same shape and dtype as tensor1 / tensor2.
     """
-    return
+    return F.addcdiv(input, tensor1, tensor2, value)
 
 
-def addcmul(
+def addcmul(input, tensor1, tensor2, value=1):
     """
-    Performs the element-wise product of tensor
+    Performs the element-wise product of tensor tensor1 and tensor tensor2,
     multiply the result by the scalar value and add it to input_data.
 
     Args:
-
-
-
-        value (Tensor): The multiplier for
+        input (Tensor): The tensor to be added.
+        tensor1 (Tensor): The tensor to be multiplied.
+        tensor2 (Tensor): The tensor to be multiplied.
+        value (Union[Tensor, Number]): The multiplier for tensor1*tensor2. Default: 1.
 
     Returns:
-        Tensor, has the same shape and dtype as
+        Tensor, has the same shape and dtype as tensor1 * tensor2.
     """
-    return
+    return F.addcmul(input, tensor1, tensor2, value)
 
 
 def all_(x, axis=(), keep_dims=False):
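
The new `addcdiv`/`addcmul` wrappers above simply defer to `F.addcdiv`/`F.addcmul`. As a rough sketch of the documented formula (input + value * tensor1 / tensor2, resp. input + value * tensor1 * tensor2), here is an illustrative NumPy reference; the `*_ref` names are placeholders, not MindSpore APIs:

```python
import numpy as np

def addcdiv_ref(inp, tensor1, tensor2, value=1):
    # input + value * (tensor1 / tensor2), elementwise with broadcasting
    return inp + value * (tensor1 / tensor2)

def addcmul_ref(inp, tensor1, tensor2, value=1):
    # input + value * (tensor1 * tensor2), elementwise with broadcasting
    return inp + value * (tensor1 * tensor2)

x = np.array([1.0, 1.0])
print(addcdiv_ref(x, np.array([2.0, 4.0]), np.array([2.0, 2.0])))  # [2. 3.]
print(addcmul_ref(x, np.array([2.0, 4.0]), np.array([2.0, 2.0])))  # [5. 9.]
```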
@@ -171,6 +182,13 @@ def all_(x, axis=(), keep_dims=False):
     return reduce_all(x, axis)
 
 
+def angle(x):
+    r"""
+    For details, please refer to :func:`mindspore.ops.angle`.
+    """
+    return F.angle(x)
+
+
 def any_(x, axis=(), keep_dims=False):
     """
     Check any array element along a given axis evaluate to True.
@@ -189,12 +207,41 @@ def any_(x, axis=(), keep_dims=False):
     return reduce_any(x, axis)
 
 
-def atan2(
+def atan2(input, other):
     r"""
     Computes the first input tensor multiplied by the logarithm of second input tensor element-wise.
     Refer to :func:`mindspore.ops.atan2` for more details.
     """
-    return F.atan2(
+    return F.atan2(input, other)
+
+
+def bincount(x, weights=None, minlength=0):
+    r"""
+    For details, please refer to :func:`mindspore.ops.bincount`.
+    """
+    return F.bincount(x, weights, minlength)
+
+
+def H(x):
+    """Returns a view of a matrix (2-D tensor) conjugated and transposed."""
+    output = x.swapaxes(0, 1)
+    if x.dtype in (mstype.complex64, mstype.complex128):
+        return output.conj()
+    return output
+
+
+def histc(x, bins=100, min=0., max=0.):
+    """
+    For details, please refer to :func:`mindspore.ops.histc`.
+    """
+    return F.histc(x, bins, min, max)
+
+
+def geqrf(x):
+    """
+    For details, please refer to :func:`mindspore.ops.geqrf`.
+    """
+    return F.geqrf(x)
 
 
 def size_(x):
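
The `H()` helper added above composes two existing tensor methods. A minimal NumPy sketch of the same idea, assuming only that `swapaxes` and `conj` behave as in NumPy:

```python
import numpy as np

def conjugate_transpose_ref(x):
    out = np.swapaxes(x, 0, 1)   # transpose the 2-D matrix
    if np.iscomplexobj(x):       # conjugation only changes complex dtypes
        return out.conj()
    return out

m = np.array([[1 + 2j, 3 - 1j], [0 + 1j, 2 + 0j]])
print(conjugate_transpose_ref(m))  # conjugate transpose of m
```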
@@ -210,8 +257,6 @@ def size_(x):
     Returns:
         size(int).
     """
-    if not shape_(x):
-        return size_op_(x) + 1
     return size_op_(x)
 
 
@@ -262,6 +307,27 @@ def strides_(x):
     return strides
 
 
+def slogdet(x):
+    r"""
+    For details, please refer to :func:`mindspore.ops.slogdet`.
+    """
+    return F.slogdet(x)
+
+
+def chunk(x, chunks, axis=0):
+    r"""
+    For details, please refer to :func:`mindspore.ops.chunk`.
+    """
+    return F.chunk(x, chunks, axis)
+
+
+def tril(x, diagonal=0):
+    r"""
+    For details, please refer to :func:`mindspore.ops.tril`.
+    """
+    return F.tril(x, diagonal)
+
+
 def hasattr(x, attr):  # pylint: disable=redefined-builtin
     """
     Return whether an object has the attribute.
@@ -321,6 +387,89 @@ def minimum(x, y):
     return F.minimum(x, y)
 
 
+def multinomial(input, num_samples, replacement=True, seed=None):
+    r"""
+    Returns a tensor sampled from the multinomial probability distribution located in the corresponding
+    row of the input tensor.
+
+    Refer to :func:`mindspore.ops.multinomial` for more detail.
+    """
+    return F.multinomial(input, num_samples, replacement, seed)
+
+
+def tile(x, multiples):
+    r"""
+    Replicates an input tensor with given multiples times.
+
+    Creates a new tensor by replicating `input_x` `multiples` times. The i'th dimension of
+    output tensor has `input_x.shape[i] * multiples[i]` elements, and the values of `input_x`
+    are replicated `multiples[i]` times along the i'th dimension.
+
+    Note:
+        The length of `multiples` must be greater or equal to the length of dimension in `input_x`.
+
+    Args:
+        multiples (tuple[int]): The parameter that specifies the number of replications,
+            the parameter type is tuple, and the data type is int, i.e., :math:`(y_1, y_2, ..., y_S)`.
+            The length of `multiples` cannot be smaller than the length of the shape of `input_x`.
+            Only constant value is allowed.
+
+    Returns:
+        Tensor, has the same data type as the `input_x`. Suppose the length of `multiples` is `d`,
+        the dimension of `input_x` is `input_x.dim`, and the shape of `input_x` is :math:`(x_1, x_2, ..., x_S)`.
+
+        - If `input_x.dim = d`, then the shape of their corresponding positions can be multiplied, and
+          the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_R)`.
+        - If `input_x.dim < d`, fill in multiple 1 in the length of the shape of `input_x` until their
+          lengths are consistent. Such as set the shape of `input_x` as :math:`(1, ..., x_1, x_2, ..., x_S)`,
+          then the shape of their corresponding positions can be multiplied, and the shape of Outputs is
+          :math:`(1*y_1, ..., x_S*y_R)`.
+
+    Raises:
+        TypeError: If `multiples` is not a tuple or its elements are not all int.
+        ValueError: If the elements of `multiples` are not all greater than 0.
+        ValueError: If the length of `multiples` are smaller than the length of dimension in `input_x`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore as ms
+        >>> from mindspore import Tensor
+        >>> input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
+        >>> multiples = (2, 3)
+        >>> output = input_x.tile(multiples)
+        >>> print(output)
+        [[1. 2. 1. 2. 1. 2.]
+         [3. 4. 3. 4. 3. 4.]
+         [1. 2. 1. 2. 1. 2.]
+         [3. 4. 3. 4. 3. 4.]]
+        >>> multiples = (2, 3, 2)
+        >>> output = input_x.tile(multiples)
+        >>> print(output)
+        [[[1. 2. 1. 2.]
+          [3. 4. 3. 4.]
+          [1. 2. 1. 2.]
+          [3. 4. 3. 4.]
+          [1. 2. 1. 2.]
+          [3. 4. 3. 4.]]
+         [[1. 2. 1. 2.]
+          [3. 4. 3. 4.]
+          [1. 2. 1. 2.]
+          [3. 4. 3. 4.]
+          [1. 2. 1. 2.]
+          [3. 4. 3. 4.]]]
+    """
+    return F.tile(x, multiples)
+
+
+def short(x):
+    """
+    Return a copy of the tensor, cast to int16 type, equivalent to self.astype(ms.int16).
+    """
+    return F.cast(x, mstype.int16)
+
+
 def transpose(x, *axis):
     r"""
     Return a view of the tensor with axes transposed.
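
The shape rule in the `tile()` docstring above (left-pad the input shape with 1s, then multiply position-wise) can be checked with a few lines of plain Python; this is an illustrative sketch, not MindSpore code:

```python
def tile_output_shape(input_shape, multiples):
    # Pad input_shape with leading 1s to the length of multiples,
    # then multiply corresponding entries.
    pad = (1,) * (len(multiples) - len(input_shape))
    padded = pad + tuple(input_shape)
    return tuple(m * s for m, s in zip(multiples, padded))

print(tile_output_shape((2, 2), (2, 3)))     # (4, 6), matches the first example
print(tile_output_shape((2, 2), (2, 3, 2)))  # (2, 6, 4), matches the second
```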
@@ -395,10 +544,54 @@ def reshape(x, *shape):
     [ 3.6  0.4]
     [ 0.5 -3.2]]
     """
-    new_shape =
+    new_shape = check_reshape_shp(shape)
     return F.reshape(x, new_shape)
 
 
+def reshape_as(x, other):
+    """
+    Rearranges the input Tensor based on the `other` shape.
+    """
+    return F.reshape(x, other.shape)
+
+
+def reverse(x, axis):
+    """
+    Reverses specific dimensions of a tensor.
+
+    .. warning::
+        The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "input_x".
+
+    Args:
+        - **x** (Tensor) - The target tensor. The data type is Number except float64.
+          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
+        - **axis** (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.
+
+    Outputs:
+        Tensor, has the same shape and type as `x`.
+
+    Raises:
+        TypeError: If `axis` is neither list nor tuple.
+        TypeError: If element of `axis` is not an int.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
+        >>> output = ops.reverse(input_x, axis=[1])
+        >>> print(output)
+        [[4 3 2 1]
+         [8 7 6 5]]
+        >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
+        >>> output = ops.reverse(input_x, axis=[1, 0])
+        >>> print(output)
+        [[8 7 6 5]
+         [4 3 2 1]]
+    """
+    return F.reverse(x, axis)
+
+
 def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
     """
     Reverses variable length slices.
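
`reshape_as` above is a thin convenience over `reshape`. An illustrative NumPy equivalent (element counts must match, as with any reshape):

```python
import numpy as np

def reshape_as_ref(x, other):
    return x.reshape(other.shape)

print(reshape_as_ref(np.arange(6), np.zeros((2, 3))).shape)  # (2, 3)
```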
@@ -479,17 +672,21 @@ def ravel(x):
     return reshape(x, (-1,))
 
 
-def flatten(x, order='C'):
+def flatten(x, order='C', *, start_dim=0, end_dim=-1):
     r"""
-
+    Flatten a tensor along dimensions from `start_dim` to `end_dim`.
 
     Args:
-
-
-        (Fortran-style) order.
+        x (Tensor): Input tensor.
+        order (str, optional): Only 'C' and 'F' are supported. 'C' means to flatten in row-major (C-style) order.
+            'F' means to flatten in column-major (Fortran-style) order. Default: 'C'.
+
+    Keyword Args:
+        start_dim (int, optional): The first dimension to flatten. Default: 0.
+        end_dim (int, optional): The last dimension to flatten. Default: -1.
 
     Returns:
-        Tensor,
+        Tensor. If `x` is a 0-dimensional, a 1-dimensional Tensor will be returned.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -497,6 +694,9 @@ def flatten(x, order='C'):
     Raises:
         TypeError: If `order` is not string type.
         ValueError: If `order` is string type, but not 'C' or 'F'.
+        TypeError: If `start_dim` or `end_dim` is not int.
+        ValueError: If `start_dim` is greater than `end_dim` after canonicalized.
+        ValueError: If `start_dim` or `end_dim` is not in range of [-x.dim, x.dim-1].
 
     Examples:
         >>> import numpy as np
@@ -506,58 +706,28 @@ def flatten(x, order='C'):
         >>> print(output.shape)
         (24,)
     """
-    order =
-    if order == 'C':
-        return F.reshape(x, (-1,))
-
-    perm = F.make_range(0, F.rank(x))
-    new_order = F.tuple_reversed(perm)
-    return F.reshape(F.transpose(x, new_order), (-1,))
+    return F.flatten(x, order, start_dim=start_dim, end_dim=end_dim)
 
 
-def swapaxes(x, axis1, axis2):
+def scatter(self, axis, index, src):
     """
-
-
-
-        axis1 (int): First axis.
-        axis2 (int): Second axis.
-
-    Returns:
-        Transposed tensor, has the same data type as the input.
-
-    Raises:
-        TypeError: If `axis1` or `axis2` is not integer.
-        ValueError: If `axis1` or `axis2` is not in the range of :math:`[-ndim, ndim-1]`.
+    Update the value in `src` to tensor according to the specified index.
+    """
+    return F.scatter(self, axis, index, src)
 
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
 
-
-
-        >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
-        >>> output = x.swapaxes(0, 2)
-        >>> print(output.shape)
-        (4,3,2)
+def swapaxes(input, axis0, axis1):
+    """
+    Interchange two axes of a tensor.
     """
-
+    return F.swapaxes(input, axis0, axis1)
 
-    if axis1 == axis2:
-        return x
-    if axis1 > axis2:
-        axis1, axis2 = axis2, axis1
-
-    perm = F.make_range(0, x.ndim)
-    new_perm = None
-    if axis2 + 1 < x.ndim:
-        new_perm = perm[0:axis1] + perm[axis2:axis2 + 1] + \
-                   perm[axis1 + 1:axis2] + perm[axis1:axis1 + 1] + perm[axis2 + 1:]
-    else:
-        new_perm = perm[0:axis1] + perm[axis2:axis2 + 1] + \
-                   perm[axis1 + 1:axis2] + perm[axis1:axis1 + 1]
 
+def swapdims(x, dim0, dim1):
+    """
+    Interchange two dims of a tensor.
+    """
+    return F.swapdims(x, dim0, dim1)
 
 
 def squeeze(x, axis=None):
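
The new keyword-only `start_dim`/`end_dim` parameters of `flatten()` above collapse only a contiguous run of dimensions. A minimal NumPy sketch of that semantics, assuming C-order flattening; `flatten_range_ref` is an illustrative name:

```python
import numpy as np

def flatten_range_ref(x, start_dim=0, end_dim=-1):
    end_dim = end_dim % x.ndim                     # canonicalize negative index
    head = x.shape[:start_dim]
    tail = x.shape[end_dim + 1:]
    mid = int(np.prod(x.shape[start_dim:end_dim + 1]))
    return x.reshape(head + (mid,) + tail)

x = np.zeros((2, 3, 4))
print(flatten_range_ref(x).shape)               # (24,), full flatten
print(flatten_range_ref(x, start_dim=1).shape)  # (2, 12), keeps the batch dim
```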
@@ -589,25 +759,31 @@ def squeeze(x, axis=None):
     if axis is None:
         return F.squeeze(x)
     # yield squeezed shape based on the axes
-    new_shape =
+    new_shape = prepare_shape_for_squeeze(shape, axis)
     return F.reshape(x, new_shape)
 
 
-def argmax(x, axis=None):
+def unbind(input, dim=0):
+    """For details, please refer to :func:`mindspore.ops.unbind`."""
+    return P.Unstack(axis=dim)(input)
+
+
+def argmax(x, axis=None, keepdims=False):
     """
-    Returns the indices of the maximum values
+    Returns the indices of the maximum values of a tensor across a dimension.
 
     Args:
-        axis (int, optional):
-
-
+        axis (Union[int, None], optional): The dimension to reduce.
+            If `axis` is None, the indices of the maximum value within the
+            flattened input will be returned. Default: None.
+        keepdims (bool, optional): Whether the output tensor retains the
+            specified dimension. Ignored if `axis` is None. Default: False.
 
     Returns:
-        Tensor,
-        shape as a.shape with the dimension along axis removed.
+        Tensor, indices of the maximum values across a dimension.
 
     Raises:
-        ValueError: if axis is out of range.
+        ValueError: if `axis` is out of range.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -619,17 +795,18 @@ def argmax(x, axis=None):
         >>> print(a.argmax())
         5
     """
-
-    x = x.astype(mstype.float32)
+    is_axis_none = False
     if axis is None:
         x = ravel(x)
         axis = 0
-
-
-
+        is_axis_none = True
+    out = P.Argmax(axis, mstype.int64)(x)
+    if keepdims and not is_axis_none:
+        out = expand_dims(out, axis)
+    return out
 
 
-def argmin(x, axis=None):
+def argmin(x, axis=None, keepdims=False):
     """
     Returns the indices of the minimum values along an axis.
 
@@ -638,6 +815,8 @@ def argmin(x, axis=None):
         axis (int, optional): By default, the index is into
             the flattened array, otherwise along the specified axis.
             Defaults to None.
+        keepdims (boolean, optional): Whether the output tensor retains the specified
+            dimension. Ignored if `axis` is None. Default: False.
 
     Returns:
         Tensor, array of indices into the array. It has the same
@@ -656,15 +835,19 @@ def argmin(x, axis=None):
         >>> print(a.argmin())
         0
     """
-    # P.
+    # P.Argmin only supports float
     x = x.astype(mstype.float32)
+    is_axis_none = False
     if axis is None:
         x = ravel(x)
        axis = 0
+        is_axis_none = True
     else:
-        axis =
-
-
+        axis = check_axis_in_range(axis, F.rank(x))
+    out = P.Argmin(axis)(x)
+    if keepdims and not is_axis_none:
+        out = expand_dims(out, axis)
+    return out
 
 
 def argmax_with_value(x, axis=0, keep_dims=False):
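
The rewritten `argmax`/`argmin` above add a `keepdims` path: compute the index tensor, then re-insert the reduced axis. An illustrative NumPy sketch of the same control flow:

```python
import numpy as np

def argmax_keepdims_ref(x, axis=None, keepdims=False):
    if axis is None:               # flattened input; keepdims is ignored
        return np.argmax(x)
    out = np.argmax(x, axis=axis)
    if keepdims:                   # restore the reduced axis with size one
        out = np.expand_dims(out, axis)
    return out

a = np.arange(6).reshape(2, 3)
print(argmax_keepdims_ref(a, axis=1))                       # [2 2]
print(argmax_keepdims_ref(a, axis=1, keepdims=True).shape)  # (2, 1)
```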
@@ -677,7 +860,7 @@ def argmin_with_value(x, axis=0, keep_dims=False):
     return F.min(x, axis, keep_dims)
 
 
-def median(x, global_median, axis=0, keep_dims=False):
+def median(input, global_median, axis=0, keep_dims=False):
     r"""
     Computes the median of input tensor.
 
@@ -685,9 +868,38 @@ def median(x, global_median, axis=0, keep_dims=False):
     When attr `global_median` is True, the second output Tensor value is meaningless.
 
     """
-
+    check_axis_in_range(axis, input.ndim)
     median_ = Median(global_median, axis, keep_dims)
-    return median_(
+    return median_(input)
+
+
+def msort(x):
+    """
+    For details, please refer to :func:`mindspore.ops.msort`.
+    """
+    return F.msort(x)
+
+
+def mm(mat1, mat2):
+    """
+    For details, please refer to :func:`mindspore.ops.mm`.
+    """
+    return F.mm(mat1, mat2)
+
+
+def mT(x):
+    """
+    Returns a view of this tensor with the last two dimensions transposed.
+    x.mT is equivalent to x.transpose(-2, -1).
+    """
+    return swapaxes(x, -2, -1)
+
+
+def nan_to_num(x, nan=0.0, posinf=None, neginf=None):
+    """
+    For details, please refer to :func:`mindspore.ops.nan_to_num`.
+    """
+    return F.nan_to_num(x, nan, posinf, neginf)
 
 
 def cumsum(x, axis=None, dtype=None):
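
`mT` above is defined as a swap of the last two axes, so it works on batched matrices as well as plain 2-D ones. A one-line NumPy illustration:

```python
import numpy as np

def mT_ref(x):
    return np.swapaxes(x, -2, -1)  # transpose only the trailing matrix dims

print(mT_ref(np.zeros((5, 2, 3))).shape)  # (5, 3, 2): batch dim untouched
```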
@@ -730,7 +942,7 @@ def cumsum(x, axis=None, dtype=None):
     if axis is None:
         x = x.ravel()
         axis = 0
-
+    check_axis_in_range(axis, x.ndim)
     if dtype is not None:
         dtype = check_astype_dtype_const(dtype)
         if original_dtype != dtype:
@@ -752,12 +964,19 @@ def cummax(x, axis):
     return F.cummax(x, axis)
 
 
-def index_fill(x,
+def index_fill(x, axis, index, value):
     """
-    Fills the elements under the
+    Fills the elements under the axis dimension of the input Tensor with the input value
     by selecting the indices in the order given in index.
     """
-    return F.index_fill(x,
+    return F.index_fill(x, axis, index, value)
+
+
+def index_select(x, axis, index):
+    """
+    Returns a new tensor which indexes the `x` tensor along dimension `axis` using the entries in `index` .
+    """
+    return F.index_select(x, axis, index)
 
 
 def copy(x):
@@ -798,7 +1017,8 @@ def copy(x):
     return x
 
 
-def max(x, axis=None, keepdims=False, initial=None, where=True):  # pylint: disable=redefined-builtin
+def max(input, axis=None, keepdims=False, *, initial=None,  # pylint: disable=redefined-builtin
+        where=True, return_indices=False):  # pylint: disable=redefined-outer-name
     """
     Returns the maximum of a tensor or maximum along an axis.
 
@@ -808,17 +1028,21 @@ def max(x, axis=None, keepdims=False, initial=None, where=True):  # pylint: disable=redefined-builtin
         axes along which to operate. By default, flattened input is used. If
         this is a tuple of ints, the maximum is selected over multiple axes,
         instead of a single axis or all the axes as before.
-        keepdims (
+        keepdims (bool, optional): defaults to False.
            If this is set to True, the axes which are reduced are left in the
            result as dimensions with size one. With this option, the result will
            broadcast correctly against the input array.
+
+    Keyword Args:
        initial (scalar, optional):
            The minimum value of an output element. Must be present to allow
            computation on empty slice.
-        where (
+        where (bool Tensor, optional): defaults to True.
            A boolean array which is broadcasted to match the dimensions of array,
            and selects elements to include in the reduction. If non-default value
            is passed, initial must also be provided.
+        return_indices (bool, optional): Whether to return the index of the maximum value. Default: False.
+            If `axis` is a list or tuple of ints, it must be False.
 
     Returns:
         Tensor or scalar, maximum of input tensor. If `axis` is None, the result is a scalar
@@ -828,7 +1052,7 @@ def max(x, axis=None, keepdims=False, initial=None, where=True):  # pylint: disable=redefined-builtin
         TypeError: if the input is not a tensor.
 
     Supported Platforms:
-
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> import numpy as np
@@ -839,11 +1063,17 @@ def max(x, axis=None, keepdims=False, initial=None, where=True):  # pylint: disable=redefined-builtin
         >>> print(output)
         3.0
     """
-
-
+    if isinstance(axis, (list, tuple)):
+        return compile_utils.reduce_(input, P.ReduceMax(keepdims), cmp_fn=F.maximum,
+                                     axis=axis, keepdims=keepdims, initial=initial, where=where)
+    values, indices = F.max(input, axis, keepdims, initial=initial, where=where)
+    if not return_indices:
+        return values
+    return values, indices
 
 
-def min(x, axis=None, keepdims=False, initial=None, where=True):  # pylint: disable=redefined-builtin
+def min(input, axis=None, keepdims=False, *, initial=None,  # pylint: disable=redefined-builtin
+        where=True, return_indices=False):  # pylint: disable=redefined-outer-name
     """
     Returns the minimum of a tensor or minimum along an axis.
 
@@ -853,17 +1083,21 @@ def min(x, axis=None, keepdims=False, initial=None, where=True):  # pylint: disable=redefined-builtin
         axes along which to operate. By default, flattened input is used. If
         this is a tuple of ints, the minimum is selected over multiple axes,
         instead of a single axis or all the axes as before.
-        keepdims (
+        keepdims (bool, optional): defaults to False.
            If this is set to True, the axes which are reduced are left in the
            result as dimensions with size one. With this option, the result will
            broadcast correctly against the input array.
+
+    Keyword Args:
        initial (scalar, optional):
            The maximum value of an output element. Must be present to allow
            computation on empty slice.
-        where (
+        where (bool Tensor, optional): defaults to True.
            A boolean array which is broadcasted to match the dimensions of array,
            and selects elements to include in the reduction. If non-default value
            is passed, initial must also be provided.
+        return_indices (bool, optional): Whether to return the index of the minimum value. Default: False.
+            If `axis` is a list or tuple of ints, it must be False.
 
     Returns:
         Tensor or scalar, minimum of `a`. If axis is None, the result is a scalar
@@ -884,8 +1118,13 @@ def min(x, axis=None, keepdims=False, initial=None, where=True):  # pylint: disable=redefined-builtin
         >>> print(output)
         0.0
     """
-
-
+    if isinstance(axis, (list, tuple)):
+        return compile_utils.reduce_(input, P.ReduceMin(keepdims), cmp_fn=F.minimum,
+                                     axis=axis, keepdims=keepdims, initial=initial, where=where)
+    values, indices = F.min(input, axis, keepdims, initial=initial, where=where)
+    if not return_indices:
+        return values
+    return values, indices
 
 
 def pow(x, y):  # pylint: disable=redefined-builtin
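
The `return_indices` contract the rewritten `max()`/`min()` above follow - values only by default, a `(values, indices)` pair on request - can be sketched in NumPy as follows (illustrative only; the real code also handles `initial`, `where`, and tuple axes):

```python
import numpy as np

def max_with_indices_ref(x, axis, return_indices=False):
    values = np.max(x, axis=axis)
    if not return_indices:
        return values
    return values, np.argmax(x, axis=axis)

a = np.array([[0.0, 0.4], [0.5, 0.3]])
print(max_with_indices_ref(a, axis=1))                       # [0.4 0.5]
print(max_with_indices_ref(a, axis=1, return_indices=True))  # (array([0.4, 0.5]), array([1, 0]))
```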
@@ -902,6 +1141,42 @@ def log(x):
     return F.log(x)
 
 
+def log10(input):
+    """
+    Calculate the base-10 logarithm of Tensor.
+    """
+    return F.log10(input)
+
+
+def log2(input):
+    """
+    Calculate the base-2 logarithm of Tensor.
+    """
+    return F.log2(input)
+
+
+def logaddexp(input, other):
+    """
+    Computes the logarithm of the sum of exponentiations of the inputs.
+    """
+    return F.logaddexp(input, other)
+
+
+def logaddexp2(input, other):
+    """
+    Computes the logarithm of the sum of exponentiations in base of 2 of the inputs.
+    """
+    return F.logaddexp2(input, other)
+
+
+def logsumexp(input, axis, keepdims=False):
+    """
+    Reduces a dimension of a tensor by calculating exponential for all elements in the dimension,
+    then calculate logarithm of the sum.
+    """
+    return F.logsumexp(input, axis, keepdims)
+
+
 def round_(x):
     """
     Returns half to even of a tensor element-wise.
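
`logaddexp` above computes log(exp(a) + exp(b)); the point of a dedicated op is numerical stability. A short sketch of the standard stable formulation (NumPy ships the same idea as `np.logaddexp`):

```python
import numpy as np

def logaddexp_ref(a, b):
    hi, lo = np.maximum(a, b), np.minimum(a, b)
    return hi + np.log1p(np.exp(lo - hi))  # exp() argument is <= 0, no overflow

print(logaddexp_ref(1000.0, 1000.0))  # ~1000.6931, where naive exp() overflows
print(np.logaddexp(1000.0, 1000.0))   # same value
```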
@@ -909,11 +1184,84 @@ def round_(x):
     return F.round(x)
 
 
-def
+def roll(x, shifts, dims):
+    """
+    Rolls the elements of a tensor along an axis.
+    """
+    dims = dims if dims is not None else 0
+    return F.Roll(shifts, dims)(x)
+
+
+def rot90(x, k, dims):
+    """
+    Rotate a n-D tensor by 90 degrees in the plane specified by dims axis.
+    """
+    return F.rot90(x, k, dims)
+
+
+def rad2deg(x):
+    """
+    Returns a new tensor with each of the elements of `x` converted from angles in radians to degrees.
+    """
+    return F.rad2deg(x)
+
+
+def deg2rad(x):
+    """
+    Calculates a new tensor with each of the elements of `x` converted from angles in degrees to radians.
+    """
+    return F.deg2rad(x)
+
+
+def dot(input, other):
+    r"""
+    For details, please refer to :func:`mindspore.ops.dot`.
+    """
+    return composite.dot(input, other)
+
+
+def copysign(x, other):
+    """
+    Create a new floating-point tensor with the magnitude of `x` and the sign of `other`, element-wise.
+    """
+    return F.copysign(x, other)
+
+
+def numel(input):
+    """
+    Returns a Scalar of type int that represents the total number of elements in the Tensor.
+    """
+    return F.numel(input)
+
+
+def permute(input, *axis):
+    """
+    Permutes the dimensions of the input tensor according to input permutation.
+    """
+    ndim = F.rank(input)
+    perm = check_transpose_axis_const(axis, ndim)
+    return F.permute(input, perm)
+
+
+def positive(input):
+    """
+    Return self Tensor.
+    """
+    return F.positive(input)
+
+
+def remainder(input, divisor):
+    """
+    Returns element-wise remainder of division.
+    """
+    return F.remainder(input, divisor)
+
+
+def unique_consecutive(input, return_idx=False, return_counts=False, axis=None):
     """
     Returns the elements that are unique in each consecutive group of equivalent elements in the input tensor.
     """
-    return F.unique_consecutive(
+    return F.unique_consecutive(input, return_idx, return_counts, axis)
 
 
 def unique_with_pad(x, pad_num):
@@ -966,6 +1314,13 @@ def resize(x, *new_shape):
     return res.reshape(new_shape)
 
 
+def det(input):
+    """
+    Computes the determinant of one or more square matrices.
+    """
+    return F.det(input)
+
+
 def diagonal(x, offset=0, axis1=0, axis2=1):
     """
     Returns specified diagonals.
@@ -1001,7 +1356,8 @@ def diagonal(x, offset=0, axis1=0, axis2=1):
     """
     ndim = x.ndim
     if ndim < 2:
-        const_utils.raise_value_error(
+        const_utils.raise_value_error(
+            'diagonal requires an array of at least two dimensions')
     dtype = x.dtype
 
     axes = check_axis_valid((axis1, axis2), ndim)
@@ -1028,7 +1384,7 @@ def diagonal(x, offset=0, axis1=0, axis2=1):
     e_upper = F.fill(dtype, (-offset, m), 0)
     e_lower = e[0:n + offset:1, ...]
     e = P.Concat(0)((e_upper, e_lower)).astype(dtype)
-    e =
+    e = F.broadcast_to(e, shape)
 
     prod_val = F.tensor_mul(x, e)
     res = F.reduce_sum(prod_val.astype(mstype.float32), -1)
@@ -1048,6 +1404,27 @@ def diagonal(x, offset=0, axis1=0, axis2=1):
     return res.astype(dtype)
 
 
+def digamma(input):
+    """
+    Computes the logarithmic derivative of the gamma function on input.
+    """
+    return F.digamma(input)
+
+
+def lgamma(input):
+    """
+    Computes the natural logarithm of the absolute value of the gamma function on input.
+    """
+    return F.lgamma(input)
+
+
+def i0(x):
+    """
+    For details, please refer to :func:`mindspore.ops.i0`.
+    """
+    return F.i0(x)
+
+
 def isclose(x1, x2, rtol=1e-05, atol=1e-08, equal_nan=False):
     """
     Returns a boolean tensor where two tensors are element-wise equal within a tolerance.
@@ -1055,51 +1432,137 @@ def isclose(x1, x2, rtol=1e-05, atol=1e-08, equal_nan=False):
     return F.isclose(x1, x2, rtol, atol, equal_nan)
 
 
-def
+def isneginf(input):
     """
-
+    Tests element-wise for negative infinity, returns result as bool array.
     """
-    return F.
+    return F.isneginf(input)
 
 
-def
+def isposinf(input):
     """
-
+    Tests element-wise for positive infinity, returns result as bool array.
     """
-    return F.
+    return F.isposinf(input)
 
 
-def
+def isreal(input):
     """
-
+    Tests element-wise for real number.
+    """
+    return F.isreal(input)
 
-    Args:
-        offset (int, optional): Offset of the diagonal from the main diagonal.
-            Can be positive or negative. Defaults to main diagonal.
-        axis1 (int, optional): Axis to be used as the first axis of the 2-D
-            sub-arrays from which the diagonals should be taken. Defaults to
-            first axis (0).
-        axis2 (int, optional): Axis to be used as the second axis of the 2-D
-            sub-arrays from which the diagonals should be taken. Defaults to
-            second axis.
-        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
-            output Tensor.
 
-
+def flip(x, dims):
+    """
+    For details, please refer to :func:`mindspore.ops.flip`.
+    """
+    return F.flip(x, dims)
 
-    Raises:
-        ValueError: if the input tensor has less than two dimensions.
 
-
+def fliplr(x):
+    """
+    For details, please refer to :func:`mindspore.ops.fliplr`.
+    """
+    return F.fliplr(x)
 
-
-
-
+
+def flipud(x):
+    """
+    For details, please refer to :func:`mindspore.ops.flipud`.
+    """
+    return F.flipud(x)
+
+
+def float_power(x, exponent):
+    """
+    For details, please refer to :func:`mindspore.ops.float_power`.
+    """
+    return F.float_power(x, exponent)
+
+
+def fmod(x, other):
+    """
+    For details, please refer to :func:`mindspore.ops.fmod`.
+    """
+    return F.fmod(x, other)
+
+
+def is_floating_point(x):
+    """
+    For details, please refer to :func:`mindspore.ops.is_floating_point`.
+    """
+    return F.is_floating_point(x)
+
+
+def is_signed(x):
+    """
+    For details, please refer to :func:`mindspore.ops.is_signed`.
+    """
+    return x.dtype in mstype.signed_type
+
+
+def is_complex(x):
+    """
+    For details, please refer to :func:`mindspore.ops.is_complex`.
+    """
+    return F.is_complex(x)
+
+
+def inv(x):
+    """
+    Computes Reciprocal of input tensor element-wise.
+    """
+    return F.inv(x)
+
+
+def inverse(input):
+    """
+    Computes the inverse of a square matrix.
+    """
+    return F.inverse(input)
+
+
+def invert(x):
+    """
+    Flips all bits of input tensor element-wise.
+    """
+    return F.invert(x)
+
+
+def trace(x, offset=0, axis1=0, axis2=1, dtype=None):
+    """
+    Returns the sum along diagonals of the array.
+
+    Args:
+        offset (int, optional): Offset of the diagonal from the main diagonal.
+            Can be positive or negative. Defaults to main diagonal.
+        axis1 (int, optional): Axis to be used as the first axis of the 2-D
+            sub-arrays from which the diagonals should be taken. Defaults to
+            first axis (0).
+        axis2 (int, optional): Axis to be used as the second axis of the 2-D
+            sub-arrays from which the diagonals should be taken. Defaults to
+            second axis.
+        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
+            output Tensor.
+
+    Returns:
+        Tensor, sum_along_diagonals.
+
+    Raises:
+        ValueError: if the input tensor has less than two dimensions.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> x = np.eye(3)
         >>> print(x.trace())
         3.0
     """
+    if offset == 0 and axis1 == 0 and axis2 == 1 and dtype is None:
+        return F.trace(x)
     d = x.diagonal(offset, axis1=axis1, axis2=axis2)
     shape = d.shape
     if dtype is None:
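
The two lines added to `trace()` above create a fast path: with all-default arguments the call goes straight to `F.trace`, and only the general case pays for `diagonal().sum()`. An illustrative NumPy version of the same dispatch:

```python
import numpy as np

def trace_ref(x, offset=0, axis1=0, axis2=1):
    if offset == 0 and axis1 == 0 and axis2 == 1:
        return np.trace(x)                             # common case, one call
    return np.diagonal(x, offset, axis1, axis2).sum()  # general fallback

print(trace_ref(np.eye(3)))            # 3.0
print(trace_ref(np.eye(3), offset=1))  # 0.0
```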
@@ -1148,14 +1611,15 @@ def take(x, indices, axis=None, mode='clip'):
     [4 3 6]
     """
     if mode not in ('raise', 'wrap', 'clip'):
-        const_utils.raise_value_error(
+        const_utils.raise_value_error(
+            'raise should be one of "raise", "wrap", or "clip"')
     if axis is None:
         a = x.ravel()
         axis = 0
     else:
         a = x
     ndim = a.ndim
-    axis =
+    axis = check_axis_in_range(axis, ndim)
 
     shape_a = a.shape
     shape_indices = indices.shape
@@ -1169,12 +1633,26 @@ def take(x, indices, axis=None, mode='clip'):
     shape_indices = expanded_shape(ndim, size_indices, axis)
     indices = indices.reshape(shape_indices)
     shape_indices = shape_ni + (indices.size,) + shape_nk
-    indices =
+    indices = F.broadcast_to(indices, shape_indices)
 
     res = F.gather_d(a, axis, indices)
     return res.reshape(shape_out)
 
 
+def _infer_out_shape(*shapes):
+    """
+    Returns shape of output after broadcasting. Raises ValueError if shapes cannot be broadcast.
+    """
+    shape_out = list()
+    max_len = ms_max([len(it) for it in shapes])
+    for i in range(max_len):
+        items = [it[i-(max_len-len(it))] if i - (max_len - len(it))
+                 >= 0 else 1 for it in shapes]
+        max_size = 0 if 0 in items else ms_max(items)
+        shape_out.append(max_size)
+    return tuple(shape_out)
+
+
 def choose(x, choices, mode='clip'):
     """
     Construct an array from an index array and a list of arrays to choose from.
@@ -1212,8 +1690,8 @@ def choose(x, choices, mode='clip'):
     [20 31 12 3]
     """
     if check_is_tensor(F.typeof(choices)):
-        shape_choice =
-        choices =
+        shape_choice = _infer_out_shape(x.shape, choices.shape[1:])
+        choices = F.broadcast_to(choices, (choices.shape[0],) + shape_choice)
     else:
         # broadcasts choices to the same shape if choices is a sequence
         choicelist = []
@@ -1223,27 +1701,29 @@ def choose(x, choices, mode='clip'):
             choice = const_utils.make_tensor(choice)
             shapes += (choice.shape,)
             choicelist.append(choice)
-        shape_choice =
+        shape_choice = _infer_out_shape(x.shape, *shapes)
         tmp = []
         for choice in choicelist:
-            tmp.append(
+            tmp.append(F.broadcast_to(choice, shape_choice))
         choices = F.stack(tmp)
 
     if x.ndim == 0 or choices.ndim == 0:
         const_utils.raise_value_error('input cannot be scalars')
-    a =
+    a = F.broadcast_to(x, shape_choice)
     dtype = choices.dtype
     # adjusts dtype for F.tensor_mul and F.gather_nd
     a = a.astype(mstype.int32)
     choices = choices.astype(mstype.int32)
-    a = compile_utils.check_indices(
+    a = compile_utils.check_indices(
+        choices.shape[0], a, mode, allow_negative_index=False)
 
     grids = []
     ndim = len(a.shape)
     for i in range(ndim):
-        dim_grid = const_utils.make_tensor(
+        dim_grid = const_utils.make_tensor(
+            F.make_range(a.shape[i]), mstype.int32)
         dim_shape = expanded_shape(ndim, a.shape[i], i)
-        dim_grid =
+        dim_grid = F.broadcast_to(dim_grid.reshape(dim_shape), a.shape)
         grids.append(dim_grid)
     grid = P.Stack(-1)(grids)
     indices = P.Concat(-1)((a.reshape(a.shape + (1,)), grid))
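
`_infer_out_shape` above right-aligns the input shapes, treats missing leading dimensions as 1, and keeps the largest size per position (with 0 marking an empty dimension); note the helper as shown does not itself raise on mismatched sizes despite its docstring. A plain-Python sketch of the same rule:

```python
def infer_out_shape_ref(*shapes):
    max_len = max(len(s) for s in shapes)
    out = []
    for i in range(max_len):
        # index into each shape from the right; absent dims count as 1
        items = [s[i - (max_len - len(s))] if i - (max_len - len(s)) >= 0 else 1
                 for s in shapes]
        out.append(0 if 0 in items else max(items))
    return tuple(out)

print(infer_out_shape_ref((4, 1, 3), (2, 1)))  # (4, 2, 3)
```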
@@ -1278,6 +1758,12 @@ def searchsorted(x, v, side='left', sorter=None):
     >>> print(x.searchsorted(3))
     2
     """
+    def get_log2_size(size):
+        """Get log2 size"""
+        log2_res = F.log2(F.cast(Tensor(size), mstype.float32))
+        ceil_res = F.ceil(log2_res)
+        cast_res = F.cast(ceil_res, mstype.int64)
+        return cast_res
     if side not in ('left', 'right'):
         const_utils.raise_value_error('invalid value for keyword "side"')
     a = x.astype(mstype.float32)
@@ -1286,7 +1772,8 @@ def searchsorted(x, v, side='left', sorter=None):
     shape = v.shape
     if sorter is not None:
         if sorter.ndim != 1 or sorter.size != a.size:
-            const_utils.raise_value_error(
+            const_utils.raise_value_error(
+                'sorter must be 1-D array with the same size as `a`')
         sorter = const_utils.make_tensor(sorter)
         sorter = sorter.reshape(sorter.shape + (1,))
         a = F.gather_nd(a, sorter)
@@ -1294,43 +1781,20 @@ def searchsorted(x, v, side='left', sorter=None):
     i = F.fill(mstype.int32, shape, 0)
     j = F.fill(mstype.int32, shape, a.size)
 
-
-
+    loop_num = get_log2_size(F.shape_mul(a.shape) + 1)
+    index = Tensor([0])
+    while index < loop_num:
         mid = (i - F.neg_tensor(j)) // 2
         mask = less_op(v, F.gather_nd(a, mid.reshape(mid.shape + (1,))))
         i = F.select(mask, i, mid)
         j = F.select(mask, mid, j)
+        index += 1
     return j
 
 
 def fill(x, value):
     """
-
-
-    Note:
-        Unlike Numpy, tensor.fill() will always returns a new tensor, instead of
-        filling the original tensor.
-
-    Args:
-        value (Union[None, int, float, bool]): All elements of a will be assigned this value.
-
-    Returns:
-        Tensor, with the original dtype and shape as input tensor.
-
-    Raises:
-        TypeError: If input arguments have types not specified above.
-        ValueError: If `shape` has entries < 0.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor
-        >>> a = Tensor(np.arange(4).reshape((2,2)).astype('float32'))
-        >>> print(a.fill(1.0))
-        [[1. 1.]
-         [1. 1.]]
+    `Tensor.fill` is deprecated, please use `ops.fill` instead.
     """
     if value is None:
         if x.dtype not in (mstype.float16, mstype.float32, mstype.float64):
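
The loop rewrite in `searchsorted()` above uses a counted loop of ceil(log2(n + 1)) bisection steps, a bound that always suffices to pin down the insertion point and suits graph mode better than an open-ended loop. A plain-Python sketch; the `<=` comparison here is an inference that reproduces the `side='left'` result of the docstring example (the diff selects its comparison op elsewhere):

```python
import math

def searchsorted_left_ref(a, v):
    i, j = 0, len(a)
    for _ in range(math.ceil(math.log2(len(a) + 1))):  # fixed iteration count
        mid = (i + j) // 2
        if v <= a[mid]:   # 'left': first index with a[idx] >= v
            j = mid
        else:
            i = mid
    return j

print(searchsorted_left_ref([1, 2, 3, 4, 5], 3))  # 2, as in the docstring
```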
@@ -1344,7 +1808,7 @@ def fill(x, value):
 
 def fills(x, value):
     """
-
+    `Tensor.fills` is deprecated, please use `ops.fill` instead.
     """
     return F.fills(x, value)
 
@@ -1384,70 +1848,24 @@ def ptp(x, axis=None, keepdims=False):
     if axis is None:
         axis = ()
     else:
-        check_axis_type(axis, True, True, False)
+        validator.check_axis_type(axis, True, True, False)
         axis = check_axis_valid(axis, x.ndim)
 
     return x.max(axis, keepdims) - x.min(axis, keepdims)
 
 
-def
+def clamp(x, min=None, max=None):
     """
-
-
-
-    For example, if an interval of :math:`[0, 1]` is specified, values smaller than 0 become 0,
-    and values larger than 1 become 1.
-
-    Note:
-        Currently, clip with `nan` is not supported.
-
-    Args:
-        x (Tensor): Tensor containing elements to clip.
-        xmin (Tensor, scalar, None): Minimum value. If None, clipping is not performed
-            on lower interval edge. Not more than one of `xmin` and `xmax` may be None.
-        xmax (Tensor, scalar, None): Maximum value. If None, clipping is not performed
-            on upper interval edge. Not more than one of `xmin` and `xmax` may be None.
-            If `xmin` or `xmax` are tensors, then the three tensors will be broadcasted
-            to match their shapes.
-        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
-            output Tensor.
-
-    Returns:
-        Tensor, a tensor with the elements of `x`, but where values
-        < `xmin` are replaced with `xmin`, and those > `xmax` with `xmax`.
+    Clamps all elements in `x` into the range `[min, max]`.
+    """
+    return F.clamp(x, min, max)
 
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
 
-
-
-
-
-        [1 2 2 0 0 2 2 0]
-    """
-    if xmin is None and xmax is None:
-        const_utils.raise_value_error("One of max or min must be given.")
-    is_scalar = False
-    if xmin is not None:
-        xmin = const_utils.make_tensor(xmin, x.dtype)
-        if x.ndim == 0 and xmin.ndim == 0:
-            x = F.maximum(x.reshape((1,)), xmin).squeeze()
-        else:
-            x = F.maximum(x, xmin)
-    if xmax is not None:
-        xmax = const_utils.make_tensor(xmax, x.dtype)
-        if x.ndim == 0 and xmax.ndim == 0:
-            x = F.minimum(x.reshape((1,)), xmax).squeeze()
-        else:
-            x = F.minimum(x, xmax)
-    if is_scalar:
-        return x.squeeze()
-    if dtype is not None:
-        dtype = check_astype_dtype_const(dtype)
-        if dtype != x.dtype:
-            return x.astype(dtype)
-    return x
+def clip(x, min=None, max=None):
+    """
+    Clamps all elements in `x` into the range `[min, max]`.
+    """
+    return F.clamp(x, min, max)
 
 
 def var(x, axis=None, ddof=0, keepdims=False):
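
`clamp` and `clip` above are now thin aliases over `F.clamp`, with either bound optional. NumPy's `clip` accepts `None` for an open bound, so an illustrative one-liner is enough:

```python
import numpy as np

def clamp_ref(x, min=None, max=None):
    return np.clip(x, min, max)

x = np.array([-1.0, 0.5, 2.0])
print(clamp_ref(x, min=0.0, max=1.0))  # [0.  0.5 1. ]
print(clamp_ref(x, max=0.0))           # [-1.  0.  0.]
```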
@@ -1545,16 +1963,16 @@ def std(x, axis=None, ddof=0, keepdims=False):
     return F.tensor_pow(x_var, 0.5)
 
 
-def gather_elements(
+def gather_elements(input, dim, index):
     r"""
     Gathers elements along an axis specified by dim.
 
     Refer to :func:`mindspore.ops.gather_elements` for more detail.
     """
-    return F.gather_elements(
+    return F.gather_elements(input, dim, index)
 
 
-def sum(x, axis=None, dtype=None, keepdims=False, initial=None):  # pylint: disable=redefined-builtin
+def sum(input, axis=None, dtype=None, keepdims=False, initial=None):  # pylint: disable=redefined-builtin
     """
     Return sum of array elements over a given axis.
 
@@ -1563,7 +1981,7 @@ def sum(x, axis=None, dtype=None, keepdims=False, initial=None):  # pylint: disable=redefined-builtin
     `extobj` are not supported.
 
     Args:
-
+        input (Union[int, float, bool, list, tuple, Tensor]): Elements to sum.
         axis (Union[None, int, tuple(int)]): Axis or axes along which a sum is performed. Default: None.
             If None, sum all of the elements of the input array.
             If axis is negative it counts from the last to the first axis.
@@ -1599,7 +2017,7 @@ def sum(x, axis=None, dtype=None, keepdims=False, initial=None):  # pylint: disable=redefined-builtin
     >>> print(input_x.sum(axis=1))
     [10. 35.]
     """
-    input_x =
+    input_x = input.astype(mstype.int32) if input.dtype == mstype.bool_ else input
     dtype = input_x.dtype if dtype is None else dtype
     dtype = check_astype_dtype_const(dtype)
     if not isinstance(keepdims, int):
@@ -1609,12 +2027,10 @@ def sum(x, axis=None, dtype=None, keepdims=False, initial=None):  # pylint: disable=redefined-builtin
     if axis is None:
         axis = ()
     else:
-        axis = check_and_canonicalize_axes(axis,
+        axis = check_and_canonicalize_axes(axis, input.ndim)
 
     if not check_type_support(input_x.dtype, 'GPU', (mstype.float64, mstype.float32, mstype.float16)):
         input_x = input_x.astype(mstype.float32)
-    if 0 in x.shape:
-        x = const_utils.make_tensor([0], x.dtype)
     if keepdims:
         res = _reduce_sum_keepdims(input_x, axis)
     else:
@@ -1624,6 +2040,36 @@ def sum(x, axis=None, dtype=None, keepdims=False, initial=None):  # pylint: disable=redefined-builtin
     return res.astype(dtype)
 
 
+def sum_to_size(input, *size):
+    """
+    Sum `input` to the `size`. `size` must be expandable to the Tensor size.
+    """
+    if len(size) == 1 and isinstance(size[0], tuple):
+        size = size[0]
+    shape_input = input.shape
+    if len(size) > input.ndim:
+        raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_input}.")
+    if len(size) < input.ndim:
+        pre_axis = tuple(axis for axis in range(input.ndim - len(size)))
+        input = input.sum(pre_axis)
+    axes = []
+    for i, element in enumerate(size):
+        if element != input.shape[i] and element == 1:
+            axes.append(i)
+        elif element != input.shape[i]:
+            raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_input}.")
+    if axes:
+        return input.sum(tuple(axes), keepdims=True)
+    return input
+
+
+def nansum(input, axis=None, keepdims=False, *, dtype=None):
+    """
+    Computes sum of all elements, treating NaNs as zero.
+    """
+    return F.nansum(input, axis=axis, keepdims=keepdims, dtype=dtype)
+
+
 def repeat(x, repeats, axis=None):
     """
     Repeat elements of an array.
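
`sum_to_size` above reduces in two stages: surplus leading dimensions are summed away first, then every axis whose target size is 1 is summed with `keepdims=True`. An illustrative NumPy rendering of the same strategy:

```python
import numpy as np

def sum_to_size_ref(x, size):
    if len(size) > x.ndim:
        raise ValueError(f"size {size} is not expandable to shape {x.shape}")
    if len(size) < x.ndim:
        x = x.sum(tuple(range(x.ndim - len(size))))  # fold extra leading dims
    axes = []
    for i, s in enumerate(size):
        if s != x.shape[i] and s == 1:
            axes.append(i)                           # this axis collapses to 1
        elif s != x.shape[i]:
            raise ValueError(f"size {size} is not expandable to shape {x.shape}")
    return x.sum(tuple(axes), keepdims=True) if axes else x

print(sum_to_size_ref(np.ones((4, 2, 3)), (2, 1)).shape)  # (2, 1), values 12.0
```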
@@ -1671,7 +2117,7 @@ def repeat(x, repeats, axis=None):
         axis = 0
     if not isinstance(axis, int):
         const_utils.raise_type_error('axes should be integers')
-
+    check_axis_in_range(axis, x.ndim)
     axis = axis + x.ndim if axis < 0 else axis
 
     if len(repeats) == 1:
@@ -1681,7 +2127,8 @@ def repeat(x, repeats, axis=None):
         return repeat_elements(x, repeats, axis)
     size = x.shape[axis]
     if len(repeats) != size:
-        const_utils.raise_value_error(
+        const_utils.raise_value_error(
+            'operands could not be broadcast together')
     subs = P.Split(axis, size)(x)
     repeated_subs = []
     for sub_item, rep in zip(subs, repeats):
@@ -1690,6 +2137,13 @@ def repeat(x, repeats, axis=None):
     return P.Concat(axis)(repeated_subs)
 
 
+def repeat_interleave(x, repeats, dim=None):
+    """
+    For details, please refer to :func:`mindspore.ops.repeat_interleave`.
+    """
+    return F.repeat_interleave(x, repeats, dim)
+
+
 def hardshrink(x, lambd=0.5):
     r"""
     Apply the Hard Shrink function for a tensor. Calculates the output according to the input elements.
@@ -1728,9 +2182,33 @@ def hardshrink(x, lambd=0.5):
     return P.HShrink(lambd)(x)
 
 
-def
+def heaviside(x, values):
+    r"""
+    For details, please refer to :func:`mindspore.ops.heaviside`.
+    """
+    return F.heaviside(x, values)
+
+
+def hypot(x, other):
+    r'''
+    For details, please refer to :func:`mindspore.ops.hypot`.
+    '''
+    return F.hypot(x, other)
+
+
+def soft_shrink(input, lambd=0.5):
     """Apply the soft shrink function for a tensor. Calculates the output according to the input elements."""
-    return F.
+    return F.soft_shrink(input, lambd)
+
+
+def matrix_determinant(input):
+    """Computes the determinant of one or more square matrices."""
+    return F.matrix_determinant(input)
+
+
+def log_matrix_determinant(input):
+    """Computes the sign and the log of the absolute value of the determinant of one or more square matrices."""
+    return F.log_matrix_determinant(input)
 
 
 def getitem(data, index):
@@ -1788,7 +2266,8 @@ def constant_round(*data):
     """Returns the rounded value of the constant."""
     for x in data:
         if x is None:
-            raise ValueError(
+            raise ValueError(
+                "For round(), the input should be a Tensor or 1-2 constants.")
     return round(*data)
 
 
@@ -1803,7 +2282,8 @@ def ms_round(*data):
            return round_(x)
        return constant_round(x)
     if isinstance(data[0], Tensor) or isinstance(data[1], Tensor):
-        const_utils.raise_type_error(
+        const_utils.raise_type_error(
+            "When applying round() to tensor, only one tensor is supported as input.")
     return constant_round(*data)
 
 
@@ -1820,10 +2300,12 @@ def str_func(*data):
     if data_len == 0:
         return ''
     data = data[0]
-    if isinstance(data, (CSRTensor, COOTensor,
-        const_utils.raise_type_error(
+    if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
+        const_utils.raise_type_error(
+            "str() does not support sparse tensor input.")
     if not F.isconstant(data):
-        const_utils.raise_type_error(
+        const_utils.raise_type_error(
+            "str() does not support non-constant input.")
     return cast_to_str(data)
 
 
@@ -1840,23 +2322,29 @@ def bool_func(*data):
|
|
|
1840
2322
|
if data_len == 0:
|
|
1841
2323
|
return False
|
|
1842
2324
|
data = data[0]
|
|
1843
|
-
if isinstance(data, (CSRTensor, COOTensor,
|
|
1844
|
-
const_utils.raise_type_error(
|
|
2325
|
+
if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
|
|
2326
|
+
const_utils.raise_type_error(
|
|
2327
|
+
"bool() does not support sparse tensor input.")
|
|
1845
2328
|
if isinstance(data, (Tensor, Tensor_)):
|
|
1846
2329
|
tensor_shape = F.shape(data)
|
|
1847
2330
|
tensor_shape_len = len(tensor_shape)
|
|
1848
2331
|
if tensor_shape_len == 0 or (tensor_shape_len == 1 and tensor_shape[0] == 1):
|
|
1849
2332
|
return data != 0
|
|
1850
|
-
const_utils.raise_value_error(
|
|
2333
|
+
const_utils.raise_value_error(
|
|
2334
|
+
"The truth value of an array with more than one element is ambiguous.")
|
|
1851
2335
|
if not F.isconstant(data):
|
|
1852
|
-
|
|
2336
|
+
if hasattr(data, "__bool__"):
|
|
2337
|
+
return data.__bool__()
|
|
2338
|
+
if hasattr(data, "__len__"):
|
|
2339
|
+
return len(data) != 0
|
|
2340
|
+
return True
|
|
1853
2341
|
return cast_to_bool(data)
|
|
1854
2342
|
|
|
1855
2343
|
|
|
1856
2344
|
@constexpr
|
|
1857
2345
|
def cast_to_int(*data):
|
|
1858
2346
|
target = data[0]
|
|
1859
|
-
if isinstance(target, Tensor_):
|
|
2347
|
+
if isinstance(target, (Tensor, Tensor_)):
|
|
1860
2348
|
target = Tensor(target, internal=True)
|
|
1861
2349
|
if len(data) == 1:
|
|
1862
2350
|
return int(target)
|
|
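The `bool_func` rewrite enforces NumPy's ambiguity rule (only shape-`()` or `(1,)` tensors have a truth value) and falls back to `__bool__`/`__len__` for non-constant objects. A sketch of how the same rule surfaces on eager tensors:

    import numpy as np
    from mindspore import Tensor

    print(bool(Tensor(np.array([1]))))   # True: one element is unambiguous
    try:
        bool(Tensor(np.array([1, 2])))   # more than one element is ambiguous
    except ValueError as err:
        print(err)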
@@ -1871,16 +2359,23 @@ def int_func(*data):
     if data_len == 0:
         return 0
     target = data[0]
+    base = 10
+    if data_len == 2:
+        base = data[1]
+    if isinstance(target, (Tensor, Tensor_, int, float, bool)) and base == 10 and not F.isconstant(target):
+        return F.scalar_cast(target, mstype.int64)
     if not F.isconstant(target):
-        const_utils.raise_type_error(
-
-
+        const_utils.raise_type_error(
+            "int() does not support non-constant input.")
+    if isinstance(target, (CSRTensor, COOTensor, RowTensorInner)):
+        const_utils.raise_type_error(
+            "int() does not support sparse tensor input.")
     return cast_to_int(*data)
 
 
 @constexpr
 def cast_to_float(data):
-    if isinstance(data, Tensor_):
+    if isinstance(data, (Tensor, Tensor_)):
         data = Tensor(data, internal=True)
     return float(data)
 
@@ -1893,10 +2388,14 @@ def float_func(*data):
     if data_len == 0:
         return 0.0
     data = data[0]
+    if isinstance(data, (Tensor, Tensor_, int, float, bool)) and not F.isconstant(data):
+        return F.scalar_cast(data, mstype.float32)
     if not F.isconstant(data):
-        const_utils.raise_type_error(
-
-
+        const_utils.raise_type_error(
+            "float() does not support non-constant input.")
+    if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
+        const_utils.raise_type_error(
+            "float() does not support sparse tensor input.")
     return cast_to_float(data)
 
 
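The new `F.scalar_cast` fast path lets `int()` and `float()` accept non-constant scalars inside a compiled function instead of tripping the old constant check. A hedged sketch, assuming variable scalars are produced with `mindspore.mutable`:

    import mindspore as ms
    from mindspore import mutable

    @ms.jit
    def to_builtin(x):
        # x is a variable (non-constant) scalar, so these route through
        # F.scalar_cast rather than the compile-time cast_to_* helpers.
        return int(x), float(x)

    print(to_builtin(mutable(3)))  # (3, 3.0)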
@@ -1908,14 +2407,21 @@ def list_func(*data):
     if data_len == 0:
         return F.make_list()
     data = data[0]
-    if isinstance(data, (CSRTensor, COOTensor,
-        const_utils.raise_type_error(
+    if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
+        const_utils.raise_type_error(
+            "list() does not support single sparse tensor input.")
     if not isinstance(data, Tensor) and not hasattr(data, "__ms_iter__"):
         data_type = F.typeof(data)
-        const_utils.raise_type_error(
+        const_utils.raise_type_error(
+            str(data_type) + " object is not iterable.")
     if isinstance(data, dict):
         data = data.keys()
-
+    if isinstance(data, (tuple, list)) and F.is_sequence_shape_unknown(data):
+        ret = mutable([], True)
+        if F.is_dynamic_sequence_element_unknown(data):
+            return ret
+    else:
+        ret = F.make_list()
     for i in range(len(data)):
         ret = ret + F.make_list(data[i])
     return ret
@@ -1929,14 +2435,21 @@ def tuple_func(*data):
     if data_len == 0:
         return F.make_tuple()
     data = data[0]
-    if isinstance(data, (CSRTensor, COOTensor,
-        const_utils.raise_type_error(
+    if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
+        const_utils.raise_type_error(
+            "tuple() does not support single sparse tensor input.")
     if not isinstance(data, Tensor) and not hasattr(data, "__ms_iter__"):
         data_type = F.typeof(data)
-        const_utils.raise_type_error(
+        const_utils.raise_type_error(
+            str(data_type) + " object is not iterable.")
     if isinstance(data, dict):
         data = data.keys()
-
+    if isinstance(data, (tuple, list)) and F.is_sequence_shape_unknown(data):
+        ret = mutable((), True)
+        if F.is_dynamic_sequence_element_unknown(data):
+            return ret
+    else:
+        ret = F.make_tuple()
     for i in range(len(data)):
         ret = ret + F.make_tuple(data[i])
     return ret
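`list()` and `tuple()` now recognize dynamic-length sequences via `F.is_sequence_shape_unknown` and rebuild them with `mutable(..., True)` instead of unrolling. A sketch of a dynamic-length sequence from the user side, assuming the public `mindspore.mutable(data, dynamic_len=True)` API:

    import mindspore as ms
    from mindspore import mutable

    @ms.jit
    def length(xs):
        # len() of a dynamic-length sequence is resolved at run time,
        # not unrolled at compile time.
        return len(xs)

    print(length(mutable([1, 2, 3], dynamic_len=True)))  # 3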
@@ -1962,7 +2475,8 @@ def get_max_min_data_len(*data):
     if isinstance(data, (dict, list, tuple)):
         len_data = len(data)
     else:
-        const_utils.raise_type_error(
+        const_utils.raise_type_error(
+            "max() or min() does not support the data type.")
     return len_data
 
 
@@ -1971,6 +2485,11 @@ def get_tensor_num(data):
     tensor_num = 0
     for input_data in data:
         if isinstance(input_data, Tensor):
+            tensor_shape = F.shape(input_data)
+            tensor_shape_len = len(tensor_shape)
+            if tensor_shape_len != 0 and not (tensor_shape_len == 1 and tensor_shape[0] == 1):
+                const_utils.raise_value_error(
+                    "The truth value of an array with more than one element is ambiguous.")
             tensor_num = tensor_num + 1
     return tensor_num
 
@@ -1986,6 +2505,69 @@ def exist_tensor(data):
     return False
 
 
+def check_sequence_all_variable_scalar(x, str_info):
+    """Check whether x can be used in SequenceMax and SequenceMin"""
+    if F.is_sequence_shape_unknown(x):
+        if F.is_dynamic_sequence_element_unknown(x):
+            const_utils.raise_value_error(str_info + "() arg is an empty sequence.")
+        if not isinstance(x[0], (int, float)):
+            const_utils.raise_value_error(
+                "When the input to " + str_info + "() is dynamic length sequence, only support scalar type input")
+        return True
+    contain_variable_scalar = False
+    for i in x:
+        if not isinstance(i, (int, float)):
+            return False
+        if not contain_variable_scalar and not F.isconstant(i):
+            contain_variable_scalar = True
+    return contain_variable_scalar
+
+
+def get_data_type_str(input_data):
+    """Get the type of input."""
+    if isinstance(input_data, (int, float, bool)):
+        return "variable " + str(F.typeof(input_data))
+    return str(F.typeof(input_data))
+
+
+def check_isconstant(input_data, func_name):
+    """Check the input data of func is constant."""
+    if not F.isconstant(input_data):
+        const_utils.raise_type_error("The input of " + func_name + " only support Tensor, List, Tuple, constant Scalar,"
+                                     " but got " + get_data_type_str(input_data))
+
+
+def ms_max_one_element(x):
+    """Implementation of `max` which inputs has only one element."""
+    if isinstance(x, Tensor):
+        tensor_shape = F.shape(x)
+        tensor_shape_len = len(tensor_shape)
+        if tensor_shape_len == 0:
+            const_utils.raise_type_error(
+                "Cannot iterate over a scalar tensor.")
+        if tensor_shape_len >= 2:
+            const_utils.raise_value_error(
+                "The truth value of an array with more than one element is ambiguous.")
+        return x.max()
+    # Deal with Tensor in tuple or list
+    if isinstance(x, (list, tuple)):
+        if check_sequence_all_variable_scalar(x, "max"):
+            return SequenceMax()(x)
+        if len(x) == 0:
+            const_utils.raise_value_error("max() arg is an empty sequence.")
+        tensor_num = get_tensor_num(x)
+        if tensor_num == len(x):
+            return max_tensor(x)
+        if tensor_num != 0:
+            const_utils.raise_type_error(
+                "max() cannot contain both tensor and non-tensor type.")
+        if exist_tensor(x):
+            const_utils.raise_type_error(
+                "max() cannot support tensor in list or tuple nested now.")
+    check_isconstant(x, "max()")
+    return max_(x)
+
+
 def ms_max(*data):
     """Implementation of `max`."""
     len_data = get_max_min_data_len(data)
@@ -1993,25 +2575,21 @@ def ms_max(*data):
         const_utils.raise_type_error("max() requires 1 argument at least.")
     elif len_data == 1:
         x = data[0]
-
-            return x.max()
-        # Deal with Tensor in tuple or list
-        if isinstance(x, (list, tuple)):
-            tensor_num = get_tensor_num(x)
-            if tensor_num == len(x):
-                return max_tensor(x)
-            if tensor_num != 0:
-                const_utils.raise_type_error("max() cannot contain both tensor and non-tensor type.")
-            if exist_tensor(x):
-                const_utils.raise_type_error("max() cannot support tensor in list or tuple nested now.")
-        return max_(x)
+        return ms_max_one_element(x)
     elif len_data >= 2:
         tensor_num = get_tensor_num(data)
         # All inputs is Tensor
         if tensor_num == len_data:
             return max_tensor(*data)
         if tensor_num != 0:
-            const_utils.raise_type_error(
+            const_utils.raise_type_error(
+                "max() cannot contain both tensor and non-tensor type.")
+        # exist tensor in list/tuple
+        if exist_tensor(data):
+            const_utils.raise_value_error(
+                "The truth value of an array with more than one element is ambiguous.")
+        for input_data in data:
+            check_isconstant(input_data, "max()")
     return max_(*data)
 
 
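With `get_tensor_num` now rejecting tensors of more than one element, the `max()` builtin only ever compares scalars or scalar-shaped tensors. A small sketch of the all-tensor path that the rewritten `ms_max` routes to `max_tensor`:

    import mindspore as ms
    from mindspore import Tensor

    @ms.jit
    def larger(a, b):
        # Both arguments are tensors, so this compiles to a tensor maximum
        # rather than a constant-folded Python max().
        return max(a, b)

    print(larger(Tensor(3.0), Tensor(5.0)))  # 5.0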
@@ -2042,6 +2620,37 @@ def min_list_tuple(seq1, seq2):
     return seq1
 
 
+def ms_min_one_element(x):
+    """Implementation of `min` which inputs has only one element."""
+    if isinstance(x, Tensor):
+        tensor_shape = F.shape(x)
+        tensor_shape_len = len(tensor_shape)
+        if tensor_shape_len == 0:
+            const_utils.raise_type_error(
+                "Cannot iterate over a scalar tensor.")
+        if tensor_shape_len >= 2:
+            const_utils.raise_value_error(
+                "The truth value of an array with more than one element is ambiguous.")
+        return x.min()
+    # Deal with Tensor in tuple or list
+    if isinstance(x, (list, tuple)):
+        if check_sequence_all_variable_scalar(x, "min"):
+            return SequenceMin()(x)
+        if len(x) == 0:
+            const_utils.raise_value_error("min() arg is an empty sequence.")
+        tensor_num = get_tensor_num(x)
+        if tensor_num == len(x):
+            return min_tensor(x)
+        if tensor_num != 0:
+            const_utils.raise_type_error(
+                "min() cannot contain both tensor and non-tensor type.")
+        if exist_tensor(x):
+            const_utils.raise_type_error(
+                "min() cannot support tensor in list or tuple nested now.")
+    check_isconstant(x, "min()")
+    return min_(x)
+
+
 def ms_min(*data):
     """Implementation of `min`."""
     len_data = get_max_min_data_len(data)
@@ -2049,25 +2658,21 @@ def ms_min(*data):
         const_utils.raise_type_error("min() requires 1 argument at least.")
     elif len_data == 1:
         x = data[0]
-
-            return x.min()
-        # Deal with Tensor in tuple or list
-        if isinstance(x, (list, tuple)):
-            tensor_num = get_tensor_num(x)
-            if tensor_num == len(x):
-                return min_tensor(x)
-            if tensor_num != 0:
-                const_utils.raise_type_error("min() cannot contain both tensor and non-tensor type.")
-            if exist_tensor(x):
-                const_utils.raise_type_error("min() cannot support tensor in list or tuple nested now.")
-        return min_(x)
+        return ms_min_one_element(x)
     elif len_data >= 2:
         tensor_num = get_tensor_num(data)
         # All inputs is Tensor
         if tensor_num == len_data:
             return min_tensor(*data)
         if tensor_num != 0:
-            const_utils.raise_type_error(
+            const_utils.raise_type_error(
+                "min() cannot contain both tensor and non-tensor type.")
+        # exist tensor in list/tuple
+        if exist_tensor(data):
+            const_utils.raise_value_error(
+                "The truth value of an array with more than one element is ambiguous.")
+        for input_data in data:
+            check_isconstant(input_data, "min()")
     return min_(*data)
 
 
@@ -2079,7 +2684,13 @@ def ms_sum(*data):
     x = data[0]
     if not isinstance(x, Tensor) and not hasattr(x, "__ms_iter__"):
         data_type = F.typeof(x)
-        const_utils.raise_type_error(
+        const_utils.raise_type_error(
+            str(data_type) + " object is not iterable.")
+    if isinstance(x, Tensor):
+        tensor_shape = F.shape(x)
+        if len(tensor_shape) == 0:
+            const_utils.raise_type_error(
+                "Cannot iterate over a scalar tensor.")
     if isinstance(x, dict):
         x = x.keys()
     result = 0
@@ -2106,14 +2717,37 @@ def ms_len(data):
     return data.__len__()
 
 
-
-
-
+@constexpr
+def python_len_with_check(data):
+    """Return the result of python built-in len function with iterable check"""
+    if not hasattr(data, "__iter__"):
+        raise TypeError(str(type(data)) +
+                        " object is not iterable in graph mode.")
+    return len(data)
+
+
+def ms_len_with_iterable_check(data):
+    """Implementation of `len` with iterable check, used in len of condition."""
+    if not isinstance(data, Tensor) and F.isconstant(data):
+        return python_len_with_check(data)
+    if not hasattr(data, "__len__"):
+        type_str = str(F.typeof(data))
+        const_utils.raise_type_error(
+            type_str + " object is not iterable in graph mode.")
+    return data.__len__()
+
+
+def ms_next_with_dyn_input_check(it):
+    """Implementation of `next` with daynamic input check."""
+    if isinstance(it, (tuple, list)) and F.is_sequence_shape_unknown(it):
+        raise ValueError(f"For 'ListComprehension' syntax [i for i in x], "
+                         f"input x can not be dynamic length list/tuple in graph mode")
+    return it.__ms_hasnext__()
 
 
-def
-    """
-    return x.
+def floor(x):
+    """Rounds a tensor down to the closest integer element-wise."""
+    return x.__floor__()
 
 
 def uadd(x):
@@ -2151,6 +2785,9 @@ def enumerate_(x, start=0):
     if check_is_tensor(x_type):
         for i in range(x.shape[0]):
             ret += ((start + i, x[i]),)
+    elif F.is_sequence_shape_unknown(x):
+        const_utils.raise_value_error(
+            "For 'enumerate', the dynamic length input is unsupported in graph mode")
     else:
         ret = zip(range(start, start + len(x)), x)
     return ret
@@ -2158,22 +2795,27 @@ def enumerate_(x, start=0):
 
 def expand_tensor_as(x, y):
     """Expand tensor"""
-    return
+    return F.broadcast_to(x, shape_(y))
 
 
 def broadcast_to(x, shape):
     """Broadcasts tensor to a given shape."""
-    return
+    return F.broadcast_to(x, shape)
 
 
 def expand_dims(x, axis):
     """
-    Insert a dimension of shape 1 at the specified axis of Tensor
+    Insert a dimension of shape 1 at the specified axis of Tensor.
     """
-    check_is_int(axis, 'axis')
+    validator.check_is_int(axis, 'axis')
     return P.ExpandDims()(x, axis)
 
 
+def unsqueeze(input, dim):
+    """For details, please refer to :func:`mindspore.ops.unsqueeze`."""
+    return P.ExpandDims()(input, dim)
+
+
 def masked_fill(x, mask, value):
     """
     Fills elements of Tensor with value where mask is True.
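`expand_tensor_as` and `broadcast_to` now delegate to `F.broadcast_to`, and the new `unsqueeze` aliases `ExpandDims`. A brief sketch of the two shape helpers through the public ops API:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    row = Tensor(np.array([1.0, 2.0, 3.0]), ms.float32)
    # Insert a leading axis, then broadcast the (1, 3) row to (2, 3).
    grid = ops.broadcast_to(ops.unsqueeze(row, 0), (2, 3))
    print(grid.shape)  # (2, 3)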
@@ -2191,12 +2833,12 @@ def col2im(*inputs):
     return F.col2im(*inputs)
 
 
-def narrow(
+def narrow(input, axis, start, length):
     """
     Returns a narrowed tensor from input tensor.
     The dimension axis is input from start to start + length.
     """
-    return F.narrow(
+    return F.narrow(input, axis, start, length)
 
 
 def to_csr(x):
@@ -2222,7 +2864,8 @@ def check_select_condition(cond_type):
     """
     if isinstance(cond_type, mstype.tensor_type):
         return
-    raise TypeError(
+    raise TypeError(
+        f"For select, the argument condition should be Tensor, but got {cond_type}.")
 
 
 @constexpr
@@ -2262,6 +2905,13 @@ def view(x, *shape):
     return F.reshape(x, shape)
 
 
+def view_as(input, other):
+    """View self Tensor as the same shape as `other` ."""
+    if not isinstance(other, (Tensor, Tensor_)):
+        raise TypeError(f"For view_as, the input other must be a Tensor, but got {type(other)}")
+    return F.reshape(input, other.shape)
+
+
 def bitwise_and(x, y):
     """Returns bitwise `and` of two tensors element-wise."""
     return F.bitwise_and(x, y)
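`view_as` is just a reshape to the other tensor's shape behind a type guard. A usage sketch of the matching Tensor method:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor

    a = Tensor(np.arange(6).reshape(2, 3), ms.float32)
    template = Tensor(np.zeros((3, 2)), ms.float32)
    print(a.view_as(template).shape)  # (3, 2): same data, template's shape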
@@ -2277,11 +2927,42 @@ def bitwise_xor(x, y):
     return F.bitwise_xor(x, y)
 
 
+def bitwise_left_shift(x, y):
+    """Returns bitwise left shift of `x` by `other` bits."""
+    return F.bitwise_left_shift(x, y)
+
+
+def bitwise_right_shift(x, y):
+    """Returns bitwise right shift of `x` by `other` bits."""
+    return F.bitwise_right_shift(x, y)
+
+
 def exp(x):
     """Returns exponential of a tensor element-wise."""
     return F.exp(x)
 
 
+def real(x):
+    r"""
+    For details, please refer to :func:`mindspore.ops.real`.
+    """
+    return F.real(x)
+
+
+def rsqrt(x):
+    r"""
+    For details, please refer to :func:`mindspore.ops.rsqrt`.
+    """
+    return F.rsqrt(x)
+
+
+def reciprocal(x):
+    r"""
+    For details, please refer to :func:`mindspore.ops.reciprocal`.
+    """
+    return F.reciprocal(x)
+
+
 def sqrt(x):
     """Returns sqrt of a tensor element-wise."""
     return F.sqrt(x)
@@ -2297,6 +2978,11 @@ def sub(x, y):
     return F.sub(x, y)
 
 
+def t(input):
+    """Transposes a 2-D tensor."""
+    return F.t(input)
+
+
 def tan(x):
     """Returns tangent of `x`."""
     return F.tan(x)
@@ -2314,14 +3000,24 @@ def cosh(x):
     return F.cosh(x)
 
 
-def ger(
-    """Ger product of `
-    return F.ger(
+def ger(input, vec2):
+    """Ger product of `input` and `vec2`."""
+    return F.ger(input, vec2)
+
+
+def gt(x, y):
+    """Compare the value of the input parameters :math:`x > y` element-wise."""
+    return F.gt(x, y)
+
+
+def ge(x, y):
+    """Compare the value of the input parameters :math:`x >= y` element-wise."""
+    return F.ge(x, y)
 
 
 def while_cond(x):
     """For while condition, if the condition is a tensor, the loop will not be unrolled"""
-    if
+    if issubclass_(F.typeof(x), F.typeof(mstype.tensor)):
         is_cond = check_is_tensor_bool_cond(F.shape(x))
         if is_cond:
             return F.cast(x, mstype.bool_)
@@ -2395,11 +3091,34 @@ def unsorted_segment_prod(x, segment_ids, num_segments):
     return F.unsorted_segment_prod(x, segment_ids, num_segments)
 
 
-def
+def negative(input):
+    r"""
+    Return a new tensor with the negative of the elements of input.
+    """
+    return F.neg(input)
+
+
+def nonzero(input):
     """
     Return a Tensor of the positions of all non-zero values.
     """
-    return F.nonzero(
+    return F.nonzero(input)
+
+
+def new_zeros(x, size, *, dtype=None):
+    r"""
+    Return a tensor of `size` filled with zeros. By default, the returned tensor has the same dtype as `x`.
+    """
+    _dtype = x.dtype if dtype is None else dtype
+    return F.zeros(size, dtype=_dtype)
+
+
+def new_ones(x, size, *, dtype=None):
+    r"""
+    Return a tensor of `size` filled with ones. By default, the returned tensor has the same dtype as `x`.
+    """
+    _dtype = x.dtype if dtype is None else dtype
+    return F.ones(size, dtype=_dtype)
 
 
 def diag(x):
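The detail worth noting in `new_zeros`/`new_ones` is dtype inheritance: the result takes the source tensor's dtype unless one is passed explicitly. A short sketch via the Tensor methods:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor

    x = Tensor(np.ones((2, 2)), ms.float16)
    z = x.new_zeros((3,))                   # float16, inherited from x
    o = x.new_ones((3,), dtype=ms.float32)  # dtype explicitly overridden
    print(z.dtype, o.dtype)  # Float16 Float32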
@@ -2409,11 +3128,18 @@ def diag(x):
     return F.diag(x)
 
 
-def
+def diagflat(input, offset=0):
+    """
+    Creates a two-dimensional Tensor with the flattened input as a diagonal.
+    """
+    return F.diagflat(input, offset)
+
+
+def masked_select(input, mask):
     """
     Returns a new 1-D Tensor which indexes the input tensor according to the boolean mask.
     """
-    return F.masked_select(
+    return F.masked_select(input, mask)
 
 
 def inplace_update(x, v, indices):
@@ -2438,21 +3164,23 @@ def coo_to_csr(x):
 
 def coo_to_dense(x):
     """convert coo to dense."""
-    zeros_tensor = F.zeros(x.shape, x.values.dtype)
+    zeros_tensor = F.zeros(x.shape, dtype=x.values.dtype)
     return F.tensor_scatter_update(zeros_tensor, x.indices, x.values)
 
 
 def coo_coalesce(x):
     """Returns the coalesced sparse tensor of the input."""
     shape = const_utils.make_tensor(x.shape)
-    res_indices, res_values, _ = P.Coalesce()(
+    res_indices, res_values, _ = P.Coalesce()(
+        x.indices.transpose(), x.values, shape)
     return COOTensor(res_indices.transpose(), res_values, x.shape)
 
 
 def csr_to_coo(x):
     """convert csr to coo."""
     if x.ndim != 2:
-        const_utils.raise_value_error(
+        const_utils.raise_value_error(
+            "Currently only support 2-D CSRTensor when converting to COOTensor.")
     row_indices = F.csr2coo(x.indptr, x.values.shape[0])
     coo_indices = P.Stack(1)((row_indices, x.indices))
     return COOTensor(coo_indices, x.values, x.shape)
@@ -2463,7 +3191,7 @@ def csr_to_dense(x):
     return F.csr_to_dense(x)
 
 
-def
+def random_categorical(x, num_sample, seed=0, dtype=mstype.int64):
     r"""
     Generates random samples from a given categorical distribution tensor.
     Refer to :func:`mindspore.ops.random_categorical` for more detail.
@@ -2498,30 +3226,42 @@ def check_is_tuple_or_list_or_tensor(x, op_name, arg_name):
     """check whether x is list or tuple or tensor."""
     if isinstance(x, (mstype.List, mstype.Tuple, mstype.tensor_type)):
         return True
-    raise TypeError(
+    raise TypeError(
+        f"For '{op_name}', the '{arg_name}' should be tuple or list or tensor, but got {x}.")
 
 
 @constexpr
 def check_is_const_int(x, op_name, arg_name):
     """check whether x is const int."""
     if x is None:
-        raise TypeError(
+        raise TypeError(
+            f"For '{op_name}', the '{arg_name}' should be a const int number, but got not const.")
     if not isinstance(x, int):
-        raise TypeError(
+        raise TypeError(
+            f"For '{op_name}', the '{arg_name}' should be a const int number, but got {x}.")
     return True
 
 
-@
+@_primexpr
 def check_is_tensor_bool_cond(shp):
     """check if tensor is a bool condition"""
-    if shp
+    if not shp or (len(shp) == 1 and shp[0] == 1):
         return True
+    if None in shp:
+        raise ValueError(f"Only tensor which shape is () or (1,) can be converted to bool, but got tensor shape is "
+                         f"None")
     raise ValueError(f"Only tensor which shape is () or (1,) can be converted to bool, but got tensor shape is {shp}")
 
 
 @constexpr
 def const_tensor_to_bool(x):
-    """convert bool tensor to bool condition
+    """convert bool tensor to bool condition
+    def const_tensor_to_bool(x):
+    convert bool tensor to bool condition
+    if x.shape == (1,):
+        return bool(x[0])
+    return bool(x)
+    """
     if x is None:
         raise ValueError("Only tensor which shape is () or (1,) can be converted to bool, but got None")
     x = x.asnumpy()
@@ -2533,7 +3273,7 @@ def const_tensor_to_bool(x):
         f"Only tensor which shape is () or (1,) can be converted to bool, but got tensor shape is {x.shape}")
 
 
-@
+@_primexpr
 def check_view_shape(x):
     """Check view function input shape"""
     if not x:
@@ -2545,32 +3285,27 @@ def check_view_shape(x):
     return x
 
 
-# convert normal param_check functions to constexpr functions
 check_astype_dtype_const = constexpr(validator.check_astype_dtype)
 check_transpose_axis_const = constexpr(validator.check_transpose_axis)
-check_reshape_shp_const = constexpr(validator.check_reshape_shp)
-check_flatten_order_const = constexpr(validator.check_flatten_order)
-check_swapaxes_axis_const = constexpr(validator.check_swapaxes_axis)
-prepare_shape_for_squeeze_const = constexpr(validator.prepare_shape_for_squeeze)
-check_axis_in_range_const = constexpr(validator.check_axis_in_range)
-check_axis_valid = constexpr(validator.check_axis_valid)
 max_ = constexpr(validator.max_)
 min_ = constexpr(validator.min_)
-expanded_shape =
-tuple_slice =
-infer_out_shape = constexpr(validator.infer_out_shape)
-get_log2_size = constexpr(validator.get_log2_size)
-check_axis_type = constexpr(validator.check_axis_type)
-check_and_canonicalize_axes = constexpr(validator.check_and_canonicalize_axes)
-empty_compile = constexpr(validator.empty_compile)
+expanded_shape = validator.expanded_shape
+tuple_slice = validator.tuple_slice
 check_type_support = constexpr(validator.check_type_support)
-check_is_int = constexpr(validator.check_is_int)
 check_type_name = constexpr(validator.check_type_name)
 check_value_type = constexpr(validator.check_value_type)
-
+check_is_int = constexpr(validator.check_is_int)
+check_bool_type = constexpr(validator.check_bool)
+check_is_int = constexpr(validator.check_is_int)
 check_bool = constexpr(validator.check_bool)
 
 
+@constexpr
+def empty_compile(dtype, shape):
+    """Returns an empty Tensor."""
+    return Tensor_(dtype, shape)
+
+
 def tensor_bool(x):
     """tensor as condition, if is constant, return immediate bool value"""
     is_cond = check_is_tensor_bool_cond(F.shape(x))
@@ -2594,6 +3329,11 @@ def matmul(x, y):
     return F.matmul(x, y)
 
 
+def inner(x, other):
+    """Computes the inner product of 2 tensors."""
+    return F.inner(x, other)
+
+
 def float_bool(x):
     """Implementation of `float_bool`."""
     return x != 0.0
@@ -2618,9 +3358,11 @@ def str_bool(x):
     return True
 
 
-def
-    """
-
+def matrix_power(input, n):
+    """
+    Raises a square matrix to the (integer) power `n` .
+    """
+    return F.matrix_power(input, n)
 
 
 def log1p(x):
@@ -2656,9 +3398,9 @@ def logit(x, eps=None):
     return F.logit(x, eps)
 
 
-def
-    """
-    return F.
+def logdet(x):
+    """Returns the log determinant of one or batches of square matrices."""
+    return F.logdet(x)
 
 
 def lerp(start, end, weight):
@@ -2666,9 +3408,10 @@ def lerp(start, end, weight):
     return F.lerp(start, end, weight)
 
 
-
+# pylint: disable=redefined-builtin
+def norm(A, ord=None, dim=None, keepdim=False, *, dtype=None):
     """Returns the matrix norm or vector norm of a given tensor."""
-    return F.norm(
+    return F.norm(A, ord, dim, keepdim, dtype=dtype)
 
 
 def renorm(input_x, p, dim, maxnorm):
@@ -2681,6 +3424,15 @@ def renorm(input_x, p, dim, maxnorm):
     return F.renorm(input_x, p, dim, maxnorm)
 
 
+def sequence_index(sequence, target, start=None, end=None):
+    """Implementation of `tuple_index`."""
+    if start is None:
+        start = 0
+    if end is None:
+        end = len(sequence)
+    return SequenceIndex()(sequence, target, start, end)
+
+
 def list_bool(x):
     """Implementation of `tuple_bool`."""
     return len(x) != 0
@@ -2720,49 +3472,47 @@ def ceil(x):
 
 def top_k(input_x, k, sorted=True):
     """
-
+    `Tensor.top_k` is deprecated, please use `Tensor.topk` instead.
     """
     check_is_int(k, 'k')
     check_bool(sorted, 'sorted')
     return F.top_k(input_x, k, sorted)
 
 
-
-
-
-
-
-@ms_class
-class SequenceIterator:
+def topk(input_x, k, dim=None, largest=True, sorted=True):
+    r"""
+    For details, please refer to :func:`mindspore.ops.topk`.
     """
-
+    check_is_int(k, 'k')
+    check_bool_type(sorted, 'sorted')
+    return F.topk(input_x, k, dim, largest=largest, sorted=sorted)
 
-    Iterator to use for sequences like List, Array.
-    """
 
-
-
-
+def subtract(x, other, *, alpha=1):
+    r"""
+    Computes the element-wise subtraction of input tensors.
+    """
+    return F.sub(x, other * alpha)
 
-    @core(ignore_values=True)
-    def __ms_hasnext__(self):
-        """Whether the index is past the length of the sequence."""
-        return self.idx < ms_len(self.seq)
 
-
-
-
-
+def true_divide(divident, divisor):
+    r"""
+    Computes the element-wise division of input tensors.
+    """
+    return F.div(divident, divisor, rounding_mode=None)
 
 
-
-
-
+# pylint: disable=redefined-outer-name
+def triu(input, diagonal=0):
+    r"""
+    Returns the triangular matrix based on the diagonal.
+    """
+    return F.triu(input, diagonal)
 
 
-
-
-
+#############
+# Iteration #
+#############
 
 
 def tuple_next(xs):
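`top_k` survives only as a deprecation shim; the replacement `topk` adds `dim` and `largest`, and the new `subtract` applies a keyword-only `alpha` multiplier to the second operand before subtracting. A quick sketch of both through the public APIs:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.array([1.0, 3.0, 2.0]), ms.float32)
    values, indices = ops.topk(x, 2)                  # [3. 2.], [1 2]
    y = x.subtract(Tensor(1.0, ms.float32), alpha=2)  # x - 1.0 * 2
    print(values, indices, y)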
@@ -2785,13 +3535,44 @@ def list_hasnext(xs):
     return len(xs) > 0
 
 
+def dict_next(xs):
+    """Next array."""
+    keys = xs.keys()
+    new_keys = F.make_list()
+    new_values = F.make_list()
+    for i in range(1, len(keys)):
+        new_keys.append(keys[i])
+        new_values.append(xs[keys[i]])
+    new_dict = {}
+    return keys[0], new_dict.fromkeys(new_keys, new_values)
+
+
+def dict_hasnext(xs):
+    """Whether the dict is empty or not."""
+    return len(xs) > 0
+
+
+def array_next(xs):
+    """Next array."""
+    return xs[0], xs[1:]
+
+
+def array_hasnext(xs):
+    """Whether the array is empty or not."""
+    return len(xs) > 0
+
+
 def list_append(self_, list_item):
     """Append into list"""
+    if F.is_sequence_shape_unknown(self_):
+        return ListAppend()(self_, list_item)
     return _append(self_, list_item)
 
 
 def list_insert(self_, index, obj):
     """Insert into list"""
+    if F.is_sequence_shape_unknown(self_) or not F.isconstant(index) or not F.isconstant(obj):
+        return ListInsert()(self_, index, obj)
     return _insert(self_, index, obj)
 
 
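`list_append` and `list_insert` now branch to the `ListAppend`/`ListInsert` primitives whenever the list has unknown length or the arguments are variable, so in-graph mutation keeps working on dynamic-length lists. A hedged sketch, again assuming `mindspore.mutable(..., dynamic_len=True)` produces such a list:

    import mindspore as ms
    from mindspore import mutable

    @ms.jit
    def push(xs, v):
        # xs is dynamic-length, so append dispatches to the ListAppend
        # primitive instead of the compile-time _append helper.
        xs.append(v)
        return xs

    print(push(mutable([1, 2], dynamic_len=True), 3))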
@@ -2807,23 +3588,40 @@ def list_clear(self_):
 
 
 def list_reverse(self_):
-    """Reverse the list"""
+    """Reverse the obj in list"""
     return _reverse(self_)
 
 
 def list_extend(self_, obj):
-    """Append
+    """Append obj to list"""
     return _extend(self_, obj)
 
 
-def list_count(self_, value):
-    """"Count the number of times an element appears in list"""
-    return _count(self_, value)
-
-
 def dict_get(self_, key_index, default_value=None):
     """Get value by key from dict"""
-
+    if not _haskey(self_, key_index):
+        return default_value
+    return F.dict_getitem(self_, key_index)
+
+
+def dict_clear(self_):
+    """Clear the dict"""
+    return _dict_clear(self_)
+
+
+def dict_haskey(self_, key_index):
+    """Check if key is in dict"""
+    return _haskey(self_, key_index)
+
+
+def dict_update(self_, dict_obj):
+    """Update the dict"""
+    return _update(self_, dict_obj)
+
+
+def dict_fromkeys(self_, seq, value=None):
+    """Check if key is in dict"""
+    return _fromkeys(self_, seq, value)
 
 
 #################
@@ -2831,11 +3629,6 @@ def dict_get(self_, key_index, default_value=None):
 #################
 
 
-def to_array(x):
-    """Implementation of `to_array`."""
-    return x.__ms_to_array__()
-
-
 def filter_(fun, iter_):
     """Support the use of built-in function filter."""
     result = []
@@ -2882,9 +3675,11 @@ def csr_mv(x, dense_vector):
     return F.csr_mv(x, dense_vector)
 
 
-def csr_mm(x,
+def csr_mm(x, matrix):
     """Implementation of `mm` for CSRTensor."""
-
+    if isinstance(matrix, CSRTensor):
+        return F.csr_mm(x, matrix)
+    return _csr_mm(x.indptr, x.indices, x.values, x.shape, matrix)
 
 
 def csr_to_tuple(x):
@@ -2912,7 +3707,7 @@ def coo_abs(x):
 
 def coo_add(x, y, thresh):
     """Implementation of `add` for COOTensor."""
-    return
+    return F.coo_add(x, y, thresh)
 
 
 ################
@@ -2934,12 +3729,12 @@ def sparse_ndim_(x):
     return F.tuple_len(x.shape)
 
 
-def bernoulli(
+def bernoulli(input, p=0.5, seed=None):
     """
     Randomly draws binary numbers from a Bernoulli distribution.
     """
     check_is_int(seed, 'bernoulli', 'seed')
-    return F.bernoulli(
+    return F.bernoulli(input, p, seed)
 
 
 def gather_nd(input_x, indices):
@@ -2950,20 +3745,53 @@ def gather_nd(input_x, indices):
     return F.gather_nd(input_x, indices)
 
 
-def gather(input_x, input_indices, axis):
+def gather(input_x, input_indices, axis, batch_dims=0):
     r"""
     Returns the slice of the input tensor corresponding to the elements of `input_indices` on the specified `axis`.
     Refer to :func:`mindspore.ops.gather` for more detail.
     """
-    return F.gather(input_x, input_indices, axis)
+    return F.gather(input_x, input_indices, axis, batch_dims)
 
 
-def split(
+def split(tensor, split_size_or_sections, axis=0):
     """
-    Splits the
+    Splits the Tensor into chunks along the given axis.
     Refer to :func:`mindspore.ops.split` for more detail.
     """
-    return F.split(
+    return F.split(tensor, split_size_or_sections, axis)
+
+
+def tensor_split(input, indices_or_sections, axis=0):
+    """
+    Splits a tensor into multiple sub-tensors along the given axis.
+    Refer to :func:`mindspore.ops.tensor_split` for more detail.
+    """
+    return F.tensor_split(input, indices_or_sections, axis=axis)
+
+
+def vsplit(input, indices_or_sections):
+    """
+    Splits a tensor into multiple sub-tensors vertically. It is equivalent to `ops.tensor_split` with :math:`axis=0` .
+    Refer to :func:`mindspore.ops.vsplit` for more detail.
+    """
+    return F.vsplit(input, indices_or_sections)
+
+
+def hsplit(input, indices_or_sections):
+    """
+    Splits a tensor into multiple sub-tensors horizontally. It is equivalent to `ops.tensor_split` with :math:`axis=1` .
+    Refer to :func:`mindspore.ops.hsplit` for more detail.
+    """
+    return F.hsplit(input, indices_or_sections)
+
+
+def dsplit(input, indices_or_sections):
+    """
+    Splits a tensor into multiple sub-tensors along the 3rd axis.
+    It is equivalent to `ops.tensor_split` with :math:`axis=2` .
+    Refer to :func:`mindspore.ops.tensor_split` for more detail.
+    """
+    return F.dsplit(input, indices_or_sections)
 
 
 def xlogy(x, y):
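`gather` picks up a `batch_dims` argument, and `split` changes meaning in 2.0: `split_size_or_sections` is a chunk size (or list of sizes), while `tensor_split`/`vsplit`/`hsplit`/`dsplit` cover index-based splitting. A short sketch of the new calling conventions:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.arange(6).reshape(3, 2), ms.float32)
    chunks = ops.split(x, 1, axis=0)                # three (1, 2) chunks
    front, back = ops.tensor_split(x, [1], axis=0)  # shapes (1, 2) and (2, 2)
    print(len(chunks), front.shape, back.shape)

    v = Tensor(np.array([1.0, 2.0, 3.0, 4.0]), ms.float32)
    idx = Tensor(np.array([3, 0]), ms.int32)
    print(ops.gather(v, idx, 0))  # [4. 1.]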
@@ -2998,6 +3826,20 @@ def isfinite(x):
     return F.isfinite(x)
 
 
+def sin(x):
+    r"""
+    For details, please refer to :func:`mindspore.ops.sin`.
+    """
+    return F.sin(x)
+
+
+def sinc(x):
+    r"""
+    For details, please refer to :func:`mindspore.ops.sinc`.
+    """
+    return F.sinc(x)
+
+
 def cos(x):
     r"""
     Computes cosine of input element-wise.
@@ -3005,6 +3847,13 @@ def cos(x):
     return F.cos(x)
 
 
+def cov(x, *, correction=1, fweights=None, aweights=None):
+    r"""
+    For details, please refer to :func:`mindspore.ops.cov`.
+    """
+    return F.cov(x, correction=correction, fweights=fweights, aweights=aweights)
+
+
 def acos(x):
     r"""
     Computes arccosine of input tensors element-wise.
@@ -3019,32 +3868,53 @@ def asin(x):
     return F.asin(x)
 
 
-def acosh(
+def acosh(input):
     r"""
     Computes inverse hyperbolic cosine of the inputs element-wise.
     """
-    return F.acosh(
+    return F.acosh(input)
 
 
-def add(
+def add(input, other):
     r"""
     Computes the element-wise addition of input tensors.
     """
-    return F.add(
+    return F.add(input, other)
 
 
 def addr(x, vec1, vec2, beta=1, alpha=1):
     r"""
     Computes the outer-product of `vec1` and `vec2` and adds it to `x`.
     """
-    return F.addr(x, vec1, vec2, beta=
+    return F.addr(x, vec1, vec2, beta=beta, alpha=alpha)
+
+
+def addbmm(x, batch1, batch2, *, beta=1, alpha=1):
+    r"""
+    Performs matrix multiplication with a reduced sum, and add `x` to the result.
+    """
+    return F.addbmm(x, batch1, batch2, beta=beta, alpha=alpha)
+
+
+def addmm(x, mat1, mat2, *, beta=1, alpha=1):
+    r"""
+    Performs matrix multiplication, and add `x` to the result.
+    """
+    return F.addmm(x, mat1, mat2, beta=beta, alpha=alpha)
 
 
 def addmv(x, mat, vec, beta=1, alpha=1):
     r"""
     Multiplies matrix `mat` and vector `vec`. The vector `x` is added to the final result.
     """
-    return F.addmv(x, mat, vec, beta, alpha)
+    return F.addmv(x, mat, vec, beta=beta, alpha=alpha)
+
+
+def adjoint(x):
+    r"""
+    Computes the conjucated matrix with the last 2 dimensions transposed.
+    """
+    return F.adjoint(x)
 
 
 def asinh(x):
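The `add*` family settles on `beta`/`alpha` scaling passed as keywords: the result is `beta * x + alpha * (matmul result)`. A brief sketch with `ops.addmm`, assuming the public signature matches the dispatcher above:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.ones((2, 2)), ms.float32)
    m1 = Tensor(np.ones((2, 3)), ms.float32)
    m2 = Tensor(np.ones((3, 2)), ms.float32)
    # beta * x + alpha * (m1 @ m2): here 2*1 + 1*3 = 5 everywhere.
    print(ops.addmm(x, m1, m2, beta=2, alpha=1))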
@@ -3054,11 +3924,11 @@ def asinh(x):
     return F.asinh(x)
 
 
-def atan(
+def atan(input):
     r"""
     Computes inverse tangent of the input element-wise.
     """
-    return F.atan(
+    return F.atan(input)
 
 
 def atanh(x):
@@ -3068,9 +3938,16 @@ def atanh(x):
     return F.atanh(x)
 
 
+def baddbmm(x, batch1, batch2, beta=1, alpha=1):
+    r"""
+    For details, please refer to :func:`mindspore.ops.baddbmm`.
+    """
+    return F.baddbmm(x, batch1, batch2, beta=beta, alpha=alpha)
+
+
 def bmm(input_x, mat2):
     r"""
-    Computes
+    Computes matrix multiplication between two tensors by batch.
     """
     return F.bmm(input_x, mat2)
@@ -3080,3 +3957,471 @@ def value_(x):
     Get the value of Parameter or Tensor x. If x is Parameter, will change the type from RefTensor to Tensor.
     """
     return P.Load()(x, monad.U)
+
+
+def to(input_x, dtype):
+    r"""
+    Performs tensor dtype conversion.
+    """
+    return P.Cast()(input_x, dtype)
+
+
+def to_bool(input_x):
+    r"""
+    Converts input tensor dtype to bool.
+    """
+    return P.Cast()(input_x, mstype.bool_)
+
+
+def to_float(input_x):
+    r"""
+    Converts input tensor dtype to float32.
+    """
+    return P.Cast()(input_x, mstype.float32)
+
+
+def to_half(input_x):
+    r"""
+    Converts input tensor dtype to float16.
+    """
+    return P.Cast()(input_x, mstype.float16)
+
+
+def to_int(input_x):
+    r"""
+    Converts input tensor dtype to int32.
+    """
+    return P.Cast()(input_x, mstype.int32)
+
+
+def to_long(input_x):
+    r"""
+    Converts input tensor dtype to int64.
+    """
+    return P.Cast()(input_x, mstype.int64)
+
+
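These `Cast` wrappers back the new dtype-conversion tensor methods. A short sketch of the general-purpose `Tensor.to`, assuming it maps onto the `to(input_x, dtype)` dispatcher above:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor

    x = Tensor(np.array([1.5, 2.5]), ms.float32)
    print(x.to(ms.int32))          # values cast to int32
    print(x.to(ms.float16).dtype)  # Float16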
4004
|
+
def cholesky(input_x, upper=False):
|
|
4005
|
+
r"""
|
|
4006
|
+
Computes the Cholesky decomposition of a symmetric positive-definite matrix
|
|
4007
|
+
"""
|
|
4008
|
+
return F.cholesky(input_x, upper=upper)
|
|
4009
|
+
|
|
4010
|
+
|
|
4011
|
+
def cholesky_inverse(input_x, upper=False):
|
|
4012
|
+
r"""
|
|
4013
|
+
Computes the inverse of the positive definite matrix using cholesky matrix factorization.
|
|
4014
|
+
"""
|
|
4015
|
+
return F.cholesky_inverse(input_x, upper=upper)
|
|
4016
|
+
|
|
4017
|
+
|
|
4018
|
+
def map_tensor_get(map_tensor, key_tensor, insert_default_value=True):
|
|
4019
|
+
r"""
|
|
4020
|
+
Get or create value according the key tensor from a map tensor.
|
|
4021
|
+
"""
|
|
4022
|
+
return _map_tensor_ops.MapTensorGet(insert_default_value)(map_tensor, key_tensor)
|
|
4023
|
+
|
|
4024
|
+
|
|
4025
|
+
def map_tensor_put(map_tensor, key_tensor, value_tensor):
|
|
4026
|
+
r"""
|
|
4027
|
+
Insert or update key value tensor pairs to a map tensor.
|
|
4028
|
+
"""
|
|
4029
|
+
return _map_tensor_ops.put(map_tensor, key_tensor, value_tensor)
|
|
4030
|
+
|
|
4031
|
+
|
|
4032
|
+
def map_tensor_erase(map_tensor, key_tensor):
|
|
4033
|
+
r"""
|
|
4034
|
+
Remove records according the key tensor from a map tensor.
|
|
4035
|
+
"""
|
|
4036
|
+
return _map_tensor_ops.erase(map_tensor, key_tensor)
|
|
4037
|
+
|
|
4038
|
+
|
|
4039
|
+
def map_tensor_get_keys(map_tensor):
|
|
4040
|
+
r"""
|
|
4041
|
+
Get all keys as a tensor.
|
|
4042
|
+
"""
|
|
4043
|
+
return _map_tensor_ops.get_keys(map_tensor)
|
|
4044
|
+
|
|
4045
|
+
|
|
4046
|
+
def map_tensor_get_values(map_tensor):
|
|
4047
|
+
r"""
|
|
4048
|
+
Get all values as a tensor.
|
|
4049
|
+
"""
|
|
4050
|
+
return _map_tensor_ops.get_values(map_tensor)
|
|
4051
|
+
|
|
4052
|
+
|
|
4053
|
+
def map_tensor_get_data(map_tensor):
|
|
4054
|
+
r"""
|
|
4055
|
+
Get all keys and values as a tensor.
|
|
4056
|
+
"""
|
|
4057
|
+
return _map_tensor_ops.get_data(map_tensor)
|
|
4058
|
+
|
|
4059
|
+
|
|
4060
|
+
def conj(input):
|
|
4061
|
+
r"""
|
|
4062
|
+
Computes complex conjugate of the input element-wise.
|
|
4063
|
+
"""
|
|
4064
|
+
return F.conj(input)
|
|
4065
|
+
|
|
4066
|
+
|
|
4067
|
+
def cross(input, other, dim=None):
|
|
4068
|
+
r"""
|
|
4069
|
+
Computes the cross product of input vectors in specified dimension.
|
|
4070
|
+
"""
|
|
4071
|
+
return F.cross(input, other, dim)
|
|
4072
|
+
|
|
4073
|
+
|
|
4074
|
+
def erfinv(input):
|
|
4075
|
+
r"""
|
|
4076
|
+
Computes the inverse error function of input tensor.
|
|
4077
|
+
"""
|
|
4078
|
+
return F.erfinv(input)
|
|
4079
|
+
|
|
4080
|
+
|
|
4081
|
+
def less_equal(input, other):
|
|
4082
|
+
r"""
|
|
4083
|
+
Computes the boolean value of :math:`input\_x <= other` element-wise.
|
|
4084
|
+
"""
|
|
4085
|
+
return F.less_equal(input, other)
|
|
4086
|
+
|
|
4087
|
+
|
|
4088
|
+
def lcm(x, other):
|
|
4089
|
+
r"""
|
|
4090
|
+
Computes least common multiplier of input tensors element-wise.
|
|
4091
|
+
"""
|
|
4092
|
+
return F.lcm(x, other)
|
|
4093
|
+
|
|
4094
|
+
|
|
4095
|
+
def ldexp(x, other):
|
|
4096
|
+
r"""
|
|
4097
|
+
Multiplies input by 2**:attr:other.
|
|
4098
|
+
"""
|
|
4099
|
+
return F.ldexp(x, other)
|
|
4100
|
+
|
|
4101
|
+
|
|
4102
|
+
def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
|
|
4103
|
+
r"""
|
|
4104
|
+
Combines an array of sliding local blocks into a large containing tensor.
|
|
4105
|
+
"""
|
|
4106
|
+
return F.fold(input, output_size, kernel_size, dilation, padding, stride)
|
|
4107
|
+
|
|
4108
|
+
|
|
4109
|
+
def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
|
|
4110
|
+
r"""
|
|
4111
|
+
Extracts sliding local blocks from a batched input tensor.
|
|
4112
|
+
"""
|
|
4113
|
+
return F.unfold(input, kernel_size, dilation, padding, stride)
|
|
4114
|
+
|
|
4115
|
+
|
|
4116
|
+
def expand(input, size):
|
|
4117
|
+
r"""
|
|
4118
|
+
Returns a new view of the self tensor with singleton dimensions expanded to a larger size.
|
|
4119
|
+
"""
|
|
4120
|
+
return F.expand(input, size)
|
|
4121
|
+
|
|
4122
|
+
|
|
4123
|
+
def cumprod(input, dim, dtype=None):
|
|
4124
|
+
r"""
|
|
4125
|
+
Computes the cumulative product of the `input` tensor along dimension `dim`.
|
|
4126
|
+
"""
|
|
4127
|
+
return F.cumprod(input, dim, dtype)
|
|
4128
|
+
|
|
4129
|
+
|
|
4130
|
+
def multiply(input, other):
|
|
4131
|
+
"""For details, please refer to :func:`mindspore.ops.multiply`."""
|
|
4132
|
+
return F.multiply(input, other)
|
|
4133
|
+
|
|
4134
|
+
|
|
4135
|
+
def div(input, value, *, rounding_mode=None):
|
|
4136
|
+
r"""
|
|
4137
|
+
Divides the tensor `input` by the given input tensor `value` in floating-point type element-wise.
|
|
4138
|
+
"""
|
|
4139
|
+
return F.div(input, value, rounding_mode=rounding_mode)
|
|
4140
|
+
|
|
4141
|
+
|
|
4142
|
+
def equal(x, y):
|
|
4143
|
+
r"""
|
|
4144
|
+
Computes the equivalence between the tensor `x` and the given input tensor `y` element-wise.
|
|
4145
|
+
"""
|
|
4146
|
+
return F.equal(x, y)
|
|
4147
|
+
|
|
4148
|
+
|
|
4149
|
+
def expm1(input_x):
|
|
4150
|
+
r"""
|
|
4151
|
+
Computes exponential then minus 1 of a tensor element-wise.
|
|
4152
|
+
"""
|
|
4153
|
+
return F.expm1(input_x)
|
|
4154
|
+
|
|
4155
|
+
|
|
4156
|
+
@constexpr
|
|
4157
|
+
def _check_index_add_alpha(alpha):
|
|
4158
|
+
check_is_number(alpha, (int, float))
|
|
4159
|
+
|
|
4160
|
+
|
|
4161
|
+
def index_add(input, dim, index, source, *, alpha=1):
|
|
4162
|
+
r"""
|
|
4163
|
+
Adds tensor `alpha` times `source` to specified `dim` and `index` of input tensor.
|
|
4164
|
+
"""
|
|
4165
|
+
_check_index_add_alpha(alpha)
|
|
4166
|
+
source = source * alpha
|
|
4167
|
+
return F.index_add(input, indices=index, y=source, axis=dim)
|
|
4168
|
+
|
|
4169
|
+
|
|
4170
|
+
def greater(input, other):
|
|
4171
|
+
r"""
|
|
4172
|
+
Computes the boolean value of :math:`input > other` element-wise.
|
|
4173
|
+
"""
|
|
4174
|
+
return F.greater(input, other)
|
|
4175
|
+
|
|
4176
|
+
|
|
4177
|
+
def greater_equal(input, other):
|
|
4178
|
+
r"""
|
|
4179
|
+
Computes the boolean value of :math:`input >= other` element-wise.
|
|
4180
|
+
"""
|
|
4181
|
+
return F.greater_equal(input, other)
|
|
4182
|
+
|
|
4183
|
+
|
|
4184
|
+
def igamma(input, other):
|
|
4185
|
+
r"""
|
|
4186
|
+
Computes lower regularized incomplete Gamma function.
|
|
4187
|
+
"""
|
|
4188
|
+
return F.igamma(input, other)
|
|
4189
|
+
|
|
4190
|
+
|
|
4191
|
+
def igammac(input, other):
|
|
4192
|
+
r"""
|
|
4193
|
+
Computes upper regularized incomplete Gamma function.
|
|
4194
|
+
"""
|
|
4195
|
+
return F.igammac(input, other)
|
|
4196
|
+
|
|
4197
|
+
|
|
+def isinf(input):
+    r"""
+    Determines which elements are inf or -inf for each position.
+    """
+    return F.isinf(input)
+
+
+def isnan(input):
+    r"""
+    Determines which elements are NaN for each position.
+    """
+    return F.isnan(input)
+
+
+def le(input, other):
+    r"""
+    Computes the boolean value of :math:`input <= other` element-wise.
+    """
+    return F.le(input, other)
+
+
+def less(input, other):
+    r"""
+    Computes the boolean value of :math:`input < other` element-wise.
+    """
+    return F.less(input, other)
+
+
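Because NaN compares unequal to everything, including itself, these predicates are the reliable way to detect non-finite values; direct equality tests will not work. For example:

    import mindspore as ms
    from mindspore import ops

    x = ms.Tensor([1.0, float('inf'), float('-inf'), float('nan')])
    print(ops.isinf(x))   # [False  True  True False]
    print(ops.isnan(x))   # [False False False  True]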
+def logical_and(input, other):
+    r"""
+    Computes the "logical AND" of two tensors element-wise.
+    """
+    return F.logical_and(input, other)
+
+
+def logical_not(input):
+    r"""
+    Computes the "logical NOT" of input tensor element-wise.
+    """
+    return F.logical_not(input)
+
+
+def logical_or(input, other):
+    r"""
+    Computes the "logical OR" of two tensors element-wise.
+    """
+    return F.logical_or(input, other)
+
+
+def logical_xor(input, other):
+    r"""
+    Computes the "logical XOR" of two tensors element-wise.
+    """
+    return F.logical_xor(input, other)
+
+
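These four follow the standard Boolean truth tables on bool tensors; XOR in particular is true exactly when the inputs differ. A compact check:

    import mindspore as ms
    from mindspore import ops

    a = ms.Tensor([True, True, False, False])
    b = ms.Tensor([True, False, True, False])
    print(ops.logical_and(a, b))   # [ True False False False]
    print(ops.logical_or(a, b))    # [ True  True  True False]
    print(ops.logical_xor(a, b))   # [False  True  True False]
    print(ops.logical_not(a))      # [False False  True  True]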
+def lstsq(input, A):
+    r"""
+    Computes the solutions of the least squares and minimum norm problems of full-rank
+    matrix `input` of size :math:`(m \times n)` and matrix `A` of size :math:`(m \times k)`.
+    """
+    return F.lstsq(input, A)
+
+
+def mvlgamma(input, p):
+    r"""
+    Computes the multivariate log-gamma function with dimension p element-wise.
+    """
+    return F.mvlgamma(input, p)
+
+
+def maximum(input, other):
+    r"""
+    Computes the maximum of input tensors element-wise.
+    """
+    return F.maximum(input, other)
+
+
+def mul(input, other):
+    r"""
+    Multiplies two tensors element-wise.
+    """
+    return F.mul(input, other)
+
+
+def neg(input):
+    r"""
+    Returns a tensor with negative values of the input tensor element-wise.
+    """
+    return F.neg(input)
+
+
+def ne(input, other):
+    r"""
+    Computes the non-equivalence of two tensors element-wise.
+    """
+    return F.ne(input, other)
+
+
+def not_equal(x, other):
+    r"""
+    Computes the non-equivalence of two tensors element-wise.
+    """
+    return F.not_equal(x, other)
+
+
+def sign(x):
+    r"""
+    For details, please refer to :func:`mindspore.ops.sign`.
+    """
+    return F.sign(x)
+
+
+def signbit(x):
+    """
+    For details, please refer to :func:`mindspore.ops.signbit`.
+    """
+    return F.signbit(x)
+
+
+def sgn(x):
+    """
+    For details, please refer to :func:`mindspore.ops.sgn`.
+    """
+    return F.sgn(x)
+
+
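The three sign-like functions differ in range and return type: `sign` maps reals to {-1, 0, 1}, `signbit` returns a bool tensor that is True for negative inputs, and `sgn` is the complex generalization x/|x|, which coincides with `sign` on real input. A sketch for the real case (that all three are exposed under `mindspore.ops` with these names is an assumption based on the docstring references above):

    import mindspore as ms
    from mindspore import ops

    x = ms.Tensor([-2.0, 0.0, 3.0])
    print(ops.sign(x))      # [-1.  0.  1.]
    print(ops.signbit(x))   # [ True False False]
    print(ops.sgn(x))       # same as sign for real input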
+def sinh(input):
+    r"""
+    Computes hyperbolic sine of the input element-wise.
+    """
+    return F.sinh(input)
+
+
+def sort(input, axis=-1, descending=False):
+    r"""
+    Sorts the elements of the input tensor along a given dimension in ascending order by value.
+    """
+    return F.sort(input, axis=axis, descending=descending)
+
+
+def argsort(input, axis=-1, descending=False):
+    """For details, please refer to :func:`mindspore.ops.argsort`."""
+    return F.argsort(input, axis, descending)
+
+
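`sort` returns both the sorted values and their indices, while `argsort` returns only the indices; gathering the input in argsort order reproduces the sorted values. For example:

    import mindspore as ms
    from mindspore import ops

    x = ms.Tensor([3.0, 1.0, 2.0])
    values, indices = ops.sort(x)   # values [1. 2. 3.], indices [1 2 0]
    order = ops.argsort(x)          # [1 2 0]
    print(values, indices, order)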
+def trunc(input):
+    r"""
+    Returns a new tensor with the truncated integer values of the elements of input.
+    """
+    return F.trunc(input)
+
+
+def where(x, condition, y):
+    r"""
+    Returns a tensor whose elements are selected from either `x` or `y` depending on `condition`.
+    Please refer to :func:`mindspore.ops.where`.
+    """
+    return F.where(condition, x, y)
+
+
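Note the argument order: the method-style wrapper takes (x, condition, y) with self first, but forwards as F.where(condition, x, y), so the condition leads in the functional form. A sketch of the functional call:

    import mindspore as ms
    from mindspore import ops

    cond = ms.Tensor([True, False, True])
    x = ms.Tensor([1.0, 2.0, 3.0])
    y = ms.Tensor([-1.0, -2.0, -3.0])
    print(ops.where(cond, x, y))   # [ 1. -2.  3.]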
+def imag(input):
+    r"""
+    Returns a new tensor containing imaginary value of the input.
+    """
+    return F.imag(input)
+
+
+def diff(x, n=1, axis=-1, prepend=None, append=None):
+    r"""
+    For details, please refer to :func:`mindspore.ops.diff`.
+    """
+    return F.diff(x, n, axis, prepend, append)
+
+
+def frac(x):
+    r"""
+    For details, please refer to :func:`mindspore.ops.frac`.
+    """
+    return F.frac(x)
+
+
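`diff` computes the n-th discrete difference along `axis`, so each pass shortens that axis by one; for n=1, out[i] = a[i+1] - a[i]. For instance:

    import mindspore as ms
    from mindspore import ops

    x = ms.Tensor([1.0, 4.0, 9.0, 16.0])
    print(ops.diff(x))        # [3. 5. 7.]
    print(ops.diff(x, n=2))   # [2. 2.]  (the difference applied twice)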
+def argwhere(input):
+    r"""
+    For details, please refer to :func:`mindspore.ops.argwhere`.
+    """
+    return F.argwhere(input)
+
+
+def moveaxis(input, source, destination):
+    r"""
+    For details, please refer to :func:`mindspore.ops.moveaxis`.
+    """
+    return F.moveaxis(input, source, destination)
+
+
+def movedim(input, source, destination):
+    r"""
+    For details, please refer to :func:`mindspore.ops.movedim`.
+    """
+    return F.movedim(input, source, destination)
+
+
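`moveaxis` and `movedim` are aliases in spirit (`movedim` follows the torch name): both relocate an axis while preserving the relative order of the remaining axes. For example:

    import numpy as np
    import mindspore as ms
    from mindspore import ops

    x = ms.Tensor(np.zeros((2, 3, 4), dtype=np.float32))
    print(ops.moveaxis(x, 0, -1).shape)   # (3, 4, 2)
    print(ops.movedim(x, 0, -1).shape)    # (3, 4, 2)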
+def nextafter(input, other):
+    r"""
+    For details, please refer to :func:`mindspore.ops.nextafter`.
+    """
+    return F.nextafter(input, other)
+
+
+def qr(input, some=True):
+    r"""
+    For details, please refer to :func:`mindspore.ops.qr`.
+    """
+    check_bool_type(some, 'some', 'Tensor.qr')
+    return F.qr(input, 'reduced' if some else 'complete')
+
+
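The boolean `some` is translated into the mode string expected downstream: some=True yields the reduced factorization and some=False the complete one; with reduced QR of an m-by-n input, Q has shape (m, min(m, n)). A sketch of calling it as a Tensor method (the 'Tensor.qr' string in the check above suggests it is exposed that way, which is an assumption here):

    import numpy as np
    import mindspore as ms

    x = ms.Tensor(np.random.rand(4, 3).astype(np.float32))
    q, r = x.qr(some=True)    # reduced: q is (4, 3), r is (3, 3)
    print(q.shape, r.shape)
    q, r = x.qr(some=False)   # complete: q is (4, 4), r is (4, 3)
    print(q.shape, r.shape)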
+def amax(input, axis=None, keep_dims=False):
+    r"""
+    For details, please refer to :func:`mindspore.ops.amax`.
+    """
+    return F.amax(input, axis, keep_dims)
+
+
+def amin(input, axis=None, keep_dims=False):
+    r"""
+    For details, please refer to :func:`mindspore.ops.amin`.
+    """
+    return F.amin(input, axis, keep_dims)
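Both reductions take an optional `axis` and a `keep_dims` flag to retain reduced axes with size one; that axis=None reduces over all elements is an assumption based on the wrappers' defaults above. For example:

    import numpy as np
    import mindspore as ms
    from mindspore import ops

    x = ms.Tensor(np.array([[1.0, 5.0], [3.0, 2.0]], dtype=np.float32))
    print(ops.amax(x))                            # 5.0  (reduce over everything)
    print(ops.amax(x, axis=0))                    # [3. 5.]
    print(ops.amin(x, axis=1, keep_dims=True))    # [[1.] [2.]], shape (2, 1)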