mindspore-2.2.11-cp37-cp37m-manylinux1_x86_64.whl → mindspore-2.3.0rc1-cp37-cp37m-manylinux1_x86_64.whl
This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of mindspore may be problematic.
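The listing below shows one entry per changed file as `- path +added -removed`; binary artifacts (shared objects, executables) appear as `+0 -0`, and renames use brace notation (`{old → new}`). Since wheels are ordinary zip archives (PEP 427), a listing of this shape can be reproduced locally. The sketch below is illustrative only: the two wheel paths are assumed local downloads, and the text/binary heuristic is a guess at, not a copy of, the registry's actual tooling.

```python
import difflib
import zipfile

# Assumed local paths; any two wheels will do (wheels are zip archives, PEP 427).
OLD = "mindspore-2.2.11-cp37-cp37m-manylinux1_x86_64.whl"
NEW = "mindspore-2.3.0rc1-cp37-cp37m-manylinux1_x86_64.whl"


def member_lines(zf, name):
    """Text lines of a member; [] if the member is absent, None if it is binary."""
    if name not in zf.namelist():
        return []
    try:
        return zf.read(name).decode("utf-8").splitlines()
    except UnicodeDecodeError:
        return None  # crude text/binary heuristic, good enough for a sketch


with zipfile.ZipFile(OLD) as old, zipfile.ZipFile(NEW) as new:
    for name in sorted(set(old.namelist()) | set(new.namelist())):
        a, b = member_lines(old, name), member_lines(new, name)
        if a is None or b is None:
            # Binary members carry no line counts; list them only when changed.
            old_crc = old.getinfo(name).CRC if name in old.namelist() else None
            new_crc = new.getinfo(name).CRC if name in new.namelist() else None
            if old_crc != new_crc:
                print(f"- {name} +0 -0")
            continue
        added = removed = 0
        for tag, i1, i2, j1, j2 in difflib.SequenceMatcher(None, a, b).get_opcodes():
            if tag in ("replace", "delete"):
                removed += i2 - i1
            if tag in ("replace", "insert"):
                added += j2 - j1
        if added or removed:
            print(f"- {name} +{added} -{removed}")
```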
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +5 -4
- mindspore/_akg/akg/composite/build_module.py +155 -11
- mindspore/_akg/akg/config/repository.json +38 -0
- mindspore/_akg/akg/ms/info_version_adapt.py +29 -0
- mindspore/_akg/akg/topi/cpp/impl.py +1 -1
- mindspore/_akg/akg/tvm/_ffi/base.py +1 -1
- mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -1
- mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +2 -1
- mindspore/_akg/akg/utils/composite_op_helper.py +4 -2
- mindspore/_akg/akg/utils/dump_ascend_meta.py +2 -2
- mindspore/_akg/akg/utils/gen_random.py +14 -8
- mindspore/_akg/akg/utils/op_dsl.py +11 -0
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +5 -5
- mindspore/_c_dataengine.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +58 -0
- mindspore/_extends/builtin_operations.py +2 -1
- mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
- mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
- mindspore/_extends/parse/__init__.py +18 -14
- mindspore/_extends/parse/compile_config.py +229 -0
- mindspore/_extends/parse/parser.py +155 -59
- mindspore/_extends/parse/resources.py +40 -7
- mindspore/_extends/parse/standard_method.py +124 -204
- mindspore/_extends/remote/kernel_build_server.py +2 -0
- mindspore/_mindspore_offline_debug.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_profiler.py +30 -0
- mindspore/amp.py +24 -18
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +3 -1
- mindspore/common/_jit_fallback_utils.py +2 -3
- mindspore/common/_register_for_adapter.py +7 -0
- mindspore/common/_stub_tensor.py +6 -1
- mindspore/common/_utils.py +5 -17
- mindspore/common/api.py +91 -48
- mindspore/common/auto_dynamic_shape.py +27 -14
- mindspore/common/dtype.py +5 -4
- mindspore/common/dump.py +5 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +20 -11
- mindspore/common/lazy_inline.py +58 -17
- mindspore/common/mindir_util.py +12 -2
- mindspore/common/mutable.py +79 -14
- mindspore/common/parameter.py +19 -4
- mindspore/common/seed.py +9 -9
- mindspore/common/sparse_tensor.py +251 -18
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +321 -435
- mindspore/communication/__init__.py +3 -3
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/management.py +56 -38
- mindspore/config/op_info.config +22 -54
- mindspore/context.py +192 -54
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +6 -6
- mindspore/dataset/audio/transforms.py +711 -158
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +2 -2
- mindspore/dataset/engine/datasets.py +95 -38
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +33 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +7 -7
- mindspore/dataset/engine/datasets_vision.py +75 -71
- mindspore/dataset/engine/offload.py +5 -7
- mindspore/dataset/engine/validators.py +1 -1
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +408 -121
- mindspore/dataset/text/utils.py +9 -9
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/transforms.py +261 -76
- mindspore/dataset/utils/browse_dataset.py +9 -9
- mindspore/dataset/vision/__init__.py +3 -3
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +2 -2
- mindspore/dataset/vision/transforms.py +2264 -514
- mindspore/dataset/vision/utils.py +40 -9
- mindspore/dataset/vision/validators.py +7 -1
- mindspore/experimental/optim/__init__.py +12 -2
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +35 -34
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +40 -16
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +65 -125
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +15 -8
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +28 -19
- mindspore/hal/__init__.py +34 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/stream.py +337 -0
- mindspore/include/api/data_type.h +2 -2
- mindspore/include/api/dual_abi_helper.h +16 -3
- mindspore/include/api/model.h +1 -3
- mindspore/include/api/status.h +14 -0
- mindspore/include/c_api/model_c.h +173 -0
- mindspore/include/c_api/ms/base/types.h +1 -0
- mindspore/include/c_api/types_c.h +19 -0
- mindspore/include/dataset/execute.h +1 -3
- mindspore/include/mindapi/base/format.h +125 -23
- mindspore/include/mindapi/base/types.h +7 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +2044 -154
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +2044 -33
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/build_tbe_kernel.py +529 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/compiler.py +56 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/custom.py +1109 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/get_file_path.py +36 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/tbe_topi.py +556 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6365 -1759
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_add_custom.h +49 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +52 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +232 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +232 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.cpp +192 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.cpp +274 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +39 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/mindrecord/__init__.py +5 -1
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +25 -0
- mindspore/mindrecord/filewriter.py +74 -56
- mindspore/mindrecord/mindpage.py +40 -6
- mindspore/mindrecord/shardutils.py +3 -2
- mindspore/mindrecord/shardwriter.py +7 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +53 -66
- mindspore/mindrecord/tools/cifar10_to_mr.py +48 -63
- mindspore/mindrecord/tools/csv_to_mr.py +7 -17
- mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +11 -21
- mindspore/mindrecord/tools/tfrecord_to_mr.py +2 -10
- mindspore/multiprocessing/__init__.py +68 -0
- mindspore/nn/cell.py +86 -133
- mindspore/nn/dynamic_lr.py +2 -2
- mindspore/nn/layer/activation.py +80 -91
- mindspore/nn/layer/basic.py +4 -80
- mindspore/nn/layer/channel_shuffle.py +3 -16
- mindspore/nn/layer/container.py +3 -3
- mindspore/nn/layer/conv.py +71 -71
- mindspore/nn/layer/embedding.py +107 -46
- mindspore/nn/layer/image.py +4 -7
- mindspore/nn/layer/normalization.py +46 -38
- mindspore/nn/layer/padding.py +26 -39
- mindspore/nn/layer/pooling.py +13 -9
- mindspore/nn/layer/rnn_cells.py +5 -15
- mindspore/nn/layer/rnns.py +6 -5
- mindspore/nn/layer/thor_layer.py +1 -2
- mindspore/nn/layer/timedistributed.py +1 -1
- mindspore/nn/layer/transformer.py +52 -50
- mindspore/nn/learning_rate_schedule.py +6 -5
- mindspore/nn/loss/loss.py +44 -65
- mindspore/nn/optim/ada_grad.py +6 -4
- mindspore/nn/optim/adadelta.py +3 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +102 -181
- mindspore/nn/optim/adamax.py +4 -2
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +4 -2
- mindspore/nn/optim/ftrl.py +31 -61
- mindspore/nn/optim/lamb.py +5 -3
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +6 -4
- mindspore/nn/optim/momentum.py +13 -25
- mindspore/nn/optim/optimizer.py +6 -3
- mindspore/nn/optim/proximal_ada_grad.py +4 -2
- mindspore/nn/optim/rmsprop.py +9 -3
- mindspore/nn/optim/rprop.py +4 -2
- mindspore/nn/optim/sgd.py +4 -2
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
- mindspore/nn/probability/distribution/beta.py +2 -2
- mindspore/nn/probability/distribution/categorical.py +4 -6
- mindspore/nn/probability/distribution/cauchy.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/poisson.py +2 -2
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +13 -1
- mindspore/nn/wrap/__init__.py +2 -1
- mindspore/nn/wrap/cell_wrapper.py +33 -12
- mindspore/nn/wrap/grad_reducer.py +148 -8
- mindspore/nn/wrap/loss_scale.py +7 -7
- mindspore/numpy/__init__.py +2 -0
- mindspore/numpy/array_creations.py +2 -0
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/fft.py +431 -0
- mindspore/numpy/math_ops.py +53 -59
- mindspore/numpy/utils.py +3 -0
- mindspore/ops/__init__.py +7 -3
- mindspore/ops/_grad_experimental/grad_array_ops.py +4 -160
- mindspore/ops/_grad_experimental/grad_comm_ops.py +14 -18
- mindspore/ops/_grad_experimental/grad_inner_ops.py +8 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +92 -287
- mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
- mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
- mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/__init__.py +0 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +1 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
- mindspore/ops/_op_impl/{cpu/concat.py → aicpu/generate_eod_mask.py} +16 -17
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
- mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -3
- mindspore/ops/_op_impl/cpu/adam.py +2 -2
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
- mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
- mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
- mindspore/ops/_vmap/vmap_array_ops.py +137 -101
- mindspore/ops/_vmap/vmap_base.py +8 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +102 -56
- mindspore/ops/_vmap/vmap_image_ops.py +70 -13
- mindspore/ops/_vmap/vmap_math_ops.py +74 -49
- mindspore/ops/_vmap/vmap_nn_ops.py +164 -89
- mindspore/ops/_vmap/vmap_other_ops.py +1 -1
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +133 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +248 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +147 -0
- mindspore/ops/auto_generate/gen_extend_func.py +130 -0
- mindspore/ops/auto_generate/gen_ops_def.py +4786 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +8335 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +77 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +118 -17
- mindspore/ops/composite/math_ops.py +9 -48
- mindspore/ops/composite/multitype_ops/_compile_utils.py +166 -601
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +15 -133
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
- mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
- mindspore/ops/deprecated.py +14 -3
- mindspore/ops/extend/__init__.py +46 -0
- mindspore/ops/extend/array_func.py +152 -0
- mindspore/ops/extend/math_func.py +76 -0
- mindspore/ops/{_op_impl/tbe/atomic_addr_clean.py → extend/nn_func.py} +5 -15
- mindspore/ops/function/__init__.py +19 -11
- mindspore/ops/function/array_func.py +248 -1436
- mindspore/ops/function/clip_func.py +12 -13
- mindspore/ops/function/debug_func.py +2 -5
- mindspore/ops/function/fft_func.py +31 -0
- mindspore/ops/function/grad/grad_func.py +24 -17
- mindspore/ops/function/image_func.py +27 -21
- mindspore/ops/function/linalg_func.py +30 -53
- mindspore/ops/function/math_func.py +450 -2356
- mindspore/ops/function/nn_func.py +470 -789
- mindspore/ops/function/other_func.py +4 -5
- mindspore/ops/function/parameter_func.py +6 -92
- mindspore/ops/function/random_func.py +24 -80
- mindspore/ops/function/sparse_unary_func.py +11 -18
- mindspore/ops/function/spectral_func.py +1 -1
- mindspore/ops/function/vmap_func.py +15 -14
- mindspore/ops/functional.py +56 -62
- mindspore/ops/op_info_register.py +22 -19
- mindspore/ops/operations/__init__.py +19 -19
- mindspore/ops/operations/_embedding_cache_ops.py +1 -1
- mindspore/ops/operations/_grad_ops.py +20 -723
- mindspore/ops/operations/_inner_ops.py +233 -286
- mindspore/ops/operations/_quant_ops.py +4 -4
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +5 -480
- mindspore/ops/operations/_sequence_ops.py +4 -34
- mindspore/ops/operations/array_ops.py +100 -2481
- mindspore/ops/operations/comm_ops.py +38 -46
- mindspore/ops/operations/custom_ops.py +9 -9
- mindspore/ops/operations/debug_ops.py +101 -32
- mindspore/ops/operations/image_ops.py +3 -219
- mindspore/ops/operations/inner_ops.py +52 -38
- mindspore/ops/operations/linalg_ops.py +1 -49
- mindspore/{rewrite/ast_transformers → ops/operations/manually_defined}/__init__.py +11 -4
- mindspore/ops/operations/manually_defined/_inner.py +61 -0
- mindspore/ops/operations/manually_defined/ops_def.py +1391 -0
- mindspore/ops/operations/math_ops.py +752 -4588
- mindspore/ops/operations/nn_ops.py +380 -1750
- mindspore/ops/operations/other_ops.py +50 -42
- mindspore/ops/operations/random_ops.py +3 -50
- mindspore/ops/operations/sparse_ops.py +4 -4
- mindspore/ops/primitive.py +196 -96
- mindspore/ops/silent_check.py +162 -0
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +248 -0
- mindspore/ops_generate/arg_handler.py +147 -0
- mindspore/ops_generate/gen_aclnn_implement.py +266 -0
- mindspore/ops_generate/gen_ops.py +1062 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +129 -0
- mindspore/ops_generate/gen_pyboost_func.py +932 -0
- mindspore/ops_generate/gen_utils.py +188 -0
- mindspore/ops_generate/op_proto.py +138 -0
- mindspore/ops_generate/pyboost_utils.py +364 -0
- mindspore/ops_generate/template.py +238 -0
- mindspore/parallel/__init__.py +6 -4
- mindspore/parallel/_auto_parallel_context.py +28 -4
- mindspore/parallel/_cell_wrapper.py +16 -9
- mindspore/parallel/_cost_model_context.py +1 -1
- mindspore/parallel/_dp_allreduce_fusion.py +159 -159
- mindspore/parallel/_parallel_serialization.py +28 -12
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +22 -8
- mindspore/parallel/_transformer/__init__.py +1 -1
- mindspore/parallel/_transformer/layers.py +1 -1
- mindspore/parallel/_transformer/loss.py +1 -1
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/op_parallel_config.py +1 -1
- mindspore/parallel/_transformer/transformer.py +9 -9
- mindspore/parallel/_utils.py +131 -6
- mindspore/parallel/algo_parameter_config.py +6 -6
- mindspore/parallel/checkpoint_transform.py +156 -26
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +345 -0
- mindspore/parallel/cluster/process_entity/_utils.py +116 -0
- mindspore/parallel/cluster/run.py +139 -0
- mindspore/parallel/mpi/__init__.py +1 -1
- mindspore/parallel/mpi/_mpi_config.py +1 -1
- mindspore/parallel/parameter_broadcast.py +152 -0
- mindspore/parallel/shard.py +99 -2
- mindspore/profiler/common/util.py +20 -0
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +66 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +77 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +146 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +108 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +80 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +52 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +104 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +59 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
- mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
- mindspore/profiler/parser/ascend_flops_generator.py +27 -5
- mindspore/profiler/parser/ascend_fpbp_generator.py +8 -2
- mindspore/profiler/parser/ascend_hccl_generator.py +27 -279
- mindspore/profiler/parser/ascend_msprof_exporter.py +122 -118
- mindspore/profiler/parser/ascend_msprof_generator.py +67 -273
- mindspore/profiler/parser/ascend_op_generator.py +68 -27
- mindspore/profiler/parser/ascend_timeline_generator.py +292 -131
- mindspore/profiler/parser/base_timeline_generator.py +17 -3
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -1
- mindspore/profiler/parser/framework_parser.py +11 -4
- mindspore/profiler/parser/integrator.py +3 -1
- mindspore/profiler/parser/memory_usage_parser.py +8 -2
- mindspore/profiler/parser/minddata_analyzer.py +8 -2
- mindspore/profiler/parser/minddata_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_analyzer.py +5 -3
- mindspore/profiler/parser/msadvisor_parser.py +10 -4
- mindspore/profiler/parser/profiler_info.py +5 -0
- mindspore/profiler/profiling.py +373 -171
- mindspore/rewrite/__init__.py +2 -13
- mindspore/rewrite/api/node.py +122 -36
- mindspore/rewrite/api/pattern_engine.py +2 -3
- mindspore/rewrite/api/scoped_value.py +16 -15
- mindspore/rewrite/api/symbol_tree.py +46 -30
- mindspore/rewrite/ast_helpers/__init__.py +3 -6
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
- mindspore/rewrite/common/__init__.py +1 -2
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
- mindspore/rewrite/{namer.py → common/namer.py} +63 -18
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/node/__init__.py +5 -5
- mindspore/rewrite/node/call_function.py +23 -7
- mindspore/rewrite/node/cell_container.py +7 -3
- mindspore/rewrite/node/control_flow.py +53 -28
- mindspore/rewrite/node/node.py +212 -196
- mindspore/rewrite/node/node_manager.py +51 -22
- mindspore/rewrite/node/node_topological_manager.py +3 -23
- mindspore/rewrite/parsers/__init__.py +12 -0
- mindspore/rewrite/parsers/arguments_parser.py +8 -9
- mindspore/rewrite/parsers/assign_parser.py +635 -413
- mindspore/rewrite/parsers/attribute_parser.py +3 -4
- mindspore/rewrite/parsers/class_def_parser.py +107 -144
- mindspore/rewrite/parsers/constant_parser.py +5 -5
- mindspore/rewrite/parsers/container_parser.py +4 -6
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +31 -98
- mindspore/rewrite/parsers/function_def_parser.py +13 -5
- mindspore/rewrite/parsers/if_parser.py +28 -10
- mindspore/rewrite/parsers/module_parser.py +8 -182
- mindspore/rewrite/parsers/parser.py +1 -5
- mindspore/rewrite/parsers/parser_register.py +1 -1
- mindspore/rewrite/parsers/return_parser.py +5 -10
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
- mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
- mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
- mindspore/run_check/_check_version.py +6 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +9 -19
- mindspore/scipy/__init__.py +2 -1
- mindspore/scipy/fft.py +133 -0
- mindspore/scipy/linalg.py +140 -55
- mindspore/scipy/ops.py +15 -71
- mindspore/scipy/ops_grad.py +5 -34
- mindspore/scipy/optimize/line_search.py +2 -2
- mindspore/scipy/optimize/minimize.py +1 -1
- mindspore/train/__init__.py +3 -2
- mindspore/train/_utils.py +178 -4
- mindspore/train/amp.py +167 -245
- mindspore/train/callback/_backup_and_restore.py +4 -4
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +47 -21
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_landscape.py +15 -10
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +13 -14
- mindspore/train/callback/_time_monitor.py +2 -2
- mindspore/train/data_sink.py +1 -1
- mindspore/train/dataset_helper.py +19 -4
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/accuracy.py +7 -7
- mindspore/train/metrics/confusion_matrix.py +8 -6
- mindspore/train/metrics/cosine_similarity.py +6 -4
- mindspore/train/metrics/error.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/perplexity.py +2 -1
- mindspore/train/metrics/topk.py +2 -2
- mindspore/train/mind_ir_pb2.py +75 -6
- mindspore/train/model.py +41 -27
- mindspore/train/serialization.py +262 -133
- mindspore/train/summary/_writer_pool.py +1 -1
- mindspore/train/summary/summary_record.py +56 -34
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/version.py +1 -1
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/METADATA +2 -2
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/RECORD +532 -1075
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/entry_points.txt +1 -0
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
- mindspore/config/super_bar_config.json +0 -544
- mindspore/gen_ops.py +0 -273
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/nn/layer/flash_attention.py +0 -189
- mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
- mindspore/ops/_op_impl/tbe/__init__.py +0 -47
- mindspore/ops/_op_impl/tbe/abs.py +0 -38
- mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/acos.py +0 -37
- mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/acosh.py +0 -37
- mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
- mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
- mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
- mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
- mindspore/ops/_op_impl/tbe/add.py +0 -42
- mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/add_n.py +0 -39
- mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
- mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
- mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
- mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
- mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
- mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
- mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
- mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/asin.py +0 -37
- mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/asinh.py +0 -37
- mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/assign.py +0 -79
- mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
- mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
- mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/atan.py +0 -37
- mindspore/ops/_op_impl/tbe/atan2.py +0 -38
- mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/atanh.py +0 -37
- mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
- mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
- mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
- mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
- mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
- mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
- mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
- mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
- mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
- mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
- mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cast.py +0 -55
- mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/cdist.py +0 -38
- mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/ceil.py +0 -37
- mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/celu.py +0 -39
- mindspore/ops/_op_impl/tbe/centralization.py +0 -39
- mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
- mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/concat.py +0 -40
- mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
- mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
- mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
- mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
- mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
- mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/cos.py +0 -37
- mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/cosh.py +0 -37
- mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
- mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cummin.py +0 -41
- mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
- mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
- mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
- mindspore/ops/_op_impl/tbe/diag.py +0 -38
- mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
- mindspore/ops/_op_impl/tbe/dilation.py +0 -40
- mindspore/ops/_op_impl/tbe/div.py +0 -41
- mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
- mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
- mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
- mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
- mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
- mindspore/ops/_op_impl/tbe/elu.py +0 -38
- mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/equal.py +0 -42
- mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/erf.py +0 -37
- mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfc.py +0 -37
- mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
- mindspore/ops/_op_impl/tbe/exp.py +0 -40
- mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
- mindspore/ops/_op_impl/tbe/expm1.py +0 -37
- mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
- mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/fill.py +0 -56
- mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/flatten.py +0 -48
- mindspore/ops/_op_impl/tbe/floor.py +0 -37
- mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
- mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
- mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
- mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
- mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
- mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
- mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/ger.py +0 -43
- mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/greater.py +0 -43
- mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
- mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
- mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
- mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
- mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
- mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
- mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
- mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/im2col.py +0 -42
- mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
- mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
- mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/inv.py +0 -38
- mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/invert.py +0 -37
- mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/iou.py +0 -38
- mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/is_close.py +0 -40
- mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
- mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
- mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
- mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
- mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
- mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
- mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/lerp.py +0 -38
- mindspore/ops/_op_impl/tbe/less.py +0 -41
- mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/log.py +0 -40
- mindspore/ops/_op_impl/tbe/log1p.py +0 -37
- mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
- mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
- mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
- mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
- mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/matmul.py +0 -53
- mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
- mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
- mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
- mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum.py +0 -39
- mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
- mindspore/ops/_op_impl/tbe/minimum.py +0 -40
- mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mish.py +0 -37
- mindspore/ops/_op_impl/tbe/mod.py +0 -41
- mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/mul.py +0 -37
- mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
- mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
- mindspore/ops/_op_impl/tbe/neg.py +0 -39
- mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
- mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
- mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
- mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
- mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
- mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/pack.py +0 -58
- mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
- mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
- mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/pdist.py +0 -36
- mindspore/ops/_op_impl/tbe/pooling.py +0 -46
- mindspore/ops/_op_impl/tbe/population_count.py +0 -38
- mindspore/ops/_op_impl/tbe/pow.py +0 -41
- mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/prelu.py +0 -37
- mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/range.py +0 -39
- mindspore/ops/_op_impl/tbe/real_div.py +0 -38
- mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
- mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
- mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
- mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
- mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6.py +0 -38
- mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/renorm.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
- mindspore/ops/_op_impl/tbe/rint.py +0 -37
- mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roll.py +0 -42
- mindspore/ops/_op_impl/tbe/round.py +0 -38
- mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
- mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
- mindspore/ops/_op_impl/tbe/select.py +0 -38
- mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/selu.py +0 -39
- mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sgd.py +0 -62
- mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sign.py +0 -38
- mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/sin.py +0 -37
- mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sinh.py +0 -37
- mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/slice.py +0 -58
- mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
- mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax.py +0 -37
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
- mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/softplus.py +0 -37
- mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softsign.py +0 -37
- mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sort.py +0 -38
- mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/split_d.py +0 -38
- mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/split_v.py +0 -39
- mindspore/ops/_op_impl/tbe/splitv.py +0 -39
- mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/square.py +0 -38
- mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
- mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
- mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
- mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
- mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
- mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
- mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
- mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
- mindspore/ops/_op_impl/tbe/sub.py +0 -39
- mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tan.py +0 -38
- mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh.py +0 -37
- mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
- mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
- mindspore/ops/_op_impl/tbe/tile.py +0 -37
- mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
- mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
- mindspore/ops/_op_impl/tbe/transpose.py +0 -60
- mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
- mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
- mindspore/ops/_op_impl/tbe/trunc.py +0 -39
- mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/unpack.py +0 -38
- mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
- mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
- mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
- mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
- mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
- mindspore/ops/_tracefunc.py +0 -241
- mindspore/ops/arg_dtype_cast.py +0 -54
- mindspore/rewrite/api/tree_node_helper.py +0 -60
- mindspore/rewrite/ast_creator_register.py +0 -37
- mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
- mindspore/rewrite/namespace.py +0 -53
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/top_level.txt +0 -0
@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 
+# pylint: disable=unused-import
 """Defines math operators with functional form."""
 
 import collections
@@ -21,6 +22,7 @@ import math
 import numbers
 import numpy as np
 
+import mindspore as ms
 from mindspore import log as logger
 import mindspore.ops as ops
 from mindspore.common import dtype as mstype
@@ -28,13 +30,18 @@ from mindspore.ops import operations as P
 from mindspore.ops import composite as C
 from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
 from mindspore.ops.primitive import constexpr, _primexpr
-from mindspore.ops.operations._inner_ops import Cummin, TileSize
+from mindspore.ops.operations._inner_ops import TileSize
+from mindspore.ops.auto_generate import Cummin
 from mindspore.ops.operations.math_ops import STFT
-from mindspore.ops.operations.math_ops import Logit
 from mindspore.ops.operations.math_ops import LuUnpack
 from mindspore.ops.operations.math_ops import Roll
 from mindspore.ops.operations.math_ops import Ormqr
 from mindspore.ops.operations.array_ops import MatrixSetDiagV3, Transpose
+from mindspore.ops.auto_generate import (minimum, maximum, mul, sin, sinc, sinh, cummax, real, conj, add, sub, cos, cosh,
+                                         matrix_exp, sqrt, rsqrt, square, trace, nextafter, abs, acos, acosh, angle,
+                                         asin, asinh, atan, atan2, atanh, ceil, equal, erf, erfc, erfinv, exp, expm1,
+                                         floor, floor_divide, floor_mod, gcd, greater, greater_equal, less, less_equal,
+                                         log, log1p, neg, not_equal, pow, round)
 from mindspore.nn import layer
 from mindspore._checkparam import check_is_number
 from mindspore import _checkparam as validator
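The import hunk above is the key structural change in this file: many element-wise functional ops (sin, abs, neg, pow, round, ...) now arrive as generated bindings from mindspore.ops.auto_generate instead of being defined by hand later in this module, and the hand-written definitions are deleted in later hunks. A minimal caller-side sketch (assumes mindspore 2.3.0rc1 is installed; illustrative only, not part of the diff):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.array([-1.0, 0.5]), ms.float32)
    print(ops.abs(x))  # now backed by an auto-generated op; same signature as before
    print(ops.sin(x))  # likewise generated, imported via mindspore.ops.auto_generate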
@@ -63,7 +70,6 @@ from mindspore.ops.operations.math_ops import (
     Heaviside,
     Lcm,
     Gcd,
-    Sinc,
     Quantile,
     NanToNum,
     SparseSegmentMean,
@@ -101,128 +107,125 @@ def get_x_shape(x_shape):
 # Public Operation Functions.
 #####################################
 absolute_ = P.Abs()
-
+cast_ = P.Cast()
 tensor_add = P.Add()
-
-tensor_sub = P.Sub()
-tensor_mul = P.Mul()
+tensor_ceil = P.Ceil()
 tensor_div = P.RealDiv()
+tensor_exp = P.Exp()
+tensor_expm1 = P.Expm1()
 tensor_floordiv = P.FloorDiv()
 floordiv = tensor_floordiv
-
-
-
+tensor_ge = P.GreaterEqual()
+tensor_gt = greater
+tensor_le = P.LessEqual()
+tensor_lt = P.Less()
 tensor_mod = P.FloorMod()
 floormod = tensor_mod
-
-
-
-
-tensor_gt = P.Greater()
-tensor_ge = P.GreaterEqual()
+tensor_mul = P.Mul()
+tensor_pow = P.Pow()
+pows = tensor_pow
+tensor_sub = P.Sub()
 transpose_ = P.Transpose()
-
-cast_ = P.Cast()
+xdivy_ = P.Xdivy()
 
 #####################################
 # Private Operation Functions.
 #####################################
+accumulate_ = P.AccumulateNV2()
+acos_ = P.ACos()
+acosh_ = P.Acosh()
 addcdiv_ = P.Addcdiv()
 addcuml_ = P.Addcmul()
 addn_ = P.AddN()
 angle_ = Angle()
-log_ = P.Log()
-floor_ = P.Floor()
-logical_not_ = P.LogicalNot()
-logical_or_ = P.LogicalOr()
-logical_and_ = P.LogicalAnd()
-sin_ = P.Sin()
-sinc_ = Sinc()
-cos_ = P.Cos()
-tan_ = P.Tan()
 asin_ = P.Asin()
-polar_ = Polar()
-acos_ = P.ACos()
-atan_ = P.Atan()
-atan2_ = P.Atan2()
-sinh_ = P.Sinh()
-cosh_ = P.Cosh()
-tanh_ = P.Tanh()
 asinh_ = P.Asinh()
-
+atan2_ = P.Atan2()
+atan_ = P.Atan()
 atanh_ = P.Atanh()
-
-bitwise_or_ = P.BitwiseOr()
-bitwise_xor_ = P.BitwiseXor()
-inv_ = P.math_ops.Inv()
-invert_ = P.Invert()
-erf_ = P.Erf()
-erfc_ = P.Erfc()
-bessel_j1_ = BesselJ1()
-bessel_j0_ = BesselJ0()
+batch_matmul_ = P.BatchMatMul()
 bessel_i0_ = BesselI0()
 bessel_i0e_ = P.BesselI0e()
-bessel_k0_ = BesselK0()
-bessel_k0e_ = BesselK0e()
-bessel_y0_ = BesselY0()
-bessel_y1_ = BesselY1()
 bessel_i1_ = BesselI1()
 bessel_i1e_ = P.BesselI1e()
+bessel_j0_ = BesselJ0()
+bessel_j1_ = BesselJ1()
+bessel_k0_ = BesselK0()
+bessel_k0e_ = BesselK0e()
 bessel_k1_ = BesselK1()
 bessel_k1e_ = BesselK1e()
-
-
-
-
-
-
-
-linspace_ = P.LinSpace()
-matrix_exp_ = MatrixExp()
-exp2_ = P.Pow()
-trunc_ = P.Trunc()
-truncate_div_ = P.TruncateDiv()
-truncate_mod_ = P.TruncateMod()
-sparse_segment_mean_ = SparseSegmentMean()
-lu_unpack_ = LuUnpack()
-xlogy_ = P.Xlogy()
-square_ = P.Square()
-sqrt_ = P.Sqrt()
+bessel_y0_ = BesselY0()
+bessel_y1_ = BesselY1()
+bitwise_and_ = P.BitwiseAnd()
+bitwise_or_ = P.BitwiseOr()
+bitwise_xor_ = P.BitwiseXor()
+conj_ = P.Conj()
+cumprod_ = P.CumProd()
 cumsum_ = P.CumSum()
-
-
+cumulative_logsumexp_ = CumulativeLogsumexp()
+digamma_ = P.Digamma()
+div_ = P.Div()
 dtype_ = P.DType()
 eps_ = P.Eps()
-
+erf_ = P.Erf()
+erfc_ = P.Erfc()
+erfinv_ = P.Erfinv()
+exp2_ = P.Pow()
 expand_dims_ = P.ExpandDims()
-sign_ = P.Sign()
-nextafter_ = P.NextAfter()
-matrix_inverse_ = P.MatrixInverse()
-matrix_determinant_ = P.MatrixDeterminant()
-log_matrix_determinant_ = P.LogMatrixDeterminant()
-trace_ = P.Trace()
-real_ = P.Real()
-rsqrt_ = P.Rsqrt()
-reciprocal_ = P.Reciprocal()
-tile_ = P.Tile()
-batch_matmul_ = P.BatchMatMul()
 fill_v2_ = P.FillV2()
+floor_ = P.Floor()
+gcd_ = Gcd()
+igamma_ = Igamma()
+igammac_ = Igammac()
 imag_ = P.Imag()
-
-
-
-
-
+inv_ = P.math_ops.Inv()
+invert_ = P.Invert()
+isfinite_ = P.IsFinite()
+isinf_ = P.IsInf()
+isnan_ = P.IsNan()
+lcm_ = Lcm()
+lerp_ = P.Lerp()
 lgamma_ = P.Lgamma()
-
+linspace_ = P.LinSpace()
+log1p_ = P.Log1p()
+log_ = P.Log()
+log_matrix_determinant_ = P.LogMatrixDeterminant()
+logical_and_ = P.LogicalAnd()
+logical_not_ = P.LogicalNot()
+logical_or_ = P.LogicalOr()
+logical_xor_ = P.LogicalXor()
+lu_solve_ = LuSolve()
+lu_unpack_ = LuUnpack()
+matmul_ = P.MatMul()
+matrix_determinant_ = P.MatrixDeterminant()
+matrix_inverse_ = P.MatrixInverse()
+mod_ = P.Mod()
+nextafter_ = P.NextAfter()
+ones_ = P.Ones()
+polar_ = Polar()
 poly_gamma_ = P.Polygamma()
-
+rank_ = P.Rank()
+reciprocal_ = P.Reciprocal()
+reduce_sum_ = P.ReduceSum()
+reshape_ = P.Reshape()
+select_ = P.Select()
+slice_ = P.Slice()
+size_ = P.Size()
+scalar_to_tensor_ = P.ScalarToTensor()
+shape_ = P.Shape()
+sign_ = P.Sign()
+sparse_segment_mean_ = SparseSegmentMean()
+tan_ = P.Tan()
+tanh_ = P.Tanh()
+tensor_round_ = P.Round()
+tile_ = P.Tile()
+tile_size_ = TileSize()
+trunc_ = P.Trunc()
+truncate_div_ = P.TruncateDiv()
+truncate_mod_ = P.TruncateMod()
+xlogy_ = P.Xlogy()
 zeros_ = P.Zeros()
-ones_ = P.Ones()
-logical_xor_ = P.LogicalXor()
 zeta_ = P.Zeta()
-div_ = P.Div()
-matmul_ = P.MatMul()
 
 
 #####################################
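The hunk above mostly re-sorts the module-level operator aliases alphabetically and adds new cached instances (cast_, reduce_sum_, tile_size_, ...) that later hunks use. The underlying pattern: each P.<Op>() primitive is instantiated once at import time and that single instance is reused by every functional call. Illustrative sketch of the pattern (not part of the diff):

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P
    from mindspore.common import dtype as mstype

    reduce_sum_ = P.ReduceSum()  # one instance, shared by all callers
    cast_ = P.Cast()

    x = Tensor(np.array([[1.0, 2.0], [3.0, 4.0]]), mstype.float32)
    print(reduce_sum_(x, 1))       # row sums: [3. 7.]
    print(cast_(x, mstype.int32))  # dtype conversion, as the new bincount does with minlength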
@@ -262,39 +265,6 @@ def addn(x):
     return addn_(x)
 
 
-def abs(input):
-    r"""
-    Returns absolute value of a tensor element-wise.
-
-    .. math::
-
-        out_i = |input_i|
-
-    Args:
-        input (Tensor): The input tensor. The shape of tensor is
-            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-    Returns:
-        Tensor, has the same shape as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32)
-        >>> output = ops.abs(input)
-        >>> print(output)
-        [1. 1. 0.]
-    """
-    return absolute_(input)
-
-
 def absolute(input):
     """
     Alias for :func:`mindspore.ops.abs` .
@@ -305,69 +275,10 @@ def absolute(input):
     return abs(input)
 
 
-def add(input, other):
-    r"""
-    Adds other value to input Tensor.
-
-    .. math::
-
-        out_{i} = input_{i} + other_{i}
-
-    Note:
-        - One of the two inputs must be a Tensor, when the two inputs have different shapes,
-          they must be able to broadcast to a common shape.
-        - The two inputs can not be bool type at the same time,
-          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
-        - The two inputs comply with the implicit type conversion rules to make the data types
-          consistent.
-        - When input is Tensor, it's dimension should be greater than or equal to 1.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
-        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one of the input `input` , `other` after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If `input` and `other` is not one of the following: Tensor, number.Number, bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> # case 1: x and y are both Tensor.
-        >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
-        >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
-        >>> output = ops.add(x, y)
-        >>> print(output)
-        [5. 7. 9.]
-        >>> # case 2: x is a scalar and y is a Tensor
-        >>> x = Tensor(1, mindspore.int32)
-        >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
-        >>> output = ops.add(x, y)
-        >>> print(output)
-        [5. 6. 7.]
-        >>> # the data type of x is int32, the data type of y is float32,
-        >>> # and the output is the data format of higher precision float32.
-        >>> print(output.dtype)
-        Float32
-    """
-    return tensor_add(input, other)
-
-
 def addcdiv(input, tensor1, tensor2, value=1):
     r"""
     Performs the element-wise division of tensor tensor1 by tensor tensor2,
-    multiply the result by the scalar value and add it to input_data.
+    multiply the result by the scalar value and add it to input data.
 
     .. math::
         y[i] = input[i] + value[i] * (tensor1[i] / tensor2[i])
@@ -408,7 +319,7 @@ def addcdiv(input, tensor1, tensor2, value=1):
 def addcmul(input, tensor1, tensor2, value=1):
     r"""
     Performs the element-wise product of tensor tensor1 and tensor tensor2,
-    multiply the result by the scalar value and add it to input_data.
+    multiply the result by the scalar value and add it to input data.
 
     .. math::
         output[i] = input[i] + value[i] * (tensor1[i] * tensor2[i])
@@ -420,7 +331,7 @@ def addcmul(input, tensor1, tensor2, value=1):
         value (Union[Tensor, Number]): The multiplier for tensor1*tensor2. Default: ``1`` .
 
     Returns:
-        Tensor, has the same shape and dtype as
+        Tensor, has the same shape and dtype as tensor1*tensor2.
 
     Raises:
         TypeError: If dtype of `tensor1`, `tensor2`, `input` is not Tensor.
@@ -451,36 +362,6 @@ def addcmul(input, tensor1, tensor2, value=1):
     return addcuml_(input, tensor1, tensor2, Tensor(value))
 
 
-def angle(input):
-    """
-    Returns the element-wise argument of a complex tensor.
-    The elements in input are considered to be complex numbers of the form a+bj, where a is the real part and b
-    is the imaginary part. The argument returned by this function is of the form :math:`atan2(b, a)`.
-
-    Args:
-        input (Tensor): The input tensor. types: complex64, complex128.
-
-    Returns:
-        Tensor, has the float32 or float64 type and the same shape as input.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If the dtype of `input` is not one of: complex64, complex128.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor([-1.5 + 7.8j, 3 + 5.75j], mindspore.complex64)
-        >>> output = ops.angle(input)
-        >>> print(output)
-        [1.7607845 1.0899091]
-    """
-    return angle_(input)
-
-
 def bincount(input, weights=None, minlength=0):
     """
     Counts the number of occurrences of each value in `input`.
@@ -493,6 +374,9 @@ def bincount(input, weights=None, minlength=0):
     Each value in the output Tensor marks the number of occurrences of that index in `input`.
     If 'weights' is specified, the output results are weighted, i.e ``out[n] += weight[i]`` instead of ``out[n] += 1``.
 
+    Note:
+        If `input` contains negative value, the result will be undefined.
+
     Args:
         input (Tensor): 1-d input tensor.
         weights (Tensor, optional): Weights, a tensor of the same shape as `input`. Default: ``None`` .
@@ -504,7 +388,6 @@ def bincount(input, weights=None, minlength=0):
     Raises:
         TypeError: If `input` or `weights` is not a tensor.
         ValueError: If `input` is not one-dimensional, or if `input` and `weights` do not have the same shape.
-        ValueError: If `input` contains negative value.
         ValueError: If `minlength` is a negative integer.
 
     Supported Platforms:
@@ -528,23 +411,21 @@ def bincount(input, weights=None, minlength=0):
         raise TypeError(f"For math function 'bincount', 'minlength' must be int but got {type(minlength)}.")
     if rank_(input) != 1:
         raise ValueError(f"For math function 'bincount', 'input' should be one-dimensional tensor.")
-    if not (input >= 0).all():
-        raise ValueError(f"For 'bincount', elements of 'input' should be non-negative.")
     if input.shape[0] == 0:
-        return
+        return Tensor_([])
     if minlength < 0:
         raise ValueError(f"For 'bincount', 'minlength' should be >= 0 but got {minlength}.")
     if max(input.astype(mstype.float32)) > minlength - 1:
         length = (max(input.astype(mstype.float32)) + 1).astype(mstype.int32)
     else:
-        length =
+        length = cast_(minlength, mstype.int32)
     idx = F.arange(length).expand_dims(-1)
-    idx_mapping = equal(input, idx)
+    idx_mapping = equal(input, idx.astype(input.dtype))
     if weights is not None:
         if input.shape != weights.shape:
             raise ValueError('for bincount `input` and `weights` must have the same length')
         idx_mapping *= weights
-    return
+    return reduce_sum_(idx_mapping.astype(mstype.float32), 1).ravel()
 
 
 def bucketize(input, boundaries, *, right=False):
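The bincount hunk above drops the eager negative-value check (now a documented "undefined result") and completes the counting path: a column of candidate indices is broadcast-compared against the input, then the matches are summed per row. A minimal NumPy sketch of the same one-hot counting trick (illustrative only, not part of the package):

    import numpy as np

    def bincount_sketch(x, weights=None, minlength=0):
        length = max(int(x.max()) + 1, minlength) if x.size else minlength
        idx = np.arange(length)[:, None]    # shape (length, 1)
        idx_mapping = (x[None, :] == idx)   # broadcast to (length, n)
        if weights is not None:
            idx_mapping = idx_mapping * weights  # weighted counts
        return idx_mapping.astype(np.float32).sum(axis=1)

    print(bincount_sketch(np.array([2, 4, 1, 0, 0])))  # [2. 1. 1. 0. 1.]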
@@ -673,38 +554,6 @@ def argmin(input, axis=None, keepdims=False):
     return out
 
 
-def neg(input):
-    """
-    Returns a tensor with negative values of the input tensor element-wise.
-
-    .. math::
-
-        out_{i} = - input_{i}
-
-    Args:
-        input (Tensor): The input tensor with a dtype of Number.
-
-    Returns:
-        Tensor, has the same shape and dtype as input.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
-        >>> output = ops.neg(input)
-        >>> print(output)
-        [-1. -2. 1. -2. 0. 3.5]
-    """
-    return neg_tensor(input)
-
-
 def negative(input):
     r"""
     Alias for :func:`mindspore.ops.neg` .
@@ -712,7 +561,7 @@ def negative(input):
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
     """
-    return neg_tensor(input)
+    return neg(input)
 
 
 def positive(input):
@@ -777,7 +626,7 @@ def permute(input, axis):
 
     Args:
         input (Tensor): Input Tensor.
-        axis (
+        axis (tuple(int)): Permute will permute the tensor to the input `axis` order.
 
     Returns:
         Tensor, has the same dimension as input tensor, with `axis` suitably permuted.
@@ -806,135 +655,22 @@ def permute(input, axis):
     return transpose_(input, axis)
 
 
-def ceil(input):
+def subtract(input, other, *, alpha=1):
     r"""
-    Rounds a tensor up to the closest integer element-wise.
+    Performs the element-wise subtract of input tensors.
 
     .. math::
-
-        out_i = \lceil x_i \rceil = \lfloor x_i \rfloor + 1
+        output[i] = input[i] - alpha * other[i]
 
     Args:
-        input (Tensor):
+        input (Union[Tensor, number.Number]): Tensor or Number involved in subtraction.
+        other (Union[Tensor, number.Number]): Tensor or Number involved in subtraction.
+
+    Keyword Args:
+        alpha (Number): The multiplier for :math:`other`. Default: ``1`` .
 
     Returns:
-        Tensor, has the same shape as
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16 or float32.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
-        >>> output = ops.ceil(x)
-        >>> print(output)
-        [ 2. 3. -1.]
-    """
-    return tensor_ceil(input)
-
-
-def round(input):
-    r"""
-    Returns half to even of a tensor element-wise.
-
-    .. math::
-
-        out_i \approx input_i
-
-    Args:
-        input (Tensor): The input tensor.
-
-    Returns:
-        Tensor, has the same shape and type as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
-        >>> output = ops.round(input)
-        >>> print(output)
-        [ 1. 2. 2. 2. -4.]
-    """
-    return tensor_round_(input)
-
-
-def sub(input, other):
-    r"""
-    Subtracts the second input tensor from the first input tensor element-wise.
-
-    .. math::
-
-        out_{i} = input_{i} - other_{i}
-
-    Note:
-        - One of the two inputs must be a Tensor, when the two inputs have different shapes,
-          they must be able to broadcast to a common shape.
-        - The two inputs can not be bool type at the same time,
-          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
-        - The two inputs comply with the implicit type conversion rules to make the data types
-          consistent.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
-        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If `input` and `other` are not number.Number or bool or Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
-        >>> other = Tensor(np.array([4, 5, 6]), mindspore.int32)
-        >>> output = ops.sub(input, other)
-        >>> print(output)
-        [-3 -3 -3]
-    """
-    return tensor_sub(input, other)
-
-
-def subtract(input, other, *, alpha=1):
-    r"""
-    Performs the element-wise subtract of input tensors.
-
-    .. math::
-        output[i] = input[i] - alpha * other[i]
-
-    Args:
-        input (Union[Tensor, number.Number]): Tensor or Number involved in subtraction.
-        other (Union[Tensor, number.Number]): Tensor or Number involved in subtraction.
-
-    Keyword Args:
-        alpha (Number): The multiplier for :math:`other`. Default: ``1`` .
-
-    Returns:
-        Tensor, has the same shape and dtype as input tensors.
+        Tensor, has the same shape and dtype as input tensors.
 
     Raises:
         TypeError: `input` or `other` is neither Tensor nor number.Number.
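In the hunk above, the hand-written ceil, round, and sub are removed (they are now auto-generated imports), and subtract keeps only one definition, relocated to their old position. Usage sketch of the keyword-only alpha multiplier (assumes mindspore 2.3.0rc1; illustrative only, not part of the diff):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.array([4.0, 5.0, 6.0]), ms.float32)
    y = Tensor(np.array([1.0, 2.0, 3.0]), ms.float32)
    print(ops.subtract(x, y, alpha=2))  # output[i] = x[i] - 2 * y[i] -> [2. 1. 0.]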
@@ -966,55 +702,6 @@ def true_divide(dividend, divisor):
     return div(dividend, divisor, rounding_mode=None)
 
 
-def mul(input, other):
-    r"""
-    Multiplies two tensors element-wise.
-
-    .. math::
-
-        out_{i} = input_{i} * other_{i}
-
-    Note:
-        - One of the two inputs must be a Tensor, when the two inputs have different shapes,
-          they must be able to broadcast to a common shape.
-        - The two inputs can not be bool type at the same time,
-          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
-        - The two inputs comply with the implicit type conversion rules to make the data types
-          consistent.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
-        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If `input` and `other` is not one of the following: Tensor, number.Number, bool.
-        ValueError: If `input` and `other` are not the same shape.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
-        >>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
-        >>> output = ops.mul(x, y)
-        >>> print(output)
-        [ 4. 10. 18.]
-    """
-    return tensor_mul(input, other)
-
-
 def multiply(input, other):
     r"""
     Alias for :func:`mindspore.ops.asinh`.
@@ -1029,18 +716,17 @@ def div(input, other, *, rounding_mode=None):
     r"""
     Divides the first input tensor by the second input tensor in floating-point type element-wise.
 
+    .. math::
+
+        out_{i} = input_{i} / other_{i}
+
     Note:
-        - One of the two inputs must be a Tensor, when the two inputs have different shapes,
-          they must be able to broadcast to a common shape.
+        - When the two inputs have different shapes, they must be able to broadcast to a common shape.
         - The two inputs can not be bool type at the same time,
          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
         - The two inputs comply with the implicit type conversion rules to make the data types
          consistent.
 
-    .. math::
-
-        out_{i} = input_{i} / other_{i}
-
     Args:
         input (Union[Tensor, Number, bool]): The first input is a number or
             a bool or a tensor whose data type is number or bool.
@@ -1161,60 +847,6 @@ def floor_div(x, y):
     return tensor_floordiv(x, y)
 
 
-def floor_divide(input, other):
-    """
-    Divides the first input tensor by the second input tensor element-wise and round down to the closest integer.
-
-    Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
-    The inputs must be two tensors or one tensor and one scalar.
-    When the inputs are two tensors,
-    dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
-    When the inputs are one tensor and one scalar,
-    the scalar could only be a constant.
-
-    .. math::
-
-        out_{i} = \\text{floor}( \\frac{x_i}{y_i})
-
-    where the :math:`floor` indicates the Floor operator, for more details,
-    please refer to the :class:`mindspore.ops.Floor` operator.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Args:
-        input (Union[Tensor, Number, bool]): The first input is a number or
-            a bool or a tensor whose data type is number or bool.
-        other (Union[Tensor, Number, bool]): The second input is a number or
-            a bool when the first input is a tensor, or it can be a tensor whose data type is number or bool.
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If neither `input` nor `other` is a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> import numpy as np
-        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
-        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
-        >>> output = ops.floor_divide(x, y)
-        >>> print(output)
-        [ 0 1 -1]
-        >>> x = Tensor(2.0, mindspore.float32)
-        >>> y = Tensor(2.0, mindspore.float32)
-        >>> output = ops.floor_divide(x, y)
-        >>> print(output)
-        1.0
-    """
-    return tensor_floordiv(input, other)
-
-
 def fmod(input, other):
     """
     Computes the floating-point remainder of the division operation input/other.
@@ -1256,214 +888,6 @@ def fmod(input, other):
     return input - div(input, other, rounding_mode="trunc") * other
 
 
-def pow(input, exponent):
-    r"""
-    Calculates the `exponent` power of each element in `input`.
-
-    .. math::
-
-        out_{i} = input_{i} ^{ exponent_{i}}
-
-    .. note::
-        - Inputs of `input` and `exponent` comply with the implicit type conversion rules to make the
-          data types consistent.
-        - The inputs must be two tensors or one tensor and one scalar.
-        - When the inputs are two tensors,
-          dtypes of them cannot be bool at the same time, and the shapes of them can be broadcast.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
-        exponent (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If `input` and `exponent` is not one of the following: Tensor, number.Number or bool.
-        ValueError: If the shape of `input` and `exponent` are different.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
-        >>> y = 3.0
-        >>> output = ops.pow(x, y)
-        >>> print(output)
-        [ 1. 8. 64.]
-        >>>
-        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
-        >>> y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
-        >>> output = ops.pow(x, y)
-        >>> print(output)
-        [ 1. 16. 64.]
-    """
-    return tensor_pow(input, exponent)
-
-
-def floor_mod(x, y):
-    r"""
-    Computes the remainder of division element-wise. It's a flooring divide.
-    E.g. :math:`floor(x / y) * y + mod(x, y) = x`.
-
-    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
-    The inputs must be two tensors or one tensor and one scalar.
-    When the inputs are two tensors,
-    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
-    When the inputs are one tensor and one scalar,
-    the scalar could only be a constant.
-
-    .. math::
-
-        out_{i} =\text{floor}(x_{i} // y_{i})
-
-    where the :math:`floor` indicates the Floor operator, for more details,
-    please refer to the :class:`mindspore.ops.Floor` operator.
-
-    .. warning::
-        - Data of input `y` should not be 0, or the maximum value of its dtype will be returned.
-        - When the elements of input exceeds 2048 , the accuracy of operator cannot guarantee the requirement of
-          double thousandths in the mini form.
-        - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
-        - If shape is expressed as :math:`(D1, D2 ..., Dn)`, then D1\*D2... \*DN<=1000000,n<=8.
-
-    Args:
-        x (Union[Tensor, Number, bool]): The first input is a number or
-            a bool or a tensor whose data type is number or bool.
-        y (Union[Tensor, Number, bool]): The second input is a number or
-            a bool when the first input is a tensor, or it can be a tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision of the two inputs.
-
-    Raises:
-        TypeError: If neither `x` nor `y` is a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
-        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
-        >>> output = ops.floor_mod(x, y)
-        >>> print(output)
-        [2 1 2]
-    """
-    return tensor_mod(x, y)
-
-
-def exp(input):
-    r"""
-    Returns exponential of a tensor element-wise.
-
-    .. math::
-
-        out_i = e^{x_i}
-
-    Args:
-        input (Tensor): The input tensor.
-
-    Returns:
-        Tensor, has the same shape and dtype as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
-        >>> output = ops.exp(x)
-        >>> print(output)
-        [ 2.718282 7.389056 54.598152]
-    """
-    return tensor_exp(input)
-
-
-def expm1(input):
-    r"""
-    Returns exponential then minus 1 of a tensor element-wise.
-
-    .. math::
-
-        out_i = e^{x_i} - 1
-
-    Args:
-        input (Tensor): The input Tensor.
-
-    Returns:
-        Tensor, has the same shape as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([0.0, 1.0, 2.0, 4.0]), mindspore.float32)
-        >>> output = ops.expm1(x)
-        >>> print(output)
-        [ 0. 1.718282 6.389056 53.598152]
-    """
-    return tensor_expm1(input)
-
-
-def log(input):
-    r"""
-    Returns the natural logarithm of a tensor element-wise.
-
-    .. math::
-        y_i = \log_e(x_i)
-
-    .. warning::
-        If the input value of operator Log is within the range (0, 0.01] or [0.95, 1.05], the output accuracy may
-        be affacted.
-
-    Args:
-        input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
-
-    Returns:
-        Tensor, has the same shape and dtype as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
-        >>> output = ops.log(x)
-        >>> print(output)
-        [0. 0.6931472 1.3862944]
-    """
-    return log_(input)
-
-
 def logdet(input):
     r"""
     Calculates log determinant of one or a batch of square matrices.
@@ -1493,40 +917,6 @@ def logdet(input):
     return log_(det_x)
 
 
-def floor(input):
-    r"""
-    Rounds a tensor down to the closest integer element-wise.
-
-    .. math::
-
-        out_i = \lfloor x_i \rfloor
-
-    Args:
-        input (Tensor): The input tensor, its data type must be float16,
-            float32 or float64.
-
-    Returns:
-        Tensor, has the same shape as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not in [float16, float32, float64].
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
-        >>> output = ops.floor(x)
-        >>> print(output)
-        [ 1. 2. -2.]
-    """
-    return floor_(input)
-
-
 def i0(input):
     r"""
     Alias for :func:`mindspore.ops.bessel_i0` .
@@ -1729,7 +1119,7 @@ def logical_not(input):
         out_{i} = \\neg input_{i}
 
     Args:
-        input (Tensor): The input tensor
+        input (Tensor): The input tensor.
 
     Returns:
         Tensor, the shape is the same as the `input`, and the dtype is bool.
@@ -1749,8 +1139,6 @@ def logical_not(input):
         >>> print(output)
         [False True False]
     """
-    if isinstance(input, Tensor) and input.dtype != mstype.bool_:
-        input = input.astype(mstype.bool_)
     return logical_not_(input)
 
 
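The deleted lines above (and the matching deletions in logical_or and logical_and below) removed the Python-side cast of non-bool tensors to bool before invoking the primitive. Callers who relied on that implicit conversion can perform it explicitly; a sketch of the equivalent cast (behavioral assumption, not stated by the diff itself):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.array([0, 1, 2]), ms.int32)
    # Explicitly reproduce the removed implicit cast before the logical op.
    print(ops.logical_not(x.astype(ms.bool_)))  # [ True False False]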
@@ -1760,17 +1148,17 @@ def logical_or(input, other):
 
     Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
     The inputs must be two tensors or one tensor and one bool.
-
-
-
-
+
+    When the inputs are two tensors, the shapes of them could be broadcast.
+
+    When the inputs are one tensor and one bool, the bool object could only be a constant.
 
     .. math::
 
-        out_{i} =
+        out_{i} = input_{i} \\vee other_{i}
 
     Note:
-
+        logical_or supports broadcasting.
 
     Args:
         input (Union[Tensor, bool]): The first input is a bool or a tensor whose data type can be implicitly
@@ -1781,9 +1169,6 @@ def logical_or(input, other):
     Returns:
         Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
 
-    Raises:
-        TypeError: If neither `input` nor `other` is a Tensor.
-
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
@@ -1812,10 +1197,6 @@ def logical_or(input, other):
         >>> print(output)
         [True True]
     """
-    if isinstance(input, Tensor) and input.dtype != mstype.bool_:
-        input = input.astype(mstype.bool_)
-    if isinstance(other, Tensor) and other.dtype != mstype.bool_:
-        other = other.astype(mstype.bool_)
     return logical_or_(input, other)
 
 
@@ -1825,17 +1206,17 @@ def logical_and(input, other):
 
     Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
     The inputs must be two tensors or one tensor and one bool.
-
-
-
-
+
+    When the inputs are two tensors, the shapes of them could be broadcast.
+
+    When the inputs are one tensor and one bool, the bool object could only be a constant.
 
     .. math::
 
         out_{i} = input_{i} \wedge other_{i}
 
     Note:
-
+        logical_and supports broadcasting.
 
     Args:
         input (Union[Tensor, bool]): The first input is a bool or a tensor whose data type can be implicitly
@@ -1877,10 +1258,6 @@ def logical_and(input, other):
         >>> print(output)
         [True False]
     """
-    if isinstance(input, Tensor) and input.dtype != mstype.bool_:
-        input = input.astype(mstype.bool_)
-    if isinstance(other, Tensor) and other.dtype != mstype.bool_:
-        other = other.astype(mstype.bool_)
    return logical_and_(input, other)
 
 
@@ -1983,130 +1360,25 @@ def sgn(input):
|
|
|
1983
1360
|
``Ascend`` ``GPU`` ``CPU``
|
|
1984
1361
|
|
|
1985
1362
|
Examples:
|
|
1986
|
-
-        >>> import mindspore as ms
-        >>> import mindspore.ops as ops
-        >>> input = ms.Tensor([[3 + 4j, 7 - 24j, 0, 6 + 8j, 8], [15 + 20j, 7 - 24j, 0, 3 + 4j, 20]], dtype=ms.complex64)
-        >>> output = ops.sgn(input)
-        >>> print(output)
-        [[0.6 +0.8j 0.28-0.96j 0. +0.j 0.6 +0.8j 1. +0.j ]
-        [0.6 +0.8j 0.28-0.96j 0. +0.j 0.6 +0.8j 1. +0.j ]]
-    """
-    if not isinstance(input, Tensor):
-        raise TypeError(f"For sgn, the input must be a Tensor, but got {type(input)}")
-    if not ops.is_complex(input):
-        return ops.sign(input)
-    modulus = ops.ComplexAbs()(input)
-    zeros_mask = modulus.equal(0)
-    non_zero_modulus = ops.masked_fill(modulus, zeros_mask, 1)
-    zeros_modulus = ops.zeros_like(non_zero_modulus)
-    complex_modulus = ops.Complex()(non_zero_modulus, zeros_modulus)
-    res = input / complex_modulus
-    return res
-
-
-def sin(input):
-    r"""
-    Computes sine of the input element-wise.
-
-    .. math::
-
-        out_i = \sin(input_i)
-
-    Args:
-        input (Tensor): The shape of tensor is
-            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-    Returns:
-        Tensor, has the same shape and dtype as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16, float32 or float64, complex64, complex128.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
-        >>> output = ops.sin(input)
-        >>> print(output)
-        [0.5810352 0.27635565 0.41687083 0.5810352]
-    """
-    return sin_(input)
-
-
-def sinc(input):
-    r"""
-    Computes the normalized sinc of input.
-
-    .. math::
-
-        out_i = \begin{cases} \frac{sin(\pi input_i)}{\pi input_i} & input_i\neq 0\\
-        1 & input_i=0 \end{cases}
-
-    Args:
-        input (Tensor): The input Tensor.
-
-    Returns:
-        Tensor, has the same shape as the `input`. The dtype of output is float32 when dtype of `input` is in
-        [int, bool]. Otherwise output has the same dtype as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
-        >>> output = ops.sinc(input)
-        >>> print(output)
-        [0.47735003 0.8759357 0.7224278 0.47735003]
-    """
-    return sinc_(input)
-
-
-def cos(input):
-    r"""
-    Computes cosine of input element-wise.
-
-    .. math::
-        out_i = \cos(x_i)
-
-    .. warning::
-        Supported dtypes are float16 and float32, and using float64 may
-        cause a problem of missing precision.
-
-    Args:
-        input (Tensor): The shape of tensor is
-            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-    Returns:
-        Tensor, has the same shape and dtype as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16, float32 or float64, complex64, complex128.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
-        >>> output = ops.cos(x)
+        >>> import mindspore as ms
+        >>> import mindspore.ops as ops
+        >>> input = ms.Tensor([[3 + 4j, 7 - 24j, 0, 6 + 8j, 8], [15 + 20j, 7 - 24j, 0, 3 + 4j, 20]], dtype=ms.complex64)
+        >>> output = ops.sgn(input)
         >>> print(output)
-        [0.
+        [[0.6 +0.8j 0.28-0.96j 0. +0.j 0.6 +0.8j 1. +0.j ]
+        [0.6 +0.8j 0.28-0.96j 0. +0.j 0.6 +0.8j 1. +0.j ]]
     """
-
+    if not isinstance(input, Tensor):
+        raise TypeError(f"For sgn, the input must be a Tensor, but got {type(input)}")
+    if not ops.is_complex(input):
+        return ops.sign(input)
+    modulus = ops.ComplexAbs()(input)
+    zeros_mask = modulus.equal(0)
+    non_zero_modulus = ops.masked_fill(modulus, zeros_mask, ops.cast(1, modulus.dtype))
+    zeros_modulus = ops.zeros_like(non_zero_modulus)
+    complex_modulus = ops.Complex()(non_zero_modulus, zeros_modulus)
+    res = input / complex_modulus
+    return res
 
 
 def cosine_similarity(x1, x2, dim=1, eps=1e-08):
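A note on the `sgn` hunk above: both versions compute the complex sign as `input / |input|`, substituting 1 for zero moduli so the division never produces NaN; the 2.3.0rc1 body only changes the fill value to be cast to the modulus dtype. A minimal NumPy sketch of the same zero-safe technique (illustrative only, not MindSpore's kernel):

```python
import numpy as np

def complex_sgn(z):
    """Zero-safe complex sign: z / |z|, with sgn(0) defined as 0."""
    modulus = np.abs(z)
    # Substitute 1 where |z| == 0; the numerator is 0 there, so those
    # positions divide to exactly 0 instead of NaN.
    safe = np.where(modulus == 0, 1.0, modulus)
    return z / safe

z = np.array([3 + 4j, 7 - 24j, 0], dtype=np.complex64)
print(complex_sgn(z))  # [0.6+0.8j 0.28-0.96j 0.+0.j]
```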
@@ -2221,7 +1493,7 @@ def cov(input, *, correction=1, fweights=None, aweights=None):
             Default: ``None`` .
 
     Returns:
-        Tensor,
+        Tensor, the covariance matrix Tensor of `input`.
 
     Raises:
         ValueError: If the dimensions of input is greater than 2.
@@ -2308,9 +1580,6 @@ def t(input):
     Returns:
         Tensor, the transpose of `input` .
 
-    Raises:
-        ValueError: If the dimension of `input` is larger than 2.
-
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
@@ -2325,8 +1594,6 @@ def t(input):
         [2. 3.]
         [3. 4.]]
     """
-    if input.ndim > 2:
-        raise ValueError(f"For t(), the dimension of tensor should be less than 3, but got {input.ndim}.")
     if input.ndim == 2:
         return transpose_(input, (1, 0))
     return input
@@ -2385,8 +1652,8 @@ def xlogy(input, other):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.
-            `bool_ <https://www.mindspore.cn/docs/en/r2.
+            `number <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_.
         other (Union[Tensor, number.Number, bool]): The second input is a number.Number or
             a bool when the first input is a tensor or a tensor whose data type is number or bool\_.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -2534,389 +1801,73 @@ def polar(abs, angle):  # pylint: disable=redefined-outer-name
     return polar_(abs, angle)
 
 
-def asin(input):
-    r"""
-    Computes arcsine of input tensors element-wise.
-
-    .. math::
-
-        out_i = \sin^{-1}(input_i)
-
-    Args:
-        input (Tensor): The shape of tensor is
-            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-    Returns:
-        Tensor, has the same shape and dtype as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16, float32, float64, complex64, complex128.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
-        >>> output = ops.asin(x)
-        >>> print(output)
-        [0.8330704 0.04001067 0.30469266 0.5943858 ]
-    """
-    return asin_(input)
-
-
-def acos(input):
-    r"""
-    Computes arccosine of input tensors element-wise.
-
-    .. math::
-
-        out_i = \cos^{-1}(input_i)
-
-    Args:
-        input (Tensor): The shape of tensor is
-            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-    Returns:
-        Tensor, has the same shape and dtype as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16, float32 or float64, complex64, complex128.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
-        >>> output = ops.acos(input)
-        >>> print(output)
-        [0.737726 1.5307857 1.2661036 0.9764105]
-    """
-    return acos_(input)
-
-
 def arccos(input):
     """
     Alias for :func:`mindspore.ops.acos` .
 
     Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-    """
-    return acos(input)
-
-
-def atan(input):
-    r"""
-    Computes the trigonometric inverse tangent of the input element-wise.
-
-    .. math::
-
-        out_i = \tan^{-1}(input_i)
-
-    Args:
-        input (Tensor): The shape of tensor is
-            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-            The data type should be one of the following types: float16, float32.
-
-    Returns:
-        A Tensor, has the same type as the input.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16 or float32.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 0.0]), mindspore.float32)
-        >>> output = ops.atan(x)
-        >>> print(output)
-        [0.7853982 0. ]
-    """
-    return atan_(input)
-
-
-def sinh(input):
-    r"""
-    Computes hyperbolic sine of the input element-wise.
-
-    .. math::
-
-        out_i = \sinh(input_i)
-
-    Args:
-        input (Tensor): The input tensor of hyperbolic sine function.
-
-    Returns:
-        Tensor, has the same shape as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
-        >>> output = ops.sinh(input)
-        >>> print(output)
-        [0.6604918 0.28367308 0.44337422 0.6604918 ]
-    """
-    return sinh_(input)
-
-
-def cosh(input):
-    r"""
-    Computes hyperbolic cosine of input element-wise.
-
-    .. math::
-
-        out_i = \cosh(input_i)
-
-    Args:
-        input (Tensor): The input tensor of hyperbolic cosine function, its data type
-            must be float16, float32, float64, complex64 or complex128.
-
-    Returns:
-        Tensor, has the same shape as `input`.
-
-    Raises:
-        TypeError: If the dtype of `input` is not one of the following types:
-            float16, float32, float64, complex64, complex128.
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
-        >>> output = ops.cosh(x)
-        >>> print(output)
-        [1.0289385 1.364684 1.048436 1.0040528]
-        >>> x = Tensor(2.1, mindspore.float32)
-        >>> output = ops.cosh(x)
-        >>> print(output)
-        4.144313
-    """
-    return cosh_(input)
-
-
-def tanh(input):
-    r"""
-    Computes hyperbolic tangent of input element-wise. The Tanh function is defined as:
-
-    .. math::
-
-        tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
-
-    where :math:`x_i` is an element of the input Tensor.
-
-    Args:
-        input (Tensor): Input of Tanh.
-
-    Returns:
-        Tensor, with the same type and shape as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
-        >>> output = ops.tanh(input)
-        >>> print(output)
-        [0.7615941 0.9640276 0.9950547 0.9993293 0.9999092]
-    """
-    return tanh_(input)
-
-
-def asinh(input):
-    r"""
-    Computes inverse hyperbolic sine of the input element-wise.
-
-    .. math::
-
-        out_i = \sinh^{-1}(input_i)
-
-    Args:
-        input (Tensor): The input tensor of inverse hyperbolic sine function.
-
-    Returns:
-        Tensor, has the same shape and type as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
-        >>> output = ops.asinh(input)
-        >>> print(output)
-        [-2.3124382 1.1947632 1.8184465 5.298342 ]
-    """
-    return asinh_(input)
-
-
-def arcsinh(input):
-    r"""
-    Alias for :func:`mindspore.ops.asinh`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-    """
-    return asinh(input)
-
-
-def arctanh(input):
-    r"""
-    Alias for :func:`mindspore.ops.atanh`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-    """
-    return atanh(input)
-
-
-def acosh(input):
-    r"""
-    Computes inverse hyperbolic cosine of the inputs element-wise.
-
-    .. math::
-
-        out_i = \cosh^{-1}(input_i)
-
-    .. warning::
-        Given an input tensor input, the function computes inverse hyperbolic cosine of every element.
-        Input range is [1, inf].
-
-    Args:
-        input (Tensor): The input tensor of inverse hyperbolic cosine function.
-
-    Returns:
-        Tensor, has the same shape and type as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
-        >>> output = ops.acosh(x)
-        >>> print(output)
-        [0. 0.9624237 1.7627472 5.298292 ]
+        ``Ascend`` ``GPU`` ``CPU``
     """
-    return
+    return acos(input)
 
 
-def
+def tanh(input):
     r"""
-    Computes
+    Computes hyperbolic tangent of input element-wise. The Tanh function is defined as:
 
     .. math::
 
-
+        tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
+
+    where :math:`x_i` is an element of the input Tensor.
+
+    Tanh Activation Function Graph:
+
+    .. image:: ../images/Tanh.png
+        :align: center
 
     Args:
-        input (Tensor):
-            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-            The data type should be one of the following types: float16, float32.
+        input (Tensor): Input of Tanh.
 
     Returns:
-
+        Tensor, with the same type and shape as the `input`.
 
     Raises:
         TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16 or float32.
 
     Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([
-        >>> output = ops.
+        >>> input = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
+        >>> output = ops.tanh(input)
         >>> print(output)
-        [ 0.
+        [0.7615941 0.9640276 0.9950547 0.9993293 0.9999092]
     """
-    return
+    return tanh_(input)
 
 
-def
+def arcsinh(input):
     r"""
-
-
-    It returns :math:`\theta\ \in\ [-\pi, \pi]`
-    such that :math:`input = r*\sin(\theta), other = r*\cos(\theta)`, where :math:`r = \sqrt{input^2 + other^2}`.
-
-    Note:
-        - Arg `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
-          If they have different data types, the lower precision data type will be converted to relatively the
-          highest precision data type.
-        - At least one of the `input` and `other` args is Tensor.
+    Alias for :func:`mindspore.ops.asinh`.
 
-
-
-
-
-        other (Tensor): The input tensor. It has the same shape with `input` or
-            its shape is able to broadcast with `input`.
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+    """
+    return asinh(input)
 
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is same as `input`.
 
-
-
-
-            when data type conversion of Parameter is not supported.
+def arctanh(input):
+    r"""
+    Alias for :func:`mindspore.ops.atanh`.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([0, 1]), mindspore.float32)
-        >>> other = Tensor(np.array([1, 1]), mindspore.float32)
-        >>> output = ops.atan2(input, other)
-        >>> print(output)
-        [0. 0.7853982]
     """
-    return
+    return atanh(input)
 
 
 def bitwise_and(input, other):
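The re-added `tanh` docstring keeps both equivalent forms of the definition; the second form, with a single exponential of `2x`, is the one typically preferred in implementations. A quick NumPy check of the documented example values (illustrative only):

```python
import numpy as np

x = np.array([1, 2, 3, 4, 5], dtype=np.float32)
# Single-exponential form from the docstring: (exp(2x) - 1) / (exp(2x) + 1)
e2x = np.exp(2 * x)
print(np.allclose((e2x - 1) / (e2x + 1), np.tanh(x)))  # True
print(np.tanh(x))  # ~[0.7615941 0.9640276 0.9950547 0.9993293 0.9999092]
```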
@@ -3144,51 +2095,6 @@ def bitwise_right_shift(input, other):
     return rs(input, other)
 
 
-def nextafter(input, other):
-    """
-    Returns the next representable floating-point value after `input` towards `other` element-wise.
-
-    Say there are two float32 numbers :math:`a`, :math:`b`, and let the
-    representable delta of float32 datatype is :math:`eps`. If :math:`a < b`,
-    then the next representable of :math:`a` towards :math:`b` is :math:`a+eps`,
-    the next representable of :math:`b` towards :math:`a` is :math:`b-eps`.
-
-    .. math::
-
-        out_{i} = nextafter({input_{i}, other_{i}})
-
-    Args:
-        input (Tensor): The first input tensor. The shape of tensor is :math:`(N,*)` where :math:`*` means,
-            any number of additional dimensions. Must be one of the following types: float32, float64.
-
-        other (Tensor): The second input tensor. The shape of tensor is :math:`(N,*)` where :math:`*` means,
-            any number of additional dimensions. Must be one of the following types: float32, float64.
-
-    Returns:
-        Tensor, has the same shape and data type as `input`.
-
-    Raises:
-        TypeError: If neither `input` nor `other` is a Tensor.
-        TypeError: If the dtype of `input` and `other` is not one of: float32, float64.
-        TypeError: If the dtypes of `input` and `other` are not same.
-        ValueError: If `input`'s shape is not the same as `other`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_ = Tensor(np.asarray([0.0]), mindspore.float32)
-        >>> other_ = Tensor(np.asarray([0.1]), mindspore.float32)
-        >>> output_ = ops.nextafter(input_, other_)
-        >>> print(output_)
-        [1.e-45]
-    """
-    return nextafter_(input, other)
-
-
 def inv(x):
     r"""
     Computes Reciprocal of input tensor element-wise.
@@ -3284,78 +2190,6 @@ def invert(x):
     return invert_(x)
 
 
-def erf(input):
-    r"""
-    Computes the Gauss error function of `input` element-wise.
-
-    .. math::
-
-        erf(x)=\frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
-
-    Args:
-        input (Tensor): The input tensor of Gaussian error function. Supported dtypes:
-
-            - Ascend: float16, float32.
-            - GPU/CPU: float16, float32, float64.
-
-    Returns:
-        Tensor, has the same shape and dtype as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is neither float16 float32 or float64.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
-        >>> output = ops.erf(x)
-        >>> print(output)
-        [-0.8427168 0. 0.8427168 0.99530876 0.99997765]
-    """
-    return erf_(input)
-
-
-def erfc(input):
-    r"""
-    Computes the complementary error function of `input` element-wise.
-
-    .. math::
-
-        erfc(x) = 1 - \frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
-
-    Args:
-        input (Tensor): The input tensor. Supported dtypes:
-
-            - Ascend: float16, float32.
-            - GPU/CPU: float16, float32, float64.
-
-    Returns:
-        Tensor, has the same shape and dtype as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16, float32 or float64.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
-        >>> output = ops.erfc(x)
-        >>> print(output)
-        [1.8427168e+00 1.0000000e+00 1.5728319e-01 4.6912432e-03 2.2351742e-05]
-    """
-    return erfc_(input)
-
-
 def bessel_j0(x):
     r"""
     Computes Bessel function of the first kind, order 0 element-wise.
@@ -3791,46 +2625,6 @@ def log_matrix_determinant(input):
     return log_matrix_determinant_(input)
 
 
-def matrix_exp(input):
-    r"""
-    Computes the exponential of a single or a batch of square matrices.
-
-    .. math::
-
-        matrix\_exp(x) = \sum_{k=0}^{\infty} \frac{1}{k !} x^{k} \in \mathbb{K}^{n \times n}
-
-    where :math:`x` corresponds to `input` .
-
-    Args:
-        input (Tensor): The shape of tensor is :math:`(*, n, n)` where * is zero or more batch dimensions.
-            Must be one of the following types: float16, float32, float64, complex64, complex128.
-
-    Returns:
-        Tensor, has the same shape and dtype as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If the dtype of `input` is not one of the following dtype:
-            float16, float32, float64, complex64, complex128.
-        ValueError: If the rank of `input` is less than 2.
-        ValueError: If the size of last two dimensions of `input` are not equal.
-
-    Supported Platforms:
-
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([[1, 2], [0, 1]]), mindspore.float32)
-        >>> output = ops.matrix_exp(input)
-        >>> print(output)
-        [[2.7182817 5.436563 ]
-        [0. 2.7182817]]
-    """
-    return matrix_exp_(input)
-
-
 def lu_solve(b, LU_data, LU_pivots):
     r"""
     Computes the solution y to the system of linear equations :math:`Ay = b` ,
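The removed `matrix_exp` docstring defines the matrix exponential by its power series. For intuition, a truncated-series evaluation reproduces the documented 2x2 example; a production implementation would typically use scaling-and-squaring instead. A sketch under that caveat:

```python
import numpy as np

def matrix_exp_series(a, terms=20):
    """Truncated power series sum_k a^k / k! -- fine for small, well-scaled matrices."""
    result = np.eye(a.shape[-1])
    term = np.eye(a.shape[-1])
    for k in range(1, terms):
        term = term @ a / k     # a^k / k!, built incrementally
        result = result + term
    return result

a = np.array([[1.0, 2.0], [0.0, 1.0]])
print(matrix_exp_series(a))  # ~[[2.7182817 5.4365635] [0. 2.7182817]]
```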
@@ -3878,7 +2672,6 @@ def lu_solve(b, LU_data, LU_pivots):
         [-1.4000001]
         [ 0.6 ]]
     """
-    lu_solve_ = _get_cache_prim(LuSolve)()
     out = lu_solve_(b, LU_data, LU_pivots)
     return out
 
@@ -3972,53 +2765,12 @@ def slogdet(input):
     return log_matrix_determinant_(input)
 
 
-def trace(input):
-    """
-    Returns a new tensor that is the sum of the `input` main trace.
-
-    Note:
-        Input must be matrix, and complex number is not supported at present.
-
-    Args:
-        input (Tensor): A matrix to be calculated. The matrix must be two dimensional.
-
-    Returns:
-        Tensor, with the same data type as input `input`, and size equals to 1.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        ValueError: If the dimension of `input` is not equal to 2.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([[10, 11, 12], [13, 14, 15], [16, 17, 18]]), mindspore.float32)
-        >>> output = ops.trace(input)
-        >>> print(output)
-        42.0
-        >>> input = Tensor(np.arange(1, 13).reshape(3, 4), mindspore.float32)
-        >>> output = ops.trace(input)
-        >>> print(output)
-        18.0
-        >>> input = Tensor(np.arange(12, 0, -1).reshape(4, 3), mindspore.float32)
-        >>> output = ops.trace(input)
-        >>> print(output)
-        24.0
-    """
-    return trace_(input)
-
-
 def truncate_div(x, y):
     """
     Divides the first input tensor by the second input tensor element-wise and rounds the results
     of division towards zero. Equivalent to C-style integer division.
 
     Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
-    The inputs must be two tensors or one tensor and one scalar.
     When the inputs are two tensors,
     dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
     When the inputs are one tensor and one scalar,
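The removed `trace` examples show the op sums only the main diagonal, including for non-square inputs. A one-line NumPy equivalent of the three documented examples:

```python
import numpy as np

print(np.trace(np.array([[10., 11, 12], [13, 14, 15], [16, 17, 18]])))  # 42.0
print(np.trace(np.arange(1, 13, dtype=np.float32).reshape(3, 4)))       # 18.0 (1 + 6 + 11)
print(np.trace(np.arange(12, 0, -1, dtype=np.float32).reshape(4, 3)))   # 24.0 (12 + 8 + 4)
```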
@@ -4061,7 +2813,6 @@ def truncate_mod(x, y):
     Returns the remainder of division element-wise.
 
     Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
-    The inputs must be two tensors or one tensor and one scalar.
     When the inputs are two tensors,
     dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
     When the inputs are one tensor and one scalar,
@@ -4187,8 +2938,7 @@ def ldexp(x, other):
 
 def logit(input, eps=None):
     r"""
-    Calculate the logit of a tensor element-wise.
-    When eps is None, input `input` is not clamped.
+    Calculate the logit of a tensor element-wise.
 
     .. math::
         \begin{align}
@@ -4204,7 +2954,7 @@ def logit(input, eps=None):
     Args:
         input (Tensor): The input tensor of type float16, float32 or float64.
         eps (float, optional): The epsilon. If eps is not None, the input clamp bound is defined as [eps, 1-eps],
-            otherwise, the
+            otherwise, the `input` is not clamped. Default: ``None`` .
 
     Returns:
         Tensor, with the same shape and dtype as the `input`.
@@ -4227,59 +2977,14 @@ def logit(input, eps=None):
     """
     if eps is None:
        eps = -1.0
-    logit_ = _get_cache_prim(Logit)(eps)
+    logit_ = _get_cache_prim(P.Logit)(eps)
     return logit_(input)
 
-
 #####################################
 # Comparison Operation Functions.
 #####################################
 
 
-def less(input, other):
-    r"""
-    Computes the boolean value of :math:`input < other` element-wise.
-
-    Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
-    The inputs must be two tensors or one tensor and one scalar.
-    When the inputs are one tensor and one scalar,
-    the scalar could only be a constant.
-
-    .. math::
-
-        out_{i} =\begin{cases}
-            & \text{True, if } input_{i}<other_{i} \\
-            & \text{False, if } input_{i}>=other_{i}
-            \end{cases}
-
-    Args:
-        input (Union[Tensor, Number, bool]): The first input is a number or
-            a bool or a tensor whose data type is number or bool.
-        other (Union[Tensor, Number, bool]): The second input is a number or
-            a bool when the first input is a tensor, or it can be a tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
-
-    Raises:
-        TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
-        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
-        >>> output = ops.less(x, y)
-        >>> print(output)
-        [False False True]
-    """
-    return tensor_lt(input, other)
-
-
 def lt(input, other):
     """
     Alias for :func:`mindspore.ops.less` .
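The completed `eps` description above says it all: when `eps` is given, the input is clamped to [eps, 1-eps] before applying log(p / (1 - p)), which keeps the endpoints finite. A small NumPy sketch of that behaviour (illustrative only):

```python
import numpy as np

def logit(p, eps=None):
    """log(p / (1 - p)); if eps is given, clamp p to [eps, 1 - eps] first."""
    if eps is not None:
        p = np.clip(p, eps, 1.0 - eps)
    return np.log(p / (1.0 - p))

p = np.array([0.0, 0.5, 1.0])
print(logit(p, eps=1e-6))  # finite endpoints; with eps=None they would be -inf / inf
```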
@@ -4310,8 +3015,8 @@ def le(input, other):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.
-            `bool_ <https://www.mindspore.cn/docs/en/r2.
+            `number <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_.
         other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
             the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -4319,9 +3024,6 @@ def le(input, other):
     Returns:
         Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
 
-    Raises:
-        TypeError: If neither `input` nor `other` is a Tensor.
-
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
@@ -4363,8 +3065,8 @@ def gt(input, other):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.
-            `bool_ <https://www.mindspore.cn/docs/en/r2.
+            `number <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_ .
         other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
             the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -4403,67 +3105,21 @@ def ge(input, other):
       and the shapes of them can be broadcast.
     - When the inputs are one tensor and one scalar, the scalar could only be a constant.
     - Broadcasting is supported.
-    - If the input Tensor can be broadcast, the low dimension will be extended to the corresponding high dimension
-      in another input by copying the value of the dimension.
-
-    .. math::
-
-        out_{i} =\begin{cases}
-            & \text{True, if } input_{i}>=other_{i} \\
-            & \text{False, if } input_{i}<other_{i}
-            \end{cases}
-
-    Args:
-        input (Union[Tensor, Number, bool]): The first input is a number or
-            a bool or a tensor whose data type is number or bool.
-        other (Union[Tensor, Number, bool]): The second input is a number or
-            a bool when the first input is a tensor or a tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
-
-    Raises:
-        TypeError: If neither `input` nor `other` is a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
-        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
-        >>> output = ops.ge(x, y)
-        >>> print(output)
-        [True True False]
-    """
-    return tensor_ge(input, other)
-
-
-def eq(input, other):
-    r"""
-    Computes the equivalence between two tensors element-wise.
-
-    The second argument can be a number or a tensor whose shape is broadcastable with the first argument and vise versa.
+    - If the input Tensor can be broadcast, the low dimension will be extended to the corresponding high dimension
+      in another input by copying the value of the dimension.
 
     .. math::
 
         out_{i} =\begin{cases}
-            & \text{True, if } input_{i}
-            & \text{False, if } input_{i}
+            & \text{True, if } input_{i}>=other_{i} \\
+            & \text{False, if } input_{i}<other_{i}
             \end{cases}
 
-    Note:
-        - `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
-        - The shapes of the inputs can be broadcasted to each other.
-
     Args:
-        input (Union[Tensor, Number]): The first input is a number or
-            a tensor whose data type is number.
-        other (Union[Tensor, Number]): The second input is a number
-
-            the second input should be a tensor.
+        input (Union[Tensor, Number, bool]): The first input is a number or
+            a bool or a tensor whose data type is number or bool.
+        other (Union[Tensor, Number, bool]): The second input is a number or
+            a bool when the first input is a tensor or a tensor whose data type is number or bool.
 
     Returns:
         Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
@@ -4476,23 +3132,18 @@ def eq(input, other):
 
     Examples:
         >>> import mindspore
+        >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>>
-        >>>
-        >>> output = ops.
-        >>> print(output)
-        [False True False]
-        >>> # case 2: The shape of two inputs are the same
-        >>> x = Tensor([1, 2, 3], mindspore.int32)
-        >>> y = Tensor([1, 2, 4], mindspore.int32)
-        >>> output = ops.eq(x, y)
+        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
+        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
+        >>> output = ops.ge(x, y)
         >>> print(output)
-        [ True
+        [True True False]
     """
-    return
+    return tensor_ge(input, other)
 
 
-def
+def eq(input, other):
     r"""
     Computes the equivalence between two tensors element-wise.
 
@@ -4511,7 +3162,7 @@ def equal(input, other):
 
     Args:
         input (Union[Tensor, Number]): The first input is a number or
-            a tensor whose data type is number.
+            a tensor whose data type is number.
         other (Union[Tensor, Number]): The second input is a number when the first input is a tensor.
             The data type is the same as the first input. If the first input is a number,
             the second input should be a tensor.
@@ -4530,17 +3181,17 @@ def equal(input, other):
         >>> from mindspore import Tensor, ops
         >>> # case 1: The shape of two inputs are different
         >>> x = Tensor([1, 2, 3], mindspore.float32)
-        >>> output = ops.
+        >>> output = ops.eq(x, 2.0)
         >>> print(output)
         [False True False]
         >>> # case 2: The shape of two inputs are the same
         >>> x = Tensor([1, 2, 3], mindspore.int32)
         >>> y = Tensor([1, 2, 4], mindspore.int32)
-        >>> output = ops.
+        >>> output = ops.eq(x, y)
         >>> print(output)
         [ True True False]
     """
-    return
+    return equal(input, other)
 
 
 def ne(input, other):
@@ -4550,7 +3201,6 @@ def ne(input, other):
     Note:
         - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
           consistent.
-        - The inputs must be two tensors or one tensor and one scalar.
         - When the inputs are two tensors, the shapes of them could be broadcast.
         - When the inputs are one tensor and one scalar, the scalar could only be a constant.
         - Broadcasting is supported.
@@ -4573,7 +3223,6 @@ def ne(input, other):
 
     Raises:
         TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
-        TypeError: If neither `input` nor `other` is a Tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -4592,17 +3241,7 @@ def ne(input, other):
         >>> print(output)
         [False False True]
     """
-    return
-
-
-def not_equal(input, other):
-    r"""
-    Alias for :func:`mindspore.ops.ne` .
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-    """
-    return ne(input, other)
+    return not_equal(input, other)
 
 
 def approximate_equal(x, y, tolerance=1e-5):
@@ -4650,7 +3289,7 @@ def approximate_equal(x, y, tolerance=1e-5):
         >>> print(output)
         [ True False False]
     """
-    return P.ApproximateEqual(tolerance)(x, y)
+    return _get_cache_prim(P.ApproximateEqual)(tolerance)(x, y)
 
 
 def isfinite(x):
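The `approximate_equal` change swaps direct primitive construction for `_get_cache_prim`, which memoizes primitive instances by their init arguments so repeated calls don't rebuild the op. A rough sketch of that caching pattern, with hypothetical stand-in names (not MindSpore's internals):

```python
import functools

@functools.lru_cache(maxsize=None)
def get_cached_prim(prim_cls, *init_args):
    """Return one shared primitive instance per (class, init_args) pair."""
    return prim_cls(*init_args)

class ApproximateEqual:  # stand-in for the real primitive
    def __init__(self, tolerance):
        self.tolerance = tolerance
    def __call__(self, x, y):
        return abs(x - y) < self.tolerance

# Both calls reuse the same instance instead of constructing a new op each time.
assert get_cached_prim(ApproximateEqual, 1e-5) is get_cached_prim(ApproximateEqual, 1e-5)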
@@ -4740,7 +3379,7 @@ def isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False):
     is "close" to the corresponding element of `other`. Closeness is defined as:
 
     .. math::
-
+        |input-other| ≤ atol + rtol × |other|
 
     Args:
         input (Tensor): First Tensor to compare, with data type belongs to float32, float16, int32.
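The closeness rule added to the `isclose` docstring is the same asymmetric test NumPy uses. A direct sketch:

```python
import numpy as np

def isclose(a, b, rtol=1e-05, atol=1e-08):
    """Elementwise |a - b| <= atol + rtol * |b| (the rule from the docstring)."""
    return np.abs(a - b) <= atol + rtol * np.abs(b)

a = np.array([1.0, 1.0001, 2.0])
b = np.array([1.0, 1.0, 2.1])
print(isclose(a, b))     # [ True False False]
print(np.isclose(a, b))  # matches (ignoring NaN handling)
```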
@@ -4946,61 +3585,6 @@ def fmax(input, other):
     return fmax_(input, other)
 
 
-def maximum(input, other):
-    r"""
-    Computes the maximum of input tensors element-wise.
-
-    Note:
-        - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
-          consistent.
-        - The inputs must be two tensors or one tensor and one scalar.
-        - When the inputs are two tensors,
-          dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
-        - When the inputs are one tensor and one scalar,
-          the scalar could only be a constant.
-        - Broadcasting is supported.
-        - If one of the elements being compared is a NaN, then that element is returned.
-
-    .. math::
-        output_i = \max(input_i, other_i)
-
-    Args:
-        input (Union[Tensor, Number, bool]): The first input is a number or
-            a bool or a tensor whose data type is number or bool.
-        other (Union[Tensor, Number, bool]): The second input is a number or
-            a bool when the first input is a tensor or a tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
-        ValueError: If `input` and `other` are not the same shape.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> # case 1 : same data type
-        >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
-        >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
-        >>> output = ops.maximum(x, y)
-        >>> print(output)
-        [4. 5. 6.]
-        >>> # case 2 : different data type
-        >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.int32)
-        >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
-        >>> output = ops.maximum(x, y)
-        >>> print(output.dtype)
-        Float32
-    """
-    return maximum_(input, other)
-
-
 def fmin(input, other):
     r"""
     Computes the minimum of input tensors element-wise.
@@ -5044,59 +3628,6 @@ def fmin(input, other):
     return fmin_(input, other)
 
 
-def minimum(input, other):
-    r"""
-    Computes the minimum of input tensors element-wise.
-
-    Note:
-        - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
-          consistent.
-        - The inputs must be two tensors or one tensor and one scalar.
-        - When the inputs are two tensors, dtypes of them cannot be bool at the same time.
-        - When the inputs are one tensor and one scalar, the scalar could only be a constant.
-        - Shapes of them are supposed to be broadcast.
-        - If one of the elements being compared is a NaN, then that element is returned.
-
-    .. math::
-        output_i = \min(input_i, other_i)
-
-    Args:
-        input (Union[Tensor, Number, bool]): The first input is a number or
-            a bool or a tensor whose data type is number or bool.
-        other (Union[Tensor, Number, bool]): The second input is a number or
-            a bool when the first input is a tensor or a tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
-        ValueError: If `input` and `other` are not the same shape after broadcast.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> # case 1 : same data type
-        >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
-        >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
-        >>> output = ops.minimum(x, y)
-        >>> print(output)
-        [1. 2. 3.]
-        >>> # case 2 : different data type
-        >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.int32)
-        >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
-        >>> output = ops.minimum(x, y)
-        >>> print(output.dtype)
-        Float32
-    """
-    return minimum_(input, other)
-
-
 def median(input, axis=-1, keepdims=False):
     r"""
     Computes the median and indices of input tensor.
@@ -5227,6 +3758,8 @@ def nanmean(input, axis=None, keepdims=False, *, dtype=None):
     """
     _check_is_tensor("input", input, "nanmean")
     _check_repeat_in_axis(axis, input.ndim, "nanmean")
+    if input.dtype not in mstype.float_type:
+        raise TypeError(f"For 'nanmean', input should be floating point dtype, but got {type(input)}.")
     nan_sum = nansum(input, axis, keepdims)
     is_num = isnan(input).logical_not()
     is_num = is_num.sum(axis=axis, keepdims=keepdims)
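The `nanmean` body divides a NaN-ignoring sum by the count of non-NaN elements, which is why the new dtype guard matters: integer inputs cannot hold NaN, so the op now rejects them up front. An equivalent NumPy sketch:

```python
import numpy as np

def nanmean(x, axis=None, keepdims=False):
    """Mean over non-NaN elements: nansum / count of non-NaN entries."""
    nan_sum = np.nansum(x, axis=axis, keepdims=keepdims)
    count = (~np.isnan(x)).sum(axis=axis, keepdims=keepdims)
    return nan_sum / count

x = np.array([1.0, np.nan, 3.0])
print(nanmean(x))  # 2.0
```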
@@ -5322,7 +3855,7 @@ def ormqr(input, tau, other, left=True, transpose=False):
         TypeError: If dtype of `input` or `tau` or `other` is not one of: float64, float32, complex64, complex128.
         ValueError: If the dimension of `input` or `other` is less than 2D.
         ValueError: If rank(`input`) - rank(`tau`) != 1.
-        ValueError: If tau.shape[:-
+        ValueError: If tau.shape[:-1] != input.shape[:-2]
         ValueError: If other.shape[:-2] != input.shape[:-2]
         ValueError: If left == true, other.shape[-2] < tau.shape[-1].
         ValueError: If left == true, other.shape[-2] != input.shape[-2].
@@ -5396,11 +3929,11 @@ def heaviside(input, values):
     Computes the Heaviside step function for each element in input.
 
     .. math::
-
-
-
-
-
+        \text { heaviside }(\text { input, values })=\left\{\begin{array}{ll}
+        0, & \text { if input }<0 \\
+        \text { values, } & \text { if input }=0 \\
+        1, & \text { if input }>0
+        \end{array}\right.
 
     Args:
         input (Tensor): The input tensor. With real number data type.
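The piecewise definition restored in the `heaviside` docstring maps directly onto nested `where` selections. A NumPy sketch:

```python
import numpy as np

def heaviside(x, values):
    """0 where x < 0, `values` where x == 0, 1 where x > 0."""
    return np.where(x < 0, 0.0, np.where(x == 0, values, 1.0))

x = np.array([-1.5, 0.0, 2.0])
print(heaviside(x, 0.5))     # [0.  0.5 1. ]
print(np.heaviside(x, 0.5))  # NumPy's built-in agrees
```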
@@ -5488,9 +4021,6 @@ def logspace(start, end, steps, base=10, *, dtype=mstype.float32):
         &output = [base^{start}, base^{start + 1 * step}, ... , base^{start + (steps-2) * step}, base^{end}]
         \end{aligned}
 
-    Note:
-        - Input `base` must be integer.
-
     Args:
         start (Union[float, Tensor]): Start value of interval.
         end (Union[float, Tensor]): End value of interval.
@@ -5532,6 +4062,8 @@ def logspace(start, end, steps, base=10, *, dtype=mstype.float32):
 def logaddexp(input, other):
     r"""
     Computes the logarithm of the sum of exponentiations of the inputs.
+    This function is useful in statistics where the calculated probabilities of events may be
+    so small as to exceed the range of normal floating point numbers.
 
     .. math::
 
@@ -5572,7 +4104,7 @@ def logaddexp(input, other):
                         f"but got {input.dtype} and {other.dtype}.")
     m = maximum(input, other)
     abs_val = abs(input - other)
-    exp_val = tensor_exp(
+    exp_val = tensor_exp(neg(abs_val))
     y = m + log1p(exp_val)
     return y
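The `logaddexp` body is the standard max-shift trick: log(e^a + e^b) = max(a, b) + log1p(e^(-|a - b|)), which never exponentiates a large positive number. A NumPy sketch showing why the naive form fails:

```python
import numpy as np

def logaddexp(a, b):
    """Stable log(exp(a) + exp(b)) via the max-shift identity."""
    m = np.maximum(a, b)
    return m + np.log1p(np.exp(-np.abs(a - b)))

print(logaddexp(-1000.0, -1000.0))     # ~-999.307; np.log(np.exp(-1000)*2) underflows to -inf
print(np.logaddexp(-1000.0, -1000.0))  # same result
```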
@@ -5618,7 +4150,7 @@ def logaddexp2(input, other):
|
|
|
5618
4150
|
|
|
5619
4151
|
m = maximum(input, other)
|
|
5620
4152
|
abs_val = abs(input - other)
|
|
5621
|
-
exp2_val = pows(2.,
|
|
4153
|
+
exp2_val = pows(2., neg(abs_val))
|
|
5622
4154
|
y = m + log2(1. + exp2_val)
|
|
5623
4155
|
return y
|
|
5624
4156
|
|
|
@@ -5949,35 +4481,6 @@ def std_mean(input, axis=None, ddof=0, keepdims=False):
|
|
|
5949
4481
|
return tensor_pow(output[0], 0.5), output[1]
|
|
5950
4482
|
|
|
5951
4483
|
|
|
5952
|
-
def real(input):
|
|
5953
|
-
r"""
|
|
5954
|
-
Returns a Tensor that is the real part of the input.
|
|
5955
|
-
If input is real, it is returned unchanged.
|
|
5956
|
-
|
|
5957
|
-
Args:
|
|
5958
|
-
input (Tensor): The input tensor to compute to.
|
|
5959
|
-
|
|
5960
|
-
Returns:
|
|
5961
|
-
Tensor, the shape is the same as the `input`.
|
|
5962
|
-
|
|
5963
|
-
Raises:
|
|
5964
|
-
TypeError: If `input` is not a Tensor.
|
|
5965
|
-
|
|
5966
|
-
Supported Platforms:
|
|
5967
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
5968
|
-
|
|
5969
|
-
Examples:
|
|
5970
|
-
>>> import mindspore as ms
|
|
5971
|
-
>>> import mindspore.ops as ops
|
|
5972
|
-
>>> import numpy as np
|
|
5973
|
-
>>> input = ms.Tensor(np.asarray(np.complex(1.3+0.4j)), ms.complex64)
|
|
5974
|
-
>>> output = ops.real(input)
|
|
5975
|
-
>>> print(output)
|
|
5976
|
-
1.3
|
|
5977
|
-
"""
|
|
5978
|
-
return real_(input)
|
|
5979
|
-
|
|
5980
|
-
|
|
5981
4484
|
def reciprocal(input):
|
|
5982
4485
|
r"""
|
|
5983
4486
|
Returns reciprocal of a tensor element-wise.
|
|
@@ -5988,7 +4491,6 @@ def reciprocal(input):
|
|
|
5988
4491
|
|
|
5989
4492
|
Args:
|
|
5990
4493
|
input (Tensor): The input tensor.
|
|
5991
|
-
:math:`(N, *)` where :math:`*` means, any number of additional dimensions.
|
|
5992
4494
|
|
|
5993
4495
|
Returns:
|
|
5994
4496
|
Tensor, has the same shape as the `input`.
|
|
@@ -6008,108 +4510,9 @@ def reciprocal(input):
         >>> print(output)
         [1. 0.5 0.25]
     """
-    if not isinstance(input, Tensor):
-        raise TypeError(f"For reciprocal, the input must be a Tensor, but got {type(input)}.")
-    if not is_complex(input) and not ops.is_floating_point(input):
-        input = ops.cast(input, mstype.float32)
     return reciprocal_(input)
 
 
-def rsqrt(input):
-    r"""
-    Computes reciprocal of square root of input tensor element-wise.
-
-    .. math::
-
-        out_{i} = \frac{1}{\sqrt{input_{i}}}
-
-    Args:
-        input (Tensor): The input of rsqrt. Its each element must be a non-negative
-            number, if an element is negative, the calculation result is nan.
-
-    Returns:
-        Tensor, has the same shape and dtype as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore as ms
-        >>> import mindspore.ops as ops
-        >>> input = ms.Tensor([-0.0370, 0.2970, 1.5420, -0.9105])
-        >>> output = ops.rsqrt(input)
-        >>> print(output)
-        [ nan 1.8349396 0.80530024 nan]
-    """
-    return rsqrt_(input)
-
-
-def sqrt(x):
-    """
-    Returns sqrt of a tensor element-wise.
-
-    .. math::
-
-        out_{i} = \\sqrt{x_{i}}
-
-    Args:
-        x (Tensor): The input tensor with a dtype of number.Number.
-
-    Returns:
-        Tensor, has the same shape and dtype as the `x`.
-
-    Raises:
-        TypeError: If `x` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32)
-        >>> output = ops.sqrt(x)
-        >>> print(output)
-        [1. 2. 3.]
-    """
-    return sqrt_(x)
-
-
-def square(input):
-    """
-    Returns square of a tensor element-wise.
-
-    .. math::
-
-        y_i = input_i ^ 2
-
-    Args:
-        input (Tensor): The input tensor with a dtype of Number.
-
-    Returns:
-        Tensor, has the same shape and dtype as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
-        >>> output = ops.square(input)
-        >>> print(output)
-        [1. 4. 9.]
-    """
-    return square_(input)
-
-
 def outer(input, vec2):
     """
     Return outer product of `input` and `vec2`. If `input` is a vector of size :math:`n`
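A note on the `reciprocal` hunk above: the wrapper no longer promotes non-float, non-complex inputs to float32 before dispatching. A minimal caller-side sketch, assuming the released 2.3.0rc1 wrapper behaves exactly as this hunk shows:

```python
import mindspore as ms
from mindspore import ops

# The removed lines used to cast integer inputs to float32 inside the
# wrapper; after this change the cast has to happen at the call site.
x = ms.Tensor([1, 2, 4], ms.int32)
out = ops.reciprocal(ops.cast(x, ms.float32))
print(out)  # expected: [1.  0.5  0.25]
```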
@@ -6127,7 +4530,6 @@ def outer(input, vec2):
 
     Raises:
         TypeError: If `input` or `vec2` is not a Tensor.
-        ValueError: If `input` or `vec2` is not an 1-D Tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -6150,10 +4552,6 @@ def outer(input, vec2):
         raise TypeError("the input input must be Tensor!")
     if not isinstance(vec2, (Tensor, Tensor_)):
         raise TypeError("the input vec2 must be Tensor!")
-    if len(input.shape) != 1:
-        raise ValueError("the input input must be a 1-D vector!")
-    if len(vec2.shape) != 1:
-        raise ValueError("the input vec2 must be a 1-D vector!")
     input = input.reshape(-1, 1)
     y = tensor_mul(input, vec2)
     return y
@@ -6193,10 +4591,6 @@ def mv(mat, vec):
         raise TypeError("The input mat must be Tensor.")
     if not isinstance(vec, (Tensor, Tensor_)):
         raise TypeError("The input vec must be Tensor.")
-    if len(mat.shape) != 2:
-        raise ValueError("The input mat must be 2-D Tensor.")
-    if len(vec.shape) != 1:
-        raise ValueError("The input vec must be 1-D Tensor.")
 
     length_vec = get_x_shape(vec.shape)
     vec = reshape_(vec, (length_vec[0], 1))
@@ -6251,10 +4645,6 @@ def addbmm(input, batch1, batch2, *, beta=1, alpha=1):
          [1285. 1377. 1469.]
          [1621. 1745. 1869.]]
     """
-    dim1 = batch1.ndim
-    dim2 = batch2.ndim
-    if dim1 != 3 or dim2 != 3:
-        raise ValueError(f"For 'addbmm', 'batch1' and 'batch2' must be 3D, but got {dim1} and {dim2} respectively.")
     if not isinstance(alpha, (int, float)):
         raise TypeError(f"For 'addbmm', parameter 'alpha' must be an int or float, but got {type(alpha)}.")
     if not isinstance(beta, (int, float)):
@@ -6339,7 +4729,7 @@ def addmv(input, mat, vec, *, beta=1, alpha=1):
 
     Raises:
         TypeError: If `mat`, `vec`, `input` is not a Tensor.
-        TypeError: If inputs `mat`,
+        TypeError: If inputs `mat`, `vec` are not the same dtype.
         ValueError: If `mat` is not a 2-D Tensor.
         ValueError: If `vec` is not a 1-D Tensor.
 
@@ -6362,17 +4752,14 @@ def addmv(input, mat, vec, *, beta=1, alpha=1):
         raise TypeError("For Addmv, inputs must be all tensors.")
     if dtype_(mat) != dtype_(vec):
         raise TypeError("For Addmv, the mat and vec should be the same dtype.")
-    _check_input_1d(vec.shape, "vec", "Addmv")
-    _check_input_2d(mat.shape, "mat", "Addmv")
     _check_input_dtype("input", input_dtype,
                        [mstype.float16, mstype.float32, mstype.float64,
                         mstype.int16, mstype.int32, mstype.int64], "Addmv")
     _check_attr_dtype("alpha", alpha, [int, float, bool], "Addmv")
     _check_attr_dtype("beta", beta, [int, float, bool], "Addmv")
     if input_dtype in (mstype.int16, mstype.int32, mstype.int64):
-
-
-        beta = scalar_cast(beta, mstype.int32)
+        alpha = ops.scalar_cast(alpha, mstype.int64)
+        beta = ops.scalar_cast(beta, mstype.int64)
     out = beta * input + alpha * mv(mat, vec)
     return out
 
@@ -6403,7 +4790,11 @@ def adjoint(x):
         [[0.-0.j 2.-2.j]
          [1.-1.j 3.-3.j]]
     """
-
+    _dtype = x.dtype
+    _t = x.swapaxes(-1, -2)
+    if _dtype in mstype.complex_type:
+        return _t.conj()
+    return _t
 
 
 def addr(x, vec1, vec2, *, beta=1, alpha=1):
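The new `adjoint` body is a transpose of the last two axes plus a conjugate for complex dtypes. A NumPy sketch of the same computation (illustrative only, not the MindSpore kernel):

```python
import numpy as np

# Transpose the trailing two axes; conjugate only when the dtype is complex,
# mirroring the `mstype.complex_type` branch added above.
def adjoint_sketch(a: np.ndarray) -> np.ndarray:
    t = a.swapaxes(-1, -2)
    return t.conj() if np.iscomplexobj(a) else t

a = np.array([[0. + 0.j, 1. + 1.j], [2. + 2.j, 3. + 3.j]])
print(adjoint_sketch(a))
# [[0.-0.j 2.-2.j]
#  [1.-1.j 3.-3.j]]
```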
@@ -6459,25 +4850,21 @@ def addr(x, vec1, vec2, *, beta=1, alpha=1):
         raise TypeError("For Addr, inputs must be all tensors.")
     if dtype_(vec1) != dtype_(vec2):
         raise TypeError("For Addr, the vec1 and vec2 should be the same dtype.")
-    _check_input_1d(vec1.shape, "vec1", "Addr")
-    _check_input_1d(vec2.shape, "vec2", "Addr")
     _check_input_dtype("x", input_dtype,
                        [mstype.float16, mstype.float32, mstype.float64,
                         mstype.int16, mstype.int32, mstype.int64], "Addr")
     _check_attr_dtype("alpha", alpha, [int, float, bool], "Addr")
     _check_attr_dtype("beta", beta, [int, float, bool], "Addr")
     if input_dtype in (mstype.int16, mstype.int32, mstype.int64):
-
-
-        beta = scalar_cast(beta, mstype.int32)
-    matmul_op = P.MatMul()
+        alpha = ops.scalar_cast(alpha, mstype.int64)
+        beta = ops.scalar_cast(beta, mstype.int64)
 
     length_vec1 = get_x_shape(vec1.shape)
     vec1 = reshape_(vec1, (length_vec1[0], 1))
     length_vec2 = get_x_shape(vec2.shape)
     vec2 = reshape_(vec2, (1, length_vec2[0]))
 
-    out = beta * x + alpha *
+    out = beta * x + alpha * matmul_(vec1, vec2)
     return out
 
 
@@ -6497,7 +4884,7 @@ def lcm(input, other):
 
     Raises:
         TypeError: If data type `input` or `other` is not int32 or int64.
-        ValueError: If
+        ValueError: If shapes of two inputs are not broadcastable.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -6511,8 +4898,6 @@ def lcm(input, other):
         >>> print(y)
         [14 24 36]
     """
-
-    lcm_ = _get_cache_prim(Lcm)()
     return lcm_(input, other)
 
 
@@ -6543,44 +4928,9 @@ def cdist(x1, x2, p=2.0):
         TypeError: If `p` is not float32.
         ValueError: If `p` is negative.
         ValueError: If dimension of `x1` is not the same as `x2`.
-        ValueError: If dimension of `x1` or `x2` is neither 2 nor 3.
-        ValueError: If the batch shape of `x1` is not the same as the shape of `x2`.
-        ValueError: If the number of columns of `x1` is not the same as
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
-        >>> y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
-        >>> output = ops.cdist(x, y, 2.0)
-        >>> print(output)
-        [[[2.8284273 2.8284273]
-          [1.4142137 1.4142137]]]
-    """
-    cdist_ = _get_cache_prim(P.Cdist)(p)
-    return cdist_(x1, x2)
-
-
-def gcd(input, other):
-    """
-    Computes greatest common divisor of input tensors element-wise.
-    The shape of two inputs should be broadcastable, and data type of them should be
-    one of: int32, int64
-
-    Args:
-        input (Tensor): The first input tensor.
-        other (Tensor): The second input tensor.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is one
-        with higher digits in the two inputs.
-
-    Raises:
-        TypeError: If data type `input` or `other` is not int32 or int64.
-        ValueError: If shape of two inputs are not broadcastable.
+        ValueError: If dimension of `x1` or `x2` is neither 2 nor 3.
+        ValueError: If the batch shape of `x1` is not the same as the shape of `x2`.
+        ValueError: If the number of columns of `x1` is not the same as that of `x2`.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -6588,15 +4938,15 @@ def gcd(input, other):
     Examples:
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>>
-        >>>
-        >>>
-        >>> print(
-        [
+        >>> x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
+        >>> y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
+        >>> output = ops.cdist(x, y, 2.0)
+        >>> print(output)
+        [[[2.8284273 2.8284273]
+          [1.4142137 1.4142137]]]
     """
-
-
-    return gcd_(input, other)
+    cdist_ = _get_cache_prim(P.Cdist)(p)
+    return cdist_(x1, x2)
 
 
 def lerp(input, end, weight):
@@ -6963,8 +5313,7 @@ def frac(x):
         >>> print(output)
         [ 0. 0.1992 -0.5 ]
     """
-
-    return frac_op(x, 1)
+    return mod_(x, 1)
 
 
 #####################################
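`frac` now delegates to a remainder-by-1 primitive. A NumPy sketch of the intended semantics, assuming `mod_` uses the truncation-style remainder that the docstring example implies (an assumption about the primitive, not a documented guarantee):

```python
import numpy as np

# Fractional part as a truncation-style remainder mod 1: the result keeps the
# sign of the input, matching the `[ 0. 0.1992 -0.5 ]` example above.
x = np.array([2.0, 4.1992, -2.5], dtype=np.float32)
print(np.fmod(x, 1))  # -> [ 0.  0.1992  -0.5 ] (up to print formatting)
```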
@@ -7013,6 +5362,7 @@ def cummin(input, axis):
 
     Raises:
         TypeError: If `input` is not a Tensor.
+        TypeError: If `input` is a Tensor, but the type is complex or bool.
         TypeError: If `axis` is not an int.
         ValueError: If `axis` is out the range of `[-input.ndim, input.ndim - 1]`.
 
@@ -7029,6 +5379,8 @@ def cummin(input, axis):
         >>> print(output[1])
         [0 1 1 1 4 4]
     """
+    if isinstance(axis, bool):
+        raise TypeError(f"For 'cummin', the date type of 'axis' must be Int, but got {axis}.")
     cummin_op = _get_cache_prim(Cummin)(axis=0)
     if axis == 0:
         out1, out2 = cummin_op(input)
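The new guard in `cummin` rejects `axis=True`/`False` explicitly. A separate check is needed (rather than relying on an `int` type test) because of Python's type lattice:

```python
# bool is a subclass of int in Python, so isinstance(True, int) is True and a
# plain integer check would silently accept a boolean axis.
print(isinstance(True, int))   # True
print(isinstance(True, bool))  # True -- hence the dedicated bool check first
```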
@@ -7042,55 +5394,6 @@ def cummin(input, axis):
     return [out1, out2]
 
 
-def cummax(input, axis):
-    r"""
-    Returns a tuple (values,indices) where 'values' is the cumulative maximum value of input Tensor `input`
-    along the dimension `axis`, and `indices` is the index location of each maximum value.
-
-    .. math::
-        \begin{array}{ll} \\
-            y_{i} = \max(x_{1}, x_{2}, ... , x_{i})
-        \end{array}
-
-    Args:
-        input (Tensor): The input Tensor, rank of `input` > 0.
-        axis (int): The dimension to do the operation over. The value of `axis` must be in the range
-            `[-input.ndim, input.ndim - 1]`.
-
-    Returns:
-        tuple [Tensor], tuple of 2 Tensors, containing the cumulative maximum of elements and the index.
-        The shape of each output tensor is the same as input `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If `axis` is not an int.
-        ValueError: If `axis` is out the range of `[-input.ndim, input.ndim - 1]`.
-
-    Supported Platforms:
-        ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor
-        >>> import mindspore.ops as ops
-        >>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
-        >>> output = ops.cummax(x, axis=0)
-        >>> print(output[0])
-        [[ 3.  4.  6. 10.]
-         [ 3.  6.  7. 10.]
-         [ 4.  6.  8. 10.]
-         [ 4.  6.  8. 10.]]
-        >>> print(output[1])
-        [[0 0 0 0]
-         [0 1 1 0]
-         [2 1 2 0]
-         [2 1 2 0]]
-    """
-    _cummax = _get_cache_prim(ops.Cummax)(axis=axis)
-    return _cummax(input)
-
-
 def cumsum(x, axis, dtype=None):
     """
     Computes the cumulative sum of input Tensor along `axis`.
@@ -7104,7 +5407,7 @@ def cumsum(x, axis, dtype=None):
     For the case of dynamic shape, the dtype of `x` only support int32, float16 or float32.
 
     Args:
-        x (Tensor): The input Tensor of shape :math:`(N
+        x (Tensor): The input Tensor of shape :math:`(N, *)` where :math:`*` means, any number
             of additional dimensions.
         axis (int): Axis along which the cumulative sum is computed.
         dtype (:class:`mindspore.dtype`, optional): The desired dtype of returned Tensor. If specified,
@@ -7175,8 +5478,8 @@ def sparse_segment_mean(x, indices, segment_ids):
         TypeError: If the dtype of `x` is not one of the following dtype: float16, float32, float64.
         TypeError: If the dtype of `indices` and `segment_ids` are not one of the following dtype: int32, int64.
         TypeError: If the dtype of `indices` and `segment_ids` are not the same.
-        ValueError: If the shape of `x`,
-        ValueError: If the size of
+        ValueError: If the shape of `x`, `indices` or `segment_ids` don't meet the parameter description.
+        ValueError: If the size of `indices` and `segment_ids` are not the same.
 
     Supported Platforms:
         ``GPU`` ``CPU``
@@ -7258,6 +5561,8 @@ def block_diag(*inputs):
                 f"{ary.ndim}"
             )
 
+    if not inputs:
+        raise RuntimeError("For 'block_diag', the input is empty.")
     arys = [to_2d(ary) for ary in inputs]
     matrix = [ops.concat(to_col_block(arys, idx, ary)) for idx, ary in enumerate(arys)]
     return ops.concat(matrix, 1)
@@ -7276,7 +5581,7 @@ def atleast_1d(inputs):
         Tensor or list[Tensor]. If returned a list, every element `a` in that list satisfies `a.ndim >= 1`.
 
     Raises:
-        TypeError: If the `
+        TypeError: If the `inputs` is not a tensor or a list of tensors.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -7358,7 +5663,7 @@ def dstack(inputs):
         trans_inputs += (tensor,)
     if not trans_inputs:
         raise ValueError("For 'dstack', at least one tensor is needed to concatenate.")
-    return P.Concat(2)(trans_inputs)
+    return _get_cache_prim(P.Concat)(2)(trans_inputs)
 
 
 @_primexpr
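Several hunks in this file (here, and in `cartesian_prod` and `vstack` below) replace direct primitive construction with `_get_cache_prim`, which reuses one primitive instance per argument combination instead of rebuilding it on every call. A minimal sketch of that caching pattern, with illustrative names rather than MindSpore's internals:

```python
# Cache one primitive instance per (class, constructor-args) key so repeated
# calls do not pay construction cost each time. Hypothetical helper names.
_prim_cache = {}

def get_cached_prim(prim_cls, *args):
    key = (prim_cls, args)
    if key not in _prim_cache:
        _prim_cache[key] = prim_cls(*args)
    return _prim_cache[key]

class Concat:  # stand-in for P.Concat
    def __init__(self, axis):
        self.axis = axis

assert get_cached_prim(Concat, 2) is get_cached_prim(Concat, 2)
```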
@@ -7376,7 +5681,7 @@ def diff(x, n=1, axis=-1, prepend=None, append=None):
 
     Note:
         Zero-shaped Tensor is not supported, a value error is raised if
-        an empty Tensor is encountered. Any dimension of
+        an empty Tensor is encountered. Any dimension of a Tensor is 0, which is considered
         an empty Tensor. Tensor with shape of :math:`(0,)`, :math:`(1, 2, 0, 4)` are all
         empty Tensor.
 
@@ -7555,7 +5860,7 @@ def atleast_2d(inputs):
         Tensor or list[Tensor]. If returned a list, every element `a` in that list satisfies `a.ndim >= 2` .
 
     Raises:
-        TypeError: If the `
+        TypeError: If the `inputs` is not a tensor or a list of tensors.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -7615,9 +5920,9 @@ def cartesian_prod(*inputs):
         >>> print(len(out))
         60
     """
-    meshgrid = P.Meshgrid(indexing="ij")
+    meshgrid = _get_cache_prim(P.Meshgrid)(indexing="ij")
     meshgrid_output = meshgrid(inputs)
-    stack = P.Stack(axis=-1)
+    stack = _get_cache_prim(P.Stack)(axis=-1)
     stack_output = stack(meshgrid_output)
     return reshape_(stack_output, (-1, len(inputs)))
@@ -7638,7 +5943,7 @@ def atleast_3d(inputs):
     a 2-D Tensor of shape :math:`(M, N)` becomes a tensor of shape :math:`(M, N, 1)`.
 
     Raises:
-        TypeError: If the `
+        TypeError: If the `inputs` is not a tensor or a list of tensors.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -7673,9 +5978,9 @@ def atleast_3d(inputs):
         if ndim == 0:
             return reshape_(arr, (1, 1, 1))
         if ndim == 1:
-            return reshape_(arr, (1,
+            return reshape_(arr, (1, size_(arr), 1))
         if ndim == 2:
-            return reshape_(arr,
+            return reshape_(arr, shape_(arr) + (1,))
         return arr
 
     if isinstance(inputs, Tensor):
@@ -7767,7 +6072,7 @@ def vstack(inputs):
             msg = f"For 'vstack', Tensor is required, but got {type(tensor)}"
             raise TypeError(msg)
         if tensor.ndim <= 1:
-            shape =
+            shape = shape_(tensor)
             if isinstance(shape, int):
                 shape = (shape,)
             ndim_diff = 2 - len(shape)
@@ -7777,7 +6082,7 @@ def vstack(inputs):
         trans_tup += (tensor,)
     if not trans_tup:
         raise ValueError("For 'vstack', need at least one tensor to concatenate.")
-    out = P.Concat(0)(trans_tup)
+    out = _get_cache_prim(P.Concat)(0)(trans_tup)
     return out
 
 
@@ -7795,8 +6100,8 @@ def combinations(input, r=2, with_replacement=False):
     r"""
     Returns all r-length subsequences of input Tensor.
 
-    When `with_replacement` is set to
-    `itertools.combinations`, and when `with_replacement` is set to
+    When `with_replacement` is set to ``False``, it works similar to Python's
+    `itertools.combinations`, and when `with_replacement` is set to ``True``,
     it behaves like `itertools.combinations_with_replacement`.
 
     Args:
@@ -7859,7 +6164,7 @@ def combinations(input, r=2, with_replacement=False):
         return None
 
     def _combinations_with_replacement(iterable, r):
-        lst =
+        lst = Tensor_([])
         pool = tuple(iterable)
         n = len(pool)
         if not n and r:
@@ -7973,7 +6278,7 @@ def copysign(x, other):
         """Broadcasts x from current shape to shape"""
         ndim_to = len(shape)
         x = _expand(x, ndim_to)
-        return _broadcast_to(x,
+        return _broadcast_to(x, shape_(x), shape, ndim_to)
 
     if not isinstance(x, Tensor):
         raise TypeError("Tensor is expected, but got " + f"{type(x)}")
@@ -7984,7 +6289,7 @@ def copysign(x, other):
 
     if not isinstance(other, Tensor):
         other = _type_convert(Tensor, other)
-    other = _broadcast_to_shape(other,
+    other = _broadcast_to_shape(other, shape_(x))
 
     if _check_same_type(dtype_(x), mstype.bool_):
         raise TypeError("copysign does not accept dtype bool.")
@@ -8004,9 +6309,9 @@ def copysign(x, other):
         if x.dtype in (mstype.float16, mstype.float32, mstype.float64)
         else x.astype("float32")
     )
-    pos_tensor =
-    less_zero =
-    return
+    pos_tensor = absolute_(x_float)
+    less_zero = tensor_lt(other, 0)
+    return select_(less_zero, neg(pos_tensor), pos_tensor)
 
 
 def hann_window(window_length, periodic=True, *, dtype=None):
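The restored tail of `copysign` takes the absolute value of `x` and flips its sign wherever `other` is negative. A plain-NumPy sketch of the same select pattern:

```python
import numpy as np

# |x| with the sign of `other`: np.where plays the role of select_ above.
x = np.array([0.3, -1.2, 2.0], dtype=np.float32)
other = np.array([-1.0, 1.0, -2.0], dtype=np.float32)
pos = np.abs(x)
print(np.where(other < 0, -pos, pos))  # [-0.3  1.2 -2. ]
```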
@@ -8066,7 +6371,7 @@ def hann_window(window_length, periodic=True, *, dtype=None):
     w = 0.5 - 0.5 * np.cos(2 * math.pi / (window_length - 1) * n)
 
     if dtype is not None:
-        w =
+        w = cast_(ms.tensor(w), dtype)
     return Tensor(w[:-1]) if periodic else Tensor(w)
 
 
@@ -8090,7 +6395,7 @@ def logcumsumexp(input, axis):
     Args:
         input (Tensor) - The input tensor. Must be one of the following types: float16, float32, float64.
         axis (int) - Describing the dimension to compute the cumulative product.
-            Must be in the range [-rank(
+            Must be in the range [-rank(input), rank(input)).
 
     Returns:
         Tensor, has the same dtype and shape as the `input`.
@@ -8117,8 +6422,7 @@ def logcumsumexp(input, axis):
         raise TypeError(
             f"For 'logcumsumexp', 'axis' must be int type, but got {type(axis)}"
         )
-
-    return logcumsumexp_(input, Tensor(axis))
+    return cumulative_logsumexp_(input, Tensor(axis))
 
 
 def logsumexp(input, axis, keep_dims=False):
@@ -8175,34 +6479,40 @@ def amin(input, axis=None, keepdims=False, *, initial=None, where=None):
     reduce a dimension of `input` along specified `axis`. `keepdims` determines whether the dimensions of
     output and input are the same.
 
+    Note:
+        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
+
     Args:
         input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
             :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
-        axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Default: ``None`` , reduce all
-            Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
-        keepdims (bool): If
+        axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` , reduce all
+            dimensions. Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
+        keepdims (bool): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` , don't keep
            these dimensions. Default: ``False`` .
 
     Keyword Args:
         initial (scalar, optional): The minimum value of an output element. Must be present to allow computation
             on empty slice. Default: ``None`` .
-        where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input`
-
-            the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates True by
+        where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input` with the
+            value in `initial`. If ``True`` , do not replace, otherwise replace. For the index of ``True`` in `where`,
+            the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates ``True`` by
+            default.
 
     Returns:
         Tensor, has the same data type as input tensor.
 
-        - If `axis` is None, and `keepdims` is False,
+        - If `axis` is ``None`` , and `keepdims` is ``False`` ,
           the output is a 0-D tensor representing the product of all elements in the input tensor.
-        - If `axis` is int, set as 1, and `keepdims` is False,
+        - If `axis` is int, set as 1, and `keepdims` is ``False`` ,
           the shape of output is :math:`(x_0, x_2, ..., x_R)`.
-        - If `axis` is tuple(int), set as (1, 2), and `keepdims` is False,
+        - If `axis` is tuple(int), set as (1, 2), and `keepdims` is ``False`` ,
+          the shape of output is :math:`(x_0, x_3, ..., x_R)`.
+        - If `axis` is 1-D Tensor, set as [1, 2], and `keepdims` is ``False`` ,
          the shape of output is :math:`(x_0, x_3, ..., x_R)`.
 
     Raises:
         TypeError: If `input` is not a Tensor.
-        TypeError: If `axis` is not one of the following: int, tuple or
+        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
        TypeError: If `keepdims` is not a bool.
        ValueError: If `axis` is out of range.
 
@@ -8279,33 +6589,39 @@ def amax(input, axis=None, keepdims=False, *, initial=None, where=None):
     reduce a dimension of `input` along specified `axis`. `keepdims` determines whether the dimensions of
     output and input are the same.
 
+    Note:
+        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
+
     Args:
         input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
             :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
-        axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Default: ``None`` , reduce all
-            Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
-        keepdims (bool): If
-            dimensions. Default: ``False`` .
+        axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` , reduce all
+            dimensions. Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
+        keepdims (bool): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` , don't keep
+            these dimensions. Default: ``False`` .
 
     Keyword Args:
         initial (scalar, optional): The minimum value of an output element. Must be present to allow computation
             on empty slice. Default: ``None`` .
-        where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input`
-
-            the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates True by
+        where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input` with the
+            value in `initial`. If ``True`` , do not replace, otherwise replace. For the index of ``True`` in `where`,
+            the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates ``True`` by
+            default.
 
     Returns:
         Tensor, has the same data type as input tensor.
 
-        - If `axis` is None, and `keepdims` is False, the output is a 0-D tensor representing the product of
-          elements in the input tensor.
-        - If `axis` is int, set as 1, and `keepdims` is False, the shape of output is :math:`(x_0, x_2, ..., x_R)`.
-        - If `axis` is tuple(int), set as (1, 2), and `keepdims` is False, the shape of output is
+        - If `axis` is ``None`` , and `keepdims` is ``False`` , the output is a 0-D tensor representing the product of
+          all elements in the input tensor.
+        - If `axis` is int, set as 1, and `keepdims` is ``False`` , the shape of output is :math:`(x_0, x_2, ..., x_R)`.
+        - If `axis` is tuple(int), set as (1, 2), and `keepdims` is ``False`` , the shape of output is
+          :math:`(x_0, x_3, ..., x_R)`.
+        - If `axis` is 1-D Tensor, set as [1, 2], and `keepdims` is ``False`` , the shape of output is
           :math:`(x_0, x_3, ..., x_R)`.
 
     Raises:
         TypeError: If `input` is not a Tensor.
-        TypeError: If `axis` is not one of the following: int, tuple or
+        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
         TypeError: If `keepdims` is not a bool.
         ValueError: If `axis` is out of range.
 
@@ -8364,30 +6680,36 @@
 def mean(x, axis=None, keep_dims=False):
     r"""
     Reduces all dimension of a tensor by averaging all elements in the dimension, by default.
-    And reduce a dimension of `
+    And reduce a dimension of `input` along the specified `axis`. `keep_dims`
     determines whether the dimensions of the output and input are the same.
 
+    Note:
+        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
+
     Args:
         x (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
-
-        axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Default: ``None`` ,
-
-
-
+            :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
+        axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` ,
+            reduce all dimensions. Only constant value is allowed. Assume the rank of `input` is r,
+            and the value range is [-r,r).
+        keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
+            If ``False`` , don't keep these dimensions. Default: ``False`` .
 
     Returns:
         Tensor, has the same data type as input tensor.
 
-        - If `axis` is None, and `keep_dims` is False,
-
-        - If `axis` is int, set as 1, and `keep_dims` is False,
-
+        - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
+          the output is a 0-D tensor representing the product of all elements in the input tensor.
+        - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
+          the shape of output is :math:`(x_0, x_2, ..., x_R)`.
         - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
-
+          the shape of output is :math:`(x_0, x_3, ..., x_R)`.
+        - If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
+          the shape of output is :math:`(x_0, x_3, ..., x_R)`.
 
     Raises:
         TypeError: If `x` is not a Tensor.
-        TypeError: If `axis` is not one of the following: int, tuple or
+        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
         TypeError: If `keep_dims` is not a bool.
         ValueError: If `axis` is out of range.
 
@@ -8417,26 +6739,26 @@ def mean(x, axis=None, keep_dims=False):
         >>> output = ops.mean(x, 0, True)
         >>> print(output)
         [[[4. 4. 4. 4. 4. 4.]
-
-
+          [5. 5. 5. 5. 5. 5.]
+          [6. 6. 6. 6. 6. 6.]]]
         >>> # case 3: Reduces a dimension along the axis 1
         >>> output = ops.mean(x, 1, True)
         >>> print(output)
         [[[2. 2. 2. 2. 2. 2.]]
-
-
+         [[5. 5. 5. 5. 5. 5.]]
+         [[8. 8. 8. 8. 8. 8.]]]
         >>> # case 4: Reduces a dimension along the axis 2
         >>> output = ops.mean(x, 2, True)
         >>> print(output)
         [[[ 2.]
-
-
-
-
-
-
-
-
+          [ 2.]
+          [ 2.]]
+         [[ 4.]
+          [ 5.]
+          [ 6.]]
+         [[ 6.]
+          [ 8.]
+          [10.]]]
     """
     if axis is None:
         axis = ()
@@ -8446,30 +6768,35 @@ def mean(x, axis=None, keep_dims=False):
 def prod(input, axis=None, keep_dims=False):
     r"""
     Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can
-    reduce a dimension of `input` along the axis
-    by controlling `keep_dims`.
+    reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
+    same by controlling `keep_dims`.
+
+    Note:
+        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
 
     Args:
         input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
-
-        axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Default: ``None`` , reduce all
-
-        keep_dims (bool): If
-
+            :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
+        axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` , reduce all
+            dimensions. Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
+        keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
+            If ``False`` , don't keep these dimensions. Default: ``False`` .
 
     Returns:
         Tensor, has the same data type as input tensor.
 
-        - If `axis` is None, and `keep_dims` is False,
+        - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
           the output is a 0-D tensor representing the product of all elements in the input tensor.
-        - If `axis` is int, set as 1, and `keep_dims` is False,
+        - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
           the shape of output is :math:`(input_0, input_2, ..., input_R)`.
-        - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is False,
+        - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
+          the shape of output is :math:`(input_0, input_3, ..., input_R)`.
+        - If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
           the shape of output is :math:`(input_0, input_3, ..., input_R)`.
 
     Raises:
         TypeError: If `input` is not a Tensor.
-        TypeError: If `axis` is not one of the following: int, tuple or
+        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
         TypeError: If `keep_dims` is not a bool.
         ValueError: If `axis` is out of range.
 
@@ -9251,7 +7578,7 @@ def _check_logits_shape(logits):
         raise ValueError("For gumbel_softmax, the 0-D input is not supported.")
 
 
-def gumbel_softmax(logits, tau=1, hard=False, dim=-1):
+def gumbel_softmax(logits, tau=1.0, hard=False, dim=-1):
     r"""
     Returns the samples from the Gumbel-Softmax distribution and optionally discretizes. If `hard = True`, the returned
     samples will be one-hot, otherwise it will be probability distributions that sum to 1 across `dim`.
@@ -9269,9 +7596,9 @@ def gumbel_softmax(logits, tau=1, hard=False, dim=-1):
     Raises:
         TypeError: If `logits` is not a Tensor.
         TypeError: If dtype of `logits` is not one of: float16, float32.
-        TypeError: If `tau` is not
+        TypeError: If `tau` is not a float.
         TypeError: If `hard` is not a bool.
-        TypeError: If `dim` is not
+        TypeError: If `dim` is not an int.
         ValueError: If If `tau` is not positive.
 
     Supported Platforms:
@@ -9300,13 +7627,11 @@ def gumbel_softmax(logits, tau=1, hard=False, dim=-1):
     _check_int_range(dim, -len(logits.shape),
                      len(logits.shape), 'dim', "gumbel_softmax")
 
-    const_op = _get_cache_prim(P.ScalarToTensor)()
-
     sample_shape = shape_(logits)
-    uniform = C.uniform(sample_shape,
-        0.0, mstype.float32),
+    uniform = C.uniform(sample_shape, scalar_to_tensor_(
+        0.0, mstype.float32), scalar_to_tensor_(1.0, mstype.float32))
     uniform = cast_(uniform, logits_dtype)
-    gumbel =
+    gumbel = neg(log_(neg(log_(uniform))))
     gumbel = (logits + gumbel) / tau
     y_soft = _get_cache_prim(P.Softmax)(dim)(gumbel)
     if hard:
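The restored sampling lines are the standard Gumbel reparameterization: draw `u ~ Uniform(0, 1)`, form `g = -log(-log(u))`, then softmax `(logits + g) / tau`. A plain-NumPy sketch of one sample (illustrative, not the MindSpore graph code):

```python
import numpy as np

# One Gumbel-Softmax sample: uniform noise -> Gumbel noise -> softmax.
rng = np.random.default_rng(0)
logits = np.array([1.0, 2.0, 0.5])
tau = 1.0
u = rng.uniform(size=logits.shape)
g = -np.log(-np.log(u))          # Gumbel(0, 1) noise
z = (logits + g) / tau
y = np.exp(z) / np.exp(z).sum()  # softmax
print(y.round(3), y.sum())       # a probability vector summing to 1
```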
@@ -9387,7 +7712,7 @@ def kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None):
         beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)
     ) / np.i0(float(beta))
     if dtype is not None:
-        w =
+        w = cast_(ms.tensor(w), dtype)
     out = Tensor(w[:-1]) if periodic else Tensor(w)
     return out
 
@@ -9540,18 +7865,6 @@ def _check_value(items, max_size, msg_prefix, shape1, shape2):
 def _check_matmul_shapes(shape1, shape2, prim_name=None):
     """Checks shape1 and shape2 are valid to perform matmul, and returns output shape after broadcasting."""
     msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
-
-    def _check(shape1, shape2):
-        ndim1, ndim2 = len(shape1), len(shape2)
-        if ndim1 < 1 or ndim2 < 1:
-            raise ValueError(f"{msg_prefix} dimension of input operands must be at least 1, but got "
-                             f"the length of shape1: {ndim1}, the length of shape2: {ndim2}.")
-        if ndim2 >= 2 and shape1[-1] != shape2[-2]:
-            raise ValueError(f"{msg_prefix} shape1[-1] must be equal to shape2[-2] when the length of shape2 "
-                             f"is greater than or equal to 2, but got shape1[-1]: {shape1[-1]}, "
-                             f"shape2[-2]: {shape2[-2]}.")
-
-    _check(shape1, shape2)
     shape_out = list()
     r_shape1 = shape1[:-2]
     r_shape2 = shape2[:-2]
@@ -9570,18 +7883,6 @@ def _check_need_broadcast(shape1, shape2):
     return shape1[:-2] != shape2[:-2]
 
 
-@_primexpr
-def _check_input_1d(input_shape, param_name, func_name):
-    if len(input_shape) != 1:
-        raise ValueError(f"{func_name} {param_name} should be 1d, but got shape {input_shape}")
-
-
-@_primexpr
-def _check_input_2d(input_shape, param_name, func_name):
-    if len(input_shape) != 2:
-        raise ValueError(f"{func_name} {param_name} should be 2d, but got shape {input_shape}")
-
-
 @_primexpr
 def _expand(x, ndim):
     """Expand x to ndim from axis, which can be 0 or -1."""
@@ -9592,8 +7893,7 @@ def _expand(x, ndim):
 
 def _broadcast_to(x, shape_cur, shape_to, ndim_to):
     """Broadcasts x from shape_cur to shape_to."""
-
-    size = tile_size_op(shape_cur, shape_to, ndim_to)
+    size = tile_size_(shape_cur, shape_to, ndim_to)
     F.stop_gradient(size)
     return tile_(x, size)
 
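`_broadcast_to` computes per-axis repeat counts and then tiles. A minimal sketch of the tile-size idea behind `tile_size_` (the helper name and exact semantics here are illustrative assumptions, not the primitive's implementation):

```python
# Align the current shape to the target rank with leading 1s, then each axis
# is repeated target_dim // current_dim times (1 when the dims already match).
def tile_size(shape_cur, shape_to, ndim_to):
    cur = (1,) * (ndim_to - len(shape_cur)) + tuple(shape_cur)
    return tuple(t // c for c, t in zip(cur, shape_to))

print(tile_size((3, 1), (2, 3, 4), 3))  # (2, 1, 4)
```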
@@ -9611,11 +7911,11 @@ def matmul(input, other):
 
     Args:
         input (Tensor): Input tensor, scalar not allowed.
-
-
+            The last dimension of `input` must be the same size as the second last dimension of `other`.
+            And the shape of input and other could be broadcast.
         other (Tensor): Input tensor, scalar not allowed.
-
-
+            The last dimension of `input` must be the same size as the second last dimension of `other`.
+            And the shape of input and other could be broadcast.
 
     Returns:
         Tensor or scalar, the matrix product of the inputs. This is a scalar only
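The restored shape rule is the usual batched-matmul contract: the trailing `(..., m, k) @ (..., k, n)` pair must agree on `k`, and the leading batch dimensions broadcast. A quick NumPy illustration of the same rule:

```python
import numpy as np

# input[..., m, k] @ other[..., k, n]; leading batch dims broadcast.
a = np.ones((2, 1, 3, 4))
b = np.ones((5, 4, 6))
print((a @ b).shape)  # (2, 5, 3, 6)
```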
@@ -9978,9 +8278,6 @@ def baddbmm(input, batch1, batch2, beta=1, alpha=1):
     bmmop = _get_cache_prim(P.BatchMatMul)(False, False)
     if not (isinstance(input, Tensor) and isinstance(batch1, Tensor) and isinstance(batch2, Tensor)):
         raise TypeError("For Baddbmm, inputs must be all tensors.")
-    if len(batch1.shape) != 3 or len(batch2.shape) != 3:
-        raise ValueError("For batch1 and batch2 must be 3-D tensors each containing the same number of matrices, "
-                         f"but got length of batch1:'{len(batch1.shape)}', length of batch2:'{len(batch2.shape)}'.")
     input_dtype = dtype_(input)
     if not (input_dtype == dtype_(batch1) and input_dtype == dtype_(batch2)):
         raise TypeError("For Baddbmm, the inputs should be the same dtype.")
@@ -10172,11 +8469,9 @@ def xdivy(x, y):
     Divides the first input tensor by the second input tensor element-wise. Returns zero when `x` is zero.
 
     Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
-    The inputs must be two tensors or one tensor and one scalar.
     When the inputs are two tensors,
     dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
-
-    the scalar could only be a constant.
+    If one of the inputs is scalar, the scalar could only be a constant.
 
     .. note::
         When `x` and `y` are both of datatype complex, they should be both complex64 or complex128 at the same time.
@@ -10192,7 +8487,7 @@ def xdivy(x, y):
 
     Raises:
         TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
-        TypeError: If dtype of `x` and
+        TypeError: If dtype of `x` and `y` is not in [float16, float32, float64, complex64, complex128, bool].
         ValueError: If `x` could not be broadcast to a tensor with shape of `y`.
         RuntimeError: If the data type of `x`, `y` conversion of Parameter is given
                       but data type conversion of Parameter is not supported.
@@ -10253,37 +8548,6 @@ def log10(input):
     return output
 
 
-def log1p(input):
-    r"""
-    Returns the natural logarithm of one plus the input tensor element-wise.
-
-    .. math::
-        out_i = {log_e}(input_i + 1)
-
-    Args:
-        input (Tensor): The input tensor. The value must be greater than -1.
-
-    Returns:
-        Tensor, has the same shape as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
-        >>> output = ops.log1p(x)
-        >>> print(output)
-        [0.6931472 1.0986123 1.609438 ]
-    """
-    return log1p_(input)
-
-
 def kron(input, other):
     """
     Computes the Kronecker product :math:`input ⊗ other`, denoted by ⊗, of `input` and `other`.
@@ -10380,31 +8644,37 @@ def _check_is_tensor(param_name, input, cls_name):
 def all(input, axis=None, keep_dims=False):
     r"""
     Reduces a dimension of `input` by the "logical AND" of all elements in the dimension, by default. And also can
-    reduce a dimension of `input` along the axis
-    by controlling `keep_dims`.
+    reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
+    same by controlling `keep_dims`.
+
+    Note:
+        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
 
     Args:
         input (Tensor): Input Tensor, has the shape :math:`(N, *)` where :math:`*` means,
             any number of additional dimensions.
-        axis (Union[int, tuple(int), list(int)], optional): The dimensions to reduce.
-            r, axis must be in the range [-rank(input), rank(input)).
-
-
+        axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
+            Suppose the rank of `input` is r, `axis` must be in the range [-rank(input), rank(input)).
+            Default: ``None`` , all dimensions are reduced.
+        keep_dims (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
+            If ``False`` , don't keep these dimensions. Default : ``False`` .
 
     Returns:
         Tensor, the dtype is bool.
 
-        - If `axis` is None, and `keep_dims` is ``False`` ,
+        - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
           the output is a 0-D Tensor representing the "logical AND" of all elements in the input Tensor.
         - If `axis` is int, such as 2, and `keep_dims` is ``False`` ,
           the shape of output is :math:`(input_1, input_3, ..., input_R)`.
-        - If `axis` is tuple(int), such as (2, 3), and `keep_dims` is False,
+        - If `axis` is tuple(int), such as (2, 3), and `keep_dims` is ``False`` ,
+          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
+        - If `axis` is 1-D Tensor, such as [2, 3], and `keep_dims` is ``False`` ,
          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
 
     Raises:
         TypeError: If `keep_dims` is not a bool.
         TypeError: If `input` is not a Tensor.
-        TypeError: If `axis` is not one of the following: int, tuple or
+        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -10439,31 +8709,37 @@ def all(input, axis=None, keep_dims=False):
 def any(input, axis=None, keep_dims=False):
     r"""
     Reduces a dimension of `input` by the "logical OR" of all elements in the dimension, by default. And also can
-    reduce a dimension of `input` along the axis
-    by controlling `keep_dims`.
+    reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
+    same by controlling `keep_dims`.
+
+    Note:
+        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
 
     Args:
         input (Tensor): Input Tensor, has the shape :math:`(N, *)` where :math:`*` means,
             any number of additional dimensions.
-        axis (Union[int, tuple(int), list(int)], optional): The dimensions to reduce.
-            axis must be in the range [-rank(input), rank(input)).
-
-
+        axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
+            Suppose the rank of `input` is r, `axis` must be in the range [-rank(input), rank(input)).
+            Default: ``None`` , all dimensions are reduced.
+        keep_dims (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
+            If ``False`` , don't keep these dimensions. Default : ``False`` .
 
     Returns:
         Tensor, the dtype is bool.
 
-        - If `axis` is None, and `keep_dims` is ``False`` ,
+        - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
           the output is a 0-D Tensor representing the "logical OR" of all elements in the input Tensor.
         - If `axis` is int, such as 2, and `keep_dims` is ``False`` ,
           the shape of output is :math:`(input_1, input_3, ..., input_R)`.
         - If `axis` is tuple(int), such as (2, 3), and `keep_dims` is ``False`` ,
           the shape of output is :math:`(input_1, input_4, ..., input_R)`.
+        - If `axis` is 1-D Tensor, such as [2, 3], and `keep_dims` is ``False`` ,
+          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
 
     Raises:
         TypeError: If `keep_dims` is not a bool.
         TypeError: If `input` is not a Tensor.
-        TypeError: If `axis` is not one of the following: int, tuple or
+        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -10487,11 +8763,8 @@ def any(input, axis=None, keep_dims=False):
         >>> print(output)
         [ True  True]
     """
-    _check_is_tensor("input", input, "any")
     if axis is None:
         axis = ()
-    if input.dtype != mstype.bool_:
-        input = cast_(input, mstype.bool_)
     return _get_cache_prim(P.ReduceAny)(keep_dims)(input, axis)
 
 
@@ -10598,21 +8871,21 @@ def iou(anchor_boxes, gt_boxes, mode='iou'):
     and width are scaled by 0.2 internally.
 
     Args:
-        anchor_boxes (Tensor): Anchor boxes, tensor of shape :math:`(N, 4)` .
-            and the value
-            Data type must be either float16,
-        gt_boxes (Tensor): Ground truth boxes, tensor of shape :math:`(M, 4)` .
-            truth boxes, and the value
-            Data type must be either float16, float32 or float64.
+        anchor_boxes (Tensor): Anchor boxes, tensor of shape :math:`(N, 4)` . :math:`N` indicates the number of
+            anchor boxes, and the value :math:`4` refers to four boundary coordinates of the predicted area
+            "x0", "y0", "x1", and "y1". Data type must be either float16, float32 or float64.
+        gt_boxes (Tensor): Ground truth boxes, tensor of shape :math:`(M, 4)` . :math:`M` indicates the number
+            of ground truth boxes, and the value :math:`4` refers to four boundary coordinates of the truth
+            area "x0", "y0", "x1", and "y1". Data type must be either float16, float32 or float64.
         mode (string): The mode is used to specify the calculation method,
             now supporting 'iou' (intersection over union) or 'iof' (intersection over foreground) mode.
            Default: ``'iou'`` .
 
     Returns:
-        Tensor, the
+        Tensor, the IOU/IOF values, tensor of shape :math:`(M, N)` , with the same data type as `anchor_boxes`.
 
     Raises:
-        KeyError: When `mode` is not 'iou' or 'iof'
+        KeyError: When `mode` is not ``'iou'`` or ``'iof'``.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
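For readers unfamiliar with the box layout documented above, a plain-NumPy sketch of one IoU value from two `(x0, y0, x1, y1)` corner boxes (illustrative only; the primitive computes the full `(M, N)` matrix):

```python
import numpy as np

a = np.array([0.0, 0.0, 2.0, 2.0])  # anchor box
b = np.array([1.0, 1.0, 3.0, 3.0])  # ground-truth box
ix = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))   # intersection width
iy = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))   # intersection height
inter = ix * iy
union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
print(inter / union)  # 1/7 ~= 0.142857
```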
@@ -10658,8 +8931,8 @@ def _check_dim_in_range(dim, ndim):
 
 
 def dotrapezoid(y, dx, dim):
-    y_left =
-    y_right =
+    y_left = _select(y, dim, 0)
+    y_right = _select(y, dim, -1)
     y_sum = y.sum(dim)
     return (y_sum - (y_left + y_right) * 0.5) * dx
 
@@ -10669,10 +8942,10 @@ def dotrapezoid_tensor(y, dx, dim):
     y_start_dim_left = tuple(y_start_dim_left)
     y_start_dim_right = [0 for _ in range(y.ndim - dim - 1)]
     y_start_dim_right = tuple(y_start_dim_right)
-    y_slice_size = _tuple_setitem(
-    y_slice_left =
-    y_slice_right =
-    return (
+    y_slice_size = _tuple_setitem(shape_(y), dim, shape_(y)[dim] - 1)
+    y_slice_left = slice_(y, y_start_dim_left + (0,) + y_start_dim_right, y_slice_size)
+    y_slice_right = slice_(y, y_start_dim_left + (1,) + y_start_dim_right, y_slice_size)
+    return (tensor_add(y_slice_left, y_slice_right) * dx).sum(dim) / 2.
 
 
 def add_padding_to_shape(curr_shape, target_n_dim):
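The slice-and-average pattern restored above is the trapezoidal rule written with tensor ops: pair adjacent samples, average each pair, weight by the interval width, and sum. A NumPy check against `np.trapz`:

```python
import numpy as np

y = np.array([1.0, 3.0, 2.0, 5.0])
x = np.array([0.0, 1.0, 1.5, 3.0])
# Average of each adjacent pair times its interval width, then summed.
pairs = (y[:-1] + y[1:]) * np.diff(x) / 2.0
print(pairs.sum(), np.trapz(y, x))  # both 8.5
```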
@@ -10705,8 +8978,8 @@ def trapezoid_tensor(y, x, dim):
     x_start_dim_right = [0 for _ in range(x.ndim - dim - 1)]
     x_start_dim_right = tuple(x_start_dim_right)
     x_slice_size = _tuple_setitem(x.shape, dim, x.shape[dim] - 1)
-    x_left =
-    x_right =
+    x_left = slice_(x, x_start_dim_left + (0,) + x_start_dim_right, x_slice_size)
+    x_right = slice_(x, x_start_dim_left + (1,) + x_start_dim_right, x_slice_size)
     dx = x_right - x_left
     new_sizes = add_padding_to_shape(dx.shape, y.ndim)
     dx = dx.view(tuple(new_sizes))
@@ -10724,8 +8997,8 @@ def trapezoid_tensor(y, x, dim):
     x_start_dim_right = [0 for _ in range(x_viewed.ndim - dim - 1)]
     x_start_dim_right = tuple(x_start_dim_right)
     x_slice_size = _tuple_setitem(x_viewed.shape, dim, x_viewed.shape[dim] - 1)
-    x_left =
-    x_right =
+    x_left = slice_(x_viewed, x_start_dim_left + (0,) + x_start_dim_right, x_slice_size)
+    x_right = slice_(x_viewed, x_start_dim_left + (1,) + x_start_dim_right, x_slice_size)
     dx = x_right - x_left
     return dotrapezoid_tensor(y, dx, dim)
 
@@ -10744,12 +9017,12 @@ def get(ts, depth, dim, index, r):
         return get(item, depth + 1, dim, index, r)
 
 
-def
+def _select(feat, dim, index):
     select_shape = feat.shape
     select_shape = list(select_shape)
     select_shape[dim] = 1
     new_shape = feat.shape[:dim] + feat.shape[dim + 1:]
-    indexes =
+    indexes = ones_(tuple(select_shape), mstype.int32) * (index)
     return feat.gather_elements(dim, indexes).reshape(new_shape)
 
 
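`_select` picks one index along an axis and drops that axis, which the gather-and-reshape pair above implements. A NumPy sketch of the same behavior:

```python
import numpy as np

# Pick index `idx` along axis `dim` and drop that axis, as gather_elements
# plus reshape do in the restored _select body.
feat = np.arange(12).reshape(3, 4)
dim, idx = 1, -1
print(np.take(feat, idx, axis=dim))  # [ 3  7 11]
```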
@@ -10808,14 +9081,14 @@ def trapz(y, x=None, *, dx=1.0, dim=-1):
     if not isinstance(dim, int):
         raise TypeError(f"For `trapz`, the input `dim` must be int, but get {type(dim)}.")
     if not _check_is_float(y.dtype):
-        y =
+        y = cast_(y, mstype.float32)
     _check_dim_in_range(dim, y.ndim)
     dim = dim + y.ndim if dim < 0 else dim
     if x is None:
         return trapezoid(y, dx, dim)
     if not isinstance(x, (Tensor, Tensor_)):
         raise TypeError(f"For `trapz`, the input `x` must be Tensor, but get {type(x)}.")
-    x =
+    x = cast_(x, mstype.float32)
     return trapezoid_tensor(y, x, dim)
 
 
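A quick usage sketch of the public entry point these helpers serve; the expected values follow directly from the trapezoidal rule by hand.

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    y = Tensor(np.array([1., 2., 3.]), mindspore.float32)
    print(ops.trapz(y))     # uniform dx=1.0: (1+2)/2 + (2+3)/2 = 4.0
    x = Tensor(np.array([0., 1., 3.]), mindspore.float32)
    print(ops.trapz(y, x))  # non-uniform spacing: 1.5 + 5.0 = 6.5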
@@ -10978,42 +9251,6 @@ def cholesky_solve(input, input2, upper=False):
     return _get_cache_prim(P.CholeskySolve)(upper)(input, input2)
 
 
-def conj(input):
-    r"""
-    Returns a tensor of complex numbers that are the complex conjugate of each element in input.
-    The complex numbers in input must be of the form a + bj, where a is the real part and b is the imaginary part.
-
-    The complex conjugate returned by this operation is of the form a - bj.
-
-    If `input` is real, it is returned unchanged.
-
-    Args:
-        input (Tensor): The input tensor to compute to. Must have numeric type.
-
-    Returns:
-        Tensor, has the same dtype as the `input`.
-
-    Raises:
-        TypeError: If the dtype of `input` is not a numeric type.
-        TypeError: If the `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.asarray(np.complex(1.3+0.4j)), mindspore.complex64)
-        >>> output = ops.conj(x)
-        >>> print(output)
-        (1.3-0.4j)
-    """
-    if not isinstance(input, (Tensor, Tensor_)):
-        raise TypeError("For conj op, input must be Tensor.")
-    return conj_(input)
-
-
 def cross(input, other, dim=None):
     r"""
     Computes the cross product of `input` and `other` in dimension `dim`.
@@ -11183,91 +9420,6 @@ def einsum(equation, *operands):
     return _get_cache_prim(P.Einsum)(equation)(operands)
 
 
-def erfinv(input):
-    r"""
-    Returns the result of the inverse error function with `input`, which is defined in the
-    range `(-1, 1)` as:
-
-    .. math::
-
-        erfinv(erf(x)) = x
-
-    where :math:`x` is the `input`.
-
-    Args:
-        input (Tensor): The input tensor. Supported dtypes:
-
-            - Ascend: float16, float32.
-            - GPU/CPU: float16, float32, float64.
-
-    Returns:
-        Tensor, has the same shape and dtype as `input`.
-
-    Raises:
-        TypeError: If dtype of `input` is not float16, float32 or float64.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([0, 0.5, -0.9]), mindspore.float32)
-        >>> output = ops.erfinv(x)
-        >>> print(output)
-        [ 0. 0.47695306 -1.1630805 ]
-    """
-    return erfinv_(input)
-
-
-def less_equal(input, other):
-    r"""
-    Computes the boolean value of :math:`input <= other` element-wise.
-
-    .. math::
-        out_{i} =\begin{cases}
-            & \text{True, if } input_{i}<=other_{i} \\
-            & \text{False, if } input_{i}>other_{i}
-            \end{cases}
-
-    .. note::
-        - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
-          consistent.
-        - The inputs must be two tensors or one tensor and one scalar.
-        - When the inputs are one tensor and one scalar, the scalar could only be a constant.
-
-    Args:
-        input (Union[Tensor, Number, bool]): The first input is a Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
-        other (Union[Tensor, Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a Number or bool value, or a Tensor whose data type is number or bool\_.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
-
-    Raises:
-        TypeError: If neither `input` nor `other` is a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
-        >>> other = Tensor(np.array([1, 1, 4]), mindspore.int32)
-        >>> output = ops.less_equal(x, other)
-        >>> print(output)
-        [ True False True]
-    """
-    return tensor_le(input, other)
-
-
 def cumprod(input, dim, dtype=None):
     r"""
     Computes the cumulative product of the `input` tensor along dimension `dim`.
@@ -11309,70 +9461,6 @@ def cumprod(input, dim, dtype=None):
     return output
 
 
-def greater(input, other):
-    r"""
-    Computes the boolean value of :math:`input > other` element-wise.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
-        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
-        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
-        >>> output = ops.greater(x, y)
-        >>> print(output)
-        [False True False]
-    """
-    return tensor_gt(input, other)
-
-
-def greater_equal(input, other):
-    r"""
-    Computes the boolean value of :math:`input \geq other` element-wise.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
-        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
-        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
-        >>> output = ops.greater_equal(x, y)
-        >>> print(output)
-        [True True False]
-    """
-    return tensor_ge(input, other)
-
-
 def igamma(input, other):
     r"""
     Calculates lower regularized incomplete Gamma function.
@@ -11420,8 +9508,7 @@ def igamma(input, other):
         >>> print(output)
         [0.593994 0.35276785 0.21486944 0.13337152]
     """
-
-    return igamma_op(input, other)
+    return igamma_(input, other)
 
 
 def igammac(input, other):
@@ -11471,8 +9558,7 @@ def igammac(input, other):
         >>> print (output)
         [0.40600586 0.6472318 0.7851304 0.8666283]
     """
-
-    return igammac_op(input, other)
+    return igammac_(input, other)
 
 
 def lgamma(input):
@@ -11699,7 +9785,7 @@ def logical_xor(input, other):
 
     .. math::
 
-        out_{i} =
+        out_{i} = input_{i} \oplus other_{i}
 
     Args:
         input (Tensor): The first input is a tensor whose data type can be implicitly converted to bool.
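With the formula restored, the operation is plain element-wise exclusive or; a one-line sanity check (results worked out by hand):

    import numpy as np
    from mindspore import Tensor, ops

    a = Tensor(np.array([True, False, True]))
    b = Tensor(np.array([True, True, False]))
    print(ops.logical_xor(a, b))  # [False  True  True]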
@@ -11842,7 +9928,7 @@ def nansum(input, axis=None, keepdims=False, *, dtype=None):
     if input.dtype == mstype.bool_:
         input = input.astype(mstype.int64)
     is_nan = isnan_(input)
-    input = ops.masked_fill(input, is_nan, 0)
+    input = ops.masked_fill(input, is_nan, ops.cast(0, input.dtype))
     input = _get_cache_prim(P.ReduceSum)(keepdims)(input, axis)
     if dtype is not None and input.dtype != dtype:
         input = input.astype(dtype)
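The change casts the fill value to the input's own dtype before `masked_fill`, presumably to avoid a dtype mismatch when `input` is not float32. Observable behavior is unchanged:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([1.0, np.nan, 2.0]), mindspore.float32)
    print(ops.nansum(x))  # NaNs are treated as zero: 3.0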
@@ -11936,7 +10022,7 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1):
     diag_plane = (dsize, dsize)
     output_shape_trans = batch_shape + diag_plane
     output = zeros(output_shape_trans, input.dtype)
-    k =
+    k = cast_(offset, mstype.int32)
     output = matrix_set_diag_op(output, input, k)
     dim = 0
     perm = ()
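The `offset` argument is now explicitly cast to int32 before being handed to `matrix_set_diag_op`; at the Python API the behavior looks the same:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([1., 2., 3.]), mindspore.float32)
    print(ops.diag_embed(x))                  # 3x3 matrix with [1, 2, 3] on the main diagonal
    print(ops.diag_embed(x, offset=1).shape)  # (4, 4): diagonal shifted above the main one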
@@ -11955,25 +10041,28 @@ def sum(input, dim=None, keepdim=False, *, dtype=None):
     """
     Calculate sum of Tensor elements over a given dim.
 
+    Note:
+        The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
+
     Args:
         input (Tensor): The input tensor.
-        dim (Union[None, int, tuple(int), list(int)]): Dimensions along which a sum is performed.
-            If None, sum all the elements of the input tensor.
+        dim (Union[None, int, tuple(int), list(int), Tensor]): Dimensions along which a sum is performed.
+            If ``None`` , sum all the elements of the input tensor.
             If the `dim` is a tuple or list of ints, a sum is performed on all the dimensions specified in the tuple.
-            Must be in the range :math:`[-input.ndim, input.ndim)` . Default: ``None
+            Must be in the range :math:`[-input.ndim, input.ndim)` . Default: ``None`` .
         keepdim (bool): Whether the output tensor has dim retained or not.
-            If True, keep these reduced dimensions and the length is 1.
-            If False, don't keep these dimensions. Default: ``False
+            If ``True`` , keep these reduced dimensions and the length is 1.
+            If ``False`` , don't keep these dimensions. Default: ``False`` .
 
     Keyword Args:
-        dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None
+        dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None`` .
 
     Returns:
-        A Tensor, sum of elements over a given dim in `input`.
+        A Tensor, sum of elements over a given `dim` in `input`.
 
     Raises:
         TypeError: If `input` is not a Tensor.
-        TypeError: If `dim` is not an int, tulpe(int), list(int) or None.
+        TypeError: If `dim` is not an int, tulpe(int), list(int), Tensor or None.
         ValueError: If `dim` is not in the range :math:`[-input.ndim, input.ndim)` .
         TypeError: If `keepdim` is not a bool.
 
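The docstring now also admits a Tensor-typed `dim`, kept only for backward compatibility per the new Note. Typical usage with the documented types:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.ones((3, 4)), mindspore.float32)
    print(ops.sum(x))                       # 12.0
    print(ops.sum(x, dim=1, keepdim=True))  # shape (3, 1), each entry 4.0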
@@ -12228,6 +10317,8 @@ def _canonicalize_fft_shape_and_dim(input, shape, dim):
 def as_strided(x, shape=None, strides=None):
     n = np.dtype(mstype.dtype_to_nptype(x.dtype)).itemsize
     strides = tuple(np.array(strides) * n)
+    if x.dtype == mstype.bfloat16:
+        return Tensor(np.lib.stride_tricks.as_strided(x.float().asnumpy(), shape, strides, False, True), dtype=x.dtype)
     return Tensor(np.lib.stride_tricks.as_strided(x.asnumpy(), shape, strides, False, True), dtype=x.dtype)
 
 
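The new branch round-trips `bfloat16` tensors through `float()` first, presumably because NumPy has no native bfloat16 dtype for `asnumpy()` to produce. For reference, the underlying NumPy call takes strides in bytes, which is why the code multiplies by the dtype's itemsize:

    import numpy as np

    a = np.arange(6, dtype=np.float32)  # itemsize is 4 bytes
    view = np.lib.stride_tricks.as_strided(a, shape=(2, 3), strides=(12, 4))
    print(view)
    # [[0. 1. 2.]
    #  [3. 4. 5.]]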
@@ -12249,13 +10340,13 @@ def _resize_input(input, input_dim, ret_dim, ret_shape, input_sizes):
         if input_sizes[value] > ret_shape[i]:
             start_index = [0] * input_dim
             input_sizes[value] = ret_shape[i]
-            input =
+            input = slice_(input, start_index, input_sizes)
 
     if must_copy:
         paddings = np.reshape(paddings, (input_dim, 2)).tolist()
         paddings.reverse()
         paddings = (*paddings,)
-        input = P.Pad(paddings)(input)
+        input = _get_cache_prim(P.Pad)(paddings)(input)
 
     return input
 
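The second change swaps a freshly constructed `P.Pad` for a cached instance. A hedged sketch of the pattern; `_get_cache_prim` is a MindSpore-internal helper and its import path may change between releases:

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P
    from mindspore.ops._primitive_cache import _get_cache_prim  # internal API

    # Identical pad configurations reuse one cached primitive instead of
    # rebuilding a new Pad object on every invocation.
    pad = _get_cache_prim(P.Pad)(((1, 1), (0, 0)))
    print(pad(Tensor(np.ones((2, 2), np.float32))).shape)  # (4, 2)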
@@ -12761,7 +10852,7 @@ def count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):
 
     Args:
         x (Tensor): Input data is used to count non-zero numbers. With shape
-            :math:`(
+            :math:`(*)` where :math:`*` means, any number of additional dimensions.
         axis (Union[int, tuple(int), list(int)], optional): The dimensions to reduce.
             Default: ``()`` , reduce all dimensions.
         keep_dims (bool, optional): Whether to maintain dimensions specified by `axis`.
@@ -12784,6 +10875,7 @@ def count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):
     Examples:
         >>> from mindspore import Tensor, ops
         >>> import numpy as np
+        >>> import mindspore
         >>> # case 1: each value specified.
         >>> x = Tensor(np.array([[0, 1, 0], [1, 1, 0]]).astype(np.float32))
         >>> nonzero_num = ops.count_nonzero(x=x, axis=[0, 1], keep_dims=True, dtype=mindspore.int32)
@@ -12819,7 +10911,7 @@ def count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):
     reduce_sum = _get_cache_prim(P.ReduceSum)(keep_dims)
 
     tensor_0 = ops.zeros(x.shape, x.dtype)
-    nonzero_bool =
+    nonzero_bool = not_equal(x, tensor_0)
     # ReduceSum only support float16 or float32 tensor.
     nonzero_val = cast_(nonzero_bool, mstype.float32)
     nonzero_num = cast_(reduce_sum(nonzero_val, axis), dtype)
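With `nonzero_bool` restored as a `not_equal` comparison against a zero tensor, the public behavior is:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([[0, 1, 0], [1, 1, 0]]).astype(np.float32))
    print(ops.count_nonzero(x))                                 # 3
    print(ops.count_nonzero(x, axis=0, dtype=mindspore.int64))  # [1 2 0]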
@@ -13046,7 +11138,8 @@ def vecdot(x, y, *, axis=-1):
     Calculates the dot product of two batches of vectors across the specified dimension.
 
     The formula of calculation is as follows.
-    :math:`\bar{x_{i}}` represents the conjugate for complex vectors,
+    :math:`\bar{x_{i}}` represents the conjugate for complex vectors,
+    and :math:`\bar{x_{i}}` is the raw value for real vectors.
 
     .. math::
 
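For real inputs the conjugate is a no-op, so `vecdot` reduces to an ordinary batched dot product:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1., 2.], [3., 4.]]), mindspore.float32)
    y = Tensor(np.array([[1., 1.], [1., 1.]]), mindspore.float32)
    print(ops.vecdot(x, y, axis=-1))  # [3. 7.]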
@@ -13356,7 +11449,8 @@ def _get_output_shape(batch_size, x1_ret, x2_ret):
 
 def batch_dot(x1, x2, axes=None):
     """
-    Computation of batch dot product between samples in two tensors containing batch dims.
+    Computation of batch dot product between samples in two tensors containing batch dims, i.e. `x1` or `x2` 's
+    first dimension is batch size.
 
     .. math::
         output = x1[batch, :] * x2[batch, :]
@@ -13392,6 +11486,7 @@ def batch_dot(x1, x2, axes=None):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
+        >>> import mindspore
         >>> from mindspore import Tensor, ops
         >>> import numpy as np
         >>> x1 = Tensor(np.ones(shape=[2, 2, 3]), mindspore.float32)
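Continuing the docstring's example setup, a complete runnable version (the output values follow from dotting vectors of ones):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x1 = Tensor(np.ones(shape=[2, 2, 3]), mindspore.float32)
    x2 = Tensor(np.ones(shape=[2, 3, 2]), mindspore.float32)
    out = ops.batch_dot(x1, x2, axes=(-1, -2))
    print(out.shape)  # (2, 2, 2); every entry is 3.0, the dot of three ones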
@@ -13489,7 +11584,6 @@ __all__ = [
     'arctan',
     'arctan2',
     'bincount',
-    'neg_tensor',
     'neg',
     'negative',
     'tensor_lt',
@@ -13747,6 +11841,6 @@ __all__ = [
     'vecdot',
     'dot',
     'batch_dot',
-    'eps'
+    'eps',
 ]
 __all__.sort()