mindspore 2.2.14-cp38-cp38-manylinux1_x86_64.whl → 2.3.0rc2-cp38-cp38-manylinux1_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
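Each entry below reads `- path +added -removed`, where the counts are line-level additions and removals for that file; `+0 -0` entries are binary artifacts (shared objects, executables) whose content changed, and `{old → new}` marks a rename. As a minimal sketch of how such a summary could be reproduced locally with only the Python standard library (the wheel filenames and paths here are assumptions; download both wheels first, e.g. with `pip download --no-deps`):

```python
# Sketch: per-file "+added -removed" summary between two wheels,
# assuming both .whl files are already present locally.
import difflib
import zipfile

OLD_WHL = "mindspore-2.2.14-cp38-cp38-manylinux1_x86_64.whl"   # hypothetical local path
NEW_WHL = "mindspore-2.3.0rc2-cp38-cp38-manylinux1_x86_64.whl"  # hypothetical local path

with zipfile.ZipFile(OLD_WHL) as old, zipfile.ZipFile(NEW_WHL) as new:
    old_names, new_names = set(old.namelist()), set(new.namelist())
    for name in sorted(old_names | new_names):
        a_bytes = old.read(name) if name in old_names else b""
        b_bytes = new.read(name) if name in new_names else b""
        if a_bytes == b_bytes:
            continue  # member unchanged between versions
        try:
            a = a_bytes.decode("utf-8").splitlines()
            b = b_bytes.decode("utf-8").splitlines()
        except UnicodeDecodeError:
            print(f"- {name} +0 -0")  # binary member changed (e.g. a .so)
            continue
        added = removed = 0
        for line in difflib.unified_diff(a, b, lineterm=""):
            if line.startswith("+") and not line.startswith("+++"):
                added += 1
            elif line.startswith("-") and not line.startswith("---"):
                removed += 1
        if added or removed:
            print(f"- {name} +{added} -{removed}")
```

This sketch does not detect renames (a renamed file shows up as one removal plus one addition rather than a `{old → new}` entry), which is an acceptable simplification for spot-checking the listing.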
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -4
- mindspore/_akg/akg/composite/build_module.py +155 -11
- mindspore/_akg/akg/config/repository.json +38 -0
- mindspore/_akg/akg/ms/info_version_adapt.py +29 -0
- mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -1
- mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +2 -1
- mindspore/_akg/akg/utils/composite_op_helper.py +4 -2
- mindspore/_akg/akg/utils/dump_ascend_meta.py +2 -2
- mindspore/_akg/akg/utils/gen_random.py +14 -8
- mindspore/_akg/akg/utils/op_dsl.py +11 -0
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +18 -8
- mindspore/_c_dataengine.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +78 -0
- mindspore/_extends/builtin_operations.py +2 -1
- mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
- mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
- mindspore/_extends/parse/__init__.py +18 -14
- mindspore/_extends/parse/compile_config.py +229 -0
- mindspore/_extends/parse/parser.py +155 -59
- mindspore/_extends/parse/resources.py +40 -7
- mindspore/_extends/parse/standard_method.py +127 -206
- mindspore/_extends/remote/kernel_build_server.py +2 -0
- mindspore/_mindspore_offline_debug.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _profiler.py} +13 -16
- mindspore/amp.py +24 -18
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +7 -3
- mindspore/common/_jit_fallback_utils.py +2 -3
- mindspore/common/_register_for_adapter.py +7 -0
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_stub_tensor.py +7 -1
- mindspore/common/_utils.py +5 -17
- mindspore/common/api.py +145 -50
- mindspore/common/auto_dynamic_shape.py +27 -14
- mindspore/common/dtype.py +9 -6
- mindspore/common/dump.py +5 -4
- mindspore/common/hook_handle.py +51 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +33 -13
- mindspore/common/lazy_inline.py +58 -17
- mindspore/common/mindir_util.py +12 -2
- mindspore/common/mutable.py +79 -14
- mindspore/common/parameter.py +24 -4
- mindspore/common/recompute.py +247 -0
- mindspore/common/seed.py +9 -9
- mindspore/common/sparse_tensor.py +251 -18
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +391 -465
- mindspore/communication/__init__.py +3 -3
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/management.py +53 -38
- mindspore/config/op_info.config +22 -54
- mindspore/context.py +176 -55
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +6 -6
- mindspore/dataset/audio/transforms.py +711 -158
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +2 -2
- mindspore/dataset/engine/datasets.py +72 -38
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +33 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +7 -7
- mindspore/dataset/engine/datasets_vision.py +75 -71
- mindspore/dataset/engine/offload.py +5 -7
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +408 -121
- mindspore/dataset/text/utils.py +9 -9
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/transforms.py +261 -76
- mindspore/dataset/utils/browse_dataset.py +9 -9
- mindspore/dataset/vision/__init__.py +3 -3
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/transforms.py +2264 -514
- mindspore/dataset/vision/utils.py +40 -9
- mindspore/dataset/vision/validators.py +7 -1
- mindspore/experimental/optim/__init__.py +12 -2
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +35 -34
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +40 -16
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +66 -121
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +15 -8
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +28 -19
- mindspore/hal/__init__.py +34 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/stream.py +339 -0
- mindspore/include/api/data_type.h +2 -2
- mindspore/include/api/dual_abi_helper.h +16 -3
- mindspore/include/api/model.h +1 -3
- mindspore/include/api/status.h +14 -0
- mindspore/include/c_api/model_c.h +173 -0
- mindspore/include/c_api/ms/base/types.h +1 -0
- mindspore/include/c_api/types_c.h +19 -0
- mindspore/include/dataset/execute.h +1 -3
- mindspore/include/mindapi/base/format.h +125 -23
- mindspore/include/mindapi/base/types.h +12 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +2044 -154
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +2044 -33
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/build_tbe_kernel.py +529 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/compiler.py +56 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/custom.py +1109 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/get_file_path.py +36 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/tbe_topi.py +556 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6318 -1760
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_add_custom.h +49 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +52 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +232 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +232 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.cpp +192 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.cpp +274 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +39 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/{libmindspore_ascend.so.1 → libmindspore_ascend.so.2} +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/log.py +2 -2
- mindspore/mindrecord/__init__.py +5 -1
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +25 -0
- mindspore/mindrecord/filewriter.py +74 -56
- mindspore/mindrecord/mindpage.py +40 -6
- mindspore/mindrecord/shardutils.py +3 -2
- mindspore/mindrecord/shardwriter.py +7 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
- mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
- mindspore/mindrecord/tools/csv_to_mr.py +4 -9
- mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
- mindspore/mint/__init__.py +457 -0
- mindspore/mint/nn/__init__.py +430 -0
- mindspore/mint/nn/functional.py +424 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +186 -0
- mindspore/multiprocessing/__init__.py +72 -0
- mindspore/nn/__init__.py +3 -0
- mindspore/nn/cell.py +131 -174
- mindspore/nn/dynamic_lr.py +2 -2
- mindspore/nn/extend/__init__.py +29 -0
- mindspore/nn/extend/basic.py +140 -0
- mindspore/nn/extend/embedding.py +143 -0
- mindspore/{rewrite/ast_creator_register.py → nn/extend/layer/__init__.py} +9 -19
- mindspore/nn/extend/layer/normalization.py +107 -0
- mindspore/nn/extend/pooling.py +117 -0
- mindspore/nn/generator.py +297 -0
- mindspore/nn/layer/activation.py +79 -90
- mindspore/nn/layer/basic.py +113 -81
- mindspore/nn/layer/channel_shuffle.py +3 -16
- mindspore/nn/layer/container.py +3 -3
- mindspore/nn/layer/conv.py +71 -71
- mindspore/nn/layer/embedding.py +105 -44
- mindspore/nn/layer/image.py +4 -7
- mindspore/nn/layer/normalization.py +52 -66
- mindspore/nn/layer/padding.py +30 -39
- mindspore/nn/layer/pooling.py +13 -9
- mindspore/nn/layer/rnn_cells.py +5 -15
- mindspore/nn/layer/rnns.py +6 -5
- mindspore/nn/layer/thor_layer.py +1 -2
- mindspore/nn/layer/timedistributed.py +1 -1
- mindspore/nn/layer/transformer.py +52 -50
- mindspore/nn/learning_rate_schedule.py +6 -5
- mindspore/nn/loss/loss.py +43 -64
- mindspore/nn/optim/ada_grad.py +4 -2
- mindspore/nn/optim/adadelta.py +3 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +102 -181
- mindspore/nn/optim/adamax.py +4 -2
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +4 -2
- mindspore/nn/optim/ftrl.py +31 -61
- mindspore/nn/optim/lamb.py +5 -3
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +6 -4
- mindspore/nn/optim/momentum.py +13 -25
- mindspore/nn/optim/optimizer.py +6 -3
- mindspore/nn/optim/proximal_ada_grad.py +4 -2
- mindspore/nn/optim/rmsprop.py +9 -3
- mindspore/nn/optim/rprop.py +4 -2
- mindspore/nn/optim/sgd.py +6 -5
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
- mindspore/nn/probability/distribution/beta.py +2 -2
- mindspore/nn/probability/distribution/categorical.py +4 -6
- mindspore/nn/probability/distribution/cauchy.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/poisson.py +2 -2
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +13 -1
- mindspore/nn/wrap/__init__.py +2 -1
- mindspore/nn/wrap/cell_wrapper.py +33 -12
- mindspore/nn/wrap/grad_reducer.py +148 -8
- mindspore/nn/wrap/loss_scale.py +7 -7
- mindspore/numpy/__init__.py +2 -0
- mindspore/numpy/array_creations.py +2 -0
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/fft.py +431 -0
- mindspore/numpy/math_ops.py +54 -60
- mindspore/numpy/utils.py +3 -0
- mindspore/ops/__init__.py +5 -4
- mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
- mindspore/ops/_grad_experimental/grad_comm_ops.py +14 -18
- mindspore/ops/_grad_experimental/grad_math_ops.py +68 -283
- mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
- mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
- mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/__init__.py +0 -1
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
- mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
- mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -3
- mindspore/ops/_op_impl/cpu/adam.py +2 -2
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
- mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
- mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
- mindspore/ops/_vmap/vmap_array_ops.py +137 -101
- mindspore/ops/_vmap/vmap_base.py +8 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
- mindspore/ops/_vmap/vmap_image_ops.py +70 -13
- mindspore/ops/_vmap/vmap_math_ops.py +101 -57
- mindspore/ops/_vmap/vmap_nn_ops.py +230 -97
- mindspore/ops/_vmap/vmap_other_ops.py +1 -1
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +205 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +257 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +171 -0
- mindspore/ops/auto_generate/gen_extend_func.py +404 -0
- mindspore/ops/auto_generate/gen_ops_def.py +5653 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +11623 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +359 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +118 -17
- mindspore/ops/composite/math_ops.py +9 -48
- mindspore/ops/composite/multitype_ops/_compile_utils.py +168 -602
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +24 -133
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
- mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
- mindspore/ops/deprecated.py +14 -3
- mindspore/ops/extend/__init__.py +54 -0
- mindspore/ops/extend/array_func.py +259 -0
- mindspore/ops/extend/math_func.py +76 -0
- mindspore/ops/extend/nn_func.py +384 -0
- mindspore/ops/function/__init__.py +37 -12
- mindspore/ops/function/array_func.py +702 -1867
- mindspore/ops/function/clip_func.py +19 -31
- mindspore/ops/function/debug_func.py +1 -4
- mindspore/ops/function/fft_func.py +31 -0
- mindspore/ops/function/grad/grad_func.py +24 -17
- mindspore/ops/function/image_func.py +27 -21
- mindspore/ops/function/linalg_func.py +35 -68
- mindspore/ops/function/math_func.py +639 -2531
- mindspore/ops/function/nn_func.py +1274 -832
- mindspore/ops/function/other_func.py +4 -5
- mindspore/ops/function/parameter_func.py +5 -93
- mindspore/ops/function/random_func.py +84 -71
- mindspore/ops/function/sparse_unary_func.py +9 -16
- mindspore/ops/function/spectral_func.py +1 -1
- mindspore/ops/function/vmap_func.py +14 -14
- mindspore/ops/functional.py +57 -63
- mindspore/ops/op_info_register.py +16 -43
- mindspore/ops/operations/__init__.py +19 -20
- mindspore/ops/operations/_grad_ops.py +20 -828
- mindspore/ops/operations/_inner_ops.py +180 -288
- mindspore/ops/operations/_scalar_ops.py +5 -480
- mindspore/ops/operations/_sequence_ops.py +6 -36
- mindspore/ops/operations/array_ops.py +83 -2697
- mindspore/ops/operations/comm_ops.py +38 -46
- mindspore/ops/operations/custom_ops.py +14 -96
- mindspore/ops/operations/debug_ops.py +100 -31
- mindspore/ops/operations/image_ops.py +1 -217
- mindspore/ops/operations/inner_ops.py +3 -38
- mindspore/ops/operations/linalg_ops.py +1 -49
- mindspore/{rewrite/ast_transformers → ops/operations/manually_defined}/__init__.py +11 -4
- mindspore/ops/operations/manually_defined/_inner.py +61 -0
- mindspore/ops/operations/manually_defined/ops_def.py +1716 -0
- mindspore/ops/operations/math_ops.py +581 -4629
- mindspore/ops/operations/nn_ops.py +260 -1941
- mindspore/ops/operations/other_ops.py +50 -42
- mindspore/ops/operations/random_ops.py +3 -52
- mindspore/ops/operations/sparse_ops.py +3 -3
- mindspore/ops/primitive.py +196 -96
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +257 -0
- mindspore/ops_generate/arg_handler.py +171 -0
- mindspore/ops_generate/gen_aclnn_implement.py +266 -0
- mindspore/ops_generate/gen_ops.py +1062 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
- mindspore/ops_generate/gen_pyboost_func.py +939 -0
- mindspore/ops_generate/gen_utils.py +188 -0
- mindspore/ops_generate/op_proto.py +138 -0
- mindspore/ops_generate/pyboost_utils.py +349 -0
- mindspore/ops_generate/template.py +238 -0
- mindspore/parallel/__init__.py +6 -4
- mindspore/parallel/_auto_parallel_context.py +52 -2
- mindspore/parallel/_cell_wrapper.py +16 -9
- mindspore/parallel/_cost_model_context.py +1 -1
- mindspore/parallel/_dp_allreduce_fusion.py +159 -159
- mindspore/parallel/_parallel_serialization.py +29 -13
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +19 -7
- mindspore/parallel/_transformer/__init__.py +1 -1
- mindspore/parallel/_transformer/layers.py +1 -1
- mindspore/parallel/_transformer/loss.py +1 -1
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/op_parallel_config.py +1 -1
- mindspore/parallel/_transformer/transformer.py +1 -1
- mindspore/parallel/_utils.py +147 -6
- mindspore/parallel/algo_parameter_config.py +6 -6
- mindspore/parallel/checkpoint_transform.py +180 -24
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +345 -0
- mindspore/parallel/cluster/process_entity/_utils.py +116 -0
- mindspore/parallel/cluster/run.py +139 -0
- mindspore/parallel/mpi/__init__.py +1 -1
- mindspore/parallel/mpi/_mpi_config.py +1 -1
- mindspore/parallel/parameter_broadcast.py +152 -0
- mindspore/parallel/shard.py +99 -2
- mindspore/profiler/common/util.py +20 -0
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +66 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +77 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +146 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +109 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +80 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +52 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +116 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +59 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
- mindspore/profiler/parser/ascend_flops_generator.py +20 -4
- mindspore/profiler/parser/ascend_hccl_generator.py +25 -277
- mindspore/profiler/parser/ascend_msprof_exporter.py +112 -132
- mindspore/profiler/parser/ascend_msprof_generator.py +73 -283
- mindspore/profiler/parser/ascend_op_generator.py +92 -42
- mindspore/profiler/parser/ascend_timeline_generator.py +294 -133
- mindspore/profiler/parser/base_timeline_generator.py +6 -0
- mindspore/profiler/parser/framework_parser.py +3 -2
- mindspore/profiler/parser/integrator.py +3 -1
- mindspore/profiler/parser/msadvisor_analyzer.py +1 -1
- mindspore/profiler/parser/msadvisor_parser.py +1 -1
- mindspore/profiler/parser/profiler_info.py +16 -1
- mindspore/profiler/profiling.py +305 -167
- mindspore/rewrite/__init__.py +2 -13
- mindspore/rewrite/api/node.py +121 -35
- mindspore/rewrite/api/pattern_engine.py +2 -3
- mindspore/rewrite/api/scoped_value.py +16 -15
- mindspore/rewrite/api/symbol_tree.py +45 -29
- mindspore/rewrite/ast_helpers/__init__.py +3 -6
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
- mindspore/rewrite/common/__init__.py +1 -2
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
- mindspore/rewrite/{namer.py → common/namer.py} +63 -18
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/node/__init__.py +5 -5
- mindspore/rewrite/node/call_function.py +23 -7
- mindspore/rewrite/node/cell_container.py +7 -3
- mindspore/rewrite/node/control_flow.py +53 -28
- mindspore/rewrite/node/node.py +212 -196
- mindspore/rewrite/node/node_manager.py +51 -22
- mindspore/rewrite/node/node_topological_manager.py +3 -23
- mindspore/rewrite/parsers/__init__.py +12 -0
- mindspore/rewrite/parsers/arguments_parser.py +8 -9
- mindspore/rewrite/parsers/assign_parser.py +635 -413
- mindspore/rewrite/parsers/attribute_parser.py +3 -4
- mindspore/rewrite/parsers/class_def_parser.py +107 -144
- mindspore/rewrite/parsers/constant_parser.py +5 -5
- mindspore/rewrite/parsers/container_parser.py +4 -6
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +31 -98
- mindspore/rewrite/parsers/function_def_parser.py +13 -5
- mindspore/rewrite/parsers/if_parser.py +28 -10
- mindspore/rewrite/parsers/module_parser.py +8 -182
- mindspore/rewrite/parsers/parser.py +1 -5
- mindspore/rewrite/parsers/parser_register.py +1 -1
- mindspore/rewrite/parsers/return_parser.py +5 -10
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
- mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
- mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
- mindspore/run_check/_check_version.py +6 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +9 -19
- mindspore/scipy/__init__.py +2 -1
- mindspore/scipy/fft.py +133 -0
- mindspore/scipy/linalg.py +140 -55
- mindspore/scipy/ops.py +15 -71
- mindspore/scipy/ops_grad.py +5 -34
- mindspore/scipy/optimize/line_search.py +2 -2
- mindspore/scipy/optimize/minimize.py +1 -1
- mindspore/train/__init__.py +3 -2
- mindspore/train/_utils.py +178 -4
- mindspore/train/amp.py +167 -245
- mindspore/train/anf_ir_pb2.py +8 -2
- mindspore/train/callback/_backup_and_restore.py +4 -4
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +39 -13
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_landscape.py +14 -8
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +7 -7
- mindspore/train/callback/_time_monitor.py +2 -2
- mindspore/train/data_sink.py +1 -1
- mindspore/train/dataset_helper.py +18 -4
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/accuracy.py +7 -7
- mindspore/train/metrics/confusion_matrix.py +8 -6
- mindspore/train/metrics/cosine_similarity.py +6 -4
- mindspore/train/metrics/error.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/perplexity.py +2 -1
- mindspore/train/metrics/topk.py +2 -2
- mindspore/train/mind_ir_pb2.py +89 -15
- mindspore/train/model.py +24 -22
- mindspore/train/serialization.py +257 -133
- mindspore/train/summary/summary_record.py +51 -28
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/version.py +1 -1
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +534 -1066
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +1 -0
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
- mindspore/config/super_bar_config.json +0 -544
- mindspore/gen_ops.py +0 -273
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/nn/layer/flash_attention.py +0 -189
- mindspore/ops/_op_impl/cpu/concat.py +0 -39
- mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
- mindspore/ops/_op_impl/tbe/__init__.py +0 -47
- mindspore/ops/_op_impl/tbe/abs.py +0 -38
- mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/acos.py +0 -37
- mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/acosh.py +0 -37
- mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
- mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
- mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
- mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
- mindspore/ops/_op_impl/tbe/add.py +0 -42
- mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/add_n.py +0 -39
- mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
- mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
- mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
- mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
- mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
- mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
- mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
- mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/asin.py +0 -37
- mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/asinh.py +0 -37
- mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/assign.py +0 -79
- mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
- mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
- mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/atan.py +0 -37
- mindspore/ops/_op_impl/tbe/atan2.py +0 -38
- mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/atanh.py +0 -37
- mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
- mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
- mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
- mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
- mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
- mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
- mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
- mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
- mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
- mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
- mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cast.py +0 -55
- mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/cdist.py +0 -38
- mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/ceil.py +0 -37
- mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/celu.py +0 -39
- mindspore/ops/_op_impl/tbe/centralization.py +0 -39
- mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
- mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/concat.py +0 -40
- mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
- mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
- mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
- mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
- mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
- mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/cos.py +0 -37
- mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/cosh.py +0 -37
- mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
- mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cummin.py +0 -41
- mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
- mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
- mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
- mindspore/ops/_op_impl/tbe/diag.py +0 -38
- mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
- mindspore/ops/_op_impl/tbe/dilation.py +0 -40
- mindspore/ops/_op_impl/tbe/div.py +0 -41
- mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
- mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
- mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
- mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
- mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
- mindspore/ops/_op_impl/tbe/elu.py +0 -38
- mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/equal.py +0 -42
- mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/erf.py +0 -37
- mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfc.py +0 -37
- mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
- mindspore/ops/_op_impl/tbe/exp.py +0 -40
- mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
- mindspore/ops/_op_impl/tbe/expm1.py +0 -37
- mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
- mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/fill.py +0 -56
- mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/flatten.py +0 -48
- mindspore/ops/_op_impl/tbe/floor.py +0 -37
- mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
- mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
- mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
- mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
- mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
- mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
- mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/ger.py +0 -43
- mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/greater.py +0 -43
- mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
- mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
- mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
- mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
- mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
- mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
- mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
- mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/im2col.py +0 -42
- mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
- mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
- mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/inv.py +0 -38
- mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/invert.py +0 -37
- mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/iou.py +0 -38
- mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/is_close.py +0 -40
- mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
- mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
- mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
- mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
- mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
- mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
- mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/lerp.py +0 -38
- mindspore/ops/_op_impl/tbe/less.py +0 -41
- mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/log.py +0 -40
- mindspore/ops/_op_impl/tbe/log1p.py +0 -37
- mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
- mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
- mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
- mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
- mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/matmul.py +0 -53
- mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
- mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
- mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
- mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum.py +0 -39
- mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
- mindspore/ops/_op_impl/tbe/minimum.py +0 -40
- mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mish.py +0 -37
- mindspore/ops/_op_impl/tbe/mod.py +0 -41
- mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/mul.py +0 -37
- mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
- mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
- mindspore/ops/_op_impl/tbe/neg.py +0 -39
- mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
- mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
- mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
- mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
- mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
- mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/pack.py +0 -58
- mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
- mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
- mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/pdist.py +0 -36
- mindspore/ops/_op_impl/tbe/pooling.py +0 -46
- mindspore/ops/_op_impl/tbe/population_count.py +0 -38
- mindspore/ops/_op_impl/tbe/pow.py +0 -41
- mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/prelu.py +0 -37
- mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/range.py +0 -39
- mindspore/ops/_op_impl/tbe/real_div.py +0 -38
- mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
- mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
- mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
- mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
- mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6.py +0 -38
- mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/renorm.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
- mindspore/ops/_op_impl/tbe/rint.py +0 -37
- mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roll.py +0 -42
- mindspore/ops/_op_impl/tbe/round.py +0 -38
- mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
- mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
- mindspore/ops/_op_impl/tbe/select.py +0 -38
- mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/selu.py +0 -39
- mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sgd.py +0 -62
- mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sign.py +0 -38
- mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/sin.py +0 -37
- mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sinh.py +0 -37
- mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/slice.py +0 -58
- mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
- mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax.py +0 -37
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
- mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/softplus.py +0 -37
- mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softsign.py +0 -37
- mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sort.py +0 -38
- mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/split_d.py +0 -38
- mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/split_v.py +0 -39
- mindspore/ops/_op_impl/tbe/splitv.py +0 -39
- mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/square.py +0 -38
- mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
- mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
- mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
- mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
- mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
- mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
- mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
- mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
- mindspore/ops/_op_impl/tbe/sub.py +0 -39
- mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tan.py +0 -38
- mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh.py +0 -37
- mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
- mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
- mindspore/ops/_op_impl/tbe/tile.py +0 -37
- mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
- mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
- mindspore/ops/_op_impl/tbe/transpose.py +0 -60
- mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
- mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
- mindspore/ops/_op_impl/tbe/trunc.py +0 -39
- mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/unpack.py +0 -38
- mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
- mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
- mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
- mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
- mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
- mindspore/ops/_tracefunc.py +0 -241
- mindspore/ops/arg_dtype_cast.py +0 -54
- mindspore/rewrite/api/tree_node_helper.py +0 -60
- mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
- mindspore/rewrite/namespace.py +0 -53
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 
+# pylint: disable=unused-import
 """Defines math operators with functional form."""
 
 import collections
@@ -21,6 +22,7 @@ import math
 import numbers
 import numpy as np
 
+import mindspore as ms
 from mindspore import log as logger
 import mindspore.ops as ops
 from mindspore.common import dtype as mstype
@@ -28,13 +30,22 @@ from mindspore.ops import operations as P
 from mindspore.ops import composite as C
 from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
 from mindspore.ops.primitive import constexpr, _primexpr
-from mindspore.ops.operations._inner_ops import
+from mindspore.ops.operations._inner_ops import TileSize
+from mindspore.ops.auto_generate import Cummin, BatchMatMul, LinSpaceExt, Norm
+from mindspore.ops import auto_generate
 from mindspore.ops.operations.math_ops import STFT
-from mindspore.ops.operations.math_ops import Logit
 from mindspore.ops.operations.math_ops import LuUnpack
 from mindspore.ops.operations.math_ops import Roll
 from mindspore.ops.operations.math_ops import Ormqr
+from mindspore.ops.operations.math_ops import DivMod
 from mindspore.ops.operations.array_ops import MatrixSetDiagV3, Transpose
+from mindspore.ops.auto_generate import (minimum, maximum, mul, sin, sinc, sinh, cummax, real, conj, add, sub, cos, cosh,
+                                         matrix_exp, sqrt, rsqrt, square, trace, nextafter, abs, acos, acosh, angle,
+                                         asin, asinh, atan, atan2, atanh, ceil, equal, erf, erfc, erfinv, exp, expm1,
+                                         floor, floor_divide, floor_mod, gcd, greater, greater_equal, less, less_equal,
+                                         log, log1p, neg, not_equal, pow, round, isfinite, argmax, mean_ext_op,
+                                         sum_ext_op, prod_ext_op, all)
+from mindspore.ops.auto_generate import tanh
 from mindspore.nn import layer
 from mindspore._checkparam import check_is_number
 from mindspore import _checkparam as validator
@@ -63,7 +74,6 @@ from mindspore.ops.operations.math_ops import (
     Heaviside,
     Lcm,
     Gcd,
-    Sinc,
     Quantile,
     NanToNum,
     SparseSegmentMean,
@@ -101,128 +111,123 @@ def get_x_shape(x_shape):
 # Public Operation Functions.
 #####################################
 absolute_ = P.Abs()
-
+cast_ = P.Cast()
 tensor_add = P.Add()
-
-tensor_sub = P.Sub()
-tensor_mul = P.Mul()
+tensor_ceil = P.Ceil()
 tensor_div = P.RealDiv()
+tensor_exp = P.Exp()
+tensor_expm1 = P.Expm1()
 tensor_floordiv = P.FloorDiv()
 floordiv = tensor_floordiv
-
-
-
+tensor_ge = P.GreaterEqual()
+tensor_gt = greater
+tensor_le = P.LessEqual()
+tensor_lt = P.Less()
 tensor_mod = P.FloorMod()
 floormod = tensor_mod
-
-
-
-
-tensor_gt = P.Greater()
-tensor_ge = P.GreaterEqual()
+tensor_mul = P.Mul()
+tensor_pow = P.Pow()
+pows = tensor_pow
+tensor_sub = P.Sub()
 transpose_ = P.Transpose()
-
-cast_ = P.Cast()
+xdivy_ = P.Xdivy()
 
 #####################################
 # Private Operation Functions.
 #####################################
+accumulate_ = P.AccumulateNV2()
+acos_ = P.ACos()
+acosh_ = P.Acosh()
 addcdiv_ = P.Addcdiv()
 addcuml_ = P.Addcmul()
 addn_ = P.AddN()
 angle_ = Angle()
-log_ = P.Log()
-floor_ = P.Floor()
-logical_not_ = P.LogicalNot()
-logical_or_ = P.LogicalOr()
-logical_and_ = P.LogicalAnd()
-sin_ = P.Sin()
-sinc_ = Sinc()
-cos_ = P.Cos()
-tan_ = P.Tan()
 asin_ = P.Asin()
-polar_ = Polar()
-acos_ = P.ACos()
-atan_ = P.Atan()
-atan2_ = P.Atan2()
-sinh_ = P.Sinh()
-cosh_ = P.Cosh()
-tanh_ = P.Tanh()
 asinh_ = P.Asinh()
-
+atan2_ = P.Atan2()
+atan_ = P.Atan()
 atanh_ = P.Atanh()
-
-bitwise_or_ = P.BitwiseOr()
-bitwise_xor_ = P.BitwiseXor()
-inv_ = P.math_ops.Inv()
-invert_ = P.Invert()
-erf_ = P.Erf()
-erfc_ = P.Erfc()
-bessel_j1_ = BesselJ1()
-bessel_j0_ = BesselJ0()
+batch_matmul_ = BatchMatMul()
 bessel_i0_ = BesselI0()
 bessel_i0e_ = P.BesselI0e()
-bessel_k0_ = BesselK0()
-bessel_k0e_ = BesselK0e()
-bessel_y0_ = BesselY0()
-bessel_y1_ = BesselY1()
 bessel_i1_ = BesselI1()
 bessel_i1e_ = P.BesselI1e()
+bessel_j0_ = BesselJ0()
+bessel_j1_ = BesselJ1()
+bessel_k0_ = BesselK0()
+bessel_k0e_ = BesselK0e()
 bessel_k1_ = BesselK1()
 bessel_k1e_ = BesselK1e()
-
-
-
-
-
-
-
-linspace_ = P.LinSpace()
-matrix_exp_ = MatrixExp()
-exp2_ = P.Pow()
-trunc_ = P.Trunc()
-truncate_div_ = P.TruncateDiv()
-truncate_mod_ = P.TruncateMod()
-sparse_segment_mean_ = SparseSegmentMean()
-lu_unpack_ = LuUnpack()
-xlogy_ = P.Xlogy()
-square_ = P.Square()
-sqrt_ = P.Sqrt()
+bessel_y0_ = BesselY0()
+bessel_y1_ = BesselY1()
+bitwise_and_ = P.BitwiseAnd()
+bitwise_or_ = P.BitwiseOr()
+bitwise_xor_ = P.BitwiseXor()
+conj_ = P.Conj()
+cumprod_ = P.CumProd()
 cumsum_ = P.CumSum()
-
-
+cumulative_logsumexp_ = CumulativeLogsumexp()
+digamma_ = P.Digamma()
 dtype_ = P.DType()
 eps_ = P.Eps()
-
+erf_ = P.Erf()
+erfc_ = P.Erfc()
+erfinv_ = P.Erfinv()
+exp2_ = P.Pow()
 expand_dims_ = P.ExpandDims()
-sign_ = P.Sign()
-nextafter_ = P.NextAfter()
-matrix_inverse_ = P.MatrixInverse()
-matrix_determinant_ = P.MatrixDeterminant()
-log_matrix_determinant_ = P.LogMatrixDeterminant()
-trace_ = P.Trace()
-real_ = P.Real()
-rsqrt_ = P.Rsqrt()
-reciprocal_ = P.Reciprocal()
-tile_ = P.Tile()
-batch_matmul_ = P.BatchMatMul()
 fill_v2_ = P.FillV2()
+floor_ = P.Floor()
+gcd_ = Gcd()
+igamma_ = Igamma()
+igammac_ = Igammac()
 imag_ = P.Imag()
-
-
-
-
-
+inv_ = P.math_ops.Inv()
+invert_ = P.Invert()
+isinf_ = P.IsInf()
+isnan_ = P.IsNan()
+lcm_ = Lcm()
+lerp_ = P.Lerp()
 lgamma_ = P.Lgamma()
-
+linspace_ = P.LinSpace()
+log1p_ = P.Log1p()
+log_ = P.Log()
+log_matrix_determinant_ = P.LogMatrixDeterminant()
+logical_and_ = P.LogicalAnd()
+logical_not_ = P.LogicalNot()
+logical_or_ = P.LogicalOr()
+logical_xor_ = P.LogicalXor()
+lu_solve_ = LuSolve()
+lu_unpack_ = LuUnpack()
+matmul_ = P.MatMul()
+matrix_determinant_ = P.MatrixDeterminant()
+matrix_inverse_ = P.MatrixInverse()
+mod_ = P.Mod()
+nextafter_ = P.NextAfter()
+ones_ = P.Ones()
+polar_ = Polar()
 poly_gamma_ = P.Polygamma()
-
+rank_ = P.Rank()
+reciprocal_ = P.Reciprocal()
+reduce_sum_ = P.ReduceSum()
+reshape_ = P.Reshape()
+select_ = P.Select()
+slice_ = P.Slice()
+size_ = P.Size()
+scalar_to_tensor_ = P.ScalarToTensor()
+shape_ = P.Shape()
+sign_ = P.Sign()
+sparse_segment_mean_ = SparseSegmentMean()
+tan_ = P.Tan()
+tanh_ = P.Tanh()
+tensor_round_ = P.Round()
+tile_ = P.Tile()
+tile_size_ = TileSize()
+trunc_ = P.Trunc()
+truncate_div_ = P.TruncateDiv()
+truncate_mod_ = P.TruncateMod()
+xlogy_ = P.Xlogy()
 zeros_ = P.Zeros()
-ones_ = P.Ones()
-logical_xor_ = P.LogicalXor()
 zeta_ = P.Zeta()
-div_ = P.Div()
-matmul_ = P.MatMul()
 
 
 #####################################
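The block above replaces most module-level primitive aliases: each `P.Xxx()` call instantiates a primitive operator once at import time, and the functional wrappers defined later in the file reuse that shared instance. A minimal sketch of the pattern, illustrative only and not part of the diff:

>>> from mindspore.ops import operations as P
>>> tensor_pow = P.Pow()   # one shared primitive instance at module scope
>>> pows = tensor_pow      # alias, as in the table above
>>> # a functional wrapper then just calls the shared instance:
>>> # def pow(input, exponent): return tensor_pow(input, exponent)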
@@ -262,39 +267,6 @@ def addn(x):
     return addn_(x)
 
 
-def abs(input):
-    r"""
-    Returns absolute value of a tensor element-wise.
-
-    .. math::
-
-        out_i = |input_i|
-
-    Args:
-        input (Tensor): The input tensor. The shape of tensor is
-            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-    Returns:
-        Tensor, has the same shape as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32)
-        >>> output = ops.abs(input)
-        >>> print(output)
-        [1. 1. 0.]
-    """
-    return absolute_(input)
-
-
 def absolute(input):
     """
     Alias for :func:`mindspore.ops.abs` .
@@ -305,70 +277,10 @@ def absolute(input):
     return abs(input)
 
 
-def add(input, other):
-    r"""
-    Adds other value to input Tensor.
-
-    .. math::
-
-        out_{i} = input_{i} + other_{i}
-
-    Note:
-        - One of the two inputs must be a Tensor, when the two inputs have different shapes,
-          they must be able to broadcast to a common shape.
-        - The two inputs can not be bool type at the same time,
-          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
-        - The two inputs comply with the implicit type conversion rules to make the data types
-          consistent.
-        - When input is Tensor, it's dimension should be greater than or equal to 1.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
-        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one of the input `input` , `other` after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If `input` and `other` is not one of the following: Tensor, number.Number, bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> # case 1: x and y are both Tensor.
-        >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
-        >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
-        >>> output = ops.add(x, y)
-        >>> print(output)
-        [5. 7. 9.]
-        >>> # case 2: x is a scalar and y is a Tensor
-        >>> x = Tensor(1, mindspore.int32)
-        >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
-        >>> output = ops.add(x, y)
-        >>> print(output)
-        [5. 6. 7.]
-        >>> # the data type of x is int32, the data type of y is float32,
-        >>> # and the output is the data format of higher precision float32.
-        >>> print(output.dtype)
-        Float32
-    """
-    return tensor_add(input, other)
-
-
 def addcdiv(input, tensor1, tensor2, value=1):
     r"""
     Performs the element-wise division of tensor tensor1 by tensor tensor2,
-    multiply the result by the scalar value and add it to
+    multiply the result by the scalar value and add it to input data.
 
     .. math::
         y[i] = input[i] + value[i] * (tensor1[i] / tensor2[i])
@@ -409,7 +321,7 @@ def addcdiv(input, tensor1, tensor2, value=1):
 def addcmul(input, tensor1, tensor2, value=1):
     r"""
     Performs the element-wise product of tensor tensor1 and tensor tensor2,
-    multiply the result by the scalar value and add it to
+    multiply the result by the scalar value and add it to input data.
 
     .. math::
         output[i] = input[i] + value[i] * (tensor1[i] * tensor2[i])
@@ -421,7 +333,7 @@ def addcmul(input, tensor1, tensor2, value=1):
         value (Union[Tensor, Number]): The multiplier for tensor1*tensor2. Default: ``1`` .
 
     Returns:
-        Tensor, has the same shape and dtype as
+        Tensor, has the same shape and dtype as tensor1*tensor2.
 
     Raises:
         TypeError: If dtype of `tensor1`, `tensor2`, `input` is not Tensor.
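The two wording fixes above pin down the addcdiv and addcmul formulas. A doctest-style sketch of both, illustrative only, with results shown approximately in comments rather than as exact printed output:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> base = Tensor(np.array([1., 1., 1., 1.]), mindspore.float32)
>>> t1 = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
>>> t2 = Tensor(np.array([4., 3., 2., 1.]), mindspore.float32)
>>> y1 = ops.addcdiv(base, t1, t2, 1)   # base + 1 * (t1 / t2) -> [1.25, 1.67, 2.5, 5.0] approx.
>>> y2 = ops.addcmul(base, t1, t2, 1)   # base + 1 * (t1 * t2) -> [5., 7., 7., 5.]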
@@ -452,36 +364,6 @@ def addcmul(input, tensor1, tensor2, value=1):
     return addcuml_(input, tensor1, tensor2, Tensor(value))
 
 
-def angle(input):
-    """
-    Returns the element-wise argument of a complex tensor.
-    The elements in input are considered to be complex numbers of the form a+bj, where a is the real part and b
-    is the imaginary part. The argument returned by this function is of the form :math:`atan2(b, a)`.
-
-    Args:
-        input (Tensor): The input tensor. types: complex64, complex128.
-
-    Returns:
-        Tensor, has the float32 or float64 type and the same shape as input.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If the dtype of `input` is not one of: complex64, complex128.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor([-1.5 + 7.8j, 3 + 5.75j], mindspore.complex64)
-        >>> output = ops.angle(input)
-        >>> print(output)
-        [1.7607845 1.0899091]
-    """
-    return angle_(input)
-
-
 def bincount(input, weights=None, minlength=0):
     """
     Counts the number of occurrences of each value in `input`.
@@ -494,6 +376,9 @@ def bincount(input, weights=None, minlength=0):
     Each value in the output Tensor marks the number of occurrences of that index in `input`.
     If 'weights' is specified, the output results are weighted, i.e ``out[n] += weight[i]`` instead of ``out[n] += 1``.
 
+    Note:
+        If `input` contains negative value, the result will be undefined.
+
     Args:
         input (Tensor): 1-d input tensor.
         weights (Tensor, optional): Weights, a tensor of the same shape as `input`. Default: ``None`` .
@@ -505,7 +390,6 @@ def bincount(input, weights=None, minlength=0):
     Raises:
         TypeError: If `input` or `weights` is not a tensor.
         ValueError: If `input` is not one-dimensional, or if `input` and `weights` do not have the same shape.
-        ValueError: If `input` contains negative value.
         ValueError: If `minlength` is a negative integer.
 
     Supported Platforms:
@@ -529,23 +413,21 @@ def bincount(input, weights=None, minlength=0):
         raise TypeError(f"For math function 'bincount', 'minlength' must be int but got {type(minlength)}.")
     if rank_(input) != 1:
         raise ValueError(f"For math function 'bincount', 'input' should be one-dimensional tensor.")
-    if not (input >= 0).all():
-        raise ValueError(f"For 'bincount', elements of 'input' should be non-negative.")
     if input.shape[0] == 0:
-        return
+        return Tensor_([])
     if minlength < 0:
         raise ValueError(f"For 'bincount', 'minlength' should be >= 0 but got {minlength}.")
     if max(input.astype(mstype.float32)) > minlength - 1:
         length = (max(input.astype(mstype.float32)) + 1).astype(mstype.int32)
     else:
-        length =
+        length = cast_(minlength, mstype.int32)
     idx = F.arange(length).expand_dims(-1)
-    idx_mapping = equal(input, idx)
+    idx_mapping = equal(input, idx.astype(input.dtype))
     if weights is not None:
         if input.shape != weights.shape:
             raise ValueError('for bincount `input` and `weights` must have the same length')
         idx_mapping *= weights
-    return
+    return reduce_sum_(idx_mapping.astype(mstype.float32), 1).ravel()
 
 
 def bucketize(input, boundaries, *, right=False):
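The rewritten body above keeps the same counting trick: compare `input` against a column vector `arange(length)` to get a (length, n) one-hot matrix, optionally weight it, then sum each row. A NumPy sketch of the same idea, illustrative only and not MindSpore code:

>>> import numpy as np
>>> x = np.array([2, 4, 1, 0, 0])
>>> length = x.max() + 1
>>> idx = np.arange(length).reshape(-1, 1)  # shape (length, 1)
>>> one_hot = (x == idx)                    # broadcasts to (length, len(x))
>>> print(one_hot.sum(axis=1))
[2 1 1 0 1]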
@@ -674,38 +556,6 @@ def argmin(input, axis=None, keepdims=False):
     return out
 
 
-def neg(input):
-    """
-    Returns a tensor with negative values of the input tensor element-wise.
-
-    .. math::
-
-        out_{i} = - input_{i}
-
-    Args:
-        input (Tensor): The input tensor with a dtype of Number.
-
-    Returns:
-        Tensor, has the same shape and dtype as input.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
-        >>> output = ops.neg(input)
-        >>> print(output)
-        [-1. -2. 1. -2. 0. 3.5]
-    """
-    return neg_tensor(input)
-
-
 def negative(input):
     r"""
     Alias for :func:`mindspore.ops.neg` .
@@ -713,7 +563,7 @@ def negative(input):
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
     """
-    return
+    return neg(input)
 
 
 def positive(input):
@@ -778,7 +628,7 @@ def permute(input, axis):
 
     Args:
         input (Tensor): Input Tensor.
-        axis (
+        axis (tuple(int)): Permute will permute the tensor to the input `axis` order.
 
     Returns:
         Tensor, has the same dimension as input tensor, with `axis` suitably permuted.
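With the `axis` description restored above, a short illustrative doctest of `ops.permute` with hypothetical values:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.ones((2, 3, 4)), mindspore.float32)
>>> print(ops.permute(input_x, (0, 2, 1)).shape)
(2, 4, 3)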
@@ -807,139 +657,26 @@ def permute(input, axis):
     return transpose_(input, axis)
 
 
-def
+def subtract(input, other, *, alpha=1):
     r"""
-
+    Performs the element-wise subtract of input tensors.
 
     .. math::
-
-        out_i = \lceil x_i \rceil = \lfloor x_i \rfloor + 1
+        output[i] = input[i] - alpha * other[i]
 
     Args:
-        input (Tensor):
+        input (Union[Tensor, number.Number]): Tensor or Number involved in subtraction.
+        other (Union[Tensor, number.Number]): Tensor or Number involved in subtraction.
+
+    Keyword Args:
+        alpha (Number): The multiplier for :math:`other`. Default: ``1`` .
 
     Returns:
-        Tensor, has the same shape as
+        Tensor, has the same shape and dtype as input tensors.
 
     Raises:
-        TypeError:
-        TypeError:
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
-        >>> output = ops.ceil(x)
-        >>> print(output)
-        [ 2. 3. -1.]
-    """
-    return tensor_ceil(input)
-
-
-def round(input):
-    r"""
-    Returns half to even of a tensor element-wise.
-
-    .. math::
-
-        out_i \approx input_i
-
-    Args:
-        input (Tensor): The input tensor.
-
-    Returns:
-        Tensor, has the same shape and type as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
-        >>> output = ops.round(input)
-        >>> print(output)
-        [ 1. 2. 2. 2. -4.]
-    """
-    return tensor_round_(input)
-
-
-def sub(input, other):
-    r"""
-    Subtracts the second input tensor from the first input tensor element-wise.
-
-    .. math::
-
-        out_{i} = input_{i} - other_{i}
-
-    Note:
-        - One of the two inputs must be a Tensor, when the two inputs have different shapes,
-          they must be able to broadcast to a common shape.
-        - The two inputs can not be bool type at the same time,
-          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
-        - The two inputs comply with the implicit type conversion rules to make the data types
-          consistent.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
-        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If `input` and `other` are not number.Number or bool or Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
-        >>> other = Tensor(np.array([4, 5, 6]), mindspore.int32)
-        >>> output = ops.sub(input, other)
-        >>> print(output)
-        [-3 -3 -3]
-    """
-    return tensor_sub(input, other)
-
-
-def subtract(input, other, *, alpha=1):
-    r"""
-    Performs the element-wise subtract of input tensors.
-
-    .. math::
-        output[i] = input[i] - alpha * other[i]
-
-    Args:
-        input (Union[Tensor, number.Number]): Tensor or Number involved in subtraction.
-        other (Union[Tensor, number.Number]): Tensor or Number involved in subtraction.
-
-    Keyword Args:
-        alpha (Number): The multiplier for :math:`other`. Default: ``1`` .
-
-    Returns:
-        Tensor, has the same shape and dtype as input tensors.
-
-    Raises:
-        TypeError: `input` or `other` is neither Tensor nor number.Number.
-        TypeError: Both `input` and `other` are not Tensor.
+        TypeError: `input` or `other` is neither Tensor nor number.Number.
+        TypeError: Both `input` and `other` are not Tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
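`subtract` now carries the full keyword documentation; a minimal illustrative doctest of the `alpha` multiplier:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([4., 5., 6.]), mindspore.float32)
>>> y = Tensor(np.array([1., 2., 3.]), mindspore.float32)
>>> print(ops.subtract(x, y, alpha=2))   # x - 2 * y
[2. 1. 0.]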
@@ -957,65 +694,6 @@ def subtract(input, other, *, alpha=1):
     return tensor_sub(input, alpha * other)
 
 
-def true_divide(dividend, divisor):
-    r"""
-    Alias for :func:`mindspore.ops.div` with :math:`rounding\_mode=None`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-    """
-    return div(dividend, divisor, rounding_mode=None)
-
-
-def mul(input, other):
-    r"""
-    Multiplies two tensors element-wise.
-
-    .. math::
-
-        out_{i} = input_{i} * other_{i}
-
-    Note:
-        - One of the two inputs must be a Tensor, when the two inputs have different shapes,
-          they must be able to broadcast to a common shape.
-        - The two inputs can not be bool type at the same time,
-          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
-        - The two inputs comply with the implicit type conversion rules to make the data types
-          consistent.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
-        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If `input` and `other` is not one of the following: Tensor, number.Number, bool.
-        ValueError: If `input` and `other` are not the same shape.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
-        >>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
-        >>> output = ops.mul(x, y)
-        >>> print(output)
-        [ 4. 10. 18.]
-    """
-    return tensor_mul(input, other)
-
-
 def multiply(input, other):
     r"""
     Alias for :func:`mindspore.ops.asinh`.
@@ -1030,18 +708,17 @@ def div(input, other, *, rounding_mode=None):
     r"""
     Divides the first input tensor by the second input tensor in floating-point type element-wise.
 
+    .. math::
+
+        out_{i} = input_{i} / other_{i}
+
     Note:
-        -
-          they must be able to broadcast to a common shape.
+        - When the two inputs have different shapes, they must be able to broadcast to a common shape.
         - The two inputs can not be bool type at the same time,
           [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
         - The two inputs comply with the implicit type conversion rules to make the data types
          consistent.
 
-    .. math::
-
-        out_{i} = input_{i} / other_{i}
-
     Args:
         input (Union[Tensor, Number, bool]): The first input is a number or
             a bool or a tensor whose data type is number or bool.
@@ -1082,15 +759,23 @@ def div(input, other, *, rounding_mode=None):
     """
     if rounding_mode is not None and rounding_mode not in ['floor', 'trunc']:
         raise ValueError("For ops.div, rounding_mode value should be None, 'floor' or 'trunc'.")
-
-
-
-
-    if rounding_mode == 'trunc':
-        output = trunc_(output)
+    if rounding_mode:
+        output = DivMod()(input, other, rounding_mode)
+    else:
+        output = P.Div()(input, other)
     return output
 
 
+def true_divide(dividend, divisor):
+    r"""
+    Alias for :func:`mindspore.ops.div` with :math:`rounding\_mode=None`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+    """
+    return div(dividend, divisor)
+
+
 def divide(input, other, *, rounding_mode=None):
     """
     Alias for :func:`mindspore.ops.div` .
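The rewritten body routes `rounding_mode` through the new `DivMod` primitive and plain division through `P.Div()`. An illustrative doctest of the three modes; `floor` rounds toward negative infinity, `trunc` toward zero:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([7., -7.]), mindspore.float32)
>>> y = Tensor(np.array([2., 2.]), mindspore.float32)
>>> print(ops.div(x, y))                          # true division
[ 3.5 -3.5]
>>> print(ops.div(x, y, rounding_mode='floor'))   # toward -inf
[ 3. -4.]
>>> print(ops.div(x, y, rounding_mode='trunc'))   # toward zero
[ 3. -3.]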
@@ -1162,60 +847,6 @@ def floor_div(x, y):
     return tensor_floordiv(x, y)
 
 
-def floor_divide(input, other):
-    """
-    Divides the first input tensor by the second input tensor element-wise and round down to the closest integer.
-
-    Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
-    The inputs must be two tensors or one tensor and one scalar.
-    When the inputs are two tensors,
-    dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
-    When the inputs are one tensor and one scalar,
-    the scalar could only be a constant.
-
-    .. math::
-
-        out_{i} = \\text{floor}( \\frac{x_i}{y_i})
-
-    where the :math:`floor` indicates the Floor operator, for more details,
-    please refer to the :class:`mindspore.ops.Floor` operator.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Args:
-        input (Union[Tensor, Number, bool]): The first input is a number or
-            a bool or a tensor whose data type is number or bool.
-        other (Union[Tensor, Number, bool]): The second input is a number or
-            a bool when the first input is a tensor, or it can be a tensor whose data type is number or bool.
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If neither `input` nor `other` is a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> import numpy as np
-        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
-        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
-        >>> output = ops.floor_divide(x, y)
-        >>> print(output)
-        [ 0 1 -1]
-        >>> x = Tensor(2.0, mindspore.float32)
-        >>> y = Tensor(2.0, mindspore.float32)
-        >>> output = ops.floor_divide(x, y)
-        >>> print(output)
-        1.0
-    """
-    return tensor_floordiv(input, other)
-
-
 def fmod(input, other):
     """
     Computes the floating-point remainder of the division operation input/other.
@@ -1257,214 +888,6 @@ def fmod(input, other):
     return input - div(input, other, rounding_mode="trunc") * other
 
 
-def pow(input, exponent):
-    r"""
-    Calculates the `exponent` power of each element in `input`.
-
-    .. math::
-
-        out_{i} = input_{i} ^{ exponent_{i}}
-
-    .. note::
-        - Inputs of `input` and `exponent` comply with the implicit type conversion rules to make the
-          data types consistent.
-        - The inputs must be two tensors or one tensor and one scalar.
-        - When the inputs are two tensors,
-          dtypes of them cannot be bool at the same time, and the shapes of them can be broadcast.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
-        exponent (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If `input` and `exponent` is not one of the following: Tensor, number.Number or bool.
-        ValueError: If the shape of `input` and `exponent` are different.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
-        >>> y = 3.0
-        >>> output = ops.pow(x, y)
-        >>> print(output)
-        [ 1. 8. 64.]
-        >>>
-        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
-        >>> y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
-        >>> output = ops.pow(x, y)
-        >>> print(output)
-        [ 1. 16. 64.]
-    """
-    return tensor_pow(input, exponent)
-
-
-def floor_mod(x, y):
-    r"""
-    Computes the remainder of division element-wise. It's a flooring divide.
-    E.g. :math:`floor(x / y) * y + mod(x, y) = x`.
-
-    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
-    The inputs must be two tensors or one tensor and one scalar.
-    When the inputs are two tensors,
-    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
-    When the inputs are one tensor and one scalar,
-    the scalar could only be a constant.
-
-    .. math::
-
-        out_{i} =\text{floor}(x_{i} // y_{i})
-
-    where the :math:`floor` indicates the Floor operator, for more details,
-    please refer to the :class:`mindspore.ops.Floor` operator.
-
-    .. warning::
-        - Data of input `y` should not be 0, or the maximum value of its dtype will be returned.
-        - When the elements of input exceeds 2048 , the accuracy of operator cannot guarantee the requirement of
-          double thousandths in the mini form.
-        - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
-        - If shape is expressed as :math:`(D1, D2 ..., Dn)`, then D1\*D2... \*DN<=1000000,n<=8.
-
-    Args:
-        x (Union[Tensor, Number, bool]): The first input is a number or
-            a bool or a tensor whose data type is number or bool.
-        y (Union[Tensor, Number, bool]): The second input is a number or
-            a bool when the first input is a tensor, or it can be a tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision of the two inputs.
-
-    Raises:
-        TypeError: If neither `x` nor `y` is a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
-        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
-        >>> output = ops.floor_mod(x, y)
-        >>> print(output)
-        [2 1 2]
-    """
-    return tensor_mod(x, y)
-
-
-def exp(input):
-    r"""
-    Returns exponential of a tensor element-wise.
-
-    .. math::
-
-        out_i = e^{x_i}
-
-    Args:
-        input (Tensor): The input tensor.
-
-    Returns:
-        Tensor, has the same shape and dtype as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
-        >>> output = ops.exp(x)
-        >>> print(output)
-        [ 2.718282 7.389056 54.598152]
-    """
-    return tensor_exp(input)
-
-
-def expm1(input):
-    r"""
-    Returns exponential then minus 1 of a tensor element-wise.
-
-    .. math::
-
-        out_i = e^{x_i} - 1
-
-    Args:
-        input (Tensor): The input Tensor.
-
-    Returns:
-        Tensor, has the same shape as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([0.0, 1.0, 2.0, 4.0]), mindspore.float32)
-        >>> output = ops.expm1(x)
-        >>> print(output)
-        [ 0. 1.718282 6.389056 53.598152]
-    """
-    return tensor_expm1(input)
-
-
-def log(input):
-    r"""
-    Returns the natural logarithm of a tensor element-wise.
-
-    .. math::
-        y_i = \log_e(x_i)
-
-    .. warning::
-        If the input value of operator Log is within the range (0, 0.01] or [0.95, 1.05], the output accuracy may
-        be affacted.
-
-    Args:
-        input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
-
-    Returns:
-        Tensor, has the same shape and dtype as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
-        >>> output = ops.log(x)
-        >>> print(output)
-        [0. 0.6931472 1.3862944]
-    """
-    return log_(input)
-
-
 def logdet(input):
     r"""
     Calculates log determinant of one or a batch of square matrices.
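`fmod` stays defined through truncating division, `input - div(input, other, rounding_mode="trunc") * other`, so the result takes the sign of the dividend, unlike `floor_mod`. A small illustrative doctest:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([-4., 5.]), mindspore.float32)
>>> y = Tensor(np.array([3., 3.]), mindspore.float32)
>>> print(ops.fmod(x, y))        # sign follows the dividend
[-1.  2.]
>>> print(ops.floor_mod(x, y))   # sign follows the divisor
[2. 2.]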
@@ -1494,40 +917,6 @@ def logdet(input):
     return log_(det_x)
 
 
-def floor(input):
-    r"""
-    Rounds a tensor down to the closest integer element-wise.
-
-    .. math::
-
-        out_i = \lfloor x_i \rfloor
-
-    Args:
-        input (Tensor): The input tensor, its data type must be float16,
-            float32 or float64.
-
-    Returns:
-        Tensor, has the same shape as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not in [float16, float32, float64].
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
-        >>> output = ops.floor(x)
-        >>> print(output)
-        [ 1. 2. -2.]
-    """
-    return floor_(input)
-
-
 def i0(input):
     r"""
     Alias for :func:`mindspore.ops.bessel_i0` .
@@ -1730,7 +1119,7 @@ def logical_not(input):
         out_{i} = \\neg input_{i}
 
     Args:
-        input (Tensor): The input tensor
+        input (Tensor): The input tensor.
 
     Returns:
         Tensor, the shape is the same as the `input`, and the dtype is bool.
@@ -1750,8 +1139,6 @@ def logical_not(input):
         >>> print(output)
         [False True False]
     """
-    if isinstance(input, Tensor) and input.dtype != mstype.bool_:
-        input = input.astype(mstype.bool_)
     return logical_not_(input)
 
 
@@ -1761,17 +1148,17 @@ def logical_or(input, other):
 
     Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
     The inputs must be two tensors or one tensor and one bool.
-
-
-
-
+
+    When the inputs are two tensors, the shapes of them could be broadcast.
+
+    When the inputs are one tensor and one bool, the bool object could only be a constant.
 
     .. math::
 
-        out_{i} =
+        out_{i} = input_{i} \\vee other_{i}
 
     Note:
-
+        logical_or supports broadcasting.
 
     Args:
         input (Union[Tensor, bool]): The first input is a bool or a tensor whose data type can be implicitly
@@ -1782,9 +1169,6 @@ def logical_or(input, other):
     Returns:
         Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
 
-    Raises:
-        TypeError: If neither `input` nor `other` is a Tensor.
-
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
@@ -1813,10 +1197,6 @@ def logical_or(input, other):
         >>> print(output)
         [True True]
     """
-    if isinstance(input, Tensor) and input.dtype != mstype.bool_:
-        input = input.astype(mstype.bool_)
-    if isinstance(other, Tensor) and other.dtype != mstype.bool_:
-        other = other.astype(mstype.bool_)
     return logical_or_(input, other)
 
 
@@ -1826,17 +1206,17 @@ def logical_and(input, other):
 
     Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
     The inputs must be two tensors or one tensor and one bool.
-
-
-
-
+
+    When the inputs are two tensors, the shapes of them could be broadcast.
+
+    When the inputs are one tensor and one bool, the bool object could only be a constant.
 
     .. math::
 
         out_{i} = input_{i} \wedge other_{i}
 
     Note:
-
+        logical_and supports broadcasting.
 
     Args:
         input (Union[Tensor, bool]): The first input is a bool or a tensor whose data type can be implicitly
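The restored docstring text for `logical_and` (and `logical_or` above) spells out broadcasting between the two inputs; an illustrative doctest, output formatting approximate:

>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([True, False, True]))
>>> y = Tensor(np.array([True]))
>>> print(ops.logical_and(x, y))   # y broadcasts across x
[ True False  True]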
@@ -1878,10 +1258,6 @@ def logical_and(input, other):
         >>> print(output)
         [True False]
     """
-    if isinstance(input, Tensor) and input.dtype != mstype.bool_:
-        input = input.astype(mstype.bool_)
-    if isinstance(other, Tensor) and other.dtype != mstype.bool_:
-        other = other.astype(mstype.bool_)
     return logical_and_(input, other)
 
 
@@ -1998,118 +1374,13 @@ def sgn(input):
         return ops.sign(input)
     modulus = ops.ComplexAbs()(input)
     zeros_mask = modulus.equal(0)
-    non_zero_modulus = ops.masked_fill(modulus, zeros_mask, 1)
+    non_zero_modulus = ops.masked_fill(modulus, zeros_mask, ops.cast(1, modulus.dtype))
     zeros_modulus = ops.zeros_like(non_zero_modulus)
     complex_modulus = ops.Complex()(non_zero_modulus, zeros_modulus)
     res = input / complex_modulus
     return res
 
 
-def sin(input):
-    r"""
-    Computes sine of the input element-wise.
-
-    .. math::
-
-        out_i = \sin(input_i)
-
-    Args:
-        input (Tensor): The shape of tensor is
-            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-    Returns:
-        Tensor, has the same shape and dtype as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16, float32 or float64, complex64, complex128.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
-        >>> output = ops.sin(input)
-        >>> print(output)
-        [0.5810352 0.27635565 0.41687083 0.5810352]
-    """
-    return sin_(input)
-
-
-def sinc(input):
-    r"""
-    Computes the normalized sinc of input.
-
-    .. math::
-
-        out_i = \begin{cases} \frac{sin(\pi input_i)}{\pi input_i} & input_i\neq 0\\
-        1 & input_i=0 \end{cases}
-
-    Args:
-        input (Tensor): The input Tensor.
-
-    Returns:
-        Tensor, has the same shape as the `input`. The dtype of output is float32 when dtype of `input` is in
-        [int, bool]. Otherwise output has the same dtype as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
-        >>> output = ops.sinc(input)
-        >>> print(output)
-        [0.47735003 0.8759357 0.7224278 0.47735003]
-    """
-    return sinc_(input)
-
-
-def cos(input):
-    r"""
-    Computes cosine of input element-wise.
-
-    .. math::
-        out_i = \cos(x_i)
-
-    .. warning::
-        Supported dtypes are float16 and float32, and using float64 may
-        cause a problem of missing precision.
-
-    Args:
-        input (Tensor): The shape of tensor is
-            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-    Returns:
-        Tensor, has the same shape and dtype as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16, float32 or float64, complex64, complex128.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
-        >>> output = ops.cos(x)
-        >>> print(output)
-        [0.971338 0.6748758 0.95233357 0.9959527]
-    """
-    return cos_(input)
-
-
 def cosine_similarity(x1, x2, dim=1, eps=1e-08):
     r"""
     Calculate cosine similarity between `x1` and `x2` along the axis, `dim`.
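The one-line `sgn` change above guards `masked_fill` against a fill-value dtype mismatch: a bare Python `1` need not match the dtype of `modulus`, so it is now cast explicitly before the fill. A small sketch of the pattern with hypothetical data:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

modulus = Tensor(np.array([0.0, 2.0, 5.0]), ms.float32)
zeros_mask = modulus.equal(0)
# Casting the fill value to modulus.dtype keeps masked_fill's dtype check happy
# no matter which float width the modulus tensor carries.
fill = ops.cast(1, modulus.dtype)
non_zero_modulus = ops.masked_fill(modulus, zeros_mask, fill)
print(non_zero_modulus)  # [1. 2. 5.]
```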
@@ -2222,7 +1493,7 @@ def cov(input, *, correction=1, fweights=None, aweights=None):
             Default: ``None`` .
 
     Returns:
-        Tensor,
+        Tensor, the covariance matrix Tensor of `input`.
 
     Raises:
         ValueError: If the dimensions of input is greater than 2.
@@ -2307,10 +1578,7 @@ def t(input):
         input (Tensor): The input Tensor.
 
     Returns:
-        Tensor, the transpose of `input` .
-
-    Raises:
-        ValueError: If the dimension of `input` is larger than 2.
+        Tensor, the transpose of `input` .
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -2326,8 +1594,6 @@ def t(input):
         [2. 3.]
         [3. 4.]]
    """
-    if input.ndim > 2:
-        raise ValueError(f"For t(), the dimension of tensor should be less than 3, but got {input.ndim}.")
     if input.ndim == 2:
         return transpose_(input, (1, 0))
     return input
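Behavioral note on the two `t` hunks: the explicit `ndim > 2` check and the documented `ValueError` are both gone, so per the new body `ops.t` now returns tensors of rank other than 2 unchanged instead of raising. A hedged sketch of the new control flow:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x2d = Tensor(np.array([[1.0, 2.0], [3.0, 4.0]]), ms.float32)
print(ops.t(x2d))        # 2-D input is transposed
x3d = Tensor(np.ones((2, 3, 4)), ms.float32)
print(ops.t(x3d).shape)  # (2, 3, 4): rank-3 input now passes through unchanged
```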
@@ -2386,8 +1652,8 @@ def xlogy(input, other):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/
-            `bool_ <https://www.mindspore.cn/docs/en/
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
         other (Union[Tensor, number.Number, bool]): The second input is a number.Number or
             a bool when the first input is a tensor or a tensor whose data type is number or bool\_.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -2535,74 +1801,6 @@ def polar(abs, angle):  # pylint: disable=redefined-outer-name
     return polar_(abs, angle)
 
 
-def asin(input):
-    r"""
-    Computes arcsine of input tensors element-wise.
-
-    .. math::
-
-        out_i = \sin^{-1}(input_i)
-
-    Args:
-        input (Tensor): The shape of tensor is
-            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-    Returns:
-        Tensor, has the same shape and dtype as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16, float32, float64, complex64, complex128.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
-        >>> output = ops.asin(x)
-        >>> print(output)
-        [0.8330704 0.04001067 0.30469266 0.5943858 ]
-    """
-    return asin_(input)
-
-
-def acos(input):
-    r"""
-    Computes arccosine of input tensors element-wise.
-
-    .. math::
-
-        out_i = \cos^{-1}(input_i)
-
-    Args:
-        input (Tensor): The shape of tensor is
-            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-    Returns:
-        Tensor, has the same shape and dtype as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16, float32 or float64, complex64, complex128.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
-        >>> output = ops.acos(input)
-        >>> print(output)
-        [0.737726 1.5307857 1.2661036 0.9764105]
-    """
-    return acos_(input)
-
-
 def arccos(input):
     """
     Alias for :func:`mindspore.ops.acos` .
@@ -2613,178 +1811,6 @@ def arccos(input):
     return acos(input)
 
 
-def atan(input):
-    r"""
-    Computes the trigonometric inverse tangent of the input element-wise.
-
-    .. math::
-
-        out_i = \tan^{-1}(input_i)
-
-    Args:
-        input (Tensor): The shape of tensor is
-            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-            The data type should be one of the following types: float16, float32.
-
-    Returns:
-        A Tensor, has the same type as the input.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16 or float32.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 0.0]), mindspore.float32)
-        >>> output = ops.atan(x)
-        >>> print(output)
-        [0.7853982 0. ]
-    """
-    return atan_(input)
-
-
-def sinh(input):
-    r"""
-    Computes hyperbolic sine of the input element-wise.
-
-    .. math::
-
-        out_i = \sinh(input_i)
-
-    Args:
-        input (Tensor): The input tensor of hyperbolic sine function.
-
-    Returns:
-        Tensor, has the same shape as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
-        >>> output = ops.sinh(input)
-        >>> print(output)
-        [0.6604918 0.28367308 0.44337422 0.6604918 ]
-    """
-    return sinh_(input)
-
-
-def cosh(input):
-    r"""
-    Computes hyperbolic cosine of input element-wise.
-
-    .. math::
-
-        out_i = \cosh(input_i)
-
-    Args:
-        input (Tensor): The input tensor of hyperbolic cosine function, its data type
-            must be float16, float32, float64, complex64 or complex128.
-
-    Returns:
-        Tensor, has the same shape as `input`.
-
-    Raises:
-        TypeError: If the dtype of `input` is not one of the following types:
-            float16, float32, float64, complex64, complex128.
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
-        >>> output = ops.cosh(x)
-        >>> print(output)
-        [1.0289385 1.364684 1.048436 1.0040528]
-        >>> x = Tensor(2.1, mindspore.float32)
-        >>> output = ops.cosh(x)
-        >>> print(output)
-        4.144313
-    """
-    return cosh_(input)
-
-
-def tanh(input):
-    r"""
-    Computes hyperbolic tangent of input element-wise. The Tanh function is defined as:
-
-    .. math::
-
-        tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
-
-    where :math:`x_i` is an element of the input Tensor.
-
-    Args:
-        input (Tensor): Input of Tanh.
-
-    Returns:
-        Tensor, with the same type and shape as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
-        >>> output = ops.tanh(input)
-        >>> print(output)
-        [0.7615941 0.9640276 0.9950547 0.9993293 0.9999092]
-    """
-    return tanh_(input)
-
-
-def asinh(input):
-    r"""
-    Computes inverse hyperbolic sine of the input element-wise.
-
-    .. math::
-
-        out_i = \sinh^{-1}(input_i)
-
-    Args:
-        input (Tensor): The input tensor of inverse hyperbolic sine function.
-
-    Returns:
-        Tensor, has the same shape and type as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
-        >>> output = ops.asinh(input)
-        >>> print(output)
-        [-2.3124382 1.1947632 1.8184465 5.298342 ]
-    """
-    return asinh_(input)
-
-
 def arcsinh(input):
     r"""
     Alias for :func:`mindspore.ops.asinh`.
@@ -2802,122 +1828,7 @@ def arctanh(input):
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
     """
-    return atanh(input)
-
-
-def acosh(input):
-    r"""
-    Computes inverse hyperbolic cosine of the inputs element-wise.
-
-    .. math::
-
-        out_i = \cosh^{-1}(input_i)
-
-    .. warning::
-        Given an input tensor input, the function computes inverse hyperbolic cosine of every element.
-        Input range is [1, inf].
-
-    Args:
-        input (Tensor): The input tensor of inverse hyperbolic cosine function.
-
-    Returns:
-        Tensor, has the same shape and type as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
-        >>> output = ops.acosh(x)
-        >>> print(output)
-        [0. 0.9624237 1.7627472 5.298292 ]
-    """
-    return acosh_(input)
-
-
-def atanh(input):
-    r"""
-    Computes inverse hyperbolic tangent of the input element-wise.
-
-    .. math::
-
-        out_i = \tanh^{-1}(input_{i})
-
-    Args:
-        input (Tensor): The shape of tensor is
-            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-            The data type should be one of the following types: float16, float32.
-
-    Returns:
-        A Tensor, has the same type as the input.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16 or float32.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([0, -0.5]), mindspore.float32)
-        >>> output = ops.atanh(input)
-        >>> print(output)
-        [ 0. -0.54930615]
-    """
-    return atanh_(input)
-
-
-def atan2(input, other):
-    r"""
-    Returns arctangent of input/other element-wise.
-
-    It returns :math:`\theta\ \in\ [-\pi, \pi]`
-    such that :math:`input = r*\sin(\theta), other = r*\cos(\theta)`, where :math:`r = \sqrt{input^2 + other^2}`.
-
-    Note:
-        - Arg `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
-          If they have different data types, the lower precision data type will be converted to relatively the
-          highest precision data type.
-        - At least one of the `input` and `other` args is Tensor.
-
-    Args:
-        input (Tensor): The input tensor with shape
-            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-            The data type should be one of the following types: float16, float32, float64
-        other (Tensor): The input tensor. It has the same shape with `input` or
-            its shape is able to broadcast with `input`.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is same as `input`.
-
-    Raises:
-        TypeError: If `input` or `other` is not a Tensor.
-        RuntimeError: If the data type of `input` and `other` conversion of Parameter is required
-            when data type conversion of Parameter is not supported.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([0, 1]), mindspore.float32)
-        >>> other = Tensor(np.array([1, 1]), mindspore.float32)
-        >>> output = ops.atan2(input, other)
-        >>> print(output)
-        [0. 0.7853982]
-    """
-    return atan2_(input, other)
+    return atanh(input)
 
 
 def bitwise_and(input, other):
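The two hunks above delete a long run of element-wise wrappers from this module (`atan`, `sinh`, `cosh`, `tanh`, `asinh`, `acosh`, `atanh`, `atan2`, plus `sin`, `sinc`, `cos`, `asin`, `acos` earlier); only thin aliases such as `arccos`, `arcsinh`, and `arctanh` remain here, and `arctanh` still delegates to `atanh`. Presumably the definitions were relocated within the package; the surviving aliases suggest the public `ops` entry points are unchanged in usage:

```python
# Usage sketch; assumes the ops-level names remain exported in 2.3.0rc2,
# which the surviving alias (arctanh -> atanh) suggests.
import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor(np.array([0.0, -0.5]), ms.float32)
print(ops.atanh(x))    # [ 0.         -0.54930615]
print(ops.arctanh(x))  # alias, same result
```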
@@ -3145,51 +2056,6 @@ def bitwise_right_shift(input, other):
     return rs(input, other)
 
 
-def nextafter(input, other):
-    """
-    Returns the next representable floating-point value after `input` towards `other` element-wise.
-
-    Say there are two float32 numbers :math:`a`, :math:`b`, and let the
-    representable delta of float32 datatype is :math:`eps`. If :math:`a < b`,
-    then the next representable of :math:`a` towards :math:`b` is :math:`a+eps`,
-    the next representable of :math:`b` towards :math:`a` is :math:`b-eps`.
-
-    .. math::
-
-        out_{i} = nextafter({input_{i}, other_{i}})
-
-    Args:
-        input (Tensor): The first input tensor. The shape of tensor is :math:`(N,*)` where :math:`*` means,
-            any number of additional dimensions. Must be one of the following types: float32, float64.
-
-        other (Tensor): The second input tensor. The shape of tensor is :math:`(N,*)` where :math:`*` means,
-            any number of additional dimensions. Must be one of the following types: float32, float64.
-
-    Returns:
-        Tensor, has the same shape and data type as `input`.
-
-    Raises:
-        TypeError: If neither `input` nor `other` is a Tensor.
-        TypeError: If the dtype of `input` and `other` is not one of: float32, float64.
-        TypeError: If the dtypes of `input` and `other` are not same.
-        ValueError: If `input`'s shape is not the same as `other`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_ = Tensor(np.asarray([0.0]), mindspore.float32)
-        >>> other_ = Tensor(np.asarray([0.1]), mindspore.float32)
-        >>> output_ = ops.nextafter(input_, other_)
-        >>> print(output_)
-        [1.e-45]
-    """
-    return nextafter_(input, other)
-
-
 def inv(x):
     r"""
     Computes Reciprocal of input tensor element-wise.
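`nextafter` is removed from this module in the same fashion. Its semantics (the next representable float from `input` toward `other`) match `numpy.nextafter`, which reproduces the deleted docstring's example:

```python
import numpy as np

# Stepping from 0.0 toward 0.1 in float32 lands on the smallest subnormal,
# matching the [1.e-45] output in the removed example.
print(np.nextafter(np.float32(0.0), np.float32(0.1)))  # 1e-45
```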
@@ -3285,78 +2151,6 @@ def invert(x):
     return invert_(x)
 
 
-def erf(input):
-    r"""
-    Computes the Gauss error function of `input` element-wise.
-
-    .. math::
-
-        erf(x)=\frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
-
-    Args:
-        input (Tensor): The input tensor of Gaussian error function. Supported dtypes:
-
-            - Ascend: float16, float32.
-            - GPU/CPU: float16, float32, float64.
-
-    Returns:
-        Tensor, has the same shape and dtype as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is neither float16 float32 or float64.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
-        >>> output = ops.erf(x)
-        >>> print(output)
-        [-0.8427168 0. 0.8427168 0.99530876 0.99997765]
-    """
-    return erf_(input)
-
-
-def erfc(input):
-    r"""
-    Computes the complementary error function of `input` element-wise.
-
-    .. math::
-
-        erfc(x) = 1 - \frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
-
-    Args:
-        input (Tensor): The input tensor. Supported dtypes:
-
-            - Ascend: float16, float32.
-            - GPU/CPU: float16, float32, float64.
-
-    Returns:
-        Tensor, has the same shape and dtype as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16, float32 or float64.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
-        >>> output = ops.erfc(x)
-        >>> print(output)
-        [1.8427168e+00 1.0000000e+00 1.5728319e-01 4.6912432e-03 2.2351742e-05]
-    """
-    return erfc_(input)
-
-
 def bessel_j0(x):
     r"""
     Computes Bessel function of the first kind, order 0 element-wise.
@@ -3740,6 +2534,52 @@ def linspace(start, end, steps):
     return linspace_(start, end, steps)
 
 
+def linspace_ext(start, end, steps, *, dtype=None):
+    r"""
+    Returns a Tensor whose value is `steps` evenly spaced in the interval `start` and `end` (including `start` and
+    `end`), and the length of the output Tensor is `steps`.
+
+    .. math::
+        \begin{aligned}
+        &step = (end - start)/(steps - 1)\\
+        &output = [start, start+step, start+2*step, ... , end]
+        \end{aligned}
+
+    Args:
+        start (Union[Tensor, Number]): Start value of interval.
+            If `start` is Tensor, data type must be float32 or float64 and with shape of 0-D.
+        end (Union[Tensor, Number]): Last value of interval.
+            If `end` is Tensor, data type must be float32 or float64 and with shape of 0-D.
+        steps (Union[Tensor, int]): Number of ticks in the interval, inclusive of start and end.
+            Must be positive int number or 0D int32/int64 Tensor.
+
+    Keyword Args:
+        dtype (mindspore.dtype, optional): The output Tensor data type. Default: ``None`` , the data type of output
+            Tensor is float32.
+
+    Returns:
+        Tensor, has the shape of :math:`(steps,)`.
+
+    Raises:
+        TypeError: If dtype of `start` or dtype of `end` is not supported.
+        ValueError: If shape of `start` or shape of `end` is not 0-D.
+        TypeError: If `steps` is not int or 0D int32/int64 Tensor.
+        ValueError: If `steps` is not positive int number.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> start = Tensor(1, mindspore.float32)
+        >>> end = Tensor(10, mindspore.float32)
+        >>> steps = 5
+        >>> output = ops.linspace_ext(start, end, steps, dtype=mindspore.float32)
+        >>> print(output)
+        [ 1.    3.25  5.5   7.75 10.  ]
+    """
+    return _get_cache_prim(LinSpaceExt)()(start, end, steps, dtype)
+
+
 def det(input):
     r"""
     Computes the determinant of one or more square matrices.
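The new `linspace_ext` mirrors `linspace` but adds a `dtype` keyword and routes through a cached `LinSpaceExt` primitive. The step formula in its docstring is the usual one; a quick numpy cross-check of the documented example:

```python
import numpy as np

start, end, steps = 1.0, 10.0, 5
step = (end - start) / (steps - 1)  # 2.25, per the docstring formula
print(np.array([start + i * step for i in range(steps)], dtype=np.float32))
# [ 1.    3.25  5.5   7.75 10.  ]  -- matches the linspace_ext example output
```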
@@ -3792,46 +2632,6 @@ def log_matrix_determinant(input):
     return log_matrix_determinant_(input)
 
 
-def matrix_exp(input):
-    r"""
-    Computes the exponential of a single or a batch of square matrices.
-
-    .. math::
-
-        matrix\_exp(x) = \sum_{k=0}^{\infty} \frac{1}{k !} x^{k} \in \mathbb{K}^{n \times n}
-
-    where :math:`x` corresponds to `input` .
-
-    Args:
-        input (Tensor): The shape of tensor is :math:`(*, n, n)` where * is zero or more batch dimensions.
-            Must be one of the following types: float16, float32, float64, complex64, complex128.
-
-    Returns:
-        Tensor, has the same shape and dtype as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If the dtype of `input` is not one of the following dtype:
-            float16, float32, float64, complex64, complex128.
-        ValueError: If the rank of `input` is less than 2.
-        ValueError: If the size of last two dimensions of `input` are not equal.
-
-    Supported Platforms:
-
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([[1, 2], [0, 1]]), mindspore.float32)
-        >>> output = ops.matrix_exp(input)
-        >>> print(output)
-        [[2.7182817 5.436563 ]
-         [0. 2.7182817]]
-    """
-    return matrix_exp_(input)
-
-
 def lu_solve(b, LU_data, LU_pivots):
     r"""
     Computes the solution y to the system of linear equations :math:`Ay = b` ,
@@ -3879,7 +2679,6 @@ def lu_solve(b, LU_data, LU_pivots):
        [-1.4000001]
        [ 0.6 ]]
    """
-    lu_solve_ = _get_cache_prim(LuSolve)()
     out = lu_solve_(b, LU_data, LU_pivots)
     return out
 
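In the `lu_solve` hunk the per-call `lu_solve_ = _get_cache_prim(LuSolve)()` line disappears while the body keeps using `lu_solve_`, so the cached primitive is presumably now created once at module scope. A minimal sketch of that hoisting pattern (import paths are an assumption, not confirmed by this diff):

```python
# Hypothetical sketch of hoisting a cached primitive out of the function body.
from mindspore.ops._primitive_cache import _get_cache_prim  # assumed path
from mindspore.ops.operations import LuSolve                # assumed path

lu_solve_ = _get_cache_prim(LuSolve)()  # built once at import time

def lu_solve(b, LU_data, LU_pivots):
    # No per-call construction; every call reuses the same primitive instance.
    return lu_solve_(b, LU_data, LU_pivots)
```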
@@ -3973,53 +2772,12 @@ def slogdet(input):
     return log_matrix_determinant_(input)
 
 
-def trace(input):
-    """
-    Returns a new tensor that is the sum of the `input` main trace.
-
-    Note:
-        Input must be matrix, and complex number is not supported at present.
-
-    Args:
-        input (Tensor): A matrix to be calculated. The matrix must be two dimensional.
-
-    Returns:
-        Tensor, with the same data type as input `input`, and size equals to 1.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        ValueError: If the dimension of `input` is not equal to 2.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([[10, 11, 12], [13, 14, 15], [16, 17, 18]]), mindspore.float32)
-        >>> output = ops.trace(input)
-        >>> print(output)
-        42.0
-        >>> input = Tensor(np.arange(1, 13).reshape(3, 4), mindspore.float32)
-        >>> output = ops.trace(input)
-        >>> print(output)
-        18.0
-        >>> input = Tensor(np.arange(12, 0, -1).reshape(4, 3), mindspore.float32)
-        >>> output = ops.trace(input)
-        >>> print(output)
-        24.0
-    """
-    return trace_(input)
-
-
 def truncate_div(x, y):
     """
     Divides the first input tensor by the second input tensor element-wise and rounds the results
     of division towards zero. Equivalent to C-style integer division.
 
     Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
-    The inputs must be two tensors or one tensor and one scalar.
     When the inputs are two tensors,
     dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
     When the inputs are one tensor and one scalar,
@@ -4062,7 +2820,6 @@ def truncate_mod(x, y):
     Returns the remainder of division element-wise.
 
     Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
-    The inputs must be two tensors or one tensor and one scalar.
     When the inputs are two tensors,
     dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
     When the inputs are one tensor and one scalar,
@@ -4188,8 +2945,7 @@ def ldexp(x, other):
 
 def logit(input, eps=None):
     r"""
-    Calculate the logit of a tensor element-wise.
-    When eps is None, input `input` is not clamped.
+    Calculate the logit of a tensor element-wise.
 
     .. math::
         \begin{align}
@@ -4205,7 +2961,7 @@ def logit(input, eps=None):
     Args:
         input (Tensor): The input tensor of type float16, float32 or float64.
         eps (float, optional): The epsilon. If eps is not None, the input clamp bound is defined as [eps, 1-eps],
-            otherwise, the
+            otherwise, the `input` is not clamped. Default: ``None`` .
 
     Returns:
         Tensor, with the same shape and dtype as the `input`.
@@ -4228,59 +2984,14 @@ def logit(input, eps=None):
     """
     if eps is None:
         eps = -1.0
-    logit_ = _get_cache_prim(Logit)(eps)
+    logit_ = _get_cache_prim(P.Logit)(eps)
     return logit_(input)
 
-
 #####################################
 # Comparison Operation Functions.
 #####################################
 
 
-def less(input, other):
-    r"""
-    Computes the boolean value of :math:`input < other` element-wise.
-
-    Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
-    The inputs must be two tensors or one tensor and one scalar.
-    When the inputs are one tensor and one scalar,
-    the scalar could only be a constant.
-
-    .. math::
-
-        out_{i} =\begin{cases}
-            & \text{True, if } input_{i}<other_{i} \\
-            & \text{False, if } input_{i}>=other_{i}
-            \end{cases}
-
-    Args:
-        input (Union[Tensor, Number, bool]): The first input is a number or
-            a bool or a tensor whose data type is number or bool.
-        other (Union[Tensor, Number, bool]): The second input is a number or
-            a bool when the first input is a tensor, or it can be a tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
-
-    Raises:
-        TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
-        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
-        >>> output = ops.less(x, y)
-        >>> print(output)
-        [False False True]
-    """
-    return tensor_lt(input, other)
-
-
 def lt(input, other):
     """
     Alias for :func:`mindspore.ops.less` .
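The `logit` hunks complete a truncated docstring sentence and qualify the primitive as `P.Logit`; the `eps is None -> eps = -1.0` sentinel in the body means "no clamping". The clamped math, sketched in numpy for a quick check (assuming the [eps, 1-eps] bound described in the Args):

```python
import numpy as np

def logit_sketch(x, eps=None):
    # logit(p) = log(p / (1 - p)); with eps given, p is clamped to [eps, 1 - eps].
    if eps is not None:
        x = np.clip(x, eps, 1.0 - eps)
    return np.log(x / (1.0 - x))

x = np.array([0.0, 0.5, 1.0], dtype=np.float32)
print(logit_sketch(x, eps=1e-5))  # finite values; unclamped 0 and 1 would give -inf/inf
```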
@@ -4311,8 +3022,8 @@ def le(input, other):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/
-            `bool_ <https://www.mindspore.cn/docs/en/
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
         other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
             the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -4320,9 +3031,6 @@ def le(input, other):
     Returns:
         Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
 
-    Raises:
-        TypeError: If neither `input` nor `other` is a Tensor.
-
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
@@ -4364,8 +3072,8 @@ def gt(input, other):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/
-            `bool_ <https://www.mindspore.cn/docs/en/
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ .
         other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
             the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -4435,65 +3143,14 @@ def ge(input, other):
         >>> from mindspore import Tensor, ops
         >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
         >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
-        >>> output = ops.ge(x, y)
-        >>> print(output)
-        [True True False]
-    """
-    return tensor_ge(input, other)
-
-
-def eq(input, other):
-    r"""
-    Computes the equivalence between two tensors element-wise.
-
-    The second argument can be a number or a tensor whose shape is broadcastable with the first argument and vise versa.
-
-    .. math::
-
-        out_{i} =\begin{cases}
-            & \text{True, if } input_{i} = other_{i} \\
-            & \text{False, if } input_{i} \ne other_{i}
-            \end{cases}
-
-    Note:
-        - `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
-        - The shapes of the inputs can be broadcasted to each other.
-
-    Args:
-        input (Union[Tensor, Number]): The first input is a number or
-            a tensor whose data type is number.
-        other (Union[Tensor, Number]): The second input is a number when the first input is a tensor.
-            The data type is the same as the first input. If the first input is a number,
-            the second input should be a tensor.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
-
-    Raises:
-        TypeError: If neither `input` nor `other` is a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> # case 1: The shape of two inputs are different
-        >>> x = Tensor([1, 2, 3], mindspore.float32)
-        >>> output = ops.eq(x, 2.0)
-        >>> print(output)
-        [False True False]
-        >>> # case 2: The shape of two inputs are the same
-        >>> x = Tensor([1, 2, 3], mindspore.int32)
-        >>> y = Tensor([1, 2, 4], mindspore.int32)
-        >>> output = ops.eq(x, y)
+        >>> output = ops.ge(x, y)
         >>> print(output)
-        [ True
+        [True True False]
     """
-    return
+    return tensor_ge(input, other)
 
 
-def
+def eq(input, other):
     r"""
     Computes the equivalence between two tensors element-wise.
 
@@ -4512,7 +3169,7 @@ def equal(input, other):
 
     Args:
         input (Union[Tensor, Number]): The first input is a number or
-            a tensor whose data type is number.
+            a tensor whose data type is number.
         other (Union[Tensor, Number]): The second input is a number when the first input is a tensor.
             The data type is the same as the first input. If the first input is a number,
             the second input should be a tensor.
@@ -4531,17 +3188,17 @@ def equal(input, other):
         >>> from mindspore import Tensor, ops
         >>> # case 1: The shape of two inputs are different
         >>> x = Tensor([1, 2, 3], mindspore.float32)
-        >>> output = ops.
+        >>> output = ops.eq(x, 2.0)
         >>> print(output)
         [False True False]
         >>> # case 2: The shape of two inputs are the same
        >>> x = Tensor([1, 2, 3], mindspore.int32)
         >>> y = Tensor([1, 2, 4], mindspore.int32)
-        >>> output = ops.
+        >>> output = ops.eq(x, y)
         >>> print(output)
         [ True  True False]
     """
-    return
+    return equal(input, other)
 
 
 def ne(input, other):
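These hunks mostly repair lines that were cut off on the 2.2.14 side (a bare `return`, a bare `def`, `>>> output = ops.`): `ge` again ends in `return tensor_ge(input, other)`, and `eq` becomes a thin wrapper that delegates to `equal`, keeping the documented broadcasting behavior. A short usage check of the repaired examples:

```python
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor([1, 2, 3], ms.int32)
y = Tensor([1, 2, 4], ms.int32)
print(ops.eq(x, y))  # [ True  True False]
print(ops.eq(x, 2))  # scalar second argument broadcasts: [False  True False]
```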
@@ -4551,7 +3208,6 @@ def ne(input, other):
     Note:
         - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
           consistent.
-        - The inputs must be two tensors or one tensor and one scalar.
         - When the inputs are two tensors, the shapes of them could be broadcast.
         - When the inputs are one tensor and one scalar, the scalar could only be a constant.
         - Broadcasting is supported.
@@ -4574,7 +3230,6 @@ def ne(input, other):
 
     Raises:
         TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
-        TypeError: If neither `input` nor `other` is a Tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -4593,17 +3248,7 @@ def ne(input, other):
         >>> print(output)
         [False False True]
     """
-    return
-
-
-def not_equal(input, other):
-    r"""
-    Alias for :func:`mindspore.ops.ne` .
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-    """
-    return ne(input, other)
+    return not_equal(input, other)
 
 
 def approximate_equal(x, y, tolerance=1e-5):
@@ -4651,47 +3296,7 @@ def approximate_equal(x, y, tolerance=1e-5):
         >>> print(output)
         [ True False False]
     """
-    return P.ApproximateEqual(tolerance)(x, y)
-
-
-def isfinite(x):
-    r"""
-    Determines which elements are finite for each position. If elements are not ``NaN`` , ``-INF`` , ``INF``,
-    they are finite.
-
-    .. math::
-
-        out_i = \begin{cases}
-          & \text{ if } x_{i} = \text{Finite},\ \ True \\
-          & \text{ if } x_{i} \ne \text{Finite},\ \ False
-        \end{cases}
-
-    Args:
-        x (Tensor): The input tensor.
-
-    Returns:
-        Tensor, has the same shape of input, and the dtype is bool.
-
-    Raises:
-        TypeError: If `x` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
-        >>> output = ops.isfinite(x)
-        >>> print(output)
-        [False True False]
-        >>> x = Tensor(2.1, mindspore.float64)
-        >>> output = ops.isfinite(x)
-        >>> print(output)
-        True
-    """
-    return isfinite_(x)
+    return _get_cache_prim(P.ApproximateEqual)(tolerance)(x, y)
 
 
 def isnan(input):
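`approximate_equal` now builds its primitive through `_get_cache_prim(P.ApproximateEqual)(tolerance)` instead of instantiating `P.ApproximateEqual(tolerance)` on every call. `_get_cache_prim` memoizes primitive instances by constructor arguments; a rough sketch of the idea (simplified for illustration, not the package's actual implementation):

```python
# Simplified sketch of a primitive cache: one instance per (class, init args)
# key, reused across calls instead of rebuilt on each invocation.
_PRIM_CACHE = {}

def get_cache_prim_sketch(prim_cls):
    def builder(*args):
        key = (prim_cls, args)
        if key not in _PRIM_CACHE:
            _PRIM_CACHE[key] = prim_cls(*args)
        return _PRIM_CACHE[key]
    return builder
```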
@@ -4741,7 +3346,7 @@ def isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False):
     is “close” to the corresponding element of `other`. Closeness is defined as:
 
     .. math::
-
+        |input-other| ≤ atol + rtol × |other|
 
     Args:
         input (Tensor): First Tensor to compare, with data type belongs to float32, float16, int32.
@@ -4947,61 +3552,6 @@ def fmax(input, other):
     return fmax_(input, other)
 
 
-def maximum(input, other):
-    r"""
-    Computes the maximum of input tensors element-wise.
-
-    Note:
-        - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
-          consistent.
-        - The inputs must be two tensors or one tensor and one scalar.
-        - When the inputs are two tensors,
-          dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
-        - When the inputs are one tensor and one scalar,
-          the scalar could only be a constant.
-        - Broadcasting is supported.
-        - If one of the elements being compared is a NaN, then that element is returned.
-
-    .. math::
-        output_i = \max(input_i, other_i)
-
-    Args:
-        input (Union[Tensor, Number, bool]): The first input is a number or
-            a bool or a tensor whose data type is number or bool.
-        other (Union[Tensor, Number, bool]): The second input is a number or
-            a bool when the first input is a tensor or a tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
-        ValueError: If `input` and `other` are not the same shape.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> # case 1 : same data type
-        >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
-        >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
-        >>> output = ops.maximum(x, y)
-        >>> print(output)
-        [4. 5. 6.]
-        >>> # case 2 : different data type
-        >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.int32)
-        >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
-        >>> output = ops.maximum(x, y)
-        >>> print(output.dtype)
-        Float32
-    """
-    return maximum_(input, other)
-
-
 def fmin(input, other):
     r"""
     Computes the minimum of input tensors element-wise.
@@ -5045,59 +3595,6 @@ def fmin(input, other):
     return fmin_(input, other)
 
 
-def minimum(input, other):
-    r"""
-    Computes the minimum of input tensors element-wise.
-
-    Note:
-        - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
-          consistent.
-        - The inputs must be two tensors or one tensor and one scalar.
-        - When the inputs are two tensors, dtypes of them cannot be bool at the same time.
-        - When the inputs are one tensor and one scalar, the scalar could only be a constant.
-        - Shapes of them are supposed to be broadcast.
-        - If one of the elements being compared is a NaN, then that element is returned.
-
-    .. math::
-        output_i = \min(input_i, other_i)
-
-    Args:
-        input (Union[Tensor, Number, bool]): The first input is a number or
-            a bool or a tensor whose data type is number or bool.
-        other (Union[Tensor, Number, bool]): The second input is a number or
-            a bool when the first input is a tensor or a tensor whose data type is number or bool.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Raises:
-        TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
-        ValueError: If `input` and `other` are not the same shape after broadcast.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> # case 1 : same data type
-        >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
-        >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
-        >>> output = ops.minimum(x, y)
-        >>> print(output)
-        [1. 2. 3.]
-        >>> # case 2 : different data type
-        >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.int32)
-        >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
-        >>> output = ops.minimum(x, y)
-        >>> print(output.dtype)
-        Float32
-    """
-    return minimum_(input, other)
-
-
 def median(input, axis=-1, keepdims=False):
     r"""
     Computes the median and indices of input tensor.
@@ -5150,7 +3647,7 @@ def nanmedian(input, axis=-1, keepdims=False):
 
     .. warning::
         `indices` does not necessarily contain the first occurrence of each median value found in the `input`,
-
+        unless it is unique.
 
     Args:
         input (Tensor): The input tensor to calculate the median and indices.
@@ -5228,6 +3725,8 @@ def nanmean(input, axis=None, keepdims=False, *, dtype=None):
     """
     _check_is_tensor("input", input, "nanmean")
     _check_repeat_in_axis(axis, input.ndim, "nanmean")
+    if input.dtype not in mstype.float_type:
+        raise TypeError(f"For 'nanmean', input should be floating point dtype, but got {type(input)}.")
     nan_sum = nansum(input, axis, keepdims)
     is_num = isnan(input).logical_not()
     is_num = is_num.sum(axis=axis, keepdims=keepdims)
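The new guard in `nanmean` rejects non-floating inputs up front (integer tensors cannot hold NaN, so a NaN-skipping mean is ill-defined for them). The surrounding body divides the NaN-ignoring sum by the count of non-NaN elements; the same logic in numpy terms:

```python
import numpy as np

x = np.array([[1.0, np.nan, 3.0]], dtype=np.float32)
nan_sum = np.nansum(x, axis=1)       # sum ignoring NaN -> [4.]
count = (~np.isnan(x)).sum(axis=1)   # non-NaN count    -> [2]
print(nan_sum / count)               # [2.] == np.nanmean(x, axis=1)
```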
@@ -5323,7 +3822,7 @@ def ormqr(input, tau, other, left=True, transpose=False):
         TypeError: If dtype of `input` or `tau` or `other` is not one of: float64, float32, complex64, complex128.
         ValueError: If the dimension of `input` or `other` is less than 2D.
         ValueError: If rank(`input`) - rank(`tau`) != 1.
-        ValueError: If tau.shape[:-
+        ValueError: If tau.shape[:-1] != input.shape[:-2]
         ValueError: If other.shape[:-2] != input.shape[:-2]
         ValueError: If left == true, other.shape[-2] < tau.shape[-1].
         ValueError: If left == true, other.shape[-2] != input.shape[-2].
@@ -5397,11 +3896,11 @@ def heaviside(input, values):
     Computes the Heaviside step function for each element in input.
 
     .. math::
-
-
-
-
-
+        \text { heaviside }(\text { input, values })=\left\{\begin{array}{ll}
+        0, & \text { if input }<0 \\
+        \text { values, } & \text { if input }=0 \\
+        1, & \text { if input }>0
+        \end{array}\right.
 
     Args:
         input (Tensor): The input tensor. With real number data type.
@@ -5489,9 +3988,6 @@ def logspace(start, end, steps, base=10, *, dtype=mstype.float32):
         &output = [base^{start}, base^{start + 1 * step}, ... , base^{start + (steps-2) * step}, base^{end}]
         \end{aligned}
 
-    Note:
-        - Input `base` must be integer.
-
     Args:
         start (Union[float, Tensor]): Start value of interval.
         end (Union[float, Tensor]): End value of interval.
@@ -5533,6 +4029,8 @@ def logspace(start, end, steps, base=10, *, dtype=mstype.float32):
 def logaddexp(input, other):
     r"""
     Computes the logarithm of the sum of exponentiations of the inputs.
+    This function is useful in statistics where the calculated probabilities of events may be
+    so small as to exceed the range of normal floating point numbers.
 
     .. math::
 
@@ -5573,7 +4071,7 @@ def logaddexp(input, other):
                         f"but got {input.dtype} and {other.dtype}.")
     m = maximum(input, other)
     abs_val = abs(input - other)
-    exp_val = tensor_exp(
+    exp_val = tensor_exp(neg(abs_val))
     y = m + log1p(exp_val)
     return y
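The `logaddexp` hunk completes another truncated line: `exp_val = tensor_exp(neg(abs_val))`. Together with `m = maximum(input, other)` this is the standard overflow-safe rewrite `log(e^a + e^b) = max(a, b) + log1p(e^{-|a - b|})`; `logaddexp2` below receives the same fix in base 2. A numpy check of the identity:

```python
import numpy as np

a, b = 1000.0, 1000.0                      # naive exp(1000) overflows float64
m = np.maximum(a, b)
safe = m + np.log1p(np.exp(-abs(a - b)))   # max-trick form from the hunk
print(safe)                                # 1000.6931..., same as the reference below
print(np.logaddexp(a, b))
```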
@@ -5619,7 +4117,7 @@ def logaddexp2(input, other):
 
     m = maximum(input, other)
     abs_val = abs(input - other)
-    exp2_val = pows(2.,
+    exp2_val = pows(2., neg(abs_val))
     y = m + log2(1. + exp2_val)
     return y
@@ -5902,196 +4400,67 @@ def std_mean(input, axis=None, ddof=0, keepdims=False):
         axis (Union[int, tuple(int)], optional): Specifies the dimensions from which to calculate the standard
             deviation and mean. Only constant value is allowed. Must be in the range [-rank(`input`), rank(`input`)).
             Default: ``None`` , reduce all dimensions.
-        ddof (Union[int, bool], optional): Means Delta Degrees of Freedom.
-            If ddof is an integer, the divisor used in calculations is :math:`N - ddof`,
-            where :math:`N` represents the number of elements.
-            If ddof is True, will use the Bessel correction unbiased estimation.
-            If ddof is False, will through the biased estimation to calculate the standard deviation.
-            Default: ``0`` .
-        keepdims (bool, optional): Whether the output Tensor has dim retained or not.
-            If true, keep these reduced dimensions and the length is 1.
-            If false, don't keep these dimensions. Default: ``False`` .
-
-    Returns:
-        A tuple containing the standard deviation and mean.
-        Suppose the shape of `input` is :math:`(x_0, x_1, ..., x_R)`:
-
-        - If `axis` is () and `keepdims` is set to ``False`` , returns a 0-D Tensor, indicating
-          the standard deviation of all elements in `input`.
-        - If `axis` is int 1 and `keepdims` is set to ``False`` , then the returned Tensor
-          has shape :math:`(x_0, x_2, ..., x_R)`.
-        - If `axis` is tuple(int) or list(int), e.g. (1, 2) and `keepdims` is set to ``False`` ,
-          then the returned Tensor has shape :math:`(x_0, x_2, ..., x_R)`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If `axis` is not one of the following: None, int, tuple.
-        TypeError: If `keepdims` is not a bool.
-        ValueError: If `axis` is out of range.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore as ms
-        >>> input = ms.Tensor([[1, 2, 3, 4], [-1, 1, 4, -10]], ms.float32)
-        >>> output_std, output_mean = ms.ops.std_mean(input, 1, 2, True)
-        >>> print(output_std)
-        [[1.5811388]
-         [7.3824115]]
-        >>> print(output_mean)
-        [[ 2.5]
-         [-1.5]]
-    """
-    axis = _check_var_std_input(input, ddof, keepdims, axis, "std_mean")
-    if ddof in (0, 1):
-        return _get_cache_prim(P.ReduceStd)(axis=axis, unbiased=bool(ddof), keep_dims=keepdims)(input)
-    output = var_mean(input, axis, ddof, keepdims)
-    return tensor_pow(output[0], 0.5), output[1]
-
-
-def real(input):
-    r"""
-    Returns a Tensor that is the real part of the input.
-    If input is real, it is returned unchanged.
-
-    Args:
-        input (Tensor): The input tensor to compute to.
-
-    Returns:
-        Tensor, the shape is the same as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore as ms
-        >>> import mindspore.ops as ops
-        >>> import numpy as np
-        >>> input = ms.Tensor(np.asarray(np.complex(1.3+0.4j)), ms.complex64)
-        >>> output = ops.real(input)
-        >>> print(output)
-        1.3
-    """
-    return real_(input)
-
-
-def reciprocal(input):
-    r"""
-    Returns reciprocal of a tensor element-wise.
-
-    .. math::
-
-        out_{i} = \frac{1}{x_{i}}
-
-    Args:
-        input (Tensor): The input tensor.
-            :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
-
-    Returns:
-        Tensor, has the same shape as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore as ms
-        >>> import mindspore.ops as ops
-        >>> import numpy as np
-        >>> input = ms.Tensor(np.array([1.0, 2.0, 4.0]), ms.float32)
-        >>> output = ops.reciprocal(input)
-        >>> print(output)
-        [1. 0.5 0.25]
-    """
-    if not isinstance(input, Tensor):
-        raise TypeError(f"For reciprocal, the input must be a Tensor, but got {type(input)}.")
-    if not is_complex(input) and not ops.is_floating_point(input):
-        input = ops.cast(input, mstype.float32)
-    return reciprocal_(input)
-
-
-def rsqrt(input):
-    r"""
-    Computes reciprocal of square root of input tensor element-wise.
-
-    .. math::
-
-        out_{i} = \frac{1}{\sqrt{input_{i}}}
-
-    Args:
-        input (Tensor): The input of rsqrt. Its each element must be a non-negative
-            number, if an element is negative, the calculation result is nan.
-
-    Returns:
-        Tensor, has the same shape and dtype as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore as ms
-        >>> import mindspore.ops as ops
-        >>> input = ms.Tensor([-0.0370, 0.2970, 1.5420, -0.9105])
-        >>> output = ops.rsqrt(input)
-        >>> print(output)
-        [ nan 1.8349396 0.80530024 nan]
-    """
-    return rsqrt_(input)
-
-
-def sqrt(x):
-    """
-    Returns sqrt of a tensor element-wise.
-
-    .. math::
-
-        out_{i} = \\sqrt{x_{i}}
+        ddof (Union[int, bool], optional): Means Delta Degrees of Freedom.
+            If ddof is an integer, the divisor used in calculations is :math:`N - ddof`,
+            where :math:`N` represents the number of elements.
+            If ddof is True, will use the Bessel correction unbiased estimation.
+            If ddof is False, will through the biased estimation to calculate the standard deviation.
|
|
4408
|
+
Default: ``0`` .
|
|
4409
|
+
keepdims (bool, optional): Whether the output Tensor has dim retained or not.
|
|
4410
|
+
If true, keep these reduced dimensions and the length is 1.
|
|
4411
|
+
If false, don't keep these dimensions. Default: ``False`` .
|
|
6058
4412
|
|
|
6059
|
-
Args:
|
|
6060
|
-
x (Tensor): The input tensor with a dtype of number.Number.
|
|
6061
4413
|
Returns:
|
|
6062
|
-
|
|
4414
|
+
A tuple containing the standard deviation and mean.
|
|
4415
|
+
Suppose the shape of `input` is :math:`(x_0, x_1, ..., x_R)`:
|
|
4416
|
+
|
|
4417
|
+
- If `axis` is () and `keepdims` is set to ``False`` , returns a 0-D Tensor, indicating
|
|
4418
|
+
the standard deviation of all elements in `input`.
|
|
4419
|
+
- If `axis` is int 1 and `keepdims` is set to ``False`` , then the returned Tensor
|
|
4420
|
+
has shape :math:`(x_0, x_2, ..., x_R)`.
|
|
4421
|
+
- If `axis` is tuple(int) or list(int), e.g. (1, 2) and `keepdims` is set to ``False`` ,
|
|
4422
|
+
then the returned Tensor has shape :math:`(x_0, x_2, ..., x_R)`.
|
|
6063
4423
|
|
|
6064
4424
|
Raises:
|
|
6065
|
-
TypeError: If `
|
|
4425
|
+
TypeError: If `input` is not a Tensor.
|
|
4426
|
+
TypeError: If `axis` is not one of the following: None, int, tuple.
|
|
4427
|
+
TypeError: If `keepdims` is not a bool.
|
|
4428
|
+
ValueError: If `axis` is out of range.
|
|
6066
4429
|
|
|
6067
4430
|
Supported Platforms:
|
|
6068
4431
|
``Ascend`` ``GPU`` ``CPU``
|
|
6069
4432
|
|
|
6070
4433
|
Examples:
|
|
6071
|
-
>>> import mindspore
|
|
6072
|
-
>>>
|
|
6073
|
-
>>>
|
|
6074
|
-
>>>
|
|
6075
|
-
|
|
6076
|
-
|
|
6077
|
-
|
|
4434
|
+
>>> import mindspore as ms
|
|
4435
|
+
>>> input = ms.Tensor([[1, 2, 3, 4], [-1, 1, 4, -10]], ms.float32)
|
|
4436
|
+
>>> output_std, output_mean = ms.ops.std_mean(input, 1, 2, True)
|
|
4437
|
+
>>> print(output_std)
|
|
4438
|
+
[[1.5811388]
|
|
4439
|
+
[7.3824115]]
|
|
4440
|
+
>>> print(output_mean)
|
|
4441
|
+
[[ 2.5]
|
|
4442
|
+
[-1.5]]
|
|
6078
4443
|
"""
|
|
6079
|
-
|
|
4444
|
+
axis = _check_var_std_input(input, ddof, keepdims, axis, "std_mean")
|
|
4445
|
+
if ddof in (0, 1):
|
|
4446
|
+
return _get_cache_prim(P.ReduceStd)(axis=axis, unbiased=bool(ddof), keep_dims=keepdims)(input)
|
|
4447
|
+
output = var_mean(input, axis, ddof, keepdims)
|
|
4448
|
+
return tensor_pow(output[0], 0.5), output[1]
|
|
6080
4449
|
|
|
6081
4450
|
|
|
6082
|
-
def
|
|
6083
|
-
"""
|
|
6084
|
-
Returns
|
|
4451
|
+
def reciprocal(input):
|
|
4452
|
+
r"""
|
|
4453
|
+
Returns reciprocal of a tensor element-wise.
|
|
6085
4454
|
|
|
6086
4455
|
.. math::
|
|
6087
4456
|
|
|
6088
|
-
|
|
4457
|
+
out_{i} = \frac{1}{x_{i}}
|
|
6089
4458
|
|
|
6090
4459
|
Args:
|
|
6091
|
-
input (Tensor): The input tensor
|
|
4460
|
+
input (Tensor): The input tensor.
|
|
6092
4461
|
|
|
6093
4462
|
Returns:
|
|
6094
|
-
Tensor, has the same shape
|
|
4463
|
+
Tensor, has the same shape as the `input`.
|
|
6095
4464
|
|
|
6096
4465
|
Raises:
|
|
6097
4466
|
TypeError: If `input` is not a Tensor.
|
|
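Note on the std_mean rewrite above: ddof of 0 or 1 dispatches to P.ReduceStd (bool(ddof) toggles Bessel's correction), while other integer ddof values derive std from var_mean via tensor_pow(..., 0.5) with divisor N - ddof. The docstring example can be checked against NumPy, which uses the same N - ddof divisor (an illustrative sketch, not part of the diff):

    import numpy as np

    def std_mean_sketch(x, axis=None, ddof=0, keepdims=False):
        # ddof=0/False -> biased estimate; ddof=1/True -> Bessel-corrected
        return (x.std(axis=axis, ddof=int(ddof), keepdims=keepdims),
                x.mean(axis=axis, keepdims=keepdims))

    x = np.array([[1, 2, 3, 4], [-1, 1, 4, -10]], np.float32)
    print(std_mean_sketch(x, axis=1, ddof=2, keepdims=True))
    # std ~= [[1.5811388], [7.3824115]], mean = [[2.5], [-1.5]]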
@@ -6100,15 +4469,15 @@ def square(input):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> import mindspore
+        >>> import mindspore as ms
+        >>> import mindspore.ops as ops
         >>> import numpy as np
-        >>>
-        >>>
-        >>> output = ops.square(input)
+        >>> input = ms.Tensor(np.array([1.0, 2.0, 4.0]), ms.float32)
+        >>> output = ops.reciprocal(input)
         >>> print(output)
-        [1.
+        [1. 0.5 0.25]
     """
-    return
+    return reciprocal_(input)
 
 
 def outer(input, vec2):
@@ -6128,7 +4497,6 @@ def outer(input, vec2):
 
     Raises:
         TypeError: If `input` or `vec2` is not a Tensor.
-        ValueError: If `input` or `vec2` is not an 1-D Tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -6151,10 +4519,6 @@ def outer(input, vec2):
         raise TypeError("the input input must be Tensor!")
     if not isinstance(vec2, (Tensor, Tensor_)):
         raise TypeError("the input vec2 must be Tensor!")
-    if len(input.shape) != 1:
-        raise ValueError("the input input must be a 1-D vector!")
-    if len(vec2.shape) != 1:
-        raise ValueError("the input vec2 must be a 1-D vector!")
     input = input.reshape(-1, 1)
     y = tensor_mul(input, vec2)
     return y
@@ -6194,10 +4558,6 @@ def mv(mat, vec):
         raise TypeError("The input mat must be Tensor.")
     if not isinstance(vec, (Tensor, Tensor_)):
         raise TypeError("The input vec must be Tensor.")
-    if len(mat.shape) != 2:
-        raise ValueError("The input mat must be 2-D Tensor.")
-    if len(vec.shape) != 1:
-        raise ValueError("The input vec must be 1-D Tensor.")
 
     length_vec = get_x_shape(vec.shape)
     vec = reshape_(vec, (length_vec[0], 1))
@@ -6252,10 +4612,6 @@ def addbmm(input, batch1, batch2, *, beta=1, alpha=1):
         [1285. 1377. 1469.]
         [1621. 1745. 1869.]]
     """
-    dim1 = batch1.ndim
-    dim2 = batch2.ndim
-    if dim1 != 3 or dim2 != 3:
-        raise ValueError(f"For 'addbmm', 'batch1' and 'batch2' must be 3D, but got {dim1} and {dim2} respectively.")
     if not isinstance(alpha, (int, float)):
         raise TypeError(f"For 'addbmm', parameter 'alpha' must be an int or float, but got {type(alpha)}.")
     if not isinstance(beta, (int, float)):
@@ -6340,7 +4696,7 @@ def addmv(input, mat, vec, *, beta=1, alpha=1):
 
     Raises:
         TypeError: If `mat`, `vec`, `input` is not a Tensor.
-        TypeError: If inputs `mat`,
+        TypeError: If inputs `mat`, `vec` are not the same dtype.
         ValueError: If `mat` is not a 2-D Tensor.
         ValueError: If `vec` is not a 1-D Tensor.
 
@@ -6363,17 +4719,14 @@ def addmv(input, mat, vec, *, beta=1, alpha=1):
         raise TypeError("For Addmv, inputs must be all tensors.")
     if dtype_(mat) != dtype_(vec):
         raise TypeError("For Addmv, the mat and vec should be the same dtype.")
-    _check_input_1d(vec.shape, "vec", "Addmv")
-    _check_input_2d(mat.shape, "mat", "Addmv")
     _check_input_dtype("input", input_dtype,
                        [mstype.float16, mstype.float32, mstype.float64,
                         mstype.int16, mstype.int32, mstype.int64], "Addmv")
     _check_attr_dtype("alpha", alpha, [int, float, bool], "Addmv")
     _check_attr_dtype("beta", beta, [int, float, bool], "Addmv")
     if input_dtype in (mstype.int16, mstype.int32, mstype.int64):
-
-
-        beta = scalar_cast(beta, mstype.int32)
+        alpha = ops.scalar_cast(alpha, mstype.int64)
+        beta = ops.scalar_cast(beta, mstype.int64)
     out = beta * input + alpha * mv(mat, vec)
     return out
 
@@ -6404,7 +4757,11 @@ def adjoint(x):
         [[0.-0.j 2.-2.j]
         [1.-1.j 3.-3.j]]
     """
-
+    _dtype = x.dtype
+    _t = x.swapaxes(-1, -2)
+    if _dtype in mstype.complex_type:
+        return _t.conj()
+    return _t
 
 
 def addr(x, vec1, vec2, *, beta=1, alpha=1):
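Note: the new adjoint body is a swap of the last two axes plus a conjugate for complex dtypes. A rough NumPy equivalent of the same steps (illustrative sketch only):

    import numpy as np

    def adjoint_sketch(a):
        t = np.swapaxes(a, -1, -2)                 # transpose last two axes
        return np.conj(t) if np.iscomplexobj(a) else t

    a = np.array([[0. + 0.j, 1. + 1.j], [2. + 2.j, 3. + 3.j]])
    print(adjoint_sketch(a))   # [[0.-0.j 2.-2.j] [1.-1.j 3.-3.j]], matching the docstring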
@@ -6460,25 +4817,21 @@ def addr(x, vec1, vec2, *, beta=1, alpha=1):
         raise TypeError("For Addr, inputs must be all tensors.")
     if dtype_(vec1) != dtype_(vec2):
         raise TypeError("For Addr, the vec1 and vec2 should be the same dtype.")
-    _check_input_1d(vec1.shape, "vec1", "Addr")
-    _check_input_1d(vec2.shape, "vec2", "Addr")
     _check_input_dtype("x", input_dtype,
                        [mstype.float16, mstype.float32, mstype.float64,
                         mstype.int16, mstype.int32, mstype.int64], "Addr")
     _check_attr_dtype("alpha", alpha, [int, float, bool], "Addr")
     _check_attr_dtype("beta", beta, [int, float, bool], "Addr")
     if input_dtype in (mstype.int16, mstype.int32, mstype.int64):
-
-
-        beta = scalar_cast(beta, mstype.int32)
-    matmul_op = P.MatMul()
+        alpha = ops.scalar_cast(alpha, mstype.int64)
+        beta = ops.scalar_cast(beta, mstype.int64)
 
     length_vec1 = get_x_shape(vec1.shape)
     vec1 = reshape_(vec1, (length_vec1[0], 1))
     length_vec2 = get_x_shape(vec2.shape)
     vec2 = reshape_(vec2, (1, length_vec2[0]))
 
-    out = beta * x + alpha *
+    out = beta * x + alpha * matmul_(vec1, vec2)
     return out
 
 
@@ -6498,7 +4851,7 @@ def lcm(input, other):
 
     Raises:
         TypeError: If data type `input` or `other` is not int32 or int64.
-        ValueError: If
+        ValueError: If shapes of two inputs are not broadcastable.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -6512,8 +4865,6 @@ def lcm(input, other):
         >>> print(y)
         [14 24 36]
     """
-
-    lcm_ = _get_cache_prim(Lcm)()
     return lcm_(input, other)
 
 
@@ -6546,7 +4897,7 @@ def cdist(x1, x2, p=2.0):
         ValueError: If dimension of `x1` is not the same as `x2`.
         ValueError: If dimension of `x1` or `x2` is neither 2 nor 3.
         ValueError: If the batch shape of `x1` is not the same as the shape of `x2`.
-        ValueError: If the number of columns of `x1` is not the same as
+        ValueError: If the number of columns of `x1` is not the same as that of `x2`.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -6565,41 +4916,6 @@ def cdist(x1, x2, p=2.0):
     return cdist_(x1, x2)
 
 
-def gcd(input, other):
-    """
-    Computes greatest common divisor of input tensors element-wise.
-    The shape of two inputs should be broadcastable, and data type of them should be
-    one of: int32, int64
-
-    Args:
-        input (Tensor): The first input tensor.
-        other (Tensor): The second input tensor.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is one
-        with higher digits in the two inputs.
-
-    Raises:
-        TypeError: If data type `input` or `other` is not int32 or int64.
-        ValueError: If shape of two inputs are not broadcastable.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x1 = Tensor(np.array([7, 8, 9]))
-        >>> x2 = Tensor(np.array([14, 6, 12]))
-        >>> y = ops.gcd(x1, x2)
-        >>> print(y)
-        [7 2 3]
-    """
-
-    gcd_ = _get_cache_prim(Gcd)()
-    return gcd_(input, other)
-
-
 def lerp(input, end, weight):
     """
     Does a linear interpolation of two tensors input and end based on a float or tensor weight.
@@ -6964,8 +5280,7 @@ def frac(x):
         >>> print(output)
         [ 0. 0.1992 -0.5 ]
     """
-
-    return frac_op(x, 1)
+    return mod_(x, 1)
 
 
 #####################################
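Note: frac is now a single modulo by 1, and the docstring example shows the fractional part keeping the sign of the input (frac of -1.5 is -0.5). That is truncated-mod (C fmod) behavior rather than Python/NumPy floor-mod, as this sketch illustrates:

    import numpy as np

    x = np.array([2.0, 1.1992, -1.5], np.float32)
    print(np.fmod(x, 1.0))   # [ 0.      0.1992 -0.5   ] -- sign follows the input
    print(np.mod(x, 1.0))    # floor mod would yield 0.5 for -1.5 instead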
@@ -7014,6 +5329,7 @@ def cummin(input, axis):
 
     Raises:
         TypeError: If `input` is not a Tensor.
+        TypeError: If `input` is a Tensor, but the type is complex or bool.
         TypeError: If `axis` is not an int.
         ValueError: If `axis` is out the range of `[-input.ndim, input.ndim - 1]`.
 
@@ -7030,6 +5346,8 @@ def cummin(input, axis):
         >>> print(output[1])
         [0 1 1 1 4 4]
     """
+    if isinstance(axis, bool):
+        raise TypeError(f"For 'cummin', the date type of 'axis' must be Int, but got {axis}.")
     cummin_op = _get_cache_prim(Cummin)(axis=0)
     if axis == 0:
         out1, out2 = cummin_op(input)
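Note: the new isinstance(axis, bool) guard in cummin exists because bool is a subclass of int in Python, so cummin(x, True) would otherwise pass an integer check and silently behave like axis=1:

    # why a plain integer check is not enough for 'axis'
    print(isinstance(True, int))   # True
    print(True == 1)               # True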
@@ -7043,55 +5361,6 @@ def cummin(input, axis):
     return [out1, out2]
 
 
-def cummax(input, axis):
-    r"""
-    Returns a tuple (values,indices) where 'values' is the cumulative maximum value of input Tensor `input`
-    along the dimension `axis`, and `indices` is the index location of each maximum value.
-
-    .. math::
-        \begin{array}{ll} \\
-            y_{i} = \max(x_{1}, x_{2}, ... , x_{i})
-        \end{array}
-
-    Args:
-        input (Tensor): The input Tensor, rank of `input` > 0.
-        axis (int): The dimension to do the operation over. The value of `axis` must be in the range
-            `[-input.ndim, input.ndim - 1]`.
-
-    Returns:
-        tuple [Tensor], tuple of 2 Tensors, containing the cumulative maximum of elements and the index.
-        The shape of each output tensor is the same as input `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If `axis` is not an int.
-        ValueError: If `axis` is out the range of `[-input.ndim, input.ndim - 1]`.
-
-    Supported Platforms:
-        ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor
-        >>> import mindspore.ops as ops
-        >>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
-        >>> output = ops.cummax(x, axis=0)
-        >>> print(output[0])
-        [[ 3. 4. 6. 10.]
-        [ 3. 6. 7. 10.]
-        [ 4. 6. 8. 10.]
-        [ 4. 6. 8. 10.]]
-        >>> print(output[1])
-        [[0 0 0 0]
-        [0 1 1 0]
-        [2 1 2 0]
-        [2 1 2 0]]
-    """
-    _cummax = _get_cache_prim(ops.Cummax)(axis=axis)
-    return _cummax(input)
-
-
 def cumsum(x, axis, dtype=None):
     """
     Computes the cumulative sum of input Tensor along `axis`.
@@ -7105,7 +5374,7 @@ def cumsum(x, axis, dtype=None):
         For the case of dynamic shape, the dtype of `x` only support int32, float16 or float32.
 
     Args:
-        x (Tensor): The input Tensor of shape :math:`(N
+        x (Tensor): The input Tensor of shape :math:`(N, *)` where :math:`*` means, any number
             of additional dimensions.
         axis (int): Axis along which the cumulative sum is computed.
         dtype (:class:`mindspore.dtype`, optional): The desired dtype of returned Tensor. If specified,
@@ -7176,8 +5445,8 @@ def sparse_segment_mean(x, indices, segment_ids):
         TypeError: If the dtype of `x` is not one of the following dtype: float16, float32, float64.
         TypeError: If the dtype of `indices` and `segment_ids` are not one of the following dtype: int32, int64.
         TypeError: If the dtype of `indices` and `segment_ids` are not the same.
-        ValueError: If the shape of `x`,
-        ValueError: If the size of
+        ValueError: If the shape of `x`, `indices` or `segment_ids` don't meet the parameter description.
+        ValueError: If the size of `indices` and `segment_ids` are not the same.
 
     Supported Platforms:
         ``GPU`` ``CPU``
@@ -7259,6 +5528,8 @@ def block_diag(*inputs):
                 f"{ary.ndim}"
             )
 
+    if not inputs:
+        raise RuntimeError("For 'block_diag', the input is empty.")
     arys = [to_2d(ary) for ary in inputs]
     matrix = [ops.concat(to_col_block(arys, idx, ary)) for idx, ary in enumerate(arys)]
     return ops.concat(matrix, 1)
@@ -7277,7 +5548,7 @@ def atleast_1d(inputs):
     Tensor or list[Tensor]. If returned a list, every element `a` in that list satisfies `a.ndim >= 1`.
 
     Raises:
-        TypeError: If the `
+        TypeError: If the `inputs` is not a tensor or a list of tensors.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -7359,7 +5630,7 @@ def dstack(inputs):
             trans_inputs += (tensor,)
     if not trans_inputs:
         raise ValueError("For 'dstack', at least one tensor is needed to concatenate.")
-    return P.Concat(2)(trans_inputs)
+    return _get_cache_prim(P.Concat)(2)(trans_inputs)
 
 
 @_primexpr
@@ -7377,7 +5648,7 @@ def diff(x, n=1, axis=-1, prepend=None, append=None):
 
     Note:
         Zero-shaped Tensor is not supported, a value error is raised if
-        an empty Tensor is encountered. Any dimension of
+        an empty Tensor is encountered. Any dimension of a Tensor is 0, which is considered
         an empty Tensor. Tensor with shape of :math:`(0,)`, :math:`(1, 2, 0, 4)` are all
         empty Tensor.
 
@@ -7556,7 +5827,7 @@ def atleast_2d(inputs):
     Tensor or list[Tensor]. If returned a list, every element `a` in that list satisfies `a.ndim >= 2` .
 
     Raises:
-        TypeError: If the `
+        TypeError: If the `inputs` is not a tensor or a list of tensors.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -7616,9 +5887,9 @@ def cartesian_prod(*inputs):
         >>> print(len(out))
         60
     """
-    meshgrid = P.Meshgrid(indexing="ij")
+    meshgrid = _get_cache_prim(P.Meshgrid)(indexing="ij")
     meshgrid_output = meshgrid(inputs)
-    stack = P.Stack(axis=-1)
+    stack = _get_cache_prim(P.Stack)(axis=-1)
     stack_output = stack(meshgrid_output)
     return reshape_(stack_output, (-1, len(inputs)))
 
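Note: several hunks in this region (dstack and cartesian_prod above, vstack below) swap direct primitive construction such as P.Concat(2) for _get_cache_prim(P.Concat)(2). Presumably this reuses one primitive instance per (class, constructor-args) key instead of rebuilding the operator on every call; a minimal sketch of that caching idea, assuming hashable constructor arguments (names here are illustrative, not mindspore internals):

    _PRIM_CACHE = {}

    def get_cache_prim_sketch(prim_cls):
        def build(*args):
            key = (prim_cls, args)
            if key not in _PRIM_CACHE:
                _PRIM_CACHE[key] = prim_cls(*args)   # constructed once, reused after
            return _PRIM_CACHE[key]
        return build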
@@ -7639,7 +5910,7 @@ def atleast_3d(inputs):
     a 2-D Tensor of shape :math:`(M, N)` becomes a tensor of shape :math:`(M, N, 1)`.
 
     Raises:
-        TypeError: If the `
+        TypeError: If the `inputs` is not a tensor or a list of tensors.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -7674,9 +5945,9 @@ def atleast_3d(inputs):
         if ndim == 0:
             return reshape_(arr, (1, 1, 1))
         if ndim == 1:
-            return reshape_(arr, (1,
+            return reshape_(arr, (1, size_(arr), 1))
         if ndim == 2:
-            return reshape_(arr,
+            return reshape_(arr, shape_(arr) + (1,))
         return arr
 
     if isinstance(inputs, Tensor):
@@ -7768,7 +6039,7 @@ def vstack(inputs):
             msg = f"For 'vstack', Tensor is required, but got {type(tensor)}"
             raise TypeError(msg)
         if tensor.ndim <= 1:
-            shape =
+            shape = shape_(tensor)
             if isinstance(shape, int):
                 shape = (shape,)
             ndim_diff = 2 - len(shape)
@@ -7778,7 +6049,7 @@ def vstack(inputs):
         trans_tup += (tensor,)
     if not trans_tup:
         raise ValueError("For 'vstack', need at least one tensor to concatenate.")
-    out = P.Concat(0)(trans_tup)
+    out = _get_cache_prim(P.Concat)(0)(trans_tup)
     return out
 
 
@@ -7796,8 +6067,8 @@ def combinations(input, r=2, with_replacement=False):
     r"""
     Returns all r-length subsequences of input Tensor.
 
-    When `with_replacement` is set to
-    `itertools.combinations`, and when `with_replacement` is set to
+    When `with_replacement` is set to ``False``, it works similar to Python's
+    `itertools.combinations`, and when `with_replacement` is set to ``True``,
     it behaves like `itertools.combinations_with_replacement`.
 
     Args:
@@ -7860,7 +6131,7 @@ def combinations(input, r=2, with_replacement=False):
         return None
 
     def _combinations_with_replacement(iterable, r):
-        lst =
+        lst = Tensor_([])
         pool = tuple(iterable)
         n = len(pool)
         if not n and r:
@@ -7974,7 +6245,7 @@ def copysign(x, other):
         """Broadcasts x from current shape to shape"""
         ndim_to = len(shape)
         x = _expand(x, ndim_to)
-        return _broadcast_to(x,
+        return _broadcast_to(x, shape_(x), shape, ndim_to)
 
     if not isinstance(x, Tensor):
         raise TypeError("Tensor is expected, but got " + f"{type(x)}")
@@ -7985,7 +6256,7 @@ def copysign(x, other):
 
     if not isinstance(other, Tensor):
         other = _type_convert(Tensor, other)
-    other = _broadcast_to_shape(other,
+    other = _broadcast_to_shape(other, shape_(x))
 
     if _check_same_type(dtype_(x), mstype.bool_):
         raise TypeError("copysign does not accept dtype bool.")
@@ -8005,9 +6276,9 @@ def copysign(x, other):
         if x.dtype in (mstype.float16, mstype.float32, mstype.float64)
         else x.astype("float32")
     )
-    pos_tensor =
-    less_zero =
-    return
+    pos_tensor = absolute_(x_float)
+    less_zero = tensor_lt(other, 0)
+    return select_(less_zero, neg(pos_tensor), pos_tensor)
 
 
 def hann_window(window_length, periodic=True, *, dtype=None):
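Note: the restored copysign tail takes the absolute value of the float-cast input and negates it wherever other is negative, i.e. a select over a sign mask. NumPy sketch of the same three lines:

    import numpy as np

    def copysign_sketch(x, other):
        pos_tensor = np.abs(x.astype(np.float32))             # absolute_(x_float)
        less_zero = other < 0                                 # tensor_lt(other, 0)
        return np.where(less_zero, -pos_tensor, pos_tensor)   # select_(...)

    print(copysign_sketch(np.array([1.0, -2.0]), np.array([-1.0, 3.0])))  # [-1.  2.]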
@@ -8067,7 +6338,7 @@ def hann_window(window_length, periodic=True, *, dtype=None):
     w = 0.5 - 0.5 * np.cos(2 * math.pi / (window_length - 1) * n)
 
     if dtype is not None:
-        w =
+        w = cast_(ms.tensor(w), dtype)
     return Tensor(w[:-1]) if periodic else Tensor(w)
 
 
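Note: in hann_window the changed line only restores the dtype cast; the surrounding context shows the usual periodic-window trick, where the window is evaluated and the last sample is dropped (w[:-1]) when periodic=True (the part of the function that presumably extends window_length by one beforehand is outside this hunk). NumPy analogue of that trick, as a sketch:

    import numpy as np

    def hann_sketch(n, periodic=True):
        # np.hanning(m) = 0.5 - 0.5 * cos(2*pi*k / (m - 1)), k = 0..m-1
        return np.hanning(n + 1)[:-1] if periodic else np.hanning(n)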
@@ -8091,7 +6362,7 @@ def logcumsumexp(input, axis):
     Args:
         input (Tensor) - The input tensor. Must be one of the following types: float16, float32, float64.
         axis (int) - Describing the dimension to compute the cumulative product.
-            Must be in the range [-rank(
+            Must be in the range [-rank(input), rank(input)).
 
     Returns:
         Tensor, has the same dtype and shape as the `input`.
@@ -8118,8 +6389,7 @@ def logcumsumexp(input, axis):
         raise TypeError(
             f"For 'logcumsumexp', 'axis' must be int type, but got {type(axis)}"
         )
-
-    return logcumsumexp_(input, Tensor(axis))
+    return cumulative_logsumexp_(input, Tensor(axis))
 
 
 def logsumexp(input, axis, keep_dims=False):
@@ -8176,34 +6446,40 @@ def amin(input, axis=None, keepdims=False, *, initial=None, where=None):
     reduce a dimension of `input` along specified `axis`. `keepdims` determines whether the dimensions of
     output and input are the same.
 
+    Note:
+        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
+
     Args:
         input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
             :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
-        axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Default: ``None`` , reduce all
-            Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
-        keepdims (bool): If
+        axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` , reduce all
+            dimensions. Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
+        keepdims (bool): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` , don't keep
            these dimensions. Default: ``False`` .
 
     Keyword Args:
         initial (scalar, optional): The minimum value of an output element. Must be present to allow computation
             on empty slice. Default: ``None`` .
-        where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input`
-
-            the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates True by
+        where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input` with the
+            value in `initial`. If ``True`` , do not replace, otherwise replace. For the index of ``True`` in `where`,
+            the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates ``True`` by
+            default.
 
     Returns:
         Tensor, has the same data type as input tensor.
 
-        - If `axis` is None, and `keepdims` is False,
+        - If `axis` is ``None`` , and `keepdims` is ``False`` ,
           the output is a 0-D tensor representing the product of all elements in the input tensor.
-        - If `axis` is int, set as 1, and `keepdims` is False,
+        - If `axis` is int, set as 1, and `keepdims` is ``False`` ,
           the shape of output is :math:`(x_0, x_2, ..., x_R)`.
-        - If `axis` is tuple(int), set as (1, 2), and `keepdims` is False,
+        - If `axis` is tuple(int), set as (1, 2), and `keepdims` is ``False`` ,
+          the shape of output is :math:`(x_0, x_3, ..., x_R)`.
+        - If `axis` is 1-D Tensor, set as [1, 2], and `keepdims` is ``False`` ,
           the shape of output is :math:`(x_0, x_3, ..., x_R)`.
 
     Raises:
         TypeError: If `input` is not a Tensor.
-        TypeError: If `axis` is not one of the following: int, tuple or
+        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
         TypeError: If `keepdims` is not a bool.
         ValueError: If `axis` is out of range.
 
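Note: the reworded initial/where documentation above follows the NumPy reduction interface, where `where` masks elements out of the reduction and `initial` supplies the fallback for masked or empty slices. mindspore's exact semantics are as stated in the docstring; the NumPy analogue for comparison:

    import numpy as np

    x = np.array([[2., 5.], [30., 9.]])
    mask = np.array([[True, False], [True, True]])
    # masked-out entries fall back to `initial`
    print(np.amin(x, axis=1, keepdims=True, initial=10., where=mask))  # [[2.] [9.]]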
@@ -8280,33 +6556,39 @@ def amax(input, axis=None, keepdims=False, *, initial=None, where=None):
     reduce a dimension of `input` along specified `axis`. `keepdims` determines whether the dimensions of
     output and input are the same.
 
+    Note:
+        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
+
     Args:
         input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
             :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
-        axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Default: ``None`` , reduce all
-            Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
-        keepdims (bool): If
-            dimensions. Default: ``False`` .
+        axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` , reduce all
+            dimensions. Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
+        keepdims (bool): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` , don't keep
+            these dimensions. Default: ``False`` .
 
     Keyword Args:
         initial (scalar, optional): The minimum value of an output element. Must be present to allow computation
             on empty slice. Default: ``None`` .
-        where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input`
-
-            the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates True by
+        where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input` with the
+            value in `initial`. If ``True`` , do not replace, otherwise replace. For the index of ``True`` in `where`,
+            the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates ``True`` by
+            default.
 
     Returns:
         Tensor, has the same data type as input tensor.
 
-        - If `axis` is None, and `keepdims` is False, the output is a 0-D tensor representing the product of
-          elements in the input tensor.
-        - If `axis` is int, set as 1, and `keepdims` is False, the shape of output is :math:`(x_0, x_2, ..., x_R)`.
-        - If `axis` is tuple(int), set as (1, 2), and `keepdims` is False, the shape of output is
+        - If `axis` is ``None`` , and `keepdims` is ``False`` , the output is a 0-D tensor representing the product of
+          all elements in the input tensor.
+        - If `axis` is int, set as 1, and `keepdims` is ``False`` , the shape of output is :math:`(x_0, x_2, ..., x_R)`.
+        - If `axis` is tuple(int), set as (1, 2), and `keepdims` is ``False`` , the shape of output is
+          :math:`(x_0, x_3, ..., x_R)`.
+        - If `axis` is 1-D Tensor, set as [1, 2], and `keepdims` is ``False`` , the shape of output is
           :math:`(x_0, x_3, ..., x_R)`.
 
     Raises:
         TypeError: If `input` is not a Tensor.
-        TypeError: If `axis` is not one of the following: int, tuple or
+        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
         TypeError: If `keepdims` is not a bool.
         ValueError: If `axis` is out of range.
 
@@ -8444,33 +6726,126 @@ def mean(x, axis=None, keep_dims=False):
     return _get_cache_prim(P.ReduceMean)(keep_dims)(x, axis)
 
 
-def
+def mean_ext(input, axis=None, keep_dims=False, dtype=None):
+    r"""
+    Reduces all dimension of a tensor by averaging all elements in the dimension, by default.
+    And reduce a dimension of `input` along the specified `axis`. `keep_dims`
+    determines whether the dimensions of the output and input are the same.
+
+    Note:
+        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
+
+    Args:
+        input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
+            :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
+        axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` ,
+            reduce all dimensions. Only constant value is allowed. Assume the rank of `input` is r,
+            and the value range is [-r,r).
+        keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
+            If ``False`` , don't keep these dimensions. Default: ``False`` .
+        dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
+
+    Returns:
+        Tensor, has the same data type as input tensor.
+
+        - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
+          the output is a 0-D tensor representing the product of all elements in the input tensor.
+        - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
+          the shape of output is :math:`(x_0, x_2, ..., x_R)`.
+        - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
+          the shape of output is :math:`(x_0, x_3, ..., x_R)`.
+        - If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
+          the shape of output is :math:`(x_0, x_3, ..., x_R)`.
+
+    Raises:
+        TypeError: If `x` is not a Tensor.
+        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
+        TypeError: If `keep_dims` is not a bool.
+        ValueError: If `axis` is out of range.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
+        >>> output = ops.mean(x, 1, keep_dims=True)
+        >>> result = output.shape
+        >>> print(result)
+        (3, 1, 5, 6)
+        >>> # case 1: Reduces a dimension by averaging all elements in the dimension.
+        >>> x = Tensor(np.array([[[2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]],
+        ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
+        ... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
+        ... mindspore.float32)
+        >>> output = ops.mean(x)
+        >>> print(output)
+        5.0
+        >>> print(output.shape)
+        ()
+        >>> # case 2: Reduces a dimension along the axis 0
+        >>> output = ops.mean(x, 0, True)
+        >>> print(output)
+        [[[4. 4. 4. 4. 4. 4.]
+        [5. 5. 5. 5. 5. 5.]
+        [6. 6. 6. 6. 6. 6.]]]
+        >>> # case 3: Reduces a dimension along the axis 1
+        >>> output = ops.mean(x, 1, True)
+        >>> print(output)
+        [[[2. 2. 2. 2. 2. 2.]]
+        [[5. 5. 5. 5. 5. 5.]]
+        [[8. 8. 8. 8. 8. 8.]]]
+        >>> # case 4: Reduces a dimension along the axis 2
+        >>> output = ops.mean(x, 2, True)
+        >>> print(output)
+        [[[ 2.]
+        [ 2.]
+        [ 2.]]
+        [[ 4.]
+        [ 5.]
+        [ 6.]]
+        [[ 6.]
+        [ 8.]
+        [10.]]]
+    """
+    return mean_ext_op(input, axis, keep_dims, dtype)
+
+
+def prod(input, axis=None, keep_dims=False, dtype=None):
     r"""
     Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can
-    reduce a dimension of `input` along the axis
-    by controlling `keep_dims`.
+    reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
+    same by controlling `keep_dims`.
+
+    Note:
+        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
 
     Args:
         input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
-
-        axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Default: ``None`` , reduce all
-
-        keep_dims (bool): If
-
+            :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
+        axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` , reduce all
+            dimensions. Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
+        keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
+            If ``False`` , don't keep these dimensions. Default: ``False`` .
+        dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
 
     Returns:
         Tensor, has the same data type as input tensor.
 
-        - If `axis` is None, and `keep_dims` is False,
+        - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
           the output is a 0-D tensor representing the product of all elements in the input tensor.
-        - If `axis` is int, set as 1, and `keep_dims` is False,
+        - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
           the shape of output is :math:`(input_0, input_2, ..., input_R)`.
-        - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is False,
+        - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
+          the shape of output is :math:`(input_0, input_3, ..., input_R)`.
+        - If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
           the shape of output is :math:`(input_0, input_3, ..., input_R)`.
 
     Raises:
         TypeError: If `input` is not a Tensor.
-        TypeError: If `axis` is not one of the following: int, tuple or
+        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
         TypeError: If `keep_dims` is not a bool.
         ValueError: If `axis` is out of range.
 
@@ -8520,8 +6895,10 @@ def prod(input, axis=None, keep_dims=False):
         [2.62144e+05]
         [5.31441e+05]]]
     """
-    if axis
-        axis
+    if not isinstance(axis, (tuple, list, Tensor)):
+        return prod_ext_op(input, axis, keep_dims, dtype)
+    if dtype is not None:
+        input = input.astype(dtype)
     return _get_cache_prim(P.ReduceProd)(keep_dims)(input, axis)
 
 
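Note: prod now dispatches on the axis type: an int or None axis goes to the new prod_ext_op (which accepts dtype directly), while tuple/list/Tensor axes keep the old ReduceProd path after an explicit astype. Both paths compute what NumPy's prod does, as a sketch:

    import numpy as np

    def prod_sketch(x, axis=None, keep_dims=False, dtype=None):
        return np.prod(x, axis=axis, keepdims=keep_dims, dtype=dtype)

    print(prod_sketch(np.arange(1, 7).reshape(2, 3), axis=1))  # [  6 120]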
@@ -8884,6 +7261,84 @@ def _compute_vector_norm_inf(x, dim, keepdims, norm_func):
     return ret_norm
 
 
+def norm_ext(A, ord=None, dim=None, keepdim=False, *, dtype=None):
+    r"""
+    Returns the matrix norm or vector norm of a given tensor.
+
+    `ord` is the calculation mode of norm. The following norm modes are supported.
+
+    ======================  ================================  ==========================================
+    `ord`                   norm for matrices                 norm for vectors
+    ======================  ================================  ==========================================
+    `None` (default)        Frobenius norm                    `2`-norm (see below)
+    `'fro'`                 Frobenius norm                    -- not supported --
+    `'nuc'`                 nuclear norm                      -- not supported --
+    `inf`                   :math:`max(sum(abs(x), dim=1))`   :math:`max(abs(x))`
+    `-inf`                  :math:`min(sum(abs(x), dim=1))`   :math:`min(abs(x))`
+    `0`                     -- not supported --               :math:`sum(x != 0)`
+    `1`                     :math:`max(sum(abs(x), dim=0))`   as below
+    `-1`                    :math:`min(sum(abs(x), dim=0))`   as below
+    `2`                     largest singular value            as below
+    `-2`                    smallest singular value           as below
+    other `int` or `float`  -- not supported --               :math:`sum(abs(x)^{ord})^{(1 / ord)}`
+    ======================  ================================  ==========================================
+
+    Args:
+        A (Tensor): Tensor of shape :math:`(*, n)` or :math:`(*, m, n)` where * is zero or more batch dimensions.
+        ord (Union[int, float, inf, -inf, 'fro', 'nuc'], optional): norm's mode. refer to the table above for
+            behavior. Default: ``None`` .
+        dim (Union[int, Tuple(int)], optional): calculate the dimension of vector norm or matrix norm.
+            Default: ``None`` .
+
+            - When `dim` is int, it will be calculated by vector norm.
+
+            - When `dim` is a 2-tuple, it will be calculated by matrix norm.
+
+            - If `dim` is None and `ord` is None, `A` will be flattened to 1D and the 2-norm
+              of the vector will be calculated.
+
+            - If `dim` is None and `ord` is not None, `A` must be 1D or 2D.
+
+        keepdim (bool): whether the output Tensor retains the original dimension. Default: ``False`` .
+
+    Keyword Args:
+        dtype (:class:`mindspore.dtype`, optional): When set, `A` will be converted to the specified type,
+            `dtype`, before execution, and dtype of returned Tensor will also be `dtype`. Default: ``None`` .
+
+    Returns:
+        Tensor, the result of norm calculation on the specified dimension, `dim`, has the same dtype as `A`.
+
+    Raises:
+        ValueError: If `dim` is out of range.
+        TypeError: If `dim` is neither an int nor a tuple of int.
+        TypeError: If `A` is a vector and `ord` is a str.
+        ValueError: If `A` is a matrices and `ord` is not in valid mode.
+        ValueError: If `A` is a matrices and `ord` is an integer but not in [1, -1, 2, -2].
+        ValueError: If two elements of `dim` is same after normalize.
+        ValueError: If any elements of `dim` is out of range.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Note:
+        Currently, it only support `ops.norm_ext(A)`.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> import mindspore.ops as ops
+        >>> data_range = ops.arange(-13, 13, dtype=ms.float32)
+        >>> # Exclude 0 from original data for 0 is invalid input when `ord` is negative.
+        >>> x = data_range[data_range != 0]
+        >>> y = x.reshape(5, 5)
+        >>> print(ops.norm_ext(x))
+        38.327538
+        >>> print(ops.norm(x, 0))
+        25.0
+    """
+    norm_ext_op = Norm()
+    return norm_ext_op(A, ord, dim, keepdim, dtype)
+
+
 def vector_norm(x, ord=2, axis=None, keepdims=False, *, dtype=None):
     r"""
     Returns the vector norm of the given tensor on the specified dimensions.
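Note: per the table above, norm_ext with the default ord=None and dim=None flattens A and takes the vector 2-norm; ord=0 counts nonzero entries. The docstring's example values can be reproduced with NumPy:

    import numpy as np

    x = np.arange(-13, 13, dtype=np.float32)
    x = x[x != 0]                      # same data as the docstring example
    print(np.linalg.norm(x.ravel()))   # 38.327538..., the flattened 2-norm
    print(np.count_nonzero(x))         # 25, matching the ord=0 result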
@@ -9252,7 +7707,7 @@ def _check_logits_shape(logits):
         raise ValueError("For gumbel_softmax, the 0-D input is not supported.")
 
 
-def gumbel_softmax(logits, tau=1, hard=False, dim=-1):
+def gumbel_softmax(logits, tau=1.0, hard=False, dim=-1):
     r"""
     Returns the samples from the Gumbel-Softmax distribution and optionally discretizes. If `hard = True`, the returned
     samples will be one-hot, otherwise it will be probability distributions that sum to 1 across `dim`.
@@ -9270,9 +7725,9 @@ def gumbel_softmax(logits, tau=1, hard=False, dim=-1):
     Raises:
         TypeError: If `logits` is not a Tensor.
         TypeError: If dtype of `logits` is not one of: float16, float32.
-        TypeError: If `tau` is not
+        TypeError: If `tau` is not a float.
         TypeError: If `hard` is not a bool.
-        TypeError: If `dim` is not
+        TypeError: If `dim` is not an int.
         ValueError: If If `tau` is not positive.
 
     Supported Platforms:
@@ -9301,13 +7756,11 @@ def gumbel_softmax(logits, tau=1, hard=False, dim=-1):
     _check_int_range(dim, -len(logits.shape),
                      len(logits.shape), 'dim', "gumbel_softmax")
 
-    const_op = _get_cache_prim(P.ScalarToTensor)()
-
     sample_shape = shape_(logits)
-    uniform = C.uniform(sample_shape,
-        0.0, mstype.float32),
+    uniform = C.uniform(sample_shape, scalar_to_tensor_(
+        0.0, mstype.float32), scalar_to_tensor_(1.0, mstype.float32))
     uniform = cast_(uniform, logits_dtype)
-    gumbel =
+    gumbel = neg(log_(neg(log_(uniform))))
     gumbel = (logits + gumbel) / tau
     y_soft = _get_cache_prim(P.Softmax)(dim)(gumbel)
     if hard:
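Note: the restored gumbel_softmax body draws standard Gumbel noise as -log(-log(u)) for u ~ U(0, 1), perturbs the logits, and applies a softmax at temperature tau. NumPy sketch of the soft (hard=False) path:

    import numpy as np

    def gumbel_softmax_sketch(logits, tau=1.0):
        u = np.random.uniform(size=logits.shape)
        gumbel = -np.log(-np.log(u))                  # Gumbel(0, 1) noise
        g = (logits + gumbel) / tau
        e = np.exp(g - g.max(axis=-1, keepdims=True))
        return e / e.sum(axis=-1, keepdims=True)      # softmax over the last dim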
@@ -9388,7 +7841,7 @@ def kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None):
         beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)
     ) / np.i0(float(beta))
     if dtype is not None:
-        w =
+        w = cast_(ms.tensor(w), dtype)
     out = Tensor(w[:-1]) if periodic else Tensor(w)
     return out
 
@@ -9541,18 +7994,6 @@ def _check_value(items, max_size, msg_prefix, shape1, shape2):
 def _check_matmul_shapes(shape1, shape2, prim_name=None):
     """Checks shape1 and shape2 are valid to perform matmul, and returns output shape after broadcasting."""
     msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
-
-    def _check(shape1, shape2):
-        ndim1, ndim2 = len(shape1), len(shape2)
-        if ndim1 < 1 or ndim2 < 1:
-            raise ValueError(f"{msg_prefix} dimension of input operands must be at least 1, but got "
-                             f"the length of shape1: {ndim1}, the length of shape2: {ndim2}.")
-        if ndim2 >= 2 and shape1[-1] != shape2[-2]:
-            raise ValueError(f"{msg_prefix} shape1[-1] must be equal to shape2[-2] when the length of shape2 "
-                             f"is greater than or equal to 2, but got shape1[-1]: {shape1[-1]}, "
-                             f"shape2[-2]: {shape2[-2]}.")
-
-    _check(shape1, shape2)
     shape_out = list()
     r_shape1 = shape1[:-2]
     r_shape2 = shape2[:-2]
@@ -9571,18 +8012,6 @@ def _check_need_broadcast(shape1, shape2):
     return shape1[:-2] != shape2[:-2]
 
 
-@_primexpr
-def _check_input_1d(input_shape, param_name, func_name):
-    if len(input_shape) != 1:
-        raise ValueError(f"{func_name} {param_name} should be 1d, but got shape {input_shape}")
-
-
-@_primexpr
-def _check_input_2d(input_shape, param_name, func_name):
-    if len(input_shape) != 2:
-        raise ValueError(f"{func_name} {param_name} should be 2d, but got shape {input_shape}")
-
-
 @_primexpr
 def _expand(x, ndim):
     """Expand x to ndim from axis, which can be 0 or -1."""
@@ -9593,8 +8022,7 @@ def _expand(x, ndim):
 
 def _broadcast_to(x, shape_cur, shape_to, ndim_to):
     """Broadcasts x from shape_cur to shape_to."""
-
-    size = tile_size_op(shape_cur, shape_to, ndim_to)
+    size = tile_size_(shape_cur, shape_to, ndim_to)
     F.stop_gradient(size)
     return tile_(x, size)
 
@@ -9609,14 +8037,15 @@ def matmul(input, other):
         On GPU, the supported dtypes are np.float16 and np.float32.
         On CPU, the supported dtypes are np.float16 and np.float32.
         The dtype of `input` and `other` must be same.
+        On Ascend, the rank of `input` or `other` must be between 1 and 6.
 
     Args:
         input (Tensor): Input tensor, scalar not allowed.
-
-
+            The last dimension of `input` must be the same size as the second last dimension of `other`.
+            And the shape of input and other could be broadcast.
         other (Tensor): Input tensor, scalar not allowed.
-
-
+            The last dimension of `input` must be the same size as the second last dimension of `other`.
+            And the shape of input and other could be broadcast.
 
     Returns:
         Tensor or scalar, the matrix product of the inputs. This is a scalar only
@@ -9626,7 +8055,8 @@ def matmul(input, other):
         TypeError: If the dtype of `input` and the dtype of `other` are not the same.
         ValueError: If the last dimension of `input` is not the same size as the
             second-to-last dimension of `other`, or if a scalar value is passed in.
-        ValueError: If the shape of `input` and `
+        ValueError: If the shape of `input` and `input` could not broadcast together.
+        RuntimeError: If the rank of `input` or `other` is less than 1 or greater than 6.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -9657,42 +8087,7 @@ def matmul(input, other):
         >>> print(output.shape)
         (1,)
     """
-
-        raise TypeError("For matmul op, inputs must be all tensors.")
-
-    input_rank, other_rank = rank_(input), rank_(other)
-    if input_rank == 2 and other_rank == 2:
-        _matmul = _get_cache_prim(P.MatMul)(False, False)
-        return _matmul(input, other)
-
-    ndim1_orig, ndim2_orig = rank_(input), rank_(other)
-    shape1_orig, shape2_orig = shape_(input), shape_(other)
-    transpose_b = ndim2_orig == 1
-    shape_backbone = _check_matmul_shapes(shape1_orig, shape2_orig, 'matmul')
-    # infers the shape of the output
-    shape_out = shape_backbone + _infer_shape_rem(shape1_orig, shape2_orig,
-                                                  ndim1_orig, ndim2_orig, transpose_b)
-
-    _matmul = _get_cache_prim(P.MatMul)(False, transpose_b)
-    _batch_matmul = _get_cache_prim(P.BatchMatMul)(False, transpose_b)
-
-    input = _expand(input, 2)
-    other = _expand(other, 2)
-    if rank_(other) == 2:
-        if rank_(input) > 2:
-            input = reshape_(input, (-1, shape1_orig[-1]))
-        res = _matmul(input, other)
-    else:
-        # broadcasts input.shape[:-2] with other.shape[:-2]
-        ndim_aligned = _max(ndim1_orig, ndim2_orig)
-        input = _expand(input, ndim_aligned)
-        other = _expand(other, ndim_aligned)
-        shape1_aligned, shape2_aligned = shape_(input), shape_(other)
-        input = _broadcast_to(input, shape1_aligned[:-2], shape_backbone, ndim_aligned)
-        other = _broadcast_to(other, shape2_aligned[:-2], shape_backbone, ndim_aligned)
-        res = _batch_matmul(input, other)
-
-    return reshape_(res, shape_out)
+    return auto_generate.matmul_ext(input, other)
 
 
 def inner(input, other):
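Note: the hand-rolled broadcasting path (rank checks, _expand, _broadcast_to, MatMul/BatchMatMul dispatch) is deleted wholesale in favor of a single auto_generate.matmul_ext call, with the broadcast rules now stated in the Args/Raises hunks above. Those rules match NumPy's matmul semantics:

    import numpy as np

    a = np.ones((2, 1, 3, 4), np.float32)
    b = np.ones((5, 4, 2), np.float32)
    # batch dims (2,1) and (5,) broadcast to (2,5); (3,4) @ (4,2) -> (3,2)
    print(np.matmul(a, b).shape)   # (2, 5, 3, 2)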
@@ -9809,9 +8204,6 @@ def bmm(input_x, mat2):
|
|
|
9809
8204
|
[[3255. 3312. 3369.]]
|
|
9810
8205
|
[[4362. 4428. 4494.]]]]
|
|
9811
8206
|
"""
|
|
9812
|
-
if not (isinstance(input_x, Tensor) and isinstance(mat2, Tensor)):
|
|
9813
|
-
raise TypeError("For bmm op, inputs input_x and mat2 must be all tensors.")
|
|
9814
|
-
|
|
9815
8207
|
return batch_matmul_(input_x, mat2)
|
|
9816
8208
|
|
|
9817
8209
|
|
|
@@ -9976,12 +8368,9 @@ def baddbmm(input, batch1, batch2, beta=1, alpha=1):
          [5. 5. 5.]
          [5. 5. 5.]]]
     """
-    bmmop = _get_cache_prim(
+    bmmop = _get_cache_prim(BatchMatMul)(False, False)
     if not (isinstance(input, Tensor) and isinstance(batch1, Tensor) and isinstance(batch2, Tensor)):
         raise TypeError("For Baddbmm, inputs must be all tensors.")
-    if len(batch1.shape) != 3 or len(batch2.shape) != 3:
-        raise ValueError("For batch1 and batch2 must be 3-D tensors each containing the same number of matrices, "
-                         f"but got length of batch1:'{len(batch1.shape)}', length of batch2:'{len(batch2.shape)}'.")
     input_dtype = dtype_(input)
     if not (input_dtype == dtype_(batch1) and input_dtype == dtype_(batch2)):
         raise TypeError("For Baddbmm, the inputs should be the same dtype.")
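
For reference, `baddbmm` evaluates beta * input + alpha * (batch1 @ batch2) over stacks of matrices; the hunk above only changes how the `BatchMatMul` primitive is obtained and drops the explicit 3-D rank check. A NumPy sketch of the arithmetic (illustration only, not the MindSpore kernel):

    import numpy as np

    def baddbmm_sketch(inp, batch1, batch2, beta=1, alpha=1):
        # beta * inp + alpha * (batch1 @ batch2), multiplied batch-by-batch.
        return beta * inp + alpha * np.matmul(batch1, batch2)

    inp = np.ones((2, 3, 5))
    b1 = np.full((2, 3, 4), 2.0)
    b2 = np.full((2, 4, 5), 0.5)
    print(baddbmm_sketch(inp, b1, b2)[0, 0, 0])  # 1 + 2.0*0.5*4 = 5.0
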
@@ -10173,11 +8562,9 @@ def xdivy(x, y):
     Divides the first input tensor by the second input tensor element-wise. Returns zero when `x` is zero.
 
     Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
-    The inputs must be two tensors or one tensor and one scalar.
     When the inputs are two tensors,
     dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
-    When the inputs are one tensor and one scalar,
-    the scalar could only be a constant.
+    If one of the inputs is scalar, the scalar could only be a constant.
 
     .. note::
         When `x` and `y` are both of datatype complex, they should be both complex64 or complex128 at the same time.
@@ -10193,7 +8580,7 @@ def xdivy(x, y):
 
     Raises:
         TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
-        TypeError: If dtype of `x` and
+        TypeError: If dtype of `x` and `y` is not in [float16, float32, float64, complex64, complex128, bool].
         ValueError: If `x` could not be broadcast to a tensor with shape of `y`.
         RuntimeError: If the data type of `x`, `y` conversion of Parameter is given
                       but data type conversion of Parameter is not supported.
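
The reworded sentence pins down the xdivy contract: elementwise division that returns zero wherever `x` is zero, so 0/0 yields 0 rather than NaN. A NumPy sketch of that behavior (illustrative only):

    import numpy as np

    def xdivy_sketch(x, y):
        # Divide elementwise, but leave the output at 0 wherever x == 0.
        return np.divide(x, y, out=np.zeros_like(x, dtype=float), where=(x != 0))

    x = np.array([2.0, 4.0, 0.0])
    y = np.array([2.0, 2.0, 0.0])
    print(xdivy_sketch(x, y))  # [1. 2. 0.] -- the 0/0 slot stays 0
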
@@ -10254,37 +8641,6 @@ def log10(input):
     return output
 
 
-def log1p(input):
-    r"""
-    Returns the natural logarithm of one plus the input tensor element-wise.
-
-    .. math::
-        out_i = {log_e}(input_i + 1)
-
-    Args:
-        input (Tensor): The input tensor. The value must be greater than -1.
-
-    Returns:
-        Tensor, has the same shape as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
-        >>> output = ops.log1p(x)
-        >>> print(output)
-        [0.6931472 1.0986123 1.609438 ]
-    """
-    return log1p_(input)
-
-
 def kron(input, other):
     """
     Computes the Kronecker product :math:`input ⊗ other`, denoted by ⊗, of `input` and `other`.
@@ -10378,93 +8734,41 @@ def _check_is_tensor(param_name, input, cls_name):
         raise TypeError(f"For {cls_name}, {param_name} must be a Tensor, but got {type(input)}.")
 
 
-def all(input, axis=None, keep_dims=False):
-    r"""
-    Reduces a dimension of `input` by the "logical AND" of all elements in the dimension, by default. And also can
-    reduce a dimension of `input` along the axis. Determine whether the dimensions of the output and input are the same
-    by controlling `keep_dims`.
-
-    Args:
-        input (Tensor): Input Tensor, has the shape :math:`(N, *)` where :math:`*` means,
-            any number of additional dimensions.
-        axis (Union[int, tuple(int), list(int)], optional): The dimensions to reduce. Suppose the rank of `input` is
-            r, axis must be in the range [-rank(input), rank(input)). Default: ``None`` , all dimensions are reduced.
-        keep_dims (bool, optional): If true, keep these reduced dimensions and the length is 1.
-            If false, don't keep these dimensions. Default : ``False`` .
-
-    Returns:
-        Tensor, the dtype is bool.
-
-        - If `axis` is None, and `keep_dims` is ``False`` ,
-          the output is a 0-D Tensor representing the "logical AND" of all elements in the input Tensor.
-        - If `axis` is int, such as 2, and `keep_dims` is ``False`` ,
-          the shape of output is :math:`(input_1, input_3, ..., input_R)`.
-        - If `axis` is tuple(int), such as (2, 3), and `keep_dims` is False,
-          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
-
-    Raises:
-        TypeError: If `keep_dims` is not a bool.
-        TypeError: If `input` is not a Tensor.
-        TypeError: If `axis` is not one of the following: int, tuple or list.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([[True, False], [True, True]]))
-        >>> # case 1: Reduces a dimension by the "logicalAND" of all elements in the dimension.
-        >>> output = ops.all(x, keep_dims=True)
-        >>> print(output)
-        [[False]]
-        >>> print(output.shape)
-        (1, 1)
-        >>> # case 2: Reduces a dimension along axis 0.
-        >>> output = ops.all(x, axis=0)
-        >>> print(output)
-        [ True False]
-        >>> # case 3: Reduces a dimension along axis 1.
-        >>> output = ops.all(x, axis=1)
-        >>> print(output)
-        [False True]
-    """
-    _check_is_tensor("input", input, "all")
-    if axis is None:
-        axis = ()
-    if input.dtype != mstype.bool_:
-        input = cast_(input, mstype.bool_)
-    return _get_cache_prim(P.ReduceAll)(keep_dims)(input, axis)
-
 
 def any(input, axis=None, keep_dims=False):
     r"""
     Reduces a dimension of `input` by the "logical OR" of all elements in the dimension, by default. And also can
-    reduce a dimension of `input` along the axis. Determine whether the dimensions of the output and input are the same
-    by controlling `keep_dims`.
+    reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
+    same by controlling `keep_dims`.
+
+    Note:
+        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
 
     Args:
         input (Tensor): Input Tensor, has the shape :math:`(N, *)` where :math:`*` means,
             any number of additional dimensions.
-        axis (Union[int, tuple(int), list(int)], optional): The dimensions to reduce.
-            axis must be in the range [-rank(input), rank(input)).
-
-
+        axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
+            Suppose the rank of `input` is r, `axis` must be in the range [-rank(input), rank(input)).
+            Default: ``None`` , all dimensions are reduced.
+        keep_dims (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
+            If ``False`` , don't keep these dimensions. Default : ``False`` .
 
     Returns:
         Tensor, the dtype is bool.
 
-        - If `axis` is None, and `keep_dims` is ``False`` ,
+        - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
          the output is a 0-D Tensor representing the "logical OR" of all elements in the input Tensor.
        - If `axis` is int, such as 2, and `keep_dims` is ``False`` ,
          the shape of output is :math:`(input_1, input_3, ..., input_R)`.
        - If `axis` is tuple(int), such as (2, 3), and `keep_dims` is ``False`` ,
          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
+        - If `axis` is 1-D Tensor, such as [2, 3], and `keep_dims` is ``False`` ,
+          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
 
     Raises:
         TypeError: If `keep_dims` is not a bool.
         TypeError: If `input` is not a Tensor.
-        TypeError: If `axis` is not one of the following: int, tuple or
+        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -10488,11 +8792,8 @@ def any(input, axis=None, keep_dims=False):
         >>> print(output)
         [ True True]
     """
-    _check_is_tensor("input", input, "any")
     if axis is None:
         axis = ()
-    if input.dtype != mstype.bool_:
-        input = cast_(input, mstype.bool_)
     return _get_cache_prim(P.ReduceAny)(keep_dims)(input, axis)
 
 
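
After the change, `any` only normalizes `axis=None` to the empty tuple before dispatching to `ReduceAny`; the implicit cast of non-bool inputs is gone. The reduction itself is the familiar logical-OR reduction, shown here in NumPy for intuition:

    import numpy as np

    x = np.array([[True, False],
                  [True, True]])

    print(np.any(x))          # True: axis=None reduces every element
    print(np.any(x, axis=0))  # [ True  True]: OR down the rows
    print(np.any(x, axis=1))  # [ True  True]: OR across the columns
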
@@ -10599,21 +8900,21 @@ def iou(anchor_boxes, gt_boxes, mode='iou'):
     and width are scaled by 0.2 internally.
 
     Args:
-        anchor_boxes (Tensor): Anchor boxes, tensor of shape :math:`(N, 4)` .
-            and the value
-            Data type must be either float16,
-        gt_boxes (Tensor): Ground truth boxes, tensor of shape :math:`(M, 4)` .
-            truth boxes, and the value
-            Data type must be either float16, float32 or float64.
+        anchor_boxes (Tensor): Anchor boxes, tensor of shape :math:`(N, 4)` . :math:`N` indicates the number of
+            anchor boxes, and the value :math:`4` refers to four boundary coordinates of the predicted area
+            "x0", "y0", "x1", and "y1". Data type must be either float16, float32 or float64.
+        gt_boxes (Tensor): Ground truth boxes, tensor of shape :math:`(M, 4)` . :math:`M` indicates the number
+            of ground truth boxes, and the value :math:`4` refers to four boundary coordinates of the truth
+            area "x0", "y0", "x1", and "y1". Data type must be either float16, float32 or float64.
         mode (string): The mode is used to specify the calculation method,
             now supporting 'iou' (intersection over union) or 'iof' (intersection over foreground) mode.
             Default: ``'iou'`` .
 
     Returns:
-        Tensor, the
+        Tensor, the IOU/IOF values, tensor of shape :math:`(M, N)` , with the same data type as `anchor_boxes`.
 
     Raises:
-        KeyError: When `mode` is not 'iou' or 'iof'
+        KeyError: When `mode` is not ``'iou'`` or ``'iof'``.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
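
The expanded docstring fixes the box layout ("x0", "y0", "x1", "y1") and the (M, N) output shape. For intuition, pairwise IOU under that layout can be sketched in NumPy as follows (illustrative only; the real op also supports 'iof' mode and internal scaling):

    import numpy as np

    def iou_sketch(anchor, gt):
        # anchor: (N, 4), gt: (M, 4); returns (M, N) pairwise IOU values.
        ax0, ay0, ax1, ay1 = anchor[:, 0], anchor[:, 1], anchor[:, 2], anchor[:, 3]
        gx0, gy0, gx1, gy1 = gt[:, 0], gt[:, 1], gt[:, 2], gt[:, 3]
        iw = np.maximum(0, np.minimum(ax1, gx1[:, None]) - np.maximum(ax0, gx0[:, None]))
        ih = np.maximum(0, np.minimum(ay1, gy1[:, None]) - np.maximum(ay0, gy0[:, None]))
        inter = iw * ih
        area_a = (ax1 - ax0) * (ay1 - ay0)
        area_g = (gx1 - gx0) * (gy1 - gy0)
        return inter / (area_a[None, :] + area_g[:, None] - inter)

    anchor = np.array([[0., 0., 2., 2.]])
    gt = np.array([[1., 1., 3., 3.]])
    print(iou_sketch(anchor, gt))  # [[0.14285714]]: overlap 1 over union 7
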
@@ -10659,8 +8960,8 @@ def _check_dim_in_range(dim, ndim):
 
 
 def dotrapezoid(y, dx, dim):
-    y_left =
-    y_right =
+    y_left = _select(y, dim, 0)
+    y_right = _select(y, dim, -1)
     y_sum = y.sum(dim)
     return (y_sum - (y_left + y_right) * 0.5) * dx
 
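
The restored lines complete the uniform-spacing trapezoidal rule: for constant `dx`, dx * (y_0/2 + y_1 + ... + y_{n-1} + y_n/2) equals (sum(y) - (y_first + y_last)/2) * dx, which is exactly what `dotrapezoid` computes with its endpoint selects. A quick NumPy check of the identity:

    import numpy as np

    y = np.array([0.0, 1.0, 4.0, 9.0])
    dx = 0.5

    # Subtracting half of the two endpoints from the plain sum is algebraically
    # the trapezoidal rule for uniformly spaced samples.
    fast = (y.sum() - (y[0] + y[-1]) * 0.5) * dx
    print(fast, np.trapz(y, dx=dx))  # both print 4.75
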
@@ -10670,10 +8971,10 @@ def dotrapezoid_tensor(y, dx, dim):
     y_start_dim_left = tuple(y_start_dim_left)
     y_start_dim_right = [0 for _ in range(y.ndim - dim - 1)]
     y_start_dim_right = tuple(y_start_dim_right)
-    y_slice_size = _tuple_setitem(
-    y_slice_left =
-    y_slice_right =
-    return (
+    y_slice_size = _tuple_setitem(shape_(y), dim, shape_(y)[dim] - 1)
+    y_slice_left = slice_(y, y_start_dim_left + (0,) + y_start_dim_right, y_slice_size)
+    y_slice_right = slice_(y, y_start_dim_left + (1,) + y_start_dim_right, y_slice_size)
+    return (tensor_add(y_slice_left, y_slice_right) * dx).sum(dim) / 2.
 
 
 def add_padding_to_shape(curr_shape, target_n_dim):
@@ -10706,8 +9007,8 @@ def trapezoid_tensor(y, x, dim):
     x_start_dim_right = [0 for _ in range(x.ndim - dim - 1)]
     x_start_dim_right = tuple(x_start_dim_right)
     x_slice_size = _tuple_setitem(x.shape, dim, x.shape[dim] - 1)
-    x_left =
-    x_right =
+    x_left = slice_(x, x_start_dim_left + (0,) + x_start_dim_right, x_slice_size)
+    x_right = slice_(x, x_start_dim_left + (1,) + x_start_dim_right, x_slice_size)
     dx = x_right - x_left
     new_sizes = add_padding_to_shape(dx.shape, y.ndim)
     dx = dx.view(tuple(new_sizes))
@@ -10725,8 +9026,8 @@ def trapezoid_tensor(y, x, dim):
     x_start_dim_right = [0 for _ in range(x_viewed.ndim - dim - 1)]
     x_start_dim_right = tuple(x_start_dim_right)
     x_slice_size = _tuple_setitem(x_viewed.shape, dim, x_viewed.shape[dim] - 1)
-    x_left =
-    x_right =
+    x_left = slice_(x_viewed, x_start_dim_left + (0,) + x_start_dim_right, x_slice_size)
+    x_right = slice_(x_viewed, x_start_dim_left + (1,) + x_start_dim_right, x_slice_size)
     dx = x_right - x_left
     return dotrapezoid_tensor(y, dx, dim)
 
@@ -10745,12 +9046,12 @@ def get(ts, depth, dim, index, r):
         return get(item, depth + 1, dim, index, r)
 
 
-def
+def _select(feat, dim, index):
     select_shape = feat.shape
     select_shape = list(select_shape)
     select_shape[dim] = 1
     new_shape = feat.shape[:dim] + feat.shape[dim + 1:]
-    indexes =
+    indexes = ones_(tuple(select_shape), mstype.int32) * (index)
     return feat.gather_elements(dim, indexes).reshape(new_shape)
 
 
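
The restored `_select` helper picks the slice at a single (possibly negative) index along `dim`: it fills an index tensor of size 1 on that axis, gathers with `gather_elements`, and reshapes the gathered axis away. In NumPy terms this is simply `np.take` with a scalar index; a sketch for intuition (names are illustrative):

    import numpy as np

    def select_sketch(feat, dim, index):
        # Take the slice at `index` along axis `dim` and drop that axis,
        # matching what _select achieves via gather_elements + reshape.
        return np.take(feat, index, axis=dim)

    x = np.arange(24).reshape(2, 3, 4)
    print(select_sketch(x, 1, 0).shape)  # (2, 4): first slice along axis 1
    print(select_sketch(x, 1, -1)[0])    # [ 8  9 10 11]: last slice along axis 1
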
@@ -10809,14 +9110,14 @@ def trapz(y, x=None, *, dx=1.0, dim=-1):
     if not isinstance(dim, int):
         raise TypeError(f"For `trapz`, the input `dim` must be int, but get {type(dim)}.")
     if not _check_is_float(y.dtype):
-        y =
+        y = cast_(y, mstype.float32)
     _check_dim_in_range(dim, y.ndim)
     dim = dim + y.ndim if dim < 0 else dim
     if x is None:
         return trapezoid(y, dx, dim)
     if not isinstance(x, (Tensor, Tensor_)):
         raise TypeError(f"For `trapz`, the input `x` must be Tensor, but get {type(x)}.")
-    x =
+    x = cast_(x, mstype.float32)
     return trapezoid_tensor(y, x, dim)
 
 
@@ -10979,42 +9280,6 @@ def cholesky_solve(input, input2, upper=False):
     return _get_cache_prim(P.CholeskySolve)(upper)(input, input2)
 
 
-def conj(input):
-    r"""
-    Returns a tensor of complex numbers that are the complex conjugate of each element in input.
-    The complex numbers in input must be of the form a + bj, where a is the real part and b is the imaginary part.
-
-    The complex conjugate returned by this operation is of the form a - bj.
-
-    If `input` is real, it is returned unchanged.
-
-    Args:
-        input (Tensor): The input tensor to compute to. Must have numeric type.
-
-    Returns:
-        Tensor, has the same dtype as the `input`.
-
-    Raises:
-        TypeError: If the dtype of `input` is not a numeric type.
-        TypeError: If the `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.asarray(np.complex(1.3+0.4j)), mindspore.complex64)
-        >>> output = ops.conj(x)
-        >>> print(output)
-        (1.3-0.4j)
-    """
-    if not isinstance(input, (Tensor, Tensor_)):
-        raise TypeError("For conj op, input must be Tensor.")
-    return conj_(input)
-
-
 def cross(input, other, dim=None):
     r"""
     Computes the cross product of `input` and `other` in dimension `dim`.
@@ -11184,91 +9449,6 @@ def einsum(equation, *operands):
     return _get_cache_prim(P.Einsum)(equation)(operands)
 
 
-def erfinv(input):
-    r"""
-    Returns the result of the inverse error function with `input`, which is defined in the
-    range `(-1, 1)` as:
-
-    .. math::
-
-        erfinv(erf(x)) = x
-
-    where :math:`x` is the `input`.
-
-    Args:
-        input (Tensor): The input tensor. Supported dtypes:
-
-            - Ascend: float16, float32.
-            - GPU/CPU: float16, float32, float64.
-
-    Returns:
-        Tensor, has the same shape and dtype as `input`.
-
-    Raises:
-        TypeError: If dtype of `input` is not float16, float32 or float64.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([0, 0.5, -0.9]), mindspore.float32)
-        >>> output = ops.erfinv(x)
-        >>> print(output)
-        [ 0. 0.47695306 -1.1630805 ]
-    """
-    return erfinv_(input)
-
-
-def less_equal(input, other):
-    r"""
-    Computes the boolean value of :math:`input <= other` element-wise.
-
-    .. math::
-        out_{i} =\begin{cases}
-            & \text{True, if } input_{i}<=other_{i} \\
-            & \text{False, if } input_{i}>other_{i}
-            \end{cases}
-
-    .. note::
-        - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
-          consistent.
-        - The inputs must be two tensors or one tensor and one scalar.
-        - When the inputs are one tensor and one scalar, the scalar could only be a constant.
-
-    Args:
-        input (Union[Tensor, Number, bool]): The first input is a Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_.
-        other (Union[Tensor, Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a Number or bool value, or a Tensor whose data type is number or bool\_.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
-
-    Raises:
-        TypeError: If neither `input` nor `other` is a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
-        >>> other = Tensor(np.array([1, 1, 4]), mindspore.int32)
-        >>> output = ops.less_equal(x, other)
-        >>> print(output)
-        [ True False True]
-    """
-    return tensor_le(input, other)
-
-
 def cumprod(input, dim, dtype=None):
     r"""
     Computes the cumulative product of the `input` tensor along dimension `dim`.
@@ -11310,70 +9490,6 @@ def cumprod(input, dim, dtype=None):
     return output
 
 
-def greater(input, other):
-    r"""
-    Computes the boolean value of :math:`input > other` element-wise.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
-        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
-        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
-        >>> output = ops.greater(x, y)
-        >>> print(output)
-        [False True False]
-    """
-    return tensor_gt(input, other)
-
-
-def greater_equal(input, other):
-    r"""
-    Computes the boolean value of :math:`input \geq other` element-wise.
-
-    Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ .
-        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
-            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
-
-    Returns:
-        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
-        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
-        >>> output = ops.greater_equal(x, y)
-        >>> print(output)
-        [True True False]
-    """
-    return tensor_ge(input, other)
-
-
 def igamma(input, other):
     r"""
     Calculates lower regularized incomplete Gamma function.
@@ -11421,8 +9537,7 @@ def igamma(input, other):
         >>> print(output)
         [0.593994 0.35276785 0.21486944 0.13337152]
     """
-
-    return igamma_op(input, other)
+    return igamma_(input, other)
 
 
 def igammac(input, other):
@@ -11472,8 +9587,7 @@ def igammac(input, other):
         >>> print (output)
         [0.40600586 0.6472318 0.7851304 0.8666283]
     """
-
-    return igammac_op(input, other)
+    return igammac_(input, other)
 
 
 def lgamma(input):
@@ -11700,7 +9814,7 @@ def logical_xor(input, other):
 
     .. math::
 
-        out_{i} =
+        out_{i} = input_{i} \oplus other_{i}
 
     Args:
         input (Tensor): The first input is a tensor whose data type can be implicitly converted to bool.
@@ -11843,7 +9957,7 @@ def nansum(input, axis=None, keepdims=False, *, dtype=None):
     if input.dtype == mstype.bool_:
         input = input.astype(mstype.int64)
     is_nan = isnan_(input)
-    input = ops.masked_fill(input, is_nan, 0)
+    input = ops.masked_fill(input, is_nan, ops.cast(0, input.dtype))
     input = _get_cache_prim(P.ReduceSum)(keepdims)(input, axis)
     if dtype is not None and input.dtype != dtype:
         input = input.astype(dtype)
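
The fix casts the scalar fill value to the input's dtype before calling `masked_fill`, since the op expects the fill value's dtype to match the tensor's (a bare Python `0` would otherwise arrive as an int scalar and clash with float16/float32 inputs). A minimal sketch of the fixed pattern, assuming a MindSpore install:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.array([1.0, float("nan"), 2.0]), ms.float32)
    mask = ops.isnan(x)
    # Cast the fill value to x's dtype first, mirroring the fix above.
    filled = ops.masked_fill(x, mask, ops.cast(0, x.dtype))
    print(filled)  # [1. 0. 2.]
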
@@ -11937,7 +10051,7 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1):
     diag_plane = (dsize, dsize)
     output_shape_trans = batch_shape + diag_plane
     output = zeros(output_shape_trans, input.dtype)
-    k =
+    k = cast_(offset, mstype.int32)
     output = matrix_set_diag_op(output, input, k)
     dim = 0
     perm = ()
@@ -11956,25 +10070,28 @@ def sum(input, dim=None, keepdim=False, *, dtype=None):
     """
     Calculate sum of Tensor elements over a given dim.
 
+    Note:
+        The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
+
     Args:
         input (Tensor): The input tensor.
-        dim (Union[None, int, tuple(int), list(int)]): Dimensions along which a sum is performed.
-            If None, sum all the elements of the input tensor.
+        dim (Union[None, int, tuple(int), list(int), Tensor]): Dimensions along which a sum is performed.
+            If ``None`` , sum all the elements of the input tensor.
             If the `dim` is a tuple or list of ints, a sum is performed on all the dimensions specified in the tuple.
-            Must be in the range :math:`[-input.ndim, input.ndim)` . Default: ``None
+            Must be in the range :math:`[-input.ndim, input.ndim)` . Default: ``None`` .
         keepdim (bool): Whether the output tensor has dim retained or not.
-            If True, keep these reduced dimensions and the length is 1.
-            If False, don't keep these dimensions. Default: ``False
+            If ``True`` , keep these reduced dimensions and the length is 1.
+            If ``False`` , don't keep these dimensions. Default: ``False`` .
 
     Keyword Args:
-        dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None
+        dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None`` .
 
     Returns:
-        A Tensor, sum of elements over a given dim in `input`.
+        A Tensor, sum of elements over a given `dim` in `input`.
 
     Raises:
         TypeError: If `input` is not a Tensor.
-        TypeError: If `dim` is not an int, tulpe(int), list(int) or None.
+        TypeError: If `dim` is not an int, tulpe(int), list(int), Tensor or None.
         ValueError: If `dim` is not in the range :math:`[-input.ndim, input.ndim)` .
         TypeError: If `keepdim` is not a bool.
 
@@ -12008,23 +10125,7 @@ def sum(input, dim=None, keepdim=False, *, dtype=None):
          [48.]
          [54.]]]
     """
-    if not isinstance(input, Tensor):
-        raise TypeError(f"For 'sum', 'input' must be Tensor, but got{type(input)}")
-    if dim is not None and not isinstance(dim, (int, tuple, list)):
-        raise TypeError(f"For 'sum', 'dim' must be int, tuple(int), list(int) or None, but got {type(dim)}")
-    if not isinstance(keepdim, bool):
-        raise TypeError(f"For 'sum', 'keepdim' must be bool, but got {type(keepdim)}")
-
-    if input.dtype == mstype.bool_:
-        input = input.astype(mstype.int64)
-    if dtype is not None:
-        input = input.astype(dtype)
-    reduce_sum = _get_cache_prim(P.ReduceSum)(keep_dims=keepdim)
-    if dim is not None:
-        out = reduce_sum(input, dim)
-    else:
-        out = reduce_sum(input)
-    return out
+    return sum_ext_op(input, dim, keepdim, dtype)
 
 
 def tanhshrink(input):
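
The hand-written validation and `ReduceSum` dispatch collapse into a single call to the generated `sum_ext_op`. The behavior the new op is presumably expected to match is sketched below in NumPy: bool inputs are promoted to int64, an explicit `dtype` is applied before reducing, then an ordinary sum runs (illustration only):

    import numpy as np

    def sum_sketch(x, dim=None, keepdim=False, dtype=None):
        if x.dtype == np.bool_:
            x = x.astype(np.int64)   # bool tensors are summed as int64
        if dtype is not None:
            x = x.astype(dtype)      # an explicit dtype wins before reducing
        return x.sum(axis=dim, keepdims=keepdim)

    x = np.array([[True, False], [True, True]])
    print(sum_sketch(x))         # 3: bool promoted, full reduction
    print(sum_sketch(x, dim=0))  # [2 1]
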
@@ -12229,6 +10330,8 @@ def _canonicalize_fft_shape_and_dim(input, shape, dim):
 def as_strided(x, shape=None, strides=None):
     n = np.dtype(mstype.dtype_to_nptype(x.dtype)).itemsize
     strides = tuple(np.array(strides) * n)
+    if x.dtype == mstype.bfloat16:
+        return Tensor(np.lib.stride_tricks.as_strided(x.float().asnumpy(), shape, strides, False, True), dtype=x.dtype)
     return Tensor(np.lib.stride_tricks.as_strided(x.asnumpy(), shape, strides, False, True), dtype=x.dtype)
 
 
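
The added branch works around NumPy's lack of a native bfloat16: the tensor is round-tripped through float32 (`x.float()`) so `asnumpy()` can hand `np.lib.stride_tricks.as_strided` a real buffer, and the original dtype is restored when the Tensor is rebuilt. The strided-view mechanics themselves, with byte strides scaled by the itemsize as in the context lines above, look like this in plain NumPy:

    import numpy as np

    x = np.arange(6, dtype=np.float32)
    itemsize = x.dtype.itemsize  # strides are expressed in bytes
    # A (4, 3) sliding window over the vector: advance one element per row.
    view = np.lib.stride_tricks.as_strided(
        x, shape=(4, 3), strides=(itemsize, itemsize))
    print(view)
    # [[0. 1. 2.]
    #  [1. 2. 3.]
    #  [2. 3. 4.]
    #  [3. 4. 5.]]
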
@@ -12250,13 +10353,13 @@ def _resize_input(input, input_dim, ret_dim, ret_shape, input_sizes):
         if input_sizes[value] > ret_shape[i]:
             start_index = [0] * input_dim
             input_sizes[value] = ret_shape[i]
-            input =
+            input = slice_(input, start_index, input_sizes)
 
     if must_copy:
         paddings = np.reshape(paddings, (input_dim, 2)).tolist()
         paddings.reverse()
         paddings = (*paddings,)
-        input = P.Pad(paddings)(input)
+        input = _get_cache_prim(P.Pad)(paddings)(input)
 
     return input
 
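
The second fix routes `P.Pad` through `_get_cache_prim`, so repeated calls with identical attributes reuse one primitive instance instead of constructing a fresh operator on every call. A toy sketch of that caching idea (the stand-in helper below is hypothetical; the real `_get_cache_prim` signature may differ):

    import functools

    @functools.lru_cache(maxsize=None)
    def get_cached(prim_cls, *args):
        # Memoize operator construction keyed by class and constructor args.
        return prim_cls(*args)

    class Pad:  # toy primitive; the real P.Pad takes a paddings tuple
        def __init__(self, paddings):
            self.paddings = paddings

    p1 = get_cached(Pad, ((1, 1), (0, 0)))
    p2 = get_cached(Pad, ((1, 1), (0, 0)))
    print(p1 is p2)  # True: the same instance is reused
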
@@ -12408,7 +10511,7 @@ def fft(input, n=None, dim=-1, norm=None):  # pylint: disable=redefined-outer-name
         Default: -1.
         norm (string, optional): Normalization mode. Three modes are defined as,
             ``"forward"`` (normalize by :math:`1/n`), ``"backward"``(no normalization),
-            ``"ortho"`` (normalize by :math
+            ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
             Default: ``None`` that means ``"backward"``.
 
     Returns:
@@ -12487,7 +10590,7 @@ def fft2(input, s=None, dim=(-2, -1), norm=None):  # pylint: disable=redefined-outer-name
         Default: last two dimensions.
         norm (string, optional): Normalization mode. Three modes are defined as,
             ``"forward"``(normalize by :math:`1/n`), ``"backward"``(no normalization),
-            ``"ortho"``(normalize by :math
+            ``"ortho"``(normalize by :math:`1/\sqrt{n}`). Where :math:`n = prod(s)` is the logical FFT size.
             Default: ``None`` that means ``"backward"``.
 
     Returns:
@@ -12530,7 +10633,7 @@ def fftn(input, s=None, dim=None, norm=None):  # pylint: disable=redefined-outer-name
         Default: all dimensions, or the last `len(s)` dimensions if `s` is given.
         norm (string, optional): Normalization mode. Three modes are defined as,
             ``"forward"``(normalize by :math:`1/n`), ``"backward"``(no normalization),
-            ``"ortho"``(normalize by :math
+            ``"ortho"``(normalize by :math:`1/\sqrt{n}`). Where :math:`n = prod(s)` is the logical FFT size.
             Default: ``None`` that means ``"backward"``.
 
     Returns:
@@ -12579,7 +10682,7 @@ def ifft(input, n=None, dim=-1, norm=None):  # pylint: disable=redefined-outer-name
         Default: -1.
         norm (string, optional): Normalization mode. Three modes are defined as,
             ``"forward"``(normalize by :math:`1/n`), ``"backward"``(no normalization),
-            ``"ortho"``(normalize by :math
+            ``"ortho"``(normalize by :math:`1/\sqrt{n}`).
             Default: ``None`` that means ``"backward"``.
 
     Returns:
@@ -12659,7 +10762,7 @@ def ifft2(input, s=None, dim=(-2, -1), norm=None):  # pylint: disable=redefined-outer-name
         Default: (-2, -1).
         norm (string, optional): Normalization mode. Three modes are defined as,
             ``"forward"``(normalize by :math:`1/n`), ``"backward"``(no normalization),
-            ``"ortho"``(normalize by :math
+            ``"ortho"``(normalize by :math:`1/\sqrt{n}`). Where :math:`n = prod(s)` is the logical IFFT size.
             Default: ``None`` that means ``"backward"``.
 
     Returns:
@@ -12702,7 +10805,7 @@ def ifftn(input, s=None, dim=None, norm=None):  # pylint: disable=redefined-outer-name
         Default: all dimensions, or the last `len(s)` dimensions if `s` is given.
         norm (string, optional): Normalization mode. Three modes are defined as,
             ``"forward"``(normalize by :math:`1/n`), ``"backward"``(no normalization),
-            ``"ortho"``(normalize by :math
+            ``"ortho"``(normalize by :math:`1/\sqrt{n}`). Where :math:`n = prod(s)` is the logical IFFT size.
             Default: ``None`` that means ``"backward"``.
 
     Returns:
@@ -12762,7 +10865,7 @@ def count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):
 
     Args:
         x (Tensor): Input data is used to count non-zero numbers. With shape
-            :math:`(
+            :math:`(*)` where :math:`*` means, any number of additional dimensions.
         axis (Union[int, tuple(int), list(int)], optional): The dimensions to reduce.
             Default: ``()`` , reduce all dimensions.
         keep_dims (bool, optional): Whether to maintain dimensions specified by `axis`.
@@ -12821,7 +10924,7 @@ def count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):
     reduce_sum = _get_cache_prim(P.ReduceSum)(keep_dims)
 
     tensor_0 = ops.zeros(x.shape, x.dtype)
-    nonzero_bool =
+    nonzero_bool = not_equal(x, tensor_0)
     # ReduceSum only support float16 or float32 tensor.
     nonzero_val = cast_(nonzero_bool, mstype.float32)
     nonzero_num = cast_(reduce_sum(nonzero_val, axis), dtype)
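
With the restored line, `count_nonzero` reads as a small pipeline: compare against a zero tensor with `not_equal`, cast the boolean mask to float32 (the comment notes ReduceSum needs a float tensor), reduce, then cast to the requested dtype. The same pipeline in NumPy (illustrative only):

    import numpy as np

    def count_nonzero_sketch(x, axis=None, keep_dims=False):
        mask = (x != np.zeros_like(x)).astype(np.float32)  # nonzero mask
        return mask.sum(axis=axis, keepdims=keep_dims).astype(np.int32)

    x = np.array([[0, 1, 2], [3, 0, 4]])
    print(count_nonzero_sketch(x))          # 4
    print(count_nonzero_sketch(x, axis=0))  # [1 1 2]
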
@@ -13048,7 +11151,8 @@ def vecdot(x, y, *, axis=-1):
     Calculates the dot product of two batches of vectors across the specified dimension.
 
     The formula of calculation is as follows.
-    :math:`\bar{x_{i}}` represents the conjugate for complex vectors,
+    :math:`\bar{x_{i}}` represents the conjugate for complex vectors,
+    and :math:`\bar{x_{i}}` is the raw value for real vectors.
 
     .. math::
 
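
Rewrapped, the sentence states that :math:`\bar{x_{i}}` is the complex conjugate for complex vectors and the raw value for real ones; the dot product is then the sum over `axis` of conj(x_i) * y_i. A NumPy sketch of that formula:

    import numpy as np

    def vecdot_sketch(x, y, axis=-1):
        # Conjugate the first operand (a no-op for real dtypes) and contract.
        return (np.conj(x) * y).sum(axis=axis)

    x = np.array([[1 + 1j, 2 - 1j]])
    y = np.array([[3 + 0j, 1 + 2j]])
    print(vecdot_sketch(x, y))  # [3.+2.j]
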
@@ -13358,7 +11462,8 @@ def _get_output_shape(batch_size, x1_ret, x2_ret):
 
 def batch_dot(x1, x2, axes=None):
     """
-    Computation of batch dot product between samples in two tensors containing batch dims.
+    Computation of batch dot product between samples in two tensors containing batch dims, i.e. `x1` or `x2` 's
+    first dimension is batch size.
 
     .. math::
         output = x1[batch, :] * x2[batch, :]
@@ -13486,13 +11591,13 @@ __all__ = [
     'addcdiv',
     'addcmul',
     'angle',
+    'argmax',
     'argmin',
     'arccosh',
     'arcsin',
     'arctan',
     'arctan2',
     'bincount',
-    'neg_tensor',
     'neg',
     'negative',
     'tensor_lt',
@@ -13504,6 +11609,7 @@ __all__ = [
     'le',
     'lerp',
     'norm',
+    'norm_ext',
     'vector_norm',
     'matrix_norm',
     'tensor_gt',
@@ -13573,6 +11679,7 @@ __all__ = [
     'matrix_determinant',
     'det',
     'linspace',
+    'linspace_ext',
     'logspace',
     'lu_solve',
     'matrix_solve',
@@ -13660,6 +11767,7 @@ __all__ = [
     'amin',
     'amax',
     'mean',
+    'mean_ext',
     'prod',
     'all',
     'any',
@@ -13750,6 +11858,6 @@ __all__ = [
     'vecdot',
     'dot',
     'batch_dot',
-    'eps'
+    'eps',
 ]
 __all__.sort()