mindspore-2.2.14-cp37-cp37m-manylinux1_x86_64.whl → mindspore-2.3.0rc2-cp37-cp37m-manylinux1_x86_64.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -4
- mindspore/_akg/akg/composite/build_module.py +155 -11
- mindspore/_akg/akg/config/repository.json +38 -0
- mindspore/_akg/akg/ms/info_version_adapt.py +29 -0
- mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -1
- mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +2 -1
- mindspore/_akg/akg/utils/composite_op_helper.py +4 -2
- mindspore/_akg/akg/utils/dump_ascend_meta.py +2 -2
- mindspore/_akg/akg/utils/gen_random.py +14 -8
- mindspore/_akg/akg/utils/op_dsl.py +11 -0
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +18 -8
- mindspore/_c_dataengine.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +78 -0
- mindspore/_extends/builtin_operations.py +2 -1
- mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
- mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
- mindspore/_extends/parse/__init__.py +18 -14
- mindspore/_extends/parse/compile_config.py +229 -0
- mindspore/_extends/parse/parser.py +155 -59
- mindspore/_extends/parse/resources.py +40 -7
- mindspore/_extends/parse/standard_method.py +127 -206
- mindspore/_extends/remote/kernel_build_server.py +2 -0
- mindspore/_mindspore_offline_debug.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _profiler.py} +13 -16
- mindspore/amp.py +24 -18
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +7 -3
- mindspore/common/_jit_fallback_utils.py +2 -3
- mindspore/common/_register_for_adapter.py +7 -0
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_stub_tensor.py +7 -1
- mindspore/common/_utils.py +5 -17
- mindspore/common/api.py +145 -50
- mindspore/common/auto_dynamic_shape.py +27 -14
- mindspore/common/dtype.py +9 -6
- mindspore/common/dump.py +5 -4
- mindspore/common/hook_handle.py +51 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +33 -13
- mindspore/common/lazy_inline.py +58 -17
- mindspore/common/mindir_util.py +12 -2
- mindspore/common/mutable.py +79 -14
- mindspore/common/parameter.py +24 -4
- mindspore/common/recompute.py +247 -0
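
`common/recompute.py` is new in 2.3 and backs the top-level `mindspore.recompute` interface for activation recomputation. A minimal sketch, assuming the documented `ms.recompute(block, *inputs)` signature (verify against the 2.3.0rc2 release notes):

```python
import mindspore as ms
from mindspore import nn, ops

# Hedged sketch: ms.recompute(block, *inputs) re-runs the block's forward
# pass during backprop instead of storing its intermediate activations.
class Net(nn.Cell):
    def __init__(self):
        super().__init__()
        self.block = nn.SequentialCell(nn.Dense(16, 16), nn.ReLU())

    def construct(self, x):
        return ms.recompute(self.block, x)

grad_fn = ms.grad(Net())
print(grad_fn(ops.ones((2, 16), ms.float32)).shape)  # (2, 16)
```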
- mindspore/common/seed.py +9 -9
- mindspore/common/sparse_tensor.py +251 -18
- mindspore/common/symbol.py +122 -0
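
`common/symbol.py` introduces `mindspore.Symbol` for constraining dynamic shape dimensions. A sketch assuming the constructor keywords shown in the 2.3 docs (`divisor`, `remainder`, ...):

```python
import mindspore as ms
from mindspore import nn, ops

# Hedged sketch: a Symbol labels a dynamic dimension with constraints the
# compiler can exploit; keyword names are assumed and should be verified.
s = ms.Symbol(divisor=8)                     # batch size: a multiple of 8
dyn_x = ms.Tensor(shape=[s, 16], dtype=ms.float32)

net = nn.Dense(16, 4)
net.set_inputs(dyn_x)                        # declare the symbolic input
print(net(ops.ones((8, 16), ms.float32)).shape)  # (8, 4)
```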
- mindspore/common/tensor.py +391 -465
- mindspore/communication/__init__.py +3 -3
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/management.py +53 -38
- mindspore/config/op_info.config +22 -54
- mindspore/context.py +176 -55
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +6 -6
- mindspore/dataset/audio/transforms.py +711 -158
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +2 -2
- mindspore/dataset/engine/datasets.py +72 -38
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +33 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +7 -7
- mindspore/dataset/engine/datasets_vision.py +75 -71
- mindspore/dataset/engine/offload.py +5 -7
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +408 -121
- mindspore/dataset/text/utils.py +9 -9
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/transforms.py +261 -76
- mindspore/dataset/utils/browse_dataset.py +9 -9
- mindspore/dataset/vision/__init__.py +3 -3
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/transforms.py +2264 -514
- mindspore/dataset/vision/utils.py +40 -9
- mindspore/dataset/vision/validators.py +7 -1
- mindspore/experimental/optim/__init__.py +12 -2
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +35 -34
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +40 -16
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +66 -121
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +15 -8
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +28 -19
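
The eight optimizers added above (adadelta through rprop) round out `mindspore.experimental.optim`. Assuming they keep the torch-style interface of the pre-existing `optim.Adam`/`optim.AdamW`, usage looks like:

```python
from mindspore import nn
from mindspore.experimental import optim

# Hedged sketch: class and keyword names (NAdam, lr, StepLR) are assumed to
# mirror the existing torch-style optimizers in experimental.optim.
net = nn.Dense(4, 2)
opt = optim.NAdam(net.trainable_params(), lr=1e-3)
sched = optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.5)

def train_step(grads):
    opt(grads)       # these optimizers are Cells applied to gradient tuples
    sched.step()
```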
- mindspore/hal/__init__.py +34 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/stream.py +339 -0
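
`mindspore/hal` is an entirely new package exposing per-backend device, stream, and event handles. A sketch of the expected surface; every name here is assumed from the file layout and the 2.3 documentation:

```python
import mindspore as ms

# Hedged sketch of the new mindspore.hal package (device.py / stream.py /
# event.py above); verify names against the 2.3.0rc2 docs before use.
if ms.hal.is_available("CPU"):
    print(ms.hal.device_count("CPU"))    # number of visible devices

s = ms.hal.Stream()                      # user-created compute stream
ev = ms.hal.Event()
with ms.hal.StreamCtx(s):                # run subsequent ops on stream s
    ev.record()
ev.synchronize()                         # block until the event fires
ms.hal.synchronize()                     # drain all streams on the device
```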
- mindspore/include/api/data_type.h +2 -2
- mindspore/include/api/dual_abi_helper.h +16 -3
- mindspore/include/api/model.h +1 -3
- mindspore/include/api/status.h +14 -0
- mindspore/include/c_api/model_c.h +173 -0
- mindspore/include/c_api/ms/base/types.h +1 -0
- mindspore/include/c_api/types_c.h +19 -0
- mindspore/include/dataset/execute.h +1 -3
- mindspore/include/mindapi/base/format.h +125 -23
- mindspore/include/mindapi/base/types.h +12 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +2044 -154
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +2044 -33
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/build_tbe_kernel.py +529 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/compiler.py +56 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/custom.py +1109 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/get_file_path.py +36 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/tbe_topi.py +556 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6318 -1760
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_add_custom.h +49 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +52 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +232 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +232 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.cpp +192 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.cpp +274 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +39 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/{libmindspore_ascend.so.1 → libmindspore_ascend.so.2} +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/log.py +2 -2
- mindspore/mindrecord/__init__.py +5 -1
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +25 -0
- mindspore/mindrecord/filewriter.py +74 -56
- mindspore/mindrecord/mindpage.py +40 -6
- mindspore/mindrecord/shardutils.py +3 -2
- mindspore/mindrecord/shardwriter.py +7 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
- mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
- mindspore/mindrecord/tools/csv_to_mr.py +4 -9
- mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
- mindspore/mint/__init__.py +457 -0
- mindspore/mint/nn/__init__.py +430 -0
- mindspore/mint/nn/functional.py +424 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +186 -0
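
`mindspore.mint` (with `mint.nn`, `mint.nn.functional`, and `mint.optim`) debuts here as a PyTorch-flavoured API layer. A sketch under the assumption, suggested by the module layout, that mint mirrors the torch namespace:

```python
import mindspore as ms
from mindspore import mint

# Hedged sketch: mint is assumed to mirror torch-style names (ones, add,
# matmul, nn.functional.relu); confirm coverage in the 2.3.0rc2 docs.
x = mint.ones((2, 3), dtype=ms.float32)
y = mint.add(x, 1.0)
z = mint.nn.functional.relu(y - 2.0)
print(mint.matmul(z, mint.ones((3, 2))))
```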
- mindspore/multiprocessing/__init__.py +72 -0
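
`mindspore/multiprocessing` looks like a thin wrapper (72 lines) over the standard library's `multiprocessing` that makes fork-based workers safe alongside a MindSpore runtime. Assuming it re-exports the stdlib interface:

```python
from mindspore import multiprocessing as mp

# Hedged sketch: assumes mindspore.multiprocessing re-exports the stdlib
# API (Process, Pool, ...) with MindSpore-aware fork handling.
def worker(i):
    import mindspore as ms
    return ms.ops.ones((i, i), ms.float32).shape

if __name__ == "__main__":
    with mp.Pool(2) as pool:
        print(pool.map(worker, [1, 2, 3]))
```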
- mindspore/nn/__init__.py +3 -0
- mindspore/nn/cell.py +131 -174
- mindspore/nn/dynamic_lr.py +2 -2
- mindspore/nn/extend/__init__.py +29 -0
- mindspore/nn/extend/basic.py +140 -0
- mindspore/nn/extend/embedding.py +143 -0
- mindspore/{rewrite/ast_creator_register.py → nn/extend/layer/__init__.py} +9 -19
- mindspore/nn/extend/layer/normalization.py +107 -0
- mindspore/nn/extend/pooling.py +117 -0
- mindspore/nn/generator.py +297 -0
- mindspore/nn/layer/activation.py +79 -90
- mindspore/nn/layer/basic.py +113 -81
- mindspore/nn/layer/channel_shuffle.py +3 -16
- mindspore/nn/layer/container.py +3 -3
- mindspore/nn/layer/conv.py +71 -71
- mindspore/nn/layer/embedding.py +105 -44
- mindspore/nn/layer/image.py +4 -7
- mindspore/nn/layer/normalization.py +52 -66
- mindspore/nn/layer/padding.py +30 -39
- mindspore/nn/layer/pooling.py +13 -9
- mindspore/nn/layer/rnn_cells.py +5 -15
- mindspore/nn/layer/rnns.py +6 -5
- mindspore/nn/layer/thor_layer.py +1 -2
- mindspore/nn/layer/timedistributed.py +1 -1
- mindspore/nn/layer/transformer.py +52 -50
- mindspore/nn/learning_rate_schedule.py +6 -5
- mindspore/nn/loss/loss.py +43 -64
- mindspore/nn/optim/ada_grad.py +4 -2
- mindspore/nn/optim/adadelta.py +3 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +102 -181
- mindspore/nn/optim/adamax.py +4 -2
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +4 -2
- mindspore/nn/optim/ftrl.py +31 -61
- mindspore/nn/optim/lamb.py +5 -3
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +6 -4
- mindspore/nn/optim/momentum.py +13 -25
- mindspore/nn/optim/optimizer.py +6 -3
- mindspore/nn/optim/proximal_ada_grad.py +4 -2
- mindspore/nn/optim/rmsprop.py +9 -3
- mindspore/nn/optim/rprop.py +4 -2
- mindspore/nn/optim/sgd.py +6 -5
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
- mindspore/nn/probability/distribution/beta.py +2 -2
- mindspore/nn/probability/distribution/categorical.py +4 -6
- mindspore/nn/probability/distribution/cauchy.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/poisson.py +2 -2
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +13 -1
- mindspore/nn/wrap/__init__.py +2 -1
- mindspore/nn/wrap/cell_wrapper.py +33 -12
- mindspore/nn/wrap/grad_reducer.py +148 -8
- mindspore/nn/wrap/loss_scale.py +7 -7
- mindspore/numpy/__init__.py +2 -0
- mindspore/numpy/array_creations.py +2 -0
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/fft.py +431 -0
- mindspore/numpy/math_ops.py +54 -60
- mindspore/numpy/utils.py +3 -0
- mindspore/ops/__init__.py +5 -4
- mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
- mindspore/ops/_grad_experimental/grad_comm_ops.py +14 -18
- mindspore/ops/_grad_experimental/grad_math_ops.py +68 -283
- mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
- mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
- mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/__init__.py +0 -1
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
- mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
- mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -3
- mindspore/ops/_op_impl/cpu/adam.py +2 -2
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
- mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
- mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
- mindspore/ops/_vmap/vmap_array_ops.py +137 -101
- mindspore/ops/_vmap/vmap_base.py +8 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
- mindspore/ops/_vmap/vmap_image_ops.py +70 -13
- mindspore/ops/_vmap/vmap_math_ops.py +101 -57
- mindspore/ops/_vmap/vmap_nn_ops.py +230 -97
- mindspore/ops/_vmap/vmap_other_ops.py +1 -1
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +205 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +257 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +171 -0
- mindspore/ops/auto_generate/gen_extend_func.py +404 -0
- mindspore/ops/auto_generate/gen_ops_def.py +5653 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +11623 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +359 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +118 -17
- mindspore/ops/composite/math_ops.py +9 -48
- mindspore/ops/composite/multitype_ops/_compile_utils.py +168 -602
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +24 -133
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
- mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
- mindspore/ops/deprecated.py +14 -3
- mindspore/ops/extend/__init__.py +54 -0
- mindspore/ops/extend/array_func.py +259 -0
- mindspore/ops/extend/math_func.py +76 -0
- mindspore/ops/extend/nn_func.py +384 -0
- mindspore/ops/function/__init__.py +37 -12
- mindspore/ops/function/array_func.py +702 -1867
- mindspore/ops/function/clip_func.py +19 -31
- mindspore/ops/function/debug_func.py +1 -4
- mindspore/ops/function/fft_func.py +31 -0
- mindspore/ops/function/grad/grad_func.py +24 -17
- mindspore/ops/function/image_func.py +27 -21
- mindspore/ops/function/linalg_func.py +35 -68
- mindspore/ops/function/math_func.py +639 -2531
- mindspore/ops/function/nn_func.py +1274 -832
- mindspore/ops/function/other_func.py +4 -5
- mindspore/ops/function/parameter_func.py +5 -93
- mindspore/ops/function/random_func.py +84 -71
- mindspore/ops/function/sparse_unary_func.py +9 -16
- mindspore/ops/function/spectral_func.py +1 -1
- mindspore/ops/function/vmap_func.py +14 -14
- mindspore/ops/functional.py +57 -63
- mindspore/ops/op_info_register.py +16 -43
- mindspore/ops/operations/__init__.py +19 -20
- mindspore/ops/operations/_grad_ops.py +20 -828
- mindspore/ops/operations/_inner_ops.py +180 -288
- mindspore/ops/operations/_scalar_ops.py +5 -480
- mindspore/ops/operations/_sequence_ops.py +6 -36
- mindspore/ops/operations/array_ops.py +83 -2697
- mindspore/ops/operations/comm_ops.py +38 -46
- mindspore/ops/operations/custom_ops.py +14 -96
- mindspore/ops/operations/debug_ops.py +100 -31
- mindspore/ops/operations/image_ops.py +1 -217
- mindspore/ops/operations/inner_ops.py +3 -38
- mindspore/ops/operations/linalg_ops.py +1 -49
- mindspore/{rewrite/ast_transformers → ops/operations/manually_defined}/__init__.py +11 -4
- mindspore/ops/operations/manually_defined/_inner.py +61 -0
- mindspore/ops/operations/manually_defined/ops_def.py +1716 -0
- mindspore/ops/operations/math_ops.py +581 -4629
- mindspore/ops/operations/nn_ops.py +260 -1941
- mindspore/ops/operations/other_ops.py +50 -42
- mindspore/ops/operations/random_ops.py +3 -52
- mindspore/ops/operations/sparse_ops.py +3 -3
- mindspore/ops/primitive.py +196 -96
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +257 -0
- mindspore/ops_generate/arg_handler.py +171 -0
- mindspore/ops_generate/gen_aclnn_implement.py +266 -0
- mindspore/ops_generate/gen_ops.py +1062 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
- mindspore/ops_generate/gen_pyboost_func.py +939 -0
- mindspore/ops_generate/gen_utils.py +188 -0
- mindspore/ops_generate/op_proto.py +138 -0
- mindspore/ops_generate/pyboost_utils.py +349 -0
- mindspore/ops_generate/template.py +238 -0
- mindspore/parallel/__init__.py +6 -4
- mindspore/parallel/_auto_parallel_context.py +52 -2
- mindspore/parallel/_cell_wrapper.py +16 -9
- mindspore/parallel/_cost_model_context.py +1 -1
- mindspore/parallel/_dp_allreduce_fusion.py +159 -159
- mindspore/parallel/_parallel_serialization.py +29 -13
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +19 -7
- mindspore/parallel/_transformer/__init__.py +1 -1
- mindspore/parallel/_transformer/layers.py +1 -1
- mindspore/parallel/_transformer/loss.py +1 -1
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/op_parallel_config.py +1 -1
- mindspore/parallel/_transformer/transformer.py +1 -1
- mindspore/parallel/_utils.py +147 -6
- mindspore/parallel/algo_parameter_config.py +6 -6
- mindspore/parallel/checkpoint_transform.py +180 -24
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +345 -0
- mindspore/parallel/cluster/process_entity/_utils.py +116 -0
- mindspore/parallel/cluster/run.py +139 -0
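
`parallel/cluster/run.py`, together with the entry_points.txt addition listed near the end of this diff, registers the new `msrun` distributed launcher. A sketch of a worker script, with the assumed launch command in the comment (flag names taken from the 2.3 docs, unverified here):

```python
# Hedged sketch: launch with the msrun CLI registered in entry_points.txt,
# e.g.  msrun --worker_num=4 --local_worker_num=4 --master_port=8118 train.py
import mindspore as ms
from mindspore.communication import init, get_rank, get_group_size

ms.set_context(mode=ms.GRAPH_MODE)
init()                 # msrun supplies the cluster env vars each worker reads
print(f"rank {get_rank()} of {get_group_size()}")
```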
- mindspore/parallel/mpi/__init__.py +1 -1
- mindspore/parallel/mpi/_mpi_config.py +1 -1
- mindspore/parallel/parameter_broadcast.py +152 -0
- mindspore/parallel/shard.py +99 -2
- mindspore/profiler/common/util.py +20 -0
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +66 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +77 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +146 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +109 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +80 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +52 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +116 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +59 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
- mindspore/profiler/parser/ascend_flops_generator.py +20 -4
- mindspore/profiler/parser/ascend_hccl_generator.py +25 -277
- mindspore/profiler/parser/ascend_msprof_exporter.py +112 -132
- mindspore/profiler/parser/ascend_msprof_generator.py +73 -283
- mindspore/profiler/parser/ascend_op_generator.py +92 -42
- mindspore/profiler/parser/ascend_timeline_generator.py +294 -133
- mindspore/profiler/parser/base_timeline_generator.py +6 -0
- mindspore/profiler/parser/framework_parser.py +3 -2
- mindspore/profiler/parser/integrator.py +3 -1
- mindspore/profiler/parser/msadvisor_analyzer.py +1 -1
- mindspore/profiler/parser/msadvisor_parser.py +1 -1
- mindspore/profiler/parser/profiler_info.py +16 -1
- mindspore/profiler/profiling.py +305 -167
- mindspore/rewrite/__init__.py +2 -13
- mindspore/rewrite/api/node.py +121 -35
- mindspore/rewrite/api/pattern_engine.py +2 -3
- mindspore/rewrite/api/scoped_value.py +16 -15
- mindspore/rewrite/api/symbol_tree.py +45 -29
- mindspore/rewrite/ast_helpers/__init__.py +3 -6
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
- mindspore/rewrite/common/__init__.py +1 -2
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
- mindspore/rewrite/{namer.py → common/namer.py} +63 -18
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/node/__init__.py +5 -5
- mindspore/rewrite/node/call_function.py +23 -7
- mindspore/rewrite/node/cell_container.py +7 -3
- mindspore/rewrite/node/control_flow.py +53 -28
- mindspore/rewrite/node/node.py +212 -196
- mindspore/rewrite/node/node_manager.py +51 -22
- mindspore/rewrite/node/node_topological_manager.py +3 -23
- mindspore/rewrite/parsers/__init__.py +12 -0
- mindspore/rewrite/parsers/arguments_parser.py +8 -9
- mindspore/rewrite/parsers/assign_parser.py +635 -413
- mindspore/rewrite/parsers/attribute_parser.py +3 -4
- mindspore/rewrite/parsers/class_def_parser.py +107 -144
- mindspore/rewrite/parsers/constant_parser.py +5 -5
- mindspore/rewrite/parsers/container_parser.py +4 -6
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +31 -98
- mindspore/rewrite/parsers/function_def_parser.py +13 -5
- mindspore/rewrite/parsers/if_parser.py +28 -10
- mindspore/rewrite/parsers/module_parser.py +8 -182
- mindspore/rewrite/parsers/parser.py +1 -5
- mindspore/rewrite/parsers/parser_register.py +1 -1
- mindspore/rewrite/parsers/return_parser.py +5 -10
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
- mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
- mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
- mindspore/run_check/_check_version.py +6 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +9 -19
- mindspore/scipy/__init__.py +2 -1
- mindspore/scipy/fft.py +133 -0
- mindspore/scipy/linalg.py +140 -55
- mindspore/scipy/ops.py +15 -71
- mindspore/scipy/ops_grad.py +5 -34
- mindspore/scipy/optimize/line_search.py +2 -2
- mindspore/scipy/optimize/minimize.py +1 -1
- mindspore/train/__init__.py +3 -2
- mindspore/train/_utils.py +178 -4
- mindspore/train/amp.py +167 -245
- mindspore/train/anf_ir_pb2.py +8 -2
- mindspore/train/callback/_backup_and_restore.py +4 -4
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +39 -13
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_landscape.py +14 -8
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +7 -7
- mindspore/train/callback/_time_monitor.py +2 -2
- mindspore/train/data_sink.py +1 -1
- mindspore/train/dataset_helper.py +18 -4
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/accuracy.py +7 -7
- mindspore/train/metrics/confusion_matrix.py +8 -6
- mindspore/train/metrics/cosine_similarity.py +6 -4
- mindspore/train/metrics/error.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/perplexity.py +2 -1
- mindspore/train/metrics/topk.py +2 -2
- mindspore/train/mind_ir_pb2.py +89 -15
- mindspore/train/model.py +24 -22
- mindspore/train/serialization.py +257 -133
- mindspore/train/summary/summary_record.py +51 -28
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/version.py +1 -1
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +534 -1066
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +1 -0
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
- mindspore/config/super_bar_config.json +0 -544
- mindspore/gen_ops.py +0 -273
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/nn/layer/flash_attention.py +0 -189
- mindspore/ops/_op_impl/cpu/concat.py +0 -39
- mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
- mindspore/ops/_op_impl/tbe/__init__.py +0 -47
- mindspore/ops/_op_impl/tbe/abs.py +0 -38
- mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/acos.py +0 -37
- mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/acosh.py +0 -37
- mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
- mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
- mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
- mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
- mindspore/ops/_op_impl/tbe/add.py +0 -42
- mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/add_n.py +0 -39
- mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
- mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
- mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
- mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
- mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
- mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
- mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
- mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/asin.py +0 -37
- mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/asinh.py +0 -37
- mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/assign.py +0 -79
- mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
- mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
- mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/atan.py +0 -37
- mindspore/ops/_op_impl/tbe/atan2.py +0 -38
- mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/atanh.py +0 -37
- mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
- mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
- mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
- mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
- mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
- mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
- mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
- mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
- mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
- mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
- mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cast.py +0 -55
- mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/cdist.py +0 -38
- mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/ceil.py +0 -37
- mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/celu.py +0 -39
- mindspore/ops/_op_impl/tbe/centralization.py +0 -39
- mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
- mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/concat.py +0 -40
- mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
- mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
- mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
- mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
- mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
- mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/cos.py +0 -37
- mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/cosh.py +0 -37
- mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
- mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cummin.py +0 -41
- mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
- mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
- mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
- mindspore/ops/_op_impl/tbe/diag.py +0 -38
- mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
- mindspore/ops/_op_impl/tbe/dilation.py +0 -40
- mindspore/ops/_op_impl/tbe/div.py +0 -41
- mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
- mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
- mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
- mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
- mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
- mindspore/ops/_op_impl/tbe/elu.py +0 -38
- mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/equal.py +0 -42
- mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/erf.py +0 -37
- mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfc.py +0 -37
- mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
- mindspore/ops/_op_impl/tbe/exp.py +0 -40
- mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
- mindspore/ops/_op_impl/tbe/expm1.py +0 -37
- mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
- mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/fill.py +0 -56
- mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/flatten.py +0 -48
- mindspore/ops/_op_impl/tbe/floor.py +0 -37
- mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
- mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
- mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
- mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
- mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
- mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
- mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/ger.py +0 -43
- mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/greater.py +0 -43
- mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
- mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
- mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
- mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
- mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
- mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
- mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
- mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/im2col.py +0 -42
- mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
- mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
- mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/inv.py +0 -38
- mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/invert.py +0 -37
- mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/iou.py +0 -38
- mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/is_close.py +0 -40
- mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
- mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
- mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
- mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
- mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
- mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
- mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/lerp.py +0 -38
- mindspore/ops/_op_impl/tbe/less.py +0 -41
- mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/log.py +0 -40
- mindspore/ops/_op_impl/tbe/log1p.py +0 -37
- mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
- mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
- mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
- mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
- mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/matmul.py +0 -53
- mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
- mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
- mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
- mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum.py +0 -39
- mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
- mindspore/ops/_op_impl/tbe/minimum.py +0 -40
- mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mish.py +0 -37
- mindspore/ops/_op_impl/tbe/mod.py +0 -41
- mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/mul.py +0 -37
- mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
- mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
- mindspore/ops/_op_impl/tbe/neg.py +0 -39
- mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
- mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
- mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
- mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
- mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
- mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/pack.py +0 -58
- mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
- mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
- mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/pdist.py +0 -36
- mindspore/ops/_op_impl/tbe/pooling.py +0 -46
- mindspore/ops/_op_impl/tbe/population_count.py +0 -38
- mindspore/ops/_op_impl/tbe/pow.py +0 -41
- mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/prelu.py +0 -37
- mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/range.py +0 -39
- mindspore/ops/_op_impl/tbe/real_div.py +0 -38
- mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
- mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
- mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
- mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
- mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6.py +0 -38
- mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/renorm.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
- mindspore/ops/_op_impl/tbe/rint.py +0 -37
- mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roll.py +0 -42
- mindspore/ops/_op_impl/tbe/round.py +0 -38
- mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
- mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
- mindspore/ops/_op_impl/tbe/select.py +0 -38
- mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/selu.py +0 -39
- mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sgd.py +0 -62
- mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sign.py +0 -38
- mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/sin.py +0 -37
- mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sinh.py +0 -37
- mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/slice.py +0 -58
- mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
- mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax.py +0 -37
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
- mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/softplus.py +0 -37
- mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softsign.py +0 -37
- mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sort.py +0 -38
- mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/split_d.py +0 -38
- mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/split_v.py +0 -39
- mindspore/ops/_op_impl/tbe/splitv.py +0 -39
- mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/square.py +0 -38
- mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
- mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
- mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
- mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
- mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
- mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
- mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
- mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
- mindspore/ops/_op_impl/tbe/sub.py +0 -39
- mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tan.py +0 -38
- mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh.py +0 -37
- mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
- mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
- mindspore/ops/_op_impl/tbe/tile.py +0 -37
- mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
- mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
- mindspore/ops/_op_impl/tbe/transpose.py +0 -60
- mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
- mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
- mindspore/ops/_op_impl/tbe/trunc.py +0 -39
- mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/unpack.py +0 -38
- mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
- mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
- mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
- mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
- mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
- mindspore/ops/_tracefunc.py +0 -241
- mindspore/ops/arg_dtype_cast.py +0 -54
- mindspore/rewrite/api/tree_node_helper.py +0 -60
- mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
- mindspore/rewrite/namespace.py +0 -53
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
-# Copyright 2022 Huawei Technologies Co., Ltd
+# Copyright 2022-2023 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -31,18 +31,18 @@ from mindspore.ops.operations._inner_ops import DynamicBroadcastTo
 from mindspore.ops.operations._sequence_ops import TupleToTensor
 from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
 from mindspore.ops.operations._sequence_ops import TensorToList
+from mindspore.ops.auto_generate import OnesLikeExt, ZerosLikeExt, FillScalar, FillTensor, Arange, Chunk
+from mindspore.ops.auto_generate.gen_ops_prim import SplitTensor
+from mindspore.ops.auto_generate.gen_ops_prim import SplitWithSize, RepeatInterleave
 
 from mindspore.ops.operations.array_ops import (
     UniqueConsecutive,
     SearchSorted,
-    NonZero,
     MatrixDiagV3,
     MatrixDiagPartV3,
     MatrixSetDiagV3,
     Fills,
     Col2Im,
-    ArgMaxWithValue,
-    ArgMinWithValue,
     ScatterNdMax,
     ScatterNdMul,
     IndexFill,
@@ -51,8 +51,8 @@ from mindspore.ops.operations.array_ops import (
     Expand,
     Lstsq,
     Mvlgamma,
-
-
+    ArgMaxWithValue,
+    ArgMinWithValue
 )
 from mindspore.ops.operations.array_ops import TensorScatterElements
 from mindspore.common import Tensor
@@ -61,53 +61,76 @@ from mindspore import _checkparam as validator
 from mindspore._c_expression import Tensor as Tensor_
 from mindspore.ops._utils.utils import ms_arrange
 
-
+from mindspore.ops.auto_generate import cat, range, scatter_nd, deepcopy, masked_fill, diagonal, expand_dims, \
+    nonzero, flip, transpose, tril, triu, unsorted_segment_sum, diag, gather, gather_d, gather_nd, reshape, \
+    broadcast_to, strided_slice, ones, zeros, max_, min_, select
+from mindspore.ops.auto_generate.gen_ops_prim import scatter_add_ext_op
+from mindspore.ops.operations.manually_defined import tile, rank, scalar_cast
+
+arg_max_with_value_ = ArgMaxWithValue()
+batch_to_space_nd_v2_ = P.BatchToSpaceNDV2()
+cast_ = P.Cast()
+diag_ = P.Diag()
+dynamic_broadcast_to_ = DynamicBroadcastTo()
 eye_ = P.Eye()
 fills_ = Fills()
+fillv2_ = P.FillV2()
+flatten_ = P.Flatten()
+gather_ = P.Gather()
+gather_d_ = P.GatherD()
+gather_nd_ = P.GatherNd()
+ger_ = P.Ger()
+index_fill_ = IndexFill()
+lstsq_ = Lstsq()
+masked_select_ = P.MaskedSelect()
+matrix_band_part_ = P.array_ops.MatrixBandPart()
 ones_ = P.Ones()
-
-
-unique_with_pad_ = P.UniqueWithPad()
-size_ = P.Size()
-shape_ = P.Shape()
+population_count_ = P.PopulationCount()
+range_ = P.Range()
 rank_ = P.Rank()
-
+reduce_max_ = P.ReduceMax()
+reduce_min_ = P.ReduceMin()
 reshape_ = P.Reshape()
-
-expand_dims_ = P.ExpandDims()
-transpose_ = P.Transpose()
+scalar_to_tensor_ = P.ScalarToTensor()
 scatter_add_ = P.ScatterAdd()
+scatter_div_ = P.ScatterDiv()
 scatter_max_ = P.ScatterMax()
 scatter_min_ = P.ScatterMin()
 scatter_mul_ = P.ScatterMul()
-scatter_div_ = P.ScatterDiv()
 scatter_nd_ = P.ScatterNd()
-
-
-
-
-
+scatter_update_ = P.ScatterUpdate()
+shape_ = P.Shape()
+split_tensor = SplitTensor()
+split_with_size = SplitWithSize()
+size_ = P.Size()
 tensor_scatter_add_ = P.TensorScatterAdd()
-tensor_scatter_sub_ = P.TensorScatterSub()
-tensor_scatter_mul_ = P.TensorScatterMul()
 tensor_scatter_div_ = P.TensorScatterDiv()
-tensor_scatter_min_ = P.TensorScatterMin()
 tensor_scatter_max_ = P.TensorScatterMax()
-
-
-
-matrix_band_part_ = P.array_ops.MatrixBandPart()
-ger_ = P.Ger()
-diag_ = P.Diag()
-range_ = P.Range()
-zeros_like_ = P.ZerosLike()
-cast_ = P.Cast()
+tensor_scatter_min_ = P.TensorScatterMin()
+tensor_scatter_mul_ = P.TensorScatterMul()
+tensor_scatter_sub_ = P.TensorScatterSub()
 tensor_select_ = P.Select()
-
+tensor_shape_ = P.TensorShape()
+tensor_slice = P.Slice()
+tile_ = P.Tile()
+transpose_ = P.Transpose()
+tuple_to_array_ = P.TupleToArray()
+tuple_to_tensor_ = TupleToTensor()
+unique_ = P.Unique()
+unique_with_pad_ = P.UniqueWithPad()
+unsorted_segment_max_ = P.UnsortedSegmentMax()
+unsorted_segment_min_ = P.UnsortedSegmentMin()
+unsorted_segment_prod_ = P.UnsortedSegmentProd()
 unsorted_segment_sum_ = P.UnsortedSegmentSum()
-
-
-
+ones_like_ = P.OnesLike()
+zeros_like_ = P.ZerosLike()
+ones_like_ext_ = OnesLikeExt()
+zeros_like_ext_ = ZerosLikeExt()
+fill_scalar_ = FillScalar()
+fill_tensor_ = FillTensor()
+arange_ = Arange()
+chunk_ = Chunk()
+repeat_interleave_ = RepeatInterleave()
 
 
 @_primexpr
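The hunk above replaces scattered, lazily created primitives with one alphabetized block of module-level instances. A minimal sketch of that pattern, not taken from the package itself (the names `cast_`, `shape_`, and `to_float32` here are illustrative), showing why a shared instance is enough for stateless primitives:

```python
import numpy as np
import mindspore as ms
from mindspore.ops import operations as P

cast_ = P.Cast()    # constructed once at import time
shape_ = P.Shape()  # reused by every functional call below

def to_float32(x):
    """Functional wrapper that reuses the module-level Cast instance."""
    return cast_(x, ms.float32)

x = ms.Tensor(np.array([1, 2, 3]), ms.int32)
print(to_float32(x).dtype)  # Float32
print(shape_(x))            # (3,)
```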
@@ -187,8 +210,11 @@ def arange(start=0, end=None, step=1, *, dtype=None):
 
     Keyword Args:
         dtype (mindspore.dtype, optional): The required data type of returned Tensor. Default: ``None`` .
-
-
+            When `dtype` is not specified or ``None``:
+
+            If `start`, `end`, and `step` are all integers, the dtype of output is int64,
+
+            If `start`, `end`, and `step` contain at least one floating-point number, the dtype of output is float32.
 
     Returns:
         A 1-D Tensor, with the same type as the inputs.
@@ -225,7 +251,7 @@ def arange(start=0, end=None, step=1, *, dtype=None):
         >>> print(output)
         [12. 11. 10. 9. 8. 7. 6. 5. 4. 3.]
         >>> print(output.dtype)
-
+        Float32
     """
     if end is None:
         start, end = 0, start
@@ -237,67 +263,92 @@ def arange(start=0, end=None, step=1, *, dtype=None):
     if start.shape != () or end.shape != () or step.shape != ():
         raise ValueError(f"For arange, the input args must be a TensorScalar,"
                          f" but got start shape:{start.shape}, end shape:{end.shape}, step shape:{step.shape}")
-
-    data = range_op(start, end, step)
+    data = range_(start, end, step)
     if dtype is not None:
         data = cast_(data, dtype)
     return data
 
 
-def
+def arange_ext(start=0, end=None, step=1, *, dtype=None):
     r"""
-
+    Creates a sequence of numbers that begins at `start` and extends by increments of
+    `step` up to but not including `end`.
 
-
-
-
-
+    Args:
+        start (Union[float, int, Tensor], optional): The start of the interval.
+            If Tensor, the shape must be :math:`()` . Default: ``0`` .
+        end (Union[float, int, Tensor], optional): The end of the interval, exclusive.
+            If Tensor, the shape must be :math:`()`.
+            Default: ``None`` . If ``None`` , it defaults to the value of `start`, and 0 is used as the starting value.
+        step (Union[float, int, Tensor], optional): Number that increments `start`.
+            If Tensor, the shape must be :math:`()`. Default: ``1`` .
 
-
+    Keyword Args:
+        dtype (mindspore.dtype, optional): The required data type of returned Tensor. Default: ``None`` .
+            When `dtype` is not specified or ``None``:
 
-
+            If `start`, `end`, and `step` are all integers, the dtype of output is int64,
 
-
-    tensors (Union[tuple, list]): A tuple or a list of input tensors.
-        Suppose there are two tensors in this tuple or list, namely t1 and t2.
-        To perform `concat` in the axis 0 direction, except for the :math:`0`-th axis,
-        all other dimensions should be equal, that is,
-        :math:`t1.shape[1] = t2.shape[1], t1.shape[2] = t2.shape[2], ..., t1.shape[R-1] = t2.shape[R-1]`,
-        where :math:`R` represents the rank of tensor.
-    axis (int): The specified axis, whose value is in range :math:`[-R, R)`. Default: ``0`` .
+            If `start`, `end`, and `step` contain at least one floating-point number, the dtype of output is float32.
 
     Returns:
-        Tensor, the
-        The data type is the same with `tensors`.
+        A 1-D Tensor, with the same type as the inputs.
 
     Raises:
-        TypeError: If `
-
-        ValueError: If `
-
+        TypeError: If `start`, `end` or `step` is not an int or a float or a TensorScalar(Special Tensor with shape ())
+            in valid dtypes.
+        ValueError: If `step` = 0.
+        ValueError: If `start` >= `end` when `step` > 0.
+        ValueError: If `start` <= `end` when `step` < 0.
 
     Supported Platforms:
-        ``Ascend``
+        ``Ascend``
 
     Examples:
-        >>> import mindspore
-        >>> import
-        >>>
-        >>> input_x1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
-        >>> input_x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
-        >>> output = ops.cat((input_x1, input_x2))
+        >>> import mindspore as ms
+        >>> from mindspore import Tensor, mint
+        >>> output = mint.arange(1, 6)
         >>> print(output)
-        [
-
-
-
-        >>> output
+        [1 2 3 4 5]
+        >>> print(output.dtype)
+        Int64
+        >>> output = mint.arange(0, 3, 1.2)
+        >>> print(output)
+        [0. 1.2 2.4]
+        >>> print(output.dtype)
+        Float32
+        >>> output = mint.arange(7, 1, -2)
         >>> print(output)
-        [
-
+        [7 5 3]
+        >>> print(output.dtype)
+        Int64
+        >>> output = mint.arange(ms.Tensor(12.0, dtype=ms.float64), 2, ms.Tensor(-1.0, dtype=ms.float32))
+        >>> print(output)
+        [12. 11. 10. 9. 8. 7. 6. 5. 4. 3.]
+        >>> print(output.dtype)
+        Float32
     """
-
-
+    if end is None:
+        start, end = 0, start
+
+    out = arange_(start, end, step)
+    if dtype is not None:
+        out = cast_(out, dtype)
+    return out
+
+
+def concat(tensors, axis=0):
+    """
+    Alias for :func:`mindspore.ops.cat()`.
+
+    Tutorial Examples:
+        - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/master/beginner/tensor.html#tensor-operation>`_
+        - `Vision Transformer Image Classification - Building ViT as a whole
+          <https://mindspore.cn/tutorials/application/en/master/cv/vit.html#building-vit-as-a-whole>`_
+        - `Sentiment Classification Implemented by RNN - Dense
+          <https://mindspore.cn/tutorials/application/en/master/nlp/sentiment_analysis.html#dense>`_
+    """
+    return cat(tensors, axis)
 
 
 def eye(n, m=None, dtype=None):
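A short usage sketch, not part of the diff itself, illustrating the dtype-inference rules the hunk above documents for `arange` (all-integer inputs give int64, any float gives float32, an explicit `dtype` overrides both):

```python
import mindspore as ms
from mindspore import ops

print(ops.arange(1, 6).dtype)               # Int64: start, end, step all integers
print(ops.arange(0, 3, 1.2).dtype)          # Float32: a float appears among the inputs
print(ops.arange(5, dtype=ms.int32).dtype)  # Int32: explicit dtype wins
```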
@@ -305,14 +356,14 @@ def eye(n, m=None, dtype=None):
     Creates a tensor with ones on the diagonal and zeros in the rest.
 
     Note:
-
-
+        The data type of returned tensor can be float16, float32, int8, int16, int32, int64, uint8
+        or bool on Ascend platforms.
 
     Args:
         n (int): The number of rows of returned tensor. Constant value only.
-        m (int): The number of columns of returned tensor. Constant value only.
+        m (int, optional): The number of columns of returned tensor. Constant value only.
             Default: ``None`` , if ``None`` , the number of columns is as the same as n.
-        dtype (mindspore.dtype): MindSpore's dtype, the data type of the returned tensor.
+        dtype (mindspore.dtype, optional): MindSpore's dtype, the data type of the returned tensor.
             The data type can be bool or Number.
             Default: ``None`` , the data type of the returned tensor is mindspore.float32.
 
@@ -336,11 +387,11 @@ def eye(n, m=None, dtype=None):
          [0 1]]
         >>> print(output.dtype)
         Int32
-        >>> output = ops.eye(1, 2, mindspore.
+        >>> output = ops.eye(1, 2, mindspore.float32)
         >>> print(output)
         [[1. 0.]]
         >>> print(output.dtype)
-
+        Float32
        >>> output = ops.eye(2, dtype=mindspore.int32)
        >>> print(output)
        [[1 0]
@@ -419,25 +470,25 @@ def hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype
     return out
 
 
-def where(condition,
+def where(condition, input, other):
     r"""
-    Selects elements from `
+    Selects elements from `input` or `other` based on `condition` and returns a tensor.
 
     .. math::
-        output_i = \begin{cases}
+        output_i = \begin{cases} input_i,\quad &if\ condition_i \\ other_i,\quad &otherwise \end{cases}
 
     Args:
-        condition (Tensor[bool]): If True, yield `
-
-
+        condition (Tensor[bool]): If True, yield `input`, otherwise yield `other`.
+        input (Union[Tensor, Scalar]): When `condition` is True, values to select from.
+        other (Union[Tensor, Scalar]): When `condition` is False, values to select from.
 
     Returns:
-        Tensor, elements are selected from `
+        Tensor, elements are selected from `input` and `other`.
 
     Raises:
         TypeError: If `condition` is not a Tensor.
-        TypeError: If both `
-        ValueError: If `condition`, `
+        TypeError: If both `input` and `other` are scalars.
+        ValueError: If `condition`, `input` and `other` can not broadcast to each other.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -454,66 +505,15 @@ def where(condition, x, y):
         [[0. 1.]
          [2. 1.]]
     """
-
-        raise TypeError(f"For 'where', 'condition' must be a Tensor, but got {type(condition)}.")
-    if isinstance(x, (int, float)):
-        if not isinstance(y, Tensor):
-            raise TypeError(
-                f"For 'where', at least one of 'x' and 'y' should be Tensor, but got x:{type(x)}, y:{type(y)}."
-            )
-        x = cast_(x, y.dtype)
-    elif isinstance(y, (int, float)):
-        if not isinstance(x, Tensor):
-            raise TypeError(
-                f"For 'where', at least one of 'x' and 'y' should be Tensor, but got x:{type(x)}, y:{type(y)}."
-            )
-        y = cast_(y, x.dtype)
-    output_shape = _calc_broadcast_shape(x.shape, y.shape, condition.shape)
-    condition = broadcast_to(condition, output_shape)
-    x = broadcast_to(x, output_shape)
-    y = broadcast_to(y, output_shape)
-    _select = P.Select()
-    return _select(condition, x, y)
+    return tensor_select_(condition, input, other)
 
 
 def reverse(x, axis):
     """
-
-
-    .. warning::
-        The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "input_x".
-
-    Args:
-        x (Tensor): The target tensor.
-            The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
-        axis (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.
-
-    Outputs:
-        Tensor, has the same shape and type as `x`.
-
-    Raises:
-        TypeError: If `axis` is neither list nor tuple.
-        TypeError: If element of `axis` is not an int.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
-        >>> output = ops.reverse(input_x, axis=[1])
-        >>> print(output)
-        [[4 3 2 1]
-         [8 7 6 5]]
-        >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
-        >>> output = ops.reverse(input_x, axis=[1, 0])
-        >>> print(output)
-        [[8 7 6 5]
-         [4 3 2 1]]
+    :func:`mindspore.ops.reverse` will be deprecated in the future.
+    Please use :func:`mindspore.ops.flip` instead.
     """
-    return
+    return flip(x, axis)
 
 
 def ravel(input):
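Two changes land in the hunk above: `where` now defers broadcasting to the `Select` primitive, and `reverse` becomes a deprecation shim over `flip`. A small demo, not from the package, exercising both new code paths (tensor inputs only, to stay within what the new body is guaranteed to accept):

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

cond = Tensor(np.array([[True, False], [True, True]]))
a = Tensor(np.array([[0., 1.], [2., 3.]]), ms.float32)
b = Tensor(np.full((2, 2), 10.), ms.float32)
print(ops.where(cond, a, b))  # [[ 0. 10.] [ 2.  3.]]
print(ops.flip(a, (1,)))      # the stated replacement for ops.reverse
```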
@@ -659,8 +659,9 @@ def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
     other locations take value `off_value`.
 
     Note:
-        If the input indices
-        On Ascend, if `on_value` is int64 dtype, `indices` must be
+        If the input `indices` has rank `N`, the output will have rank `N+1`.
+        The new axis is created at dimension `axis`. On Ascend, if `on_value` is int64 dtype, `indices` must be
+        int64 dtype, and the value for `on_value` and `off_value` can only be 1 and 0.
 
     Args:
         indices(Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
@@ -682,6 +683,7 @@ def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
     Raises:
         TypeError: If `axis` or `depth` is not an int.
         TypeError: If dtype of `indices` is not int32 or int64.
+        TypeError: If dtype of `on_value` is not int32, int64, float16 or float32.
         TypeError: If `indices`, `on_value` or `off_value` is not a Tensor.
         ValueError: If `axis` is not in range [-1, ndim].
         ValueError: If `depth` is less than 0.
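A quick check of the rank rule added to the `one_hot` Note above (rank-N indices produce a rank-(N+1) result, with the new axis at `axis`); this snippet is illustrative, not part of the diff:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

indices = Tensor(np.array([[0, 2], [1, 0]]), ms.int32)  # rank 2
out = ops.one_hot(indices, 3)                           # depth=3, default axis=-1
print(indices.shape, '->', out.shape)                   # (2, 2) -> (2, 2, 3)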
@@ -715,8 +717,8 @@ def fill(type, shape, value):  # pylint: disable=redefined-outer-name
 
     Args:
         type (mindspore.dtype): The specified type of output tensor. The data type only supports
-            `bool_ <https://www.mindspore.cn/docs/en/
-            `number <https://www.mindspore.cn/docs/en/
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ and
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ .
         shape (Union(Tensor, tuple[int])): The specified shape of output tensor.
         value (Union(Tensor, number.Number, bool)): Value to fill the returned tensor.
 
@@ -743,7 +745,7 @@ def fill(type, shape, value):  # pylint: disable=redefined-outer-name
      [0. 0. 0.]]
     """
     value = cast_(value, type)
-    return
+    return fillv2_(shape, value)
 
 
 def full(size, fill_value, *, dtype=None):  # pylint: disable=redefined-outer-name
@@ -791,6 +793,45 @@ def full(size, fill_value, *, dtype=None):  # pylint: disable=redefined-outer-name
     return ops.fill(dtype, size, fill_value)
 
 
+def full_ext(size, fill_value, *, dtype=None):  # pylint: disable=redefined-outer-name
+    """
+    Create a Tensor of the specified shape and fill it with the specified value.
+
+    Args:
+        size (Union(tuple[int], list[int])): The specified shape of output tensor.
+        fill_value (number.Number): Value to fill the returned tensor. Complex numbers are not supported for now.
+
+    Keyword Args:
+        dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for details,
+            please refer to :class:`mindspore.dtype` . Default: ``None`` .
+
+    Returns:
+        Tensor.
+
+    Raises:
+        TypeError: If `size` is not a tuple or list.
+        ValueError: The element in `size` is less than 0.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> from mindspore import ops
+        >>> output = ops.full((2, 2), 1)
+        >>> print(output)
+        [[1. 1.]
+         [1. 1.]]
+        >>> output = ops.full((3, 3), 0)
+        >>> print(output)
+        [[0. 0. 0.]
+         [0. 0. 0.]
+         [0. 0. 0.]]
+    """
+    if isinstance(fill_value, Tensor):
+        return fill_tensor_(size, fill_value, dtype)
+    return fill_scalar_(size, fill_value, dtype)
+
+
 def full_like(input, fill_value, *, dtype=None):
     """
     Return a Tensor of the same shape as `input` and filled with `fill_value`.
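Usage sketch for the fill/full family touched above, not part of the diff: `fill` takes the dtype first, while `full` takes it as a keyword and defaults per the docstrings.

```python
import mindspore as ms
from mindspore import ops

print(ops.fill(ms.float32, (2, 3), 7))       # 2x3 tensor filled with 7.0
print(ops.full((2, 2), 1, dtype=ms.int32))   # explicit integer dtype
```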
@@ -883,24 +924,63 @@ def chunk(input, chunks, axis=0):
     length_along_dim = arr_shape[arr_axis]
 
     if chunks > length_along_dim:
-        res = P.Split(arr_axis, length_along_dim)(input)
+        res = _get_cache_prim(P.Split)(arr_axis, length_along_dim)(input)
     elif length_along_dim % chunks == 0:
-        res = P.Split(arr_axis, chunks)(input)
+        res = _get_cache_prim(P.Split)(arr_axis, chunks)(input)
     else:
         block_size = int(np.ceil(length_along_dim / chunks))
         true_chunks = int(length_along_dim // block_size)
         length1 = true_chunks * block_size
         length2 = length_along_dim - length1
-        start1 = _list_comprehensions(
+        start1 = _list_comprehensions(rank_(input), 0, True)
         size1 = _tuple_setitem(arr_shape, arr_axis, length1)
         start2 = _tuple_setitem(start1, arr_axis, length1)
         size2 = _tuple_setitem(arr_shape, arr_axis, length2)
-        res = P.Split(arr_axis, true_chunks)(tensor_slice(input, start1, size1))
+        res = _get_cache_prim(P.Split)(arr_axis, true_chunks)(tensor_slice(input, start1, size1))
         if length2:
-            res += P.Split(arr_axis, 1)(tensor_slice(input, start2, size2))
+            res += _get_cache_prim(P.Split)(arr_axis, 1)(tensor_slice(input, start2, size2))
     return res
 
 
+def chunk_ext(input, chunks, dim=0):
+    """
+    Cut the input Tensor into `chunks` sub-tensors along the specified axis.
+
+    Note:
+        This function may return less than the specified number of chunks!
+
+    Args:
+        input (Tensor): A Tensor to be cut.
+        chunks (int): Number of sub-tensors to cut.
+        dim (int, optional): Specify the dimensions that you want to split. Default: ``0`` .
+
+    Returns:
+        A tuple of sub-tensors.
+
+    Raises:
+        TypeError: If argument `input` is not Tensor.
+        TypeError: The sum of `chunks` is not int.
+        TypeError: If argument `dim` is not int.
+        ValueError: If argument `dim` is out of range of :math:`[-input.ndim, input.ndim)` .
+        ValueError: If argument `chunks` is not positive number.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore
+        >>> from mindspore import Tensor
+        >>> input_x = np.arange(9).astype("float32")
+        >>> output = mindspore.mint.chunk(Tensor(input_x), 3)
+        >>> print(output)
+        (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
+         Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
+         Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
+    """
+    return chunk_(input, chunks, dim)
+
+
 def fills(x, value):
     """
     `fills` is deprecated, please use `ops.fill` instead.
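The "may return fewer chunks" note in the hunk above follows from the ceil-based block size: 9 elements split into 4 requested chunks use blocks of ceil(9/4) = 3, so only 3 chunks come back. An illustrative snippet (not from the package):

```python
import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.arange(9).astype(np.float32))
pieces = ops.chunk(x, 4)
print(len(pieces))                # 3, not 4
print([p.shape for p in pieces])  # [(3,), (3,), (3,)]
```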
@@ -920,50 +1000,6 @@ def fills(x, value):
     return fills_(x, value_)
 
 
-def ones(shape, dtype=None):  # pylint: disable=redefined-outer-name
-    r"""
-    Creates a tensor filled with value ones.
-
-    Creates a tensor with shape described by the first argument and fills it with value ones in type of the second
-    argument.
-
-    Args:
-        shape (Union[tuple[int], int, Tensor]): The specified shape of output tensor. Only positive integer or
-            tuple or Tensor containing positive integers are allowed. If it is a Tensor,
-            it must be a 0-D or 1-D Tensor with int32 or int64 dtypes.
-        dtype (:class:`mindspore.dtype`): The specified type of output tensor. If `dtype` is ``None`` ,
-            `mindspore.float32` will be used. Default: ``None`` .
-
-    Returns:
-        Tensor, has the same type and shape as input shape value.
-
-    Raises:
-        TypeError: If `shape` is not tuple, int or Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import ops
-        >>> output = ops.ones((2, 2), mindspore.float32)
-        >>> print(output)
-        [[1. 1.]
-         [1. 1.]]
-    """
-    _dtype = mstype.float32 if dtype is None else dtype
-    ones_op = _get_cache_prim(P.FillV2)()
-    value = Tensor(1, _dtype)
-    if isinstance(shape, int):
-        shape = tuple([shape])
-    elif isinstance(shape, list):
-        shape = Tensor(shape, dtype=mstype.int64)
-    elif isinstance(shape, Tensor) and shape.ndim == 0 and shape.size == 1:
-        shape = shape.reshape(1)
-    output = ones_op(shape, value)
-    return output
-
-
 def ones_like(input, *, dtype=None):
     """
     Returns a Tensor with a value of 1 and its shape is the same as the input.
@@ -993,57 +1029,15 @@ def ones_like(input, *, dtype=None):
         [[1 1]
          [1 1]]
     """
-
-    output = ones_like_op(input)
+    output = ones_like_(input)
     _dtype = input.dtype if dtype is None else dtype
     output = cast_(output, _dtype)
     return output
 
 
-def zeros(size, dtype=None):  # pylint: disable=redefined-outer-name
-    r"""
-    Creates a tensor filled with 0 with shape described by `shape` and fills it with value 0 in type of `dtype`.
-
-    Args:
-        size (Union[tuple[int], int, Tensor]): The specified shape of output tensor. Only positive integer or
-            tuple or Tensor containing positive integers are allowed. If it is a Tensor,
-            it must be a 0-D or 1-D Tensor with int32 or int64 dtypes.
-        dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor. If `dtype` is ``None`` ,
-            mindspore.float32 will be used. Default: ``None`` .
-
-    Returns:
-        Tensor, has the same dtype and size as input.
-
-    Raises:
-        TypeError: If `size` is not tuple, int or Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import ops
-        >>> output = ops.zeros((2, 2), mindspore.float32)
-        >>> print(output)
-        [[0. 0.]
-         [0. 0.]]
-    """
-    zero_op = _get_cache_prim(P.FillV2)()
-    _dtype = mstype.float32 if dtype is None else dtype
-    value = Tensor(0, _dtype)
-    if isinstance(size, int):
-        size = tuple([size])
-    elif isinstance(size, list):
-        size = Tensor(size, dtype=mstype.int64)
-    elif isinstance(size, Tensor) and size.ndim == 0 and size.size == 1:
-        size = size.reshape(1)
-    output = zero_op(size, value)
-    return output
-
-
 def zeros_like(input, *, dtype=None):
     r"""
-    Creates a tensor filled with 0, with the same size as
+    Creates a tensor filled with 0, with the same size as input, and the given dtype.
 
     If `dtype = None`, the tensor will have the same dtype as input `input`.
 
@@ -1074,125 +1068,76 @@ def zeros_like(input, *, dtype=None):
          [0. 0.]]
     """
     _dtype = input.dtype if dtype is None else dtype
-
-
-    output = _zeros_like(input)
-    output = _cast(output, _dtype)
+    output = zeros_like_(input)
+    output = cast_(output, _dtype)
     return output
 
 
-def
-
-
-
-    Creates a new tensor by replicating `input` `multiples` times. The i'th dimension of
-    output tensor has `input.shape[i] * multiples[i]` elements, and the values of `input`
-    are replicated `multiples[i]` times along the i'th dimension.
-
-    Note:
-        The length of `multiples` must be greater or equal to the length of dimension in `input`.
+def ones_like_ext(input, *, dtype=None):
+    """
+    Returns a Tensor with a value of 1 and its shape is the same as the input.
 
     Args:
-        input (Tensor):
-            :math:`(x_1, x_2, ..., x_S)` .
+        input (Tensor): Tensor of any dimension.
 
-
-
-
-            Only constant value is allowed.
+    Keyword Args:
+        dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is ``None`` ,
+            the dtype of the input tensor will be used. Default: ``None`` .
 
     Returns:
-        Tensor, has the same
-        the dimension of `input` is `input.dim`, and the shape of `input` is :math:`(x_1, x_2, ..., x_S)`.
-
-        - If `input.dim = d`, then the shape of their corresponding positions can be multiplied, and
-          the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_S)`.
-        - If `input.dim < d`, fill in multiple 1 in the length of the shape of `input` until their
-          lengths are consistent. Such as set the shape of `input` as :math:`(1, ..., x_1, x_2, ..., x_S)`,
-          then the shape of their corresponding positions can be multiplied, and the shape of Outputs is
-          :math:`(1*y_1, ..., x_R*y_R, x_S*y_S)`.
+        Tensor, has the same shape as `input` but filled with ones.
 
     Raises:
-        TypeError: If `
-        ValueError: If the elements of `multiples` are not all greater than 0.
-        ValueError: If the length of `multiples` are smaller than the length of dimension in `input`.
+        TypeError: If `input` is not a Tensor.
 
     Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
-        >>>
-        >>>
-        >>> output = ops.tile(input, multiples)
-        >>> print(output)
-        [[1. 2. 1. 2. 1. 2.]
-         [3. 4. 3. 4. 3. 4.]
-         [1. 2. 1. 2. 1. 2.]
-         [3. 4. 3. 4. 3. 4.]]
-        >>> multiples = (2, 3, 2)
-        >>> output = ops.tile(input, multiples)
+        >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
+        >>> output = ops.mint.ones_like(x)
        >>> print(output)
-        [[
-
-
-
-
-
-
-         [3. 4. 3. 4.]
-         [1. 2. 1. 2.]
-         [3. 4. 3. 4.]
-         [1. 2. 1. 2.]
-         [3. 4. 3. 4.]]]
-    """
-    tile_op = _get_cache_prim(P.Tile)()
-    return tile_op(input, multiples)
-
-
-def range(start, end, step):
+        [[1 1]
+         [1 1]]
+    """
+    return ones_like_ext_(input, dtype)
+
+
+def zeros_like_ext(input, *, dtype=None):
     r"""
-    Creates a
-    `limit` up to but not including `end`.
+    Creates a tensor filled with 0, with the same size as input, and the given dtype.
 
-
-    the same as the type of the inputs.
+    If `dtype = None`, the tensor will have the same dtype as input `input`.
 
     Args:
-
-
-
-
-
-        type: int32 ,int64, float32 or float64.
+        input (Tensor): Tensor of any dimension.
+
+    Keyword Args:
+        dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is ``None`` ,
+            the dtype of the input tensor will be used. Default: ``None`` .
 
     Returns:
-
+        Tensor, filled with 0.
 
     Raises:
-        TypeError: If
-        TypeError: If datatype of `start`, `end` or `step` is not same.
-        TypeError: If datatype of `start`, `end` or `step` is not supported.
-        ValueError: If `step` = 0.
-        ValueError: If `start` >= `end` when `step` > 0.
-        ValueError: If `start` <= `end` when `step` < 0.
+        TypeError: If dtype is not a MindSpore dtype.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
+        >>> import mindspore
+        >>> import numpy as np
        >>> from mindspore import Tensor, ops
-        >>>
-        >>>
-        >>> end = Tensor(10, mstype.int32)
-        >>> step = Tensor(4, mstype.int32)
-        >>> output = ops.range(start, end, step)
+        >>> x = Tensor(np.arange(4).reshape(2, 2))
+        >>> output = ops.mint.zeros_like(x, dtype=mindspore.float32)
        >>> print(output)
-        [0
+        [[0. 0.]
+         [0. 0.]]
     """
-    return
+    return zeros_like_ext_(input, dtype)
 
 
 ##############################
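A small illustration of the dtype rule the `*_like` docstrings above share (shape follows the input, dtype follows the keyword when given); this snippet is not from the package:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor(np.arange(4).reshape(2, 2))            # Int64 input
print(ops.zeros_like(x).dtype)                    # Int64: inherits the input dtype
print(ops.zeros_like(x, dtype=ms.float32).dtype)  # Float32: the keyword wins
```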
@@ -1246,15 +1191,11 @@ def unique(input):
     >>> print(idx)
     [0 1 2 1]
     """
-
-    unique_op = _get_cache_prim(P.Unique)()
-    reshape_op = _get_cache_prim(P.Reshape)()
-
     shape_x = input.shape
     length_x = get_x_shape(shape_x)
-    input =
-    y, idx =
-    idx =
+    input = reshape_(input, length_x)
+    y, idx = unique_(input)
+    idx = reshape_(idx, shape_x)
     return y, idx
 
 
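The new `unique` body above flattens the input for the `Unique` primitive and reshapes `idx` back, so `idx` lines up element-for-element with the original input whatever its rank. A sketch of that behavior from the caller's side (illustrative, not from the diff):

```python
import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.array([[1, 2], [5, 2]]).astype(np.int32))
y, idx = ops.unique(x)  # internally: flatten, Unique, reshape idx back
print(y)    # [1 2 5]   unique values in order of first appearance
print(idx)  # [[0 1]
            #  [2 1]]   same shape as x
```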
@@ -1381,7 +1322,7 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):
 
     Returns:
         Tensor containing the indices from the innermost dimension of `sorted_sequence` such that,
-        if insert the corresponding value in the `values`
+        if insert the corresponding value in the `values` Tensor, the order of `sorted_sequence` would be preserved,
         whose datatype is int32 if out_int32 is ``True`` , otherwise int64, and shape is the same as the shape of
         `values`.
 
@@ -1457,7 +1398,7 @@ def size(input_x):
 
     Args:
         input_x (Tensor): Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
-            `number <https://www.mindspore.cn/docs/en/
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
 
     Returns:
         int. A scalar representing the elements' size of `input_x`, tensor is the number of elements
@@ -1472,84 +1413,25 @@ def size(input_x):
     Examples:
         >>> import mindspore
        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
-        >>> output = ops.size(input_x)
-        >>> print(output)
-        4
-    """
-    return size_(input_x)
-
-
-def shape(input_x):
-    """
-    Returns the shape of the input tensor.
-
-    Args:
-        input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-
-    Returns:
-        tuple[int], the output tuple is constructed by multiple integers,
-        :math:`(x_1, x_2, ..., x_R)`.
-
-    Raises:
-        TypeError: If `input_x` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
-        >>> output = ops.shape(input_x)
-        >>> print(output)
-        (3, 2, 1)
-    """
-    return shape_(input_x)
-
-
-def dyn_shape(input_x):
-    """
-    Returns the shape of the input tensor.
-
-    Args:
-        input_x (Tensor): The input Tensor.
-
-    Returns:
-        Tensor, the shape of `input_x` .
-
-    Raises:
-        TypeError: If `input_x` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
-        >>> output = ops.dyn_shape(input_x)
+        >>> from mindspore import Tensor, ops
+        >>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
+        >>> output = ops.size(input_x)
        >>> print(output)
-
+        4
     """
-    return
+    return size_(input_x)
 
 
-def
+def shape(input_x):
     """
-    Returns the
-
-    Returns a 0-D int32 Tensor representing the rank of input; the rank of a tensor
-    is the number of indices required to uniquely select each element of the tensor.
+    Returns the shape of the input tensor.
 
     Args:
-        input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
+        input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
 
     Returns:
-
+        tuple[int], the output tuple is constructed by multiple integers,
+        :math:`(x_1, x_2, ..., x_R)`.
 
     Raises:
         TypeError: If `input_x` is not a Tensor.
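The two shape queries kept above differ in when the answer is available: `ops.shape` returns a static Python tuple, while `ops.dyn_shape` (the `TensorShape` primitive) returns a 1-D Tensor that remains valid under dynamic shapes. A side-by-side demo, not part of the diff:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor(np.ones((3, 2, 1)), ms.float32)
print(ops.shape(x))      # (3, 2, 1) -- a tuple, known at trace time
print(ops.dyn_shape(x))  # [3 2 1]   -- a Tensor, usable with dynamic shapes
```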
@@ -1561,35 +1443,26 @@ def rank(input_x):
     >>> import mindspore
     >>> import numpy as np
     >>> from mindspore import Tensor, ops
-    >>>
-    >>> output = ops.
+    >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
+    >>> output = ops.shape(input_x)
     >>> print(output)
-    2
-    >>> print(type(output))
-    <class 'int'>
+    (3, 2, 1)
     """
-    return
+    return shape_(input_x)
 
 
-def
+def dyn_shape(input_x):
     """
-
-
-    The 'shape' can only have one -1 at most, in which case it's inferred from the remaining dimensions and
-    the number of elements in the input.
+    Returns the shape of the input tensor.
 
     Args:
-
-        shape (Union[tuple[int], Tensor[int]]): Constructed by multiple
-            integers, i.e., :math:`(y_1, y_2, ..., y_S)`. Only constant value is allowed.
+        input_x (Tensor): The input Tensor.
 
     Returns:
-        Tensor, the shape of
+        Tensor, the shape of `input_x` .
 
     Raises:
-
-        of its elements is less than or equal to 0 or cannot be divided by the product
-        of the input tensor shape; or if it does not match the input's array size.
+        TypeError: If `input_x` is not a Tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1598,14 +1471,12 @@ def reshape(input, shape):
     >>> import mindspore
     >>> import numpy as np
     >>> from mindspore import Tensor, ops
-    >>>
-    >>> output = ops.
+    >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
+    >>> output = ops.dyn_shape(input_x)
     >>> print(output)
-    [
-     [ 3.6 0.4]
-     [ 0.5 -3.2]]
+    [3 2 1]
     """
-    return
+    return tensor_shape_(input_x)
 
 
 def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
@@ -1672,7 +1543,7 @@ def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
     [[4. 3. 2. 1.]
      [8. 7. 6. 5.]]
     """
-    return P.ReverseSequence(seq_dim=seq_dim, batch_dim=batch_dim)(x, seq_lengths)
+    return _get_cache_prim(P.ReverseSequence)(seq_dim=seq_dim, batch_dim=batch_dim)(x, seq_lengths)
 
 
 def flatten(input, order='C', *, start_dim=1, end_dim=-1):
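Several hunks above (here and in `chunk`) swap bare primitive construction for `_get_cache_prim`, which memoizes primitive instances by their init arguments so repeated functional calls reuse one operator instead of rebuilding it. A hypothetical sketch of that caching idea; the names `_PRIM_CACHE` and `get_cache_prim_sketch` are mine, and the real helper lives inside MindSpore:

```python
from mindspore.ops import operations as P

_PRIM_CACHE = {}  # hypothetical stand-in for the library's internal cache

def get_cache_prim_sketch(prim_cls):
    """Return a factory that reuses one primitive instance per init-arg key."""
    def creator(*init_args, **init_kwargs):
        key = (prim_cls, init_args, tuple(sorted(init_kwargs.items())))
        if key not in _PRIM_CACHE:
            _PRIM_CACHE[key] = prim_cls(*init_args, **init_kwargs)
        return _PRIM_CACHE[key]
    return creator

rev1 = get_cache_prim_sketch(P.ReverseSequence)(seq_dim=1, batch_dim=0)
rev2 = get_cache_prim_sketch(P.ReverseSequence)(seq_dim=1, batch_dim=0)
assert rev1 is rev2  # the second call reuses the cached instance
```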
@@ -1696,7 +1567,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
     Raises:
         TypeError: If `input` is not a Tensor.
         TypeError: If `order` is not string type.
-        ValueError: If `order` is string type, but not 'C' or 'F'
+        ValueError: If `order` is string type, but not ``'C'`` or ``'F'``.
         TypeError: If `start_dim` or `end_dim` is not int.
         ValueError: If `start_dim` is greater than `end_dim` after canonicalized.
         ValueError: If `start_dim` or `end_dim` is not in range of [-input.dim, input.dim-1].
@@ -1741,7 +1612,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
         return reshape_(input, (-1,))
     perm = ops.make_range(0, x_rank)
     new_order = ops.tuple_reversed(perm)
-    input =
+    input = transpose_(input, new_order)
 
     # Handle the default case.
     x_shape = shape_(input)
@@ -1749,7 +1620,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
     if start_dim == 1 and end_dim == -1:
         if x_rank in (0, 1):
             return reshape_(input, (-1,))
-        return
+        return flatten_(input)
 
     # Check axis.
     start_dim = canonicalize_axis(start_dim, x_rank)
@@ -1771,341 +1642,6 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
     return reshape_(input, new_shape)
 
 
-@constexpr
-def _check_select_type_match(scalar, tensor_type, scalar_name, tensor_name):
-    if isinstance(scalar, int) and tensor_type != mstype.int32:
-        raise TypeError(f"For functional operator[select], the input[{scalar_name}] is int, "
-                        f"then the input[{tensor_name}] must be a Tensor of int32.")
-    if isinstance(scalar, float) and tensor_type != mstype.float32:
-        raise TypeError(f"For functional operator[select], the input[{scalar_name}] is float, "
-                        f"then the input[{tensor_name}] must be a Tensor of float32.")
-
-
-@_primexpr
-def _check_select_shape_match(input_shape, cond_shape, tensor_name):
-    if input_shape != cond_shape:
-        raise ValueError(f"For functional operator[select], the cond shape must be same as {tensor_name} shape.")
-
-
-@constexpr
-def _check_select_type(is_cond_tensor, is_x_scalar, is_y_scalar, is_x_tensor, is_y_tensor):
-    if not is_cond_tensor:
-        raise TypeError(f"For functional operator[select], the input[cond] must be a Tensor.")
-    if is_x_scalar and not is_y_tensor:
-        raise TypeError(f"For functional operator[select], the input[x] is int or float, "
-                        f"then the input[y] must be a Tensor.")
-    if is_y_scalar and not is_x_tensor:
-        raise TypeError(f"For functional operator[select], the input[y] is int or float, "
-                        f"then the input[x] must be a Tensor.")
-
-
-@constexpr
-def _check_select_shape_same(cond_shape, x_shape, y_shape):
-    """Check if input of select has same shape."""
-    return cond_shape == x_shape and x_shape == y_shape and cond_shape == y_shape
-
-
-@constexpr
-def get_max_value(x, y, z):
-    """Get the maximum value of x, y and z."""
-    if x >= y and x >= z:
-        return x
-    if y >= x and y >= z:
-        return y
-    return z
-
-
-@constexpr
-def _calc_broadcast_shape(cond_shape, x_shape, y_shape):
-    """Calculate broadcast shape for select"""
-    converted_shape = []
-    cond_reverse = cond_shape[::-1]
-    x_reverse = x_shape[::-1]
-    y_reverse = y_shape[::-1]
-    max_len = get_max_value(len(cond_reverse), len(x_reverse), len(y_reverse))
-    i = 0
-    while i < max_len:
-        cond_element = 1 if i >= len(cond_reverse) else cond_reverse[i]
-        x_element = 1 if i >= len(x_reverse) else x_reverse[i]
-        y_element = 1 if i >= len(y_reverse) else y_reverse[i]
-        broadcast_element = get_max_value(cond_element, x_element, y_element)
-        if cond_element not in (1, broadcast_element):
-            raise ValueError(f"For select, condition input can not broadcast at index {i}")
-        if x_element not in (1, broadcast_element):
-            raise ValueError(f"For select, x input can not broadcast at index {i}")
-        if y_element not in (1, broadcast_element):
-            raise ValueError(f"For select, y input can not broadcast at index {i}")
-        converted_shape.append(broadcast_element)
-        i = i + 1
-    converted_shape.reverse()
-    return tuple(converted_shape)
-
-
-def select(cond, x, y):
-    r"""
-    The conditional tensor determines whether the corresponding element in the output must be
-    selected from `x` (if true) or `y` (if false) based on the value of each element.
-
-    It can be defined as:
-
-    .. math::
-        out_i = \begin{cases}
-        x_i, & \text{if } cond_i \\
-        y_i, & \text{otherwise}
-        \end{cases}
-
-    Args:
-        cond (Tensor[bool]): The condition tensor, decides which element is chosen.
-            The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
-        x (Union[Tensor, int, float]): The first Tensor or number to be selected.
-            If x is a Tensor, the shape is or can be broadcadt to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
-            If x is an int or a float, it will be cast to the type of int32 or float32,
-            and broadcast to the same shape as y. One of x and y must be a Tensor.
-        y (Union[Tensor, int, float]): The second Tensor or number to be selected.
-            If y is a Tensor, The shape is or can be broadcadt to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
-            If y is an int or a float, it will be cast to the type of int32 or float32,
-            and broadcast to the same shape as x. One of x and y must be a Tensor.
-
-    Returns:
-        Tensor, has the same shape as `cond`.
-
-    Raises:
-        TypeError: If `x` or `y` is not a Tensor, int or float.
-        ValueError: The shapes of inputs can not be broadcast.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> # 1) Both inputs are Tensor
-        >>>
-        >>> cond = Tensor([True, False])
-        >>> x = Tensor([2,3], mindspore.float32)
-        >>> y = Tensor([1,2], mindspore.float32)
-        >>> output = ops.select(cond, x, y)
-        >>> print(output)
-        [2. 2.]
-        >>> # 2) y is a float
-        >>> cond = Tensor([True, False])
-        >>> x = Tensor([2,3], mindspore.float32)
-        >>> y = 2.0
-        >>> output = ops.select(cond, x, y)
-        >>> print(output)
-        [2. 2.]
-    """
-    is_x_scalar = isinstance(x, (int, float))
-    is_y_scalar = isinstance(y, (int, float))
-    is_x_tensor = isinstance(x, Tensor)
-    is_y_tensor = isinstance(y, Tensor)
-    is_cond_tensor = isinstance(cond, Tensor)
-    _check_select_type(is_cond_tensor, is_x_scalar, is_y_scalar, is_x_tensor, is_y_tensor)
-    input_x = x
-    input_y = y
-    if is_x_scalar:
-        _check_select_shape_match(y.shape, cond.shape, "y")
-        _check_select_type_match(x, y.dtype, "x", "y")
-        input_x = zeros_like_(y) + x
-        if isinstance(x, int):
-            input_x = cast_(input_x, mstype.int32)
-        else:
-            input_x = cast_(input_x, mstype.float32)
-    if is_y_scalar:
-        _check_select_shape_match(x.shape, cond.shape, "x")
-        _check_select_type_match(y, x.dtype, "y", "x")
-        input_y = zeros_like_(x) + y
-        if isinstance(y, int):
-            input_y = cast_(input_y, mstype.int32)
-        else:
-            input_y = cast_(input_y, mstype.float32)
-    if is_x_tensor and is_y_tensor and is_cond_tensor:
-        x_shape = ops.shape(x)
-        y_shape = ops.shape(y)
-        cond_shape = ops.shape(cond)
-        all_constant = ops.isconstant(cond_shape) and ops.isconstant(x_shape) and ops.isconstant(y_shape)
-        if all_constant and not _check_select_shape_same(cond_shape, x_shape, y_shape):
-            broadcast_shape = _calc_broadcast_shape(cond_shape, x_shape, y_shape)
-            new_cond = ops.broadcast_to(cond, broadcast_shape)
-            new_x = ops.broadcast_to(x, broadcast_shape)
-            new_y = ops.broadcast_to(y, broadcast_shape)
-            return tensor_select_(new_cond, new_x, new_y)
-
-    return tensor_select_(cond, input_x, input_y)
-
-
-def strided_slice(input_x,
-                  begin,
-                  end,
-                  strides,
-                  begin_mask=0,
-                  end_mask=0,
-                  ellipsis_mask=0,
|
|
1946
|
-
new_axis_mask=0,
|
|
1947
|
-
shrink_axis_mask=0):
|
|
1948
|
-
r"""
|
|
1949
|
-
Extracts a strided slice of a Tensor based on `begin/end` index and `strides`.
|
|
1950
|
-
|
|
1951
|
-
This operation extracts a fragment of size (end-begin)/strides from the given 'input_tensor'.
|
|
1952
|
-
Starting from the beginning position, the fragment continues adding strides to the index until
|
|
1953
|
-
all dimensions are not less than the ending position.
|
|
1954
|
-
|
|
1955
|
-
Note:
|
|
1956
|
-
- `begin` , `end` and `strides` must have the same shape.
|
|
1957
|
-
- `begin` , `end` and `strides` are all 1-D Tensor, and their shape size
|
|
1958
|
-
must not greater than the dim of `input_x`.
|
|
1959
|
-
|
|
1960
|
-
During the slicing process, the fragment (end-begin)/strides are extracted from each dimension.
|
|
1961
|
-
|
|
1962
|
-
Example: For Tensor `input_x` with shape :math:`(5, 6, 7)`,
|
|
1963
|
-
set `begin`, `end` and `strides` to (1, 3, 2), (3, 5, 6),
|
|
1964
|
-
(1, 1, 2) respectively, then elements from index 1 to 3 are extrected for dim 0, index 3 to 5
|
|
1965
|
-
are extrected for dim 1 and index 2 to 6 with a `stirded` of 2 are extrected for dim 2, this
|
|
1966
|
-
process is equivalent to a pythonic slice `input_x[1:3, 3:5, 2:6:2]`.
|
|
1967
|
-
|
|
1968
|
-
If the length of `begin` 、 `end` and `strides` is smaller than the dim of `input_x`,
|
|
1969
|
-
then all elements are extracted from the missing dims, it behaves like all the
|
|
1970
|
-
missing dims are filled with zeros, size of that missing dim and ones.
|
|
1971
|
-
|
|
1972
|
-
Example: For Tensor `input_x` with shape :math:`(5, 6, 7)`,
|
|
1973
|
-
set `begin`, `end` and `strides` to (1, 3),
|
|
1974
|
-
(3, 5), (1, 1) respectively, then elements from index 1 to 3 are extrected
|
|
1975
|
-
for dim 0, index 3 to 5 are extrected for dim 1 and index 3 to 5 are extrected
|
|
1976
|
-
for dim 2, this process is equivalent to a pythonic slice `input_x[1:3, 3:5, 0:7]`.
|
|
1977
|
-
|
|
1978
|
-
Here's how a mask works:
|
|
1979
|
-
For each specific mask, it will be converted to a binary representation internally, and then
|
|
1980
|
-
reverse the result to start the calculation. For Tensor `input_x` with
|
|
1981
|
-
shape :math:`(5, 6, 7)`. Given mask value of 3 which
|
|
1982
|
-
can be represented as 0b011. Reverse that we get 0b110, which implies the first and second dim of the
|
|
1983
|
-
original Tensor will be effected by this mask. See examples below, for simplicity all mask mentioned
|
|
1984
|
-
below are all in their reverted binary form:
|
|
1985
|
-
|
|
1986
|
-
- `begin_mask` and `end_mask`
|
|
1987
|
-
|
|
1988
|
-
If the ith bit of `begin_mask` is 1, `begin[i]` is ignored and the fullest
|
|
1989
|
-
possible range in that dimension is used instead. `end_mask` is analogous,
|
|
1990
|
-
except with the end range. For Tensor `input_x` with shape :math:`(5, 6, 7, 8)`, if `begin_mask`
|
|
1991
|
-
is 0b110, `end_mask` is 0b011, the slice `input_x[0:3, 0:6, 2:7:2]` is produced.
|
|
1992
|
-
|
|
1993
|
-
- `ellipsis_mask`
|
|
1994
|
-
|
|
1995
|
-
If the ith bit of `ellipsis_mask` is 1, as many unspecified dimensions as needed
|
|
1996
|
-
will be inserted between other dimensions. Only one non-zero bit is allowed
|
|
1997
|
-
in `ellipsis_mask`. For Tensor `input_x` with shape :math:`(5, 6, 7, 8)`, `input_x[2:,...,:6]`
|
|
1998
|
-
is equivalent to `input_x[2:5,:,:,0:6]` , `input_x[2:,...]` is equivalent
|
|
1999
|
-
to `input_x[2:5,:,:,:]`.
|
|
2000
|
-
|
|
2001
|
-
- `new_axis_mask`
|
|
2002
|
-
|
|
2003
|
-
If the ith bit of `new_axis_mask` is 1, `begin`, `end` and `strides` are
|
|
2004
|
-
ignored and a new length 1 dimension is added at the specified position
|
|
2005
|
-
in the output Tensor. For Tensor `input_x` with shape :math:`(5, 6, 7)`, if `new_axis_mask`
|
|
2006
|
-
is 0b110, a new dim is added to the second dim, which will produce
|
|
2007
|
-
a Tensor with shape :math:`(5, 1, 6, 7)`.
|
|
2008
|
-
|
|
2009
|
-
- `shrink_axis_mask`
|
|
2010
|
-
|
|
2011
|
-
If the ith bit of `shrink_axis_mask` is 1, `begin`, `end` and `strides`
|
|
2012
|
-
are ignored and dimension i will be shrunk to 0.
|
|
2013
|
-
For Tensor `input_x` with shape :math:`(5, 6, 7)`,
|
|
2014
|
-
if `shrink_axis_mask` is 0b010, it is equivalent to slice `x[:, 5, :]`
|
|
2015
|
-
and results in an output shape of :math:`(5, 7)`.
|
|
2016
|
-
|
|
2017
|
-
Note:
|
|
2018
|
-
`new_axis_mask` and `shrink_axis_mask` are not recommended to
|
|
2019
|
-
use at the same time, it might incur unexpected result.
|
|
2020
|
-
|
|
2021
|
-
Args:
|
|
2022
|
-
input_x (Tensor): The input Tensor to be extracted from.
|
|
2023
|
-
begin (tuple[int]): A tuple which represents the location where to start.
|
|
2024
|
-
end (tuple[int]): A tuple or which represents the maximum location where to end.
|
|
2025
|
-
strides (tuple[int]): A tuple which represents the strides is continuously added
|
|
2026
|
-
before reaching the maximum location. Only int is allowed, it can be negative
|
|
2027
|
-
which results in reversed slicing.
|
|
2028
|
-
begin_mask (int, optional): Starting index of the slice. Default: ``0`` .
|
|
2029
|
-
end_mask (int, optional): Ending index of the slice. Default: ``0`` .
|
|
2030
|
-
ellipsis_mask (int, optional): An int mask, ignore slicing operation when set to 1. Default: ``0`` .
|
|
2031
|
-
new_axis_mask (int, optional): An int mask for adding new dims. Default: ``0`` .
|
|
2032
|
-
shrink_axis_mask (int, optional): An int mask for shrinking dims. Default: ``0`` .
|
|
2033
|
-
|
|
2034
|
-
Returns:
|
|
2035
|
-
Tensor, return the extracts a strided slice of a Tensor based on `begin/end` index and `strides`.
|
|
2036
|
-
|
|
2037
|
-
Raises:
|
|
2038
|
-
TypeError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or
|
|
2039
|
-
`shrink_axis_mask` is not an int.
|
|
2040
|
-
TypeError: If `begin`, `end` or `strides` is not tuple[int].
|
|
2041
|
-
ValueError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or
|
|
2042
|
-
`shrink_axis_mask` is less than 0.
|
|
2043
|
-
ValueError: If `begin`, `end` and `strides` have different shapes.
|
|
2044
|
-
|
|
2045
|
-
Supported Platforms:
|
|
2046
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
2047
|
-
|
|
2048
|
-
Examples:
|
|
2049
|
-
>>> import mindspore
|
|
2050
|
-
>>> from mindspore import Tensor, ops
|
|
2051
|
-
>>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
|
|
2052
|
-
... [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
|
|
2053
|
-
>>> output = ops.strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1))
|
|
2054
|
-
>>> # Take this " output = strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1)) " as an example,
|
|
2055
|
-
>>> # start = [1, 0, 2] , end = [3, 1, 3], strides = [1, 1, 1], Find a segment of (start, end),
|
|
2056
|
-
>>> # note that end is an open interval
|
|
2057
|
-
>>> # To facilitate understanding, this operator can be divided into three steps:
|
|
2058
|
-
>>> # Step 1: Calculation of the first dimension:
|
|
2059
|
-
>>> # start = 1, end = 3, strides = 1, So can take 1st, 2nd rows, and then gets the final output at this time.
|
|
2060
|
-
>>> # output_1th =
|
|
2061
|
-
>>> # [
|
|
2062
|
-
>>> # [
|
|
2063
|
-
>>> # [3,3,3]
|
|
2064
|
-
>>> # [4,4,4]
|
|
2065
|
-
>>> # ]
|
|
2066
|
-
>>> # [
|
|
2067
|
-
>>> # [5,5,5]
|
|
2068
|
-
>>> # [6,6,6]
|
|
2069
|
-
>>> # ]
|
|
2070
|
-
>>> # ]
|
|
2071
|
-
>>> # Step 2: Calculation of the second dimension
|
|
2072
|
-
>>> # 2nd dimension, start = 0, end = 1, strides = 1. So only 0th rows
|
|
2073
|
-
>>> # can be taken, and the output at this time.
|
|
2074
|
-
>>> # output_2nd =
|
|
2075
|
-
>>> # [
|
|
2076
|
-
>>> # [
|
|
2077
|
-
>>> # [3,3,3]
|
|
2078
|
-
>>> # ]
|
|
2079
|
-
>>> # [
|
|
2080
|
-
>>> # [5,5,5]
|
|
2081
|
-
>>> # ]
|
|
2082
|
-
>>> # ]
|
|
2083
|
-
>>> # Step 3: Calculation of the third dimension
|
|
2084
|
-
>>> # 3nd dimension,start = 2, end = 3, strides = 1, So can take 2th cols,
|
|
2085
|
-
>>> # and you get the final output at this time.
|
|
2086
|
-
>>> # output_3ed =
|
|
2087
|
-
>>> # [
|
|
2088
|
-
>>> # [
|
|
2089
|
-
>>> # [3]
|
|
2090
|
-
>>> # ]
|
|
2091
|
-
>>> # [
|
|
2092
|
-
>>> # [5]
|
|
2093
|
-
>>> # ]
|
|
2094
|
-
>>> # ]
|
|
2095
|
-
>>> # The final output after finishing is:
|
|
2096
|
-
>>> print(output)
|
|
2097
|
-
[[[3.]]
|
|
2098
|
-
[[5.]]]
|
|
2099
|
-
>>> # another example like :
|
|
2100
|
-
>>> output = strided_slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
|
|
2101
|
-
>>> print(output)
|
|
2102
|
-
[[[3. 3. 3.]]]
|
|
2103
|
-
"""
|
|
2104
|
-
strided_slice_ = _get_cache_prim(P.StridedSlice)(
|
|
2105
|
-
begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask)
|
|
2106
|
-
return strided_slice_(input_x, begin, end, strides)
|
|
2107
|
-
|
|
2108
|
-
|
|
2109
1645
|
def slice(input_x, begin, size):
|
|
2110
1646
|
r"""
|
|
2111
1647
|
Slices a tensor in the specified shape.
|
|
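For orientation on the removed select helpers: `_calc_broadcast_shape` applies the standard right-aligned broadcasting rule to three shapes at once. A minimal standalone sketch of that rule (plain Python, no MindSpore dependency; the name `broadcast3` is ours):

```python
def broadcast3(cond_shape, x_shape, y_shape):
    # Right-align the three shapes, padding missing leading dims with 1.
    rank = max(len(cond_shape), len(x_shape), len(y_shape))
    padded = [(1,) * (rank - len(s)) + tuple(s) for s in (cond_shape, x_shape, y_shape)]
    out = []
    for dims in zip(*padded):
        target = max(dims)
        # A dim is broadcastable only if it is 1 or already equals the target.
        if any(d not in (1, target) for d in dims):
            raise ValueError(f"cannot broadcast {cond_shape}, {x_shape}, {y_shape}")
        out.append(target)
    return tuple(out)

assert broadcast3((2, 1, 3), (1, 4, 3), (3,)) == (2, 4, 3)
```

This mirrors the removed helper's behavior, including raising when a dimension is neither 1 nor the per-axis maximum.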
@@ -2160,20 +1696,6 @@ def slice(input_x, begin, size):
     return tensor_slice(input_x, begin, size)


-def concat(tensors, axis=0):
-    """
-    Alias for :func:`mindspore.ops.cat()`.
-
-    Tutorial Examples:
-        - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/r2.2/beginner/tensor.html#tensor-operation>`_
-        - `Vision Transformer Image Classification - Building ViT as a whole
-          <https://mindspore.cn/tutorials/application/en/r2.2/cv/vit.html#building-vit-as-a-whole>`_
-        - `Sentiment Classification Implemented by RNN - Dense
-          <https://mindspore.cn/tutorials/application/en/r2.2/nlp/sentiment_analysis.html#dense>`_
-    """
-    return cat(tensors, axis)
-
-
 def stack(tensors, axis=0):
     r"""
     Stacks a list of tensors in specified axis.
@@ -2284,45 +1806,6 @@ def unbind(input, dim=0):
     return _unstack(input)


-def expand_dims(input_x, axis):
-    """
-    Adds an additional dimension to `input_x` at the given axis, the dimension
-    of `input_x` should be greater than or equal to 1.
-
-    Note:
-        If the specified axis is a negative number, the index is counted
-        backward from the end and starts at 1.
-
-    Args:
-        input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-        axis (int): Specifies the dimension index at which to expand
-            the shape of `input_x`. The value of axis must be in the range
-            `[-input_x.ndim-1, input_x.ndim]`. Only constant value is allowed.
-
-    Returns:
-        Tensor, the shape of tensor is :math:`(1, x_1, x_2, ..., x_R)` if the
-        value of `axis` is 0. It has the same data type as `input_x`.
-
-    Raises:
-        TypeError: If `axis` is not an int.
-        ValueError: If `axis` is not in the valid range :math:`[-a.ndim-1, a.ndim]`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
-        >>> output = ops.expand_dims(input_tensor, 0)
-        >>> print(output)
-        [[[2. 2.]
-          [2. 2.]]]
-    """
-    return expand_dims_(input_x, axis)
-
-
 def unsqueeze(input, dim):
     """
     Adds an additional dimension to `input` at the given dim.
@@ -2354,7 +1837,7 @@ def unsqueeze(input, dim):
         [[[2. 2.]
           [2. 2.]]]
     """
-    return
+    return expand_dims(input, dim)


 def squeeze(input, axis=None):
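The one-line fix above makes `unsqueeze` delegate to `expand_dims` instead of silently returning `None`. As a sanity check of the intended semantics, with NumPy as a stand-in for the MindSpore op:

```python
import numpy as np

x = np.array([[2.0, 2.0], [2.0, 2.0]])
# Inserting a new length-1 axis at dim 0: shape (2, 2) -> (1, 2, 2)
assert np.expand_dims(x, 0).shape == (1, 2, 2)
# A negative dim counts from the end: shape (2, 2) -> (2, 2, 1)
assert np.expand_dims(x, -1).shape == (2, 2, 1)
```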
@@ -2411,57 +1894,6 @@ def squeeze(input, axis=None):
     return squeeze_(input)


-def transpose(input, input_perm):
-    """
-    Permutes the dimensions of the input tensor according to input permutation.
-
-    For a 1-D array this has no effect, as a transposed vector is simply the same vector.
-    To convert a 1-D array into a 2-D column vector, please refer to the class mindspore.ops.ExpandDims.
-    For a 2-D array, this is a standard matrix transpose. For an n-D array, if axes are given,
-    their order indicates how the axes are permuted (see Examples).
-    If axes are not provided and a.shape is :math:`(i[0], i[1], ... i[n-2], i[n-1])`,
-    then a.transpose().shape is :math:`(i[n-1], i[n-2], ... i[1], i[0])`.
-
-    Note:
-        On GPU and CPU, if the value of `input_perm` is negative, its actual value is `input_perm[i] + rank(input)`.
-        Negative values of `input_perm` are not supported on Ascend.
-
-    Args:
-        input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-        input_perm (tuple[int]): The permutation to be converted. The elements in `input_perm` are composed of
-            the indexes of each dimension of `input`. The length of `input_perm` and the shape of `input` must be
-            the same. Only constant value is allowed. Must be in the range [-rank(input), rank(input)).
-
-    Returns:
-        Tensor, the type of output tensor is the same as `input` and the shape of output tensor is decided by the
-        shape of `input` and the value of `input_perm`.
-
-    Raises:
-        TypeError: If `input_perm` is not a tuple.
-        ValueError: If length of shape of `input` is not equal to length of shape of `input_perm`.
-        ValueError: If the same element exists in `input_perm`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
-        >>> input_perm = (0, 2, 1)
-        >>> output = ops.transpose(input, input_perm)
-        >>> print(output)
-        [[[ 1.  4.]
-          [ 2.  5.]
-          [ 3.  6.]]
-         [[ 7. 10.]
-          [ 8. 11.]
-          [ 9. 12.]]]
-    """
-    return transpose_(input, input_perm)
-
-
 def scatter_mul(input_x, indices, updates):
     r"""
     Using given values to update tensor value through the mul operation, along with the input indices.
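The removed `transpose` wrapper above describes a plain axis permutation; NumPy's `transpose` follows the same contract and can serve as a reference:

```python
import numpy as np

x = np.arange(12, dtype=np.float32).reshape(2, 2, 3)
# Permutation (0, 2, 1) swaps the last two axes: shape (2, 2, 3) -> (2, 3, 2)
y = x.transpose(0, 2, 1)
assert y.shape == (2, 3, 2)
# Each element obeys y[i, k, j] == x[i, j, k]
assert y[1, 2, 0] == x[1, 0, 2]
```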
@@ -2792,111 +2224,6 @@ def scatter_div(input_x, indices, updates):
     return scatter_div_(input_x, indices, updates)


-def scatter_nd(indices, updates, shape):
-    r"""
-    Scatters a tensor into a new tensor depending on the specified indices.
-
-    Creates an empty tensor with the given `shape`, and sets values by scattering the update tensor
-    depending on indices. The empty tensor has rank :math:`P` and `indices` has rank :math:`Q`.
-
-    The `shape` is :math:`(s_0, s_1, ..., s_{P-1})`, where :math:`P \ge 1`.
-
-    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)`, where :math:`Q \ge 2` and :math:`N \le P`.
-
-    The last dimension of `indices` (with length :math:`N` ) indicates slices along the :math:`N` th dimension of the
-    empty tensor.
-
-    `updates` is a tensor of rank :math:`Q-1+P-N`, and
-    its shape is :math:`(i_0, i_1, ..., i_{Q-2}, s_N, s_{N+1}, ..., s_{P-1})`.
-
-    If `indices` contains duplicates, the duplicate `updates` are summed.
-
-    The following figure shows the calculation process of inserting two new value matrices into the first dimension
-    of a tensor with rank 3:
-
-    .. image:: ScatterNd.png
-
-    Args:
-        indices (Tensor): Defines the index of scattering in the new tensor with int32 or int64 data type.
-            The rank of `indices` must be at least 2 and `indices.shape[-1] <= len(shape)`.
-        updates (Tensor): Defines the source Tensor to be updated.
-            It has shape `indices.shape[:-1] + shape[indices.shape[-1]:]`.
-        shape (tuple[int]): Defines the shape of the output tensor, has the same data type as indices.
-            `shape` can not be empty, and the elements in `shape` must be greater than or equal to 1.
-
-    Returns:
-        Tensor, the new tensor, has the same type as `updates` and the same shape as `shape`.
-
-    Raises:
-        TypeError: If `shape` is not a tuple.
-        ValueError: If any element of `shape` is less than 1.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
-        >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2],
-        ...                             [3, 3, 3, 3], [4, 4, 4, 4]],
-        ...                            [[1, 1, 1, 1], [2, 2, 2, 2],
-        ...                             [3, 3, 3, 3], [4, 4, 4, 4]]]), mindspore.float32)
-        >>> shape = (4, 4, 4)
-        >>> output = ops.scatter_nd(indices, updates, shape)
-        >>> print(output)
-        [[[1. 1. 1. 1.]
-          [2. 2. 2. 2.]
-          [3. 3. 3. 3.]
-          [4. 4. 4. 4.]]
-         [[0. 0. 0. 0.]
-          [0. 0. 0. 0.]
-          [0. 0. 0. 0.]
-          [0. 0. 0. 0.]]
-         [[1. 1. 1. 1.]
-          [2. 2. 2. 2.]
-          [3. 3. 3. 3.]
-          [4. 4. 4. 4.]]
-         [[0. 0. 0. 0.]
-          [0. 0. 0. 0.]
-          [0. 0. 0. 0.]
-          [0. 0. 0. 0.]]]
-        >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
-        >>> updates = Tensor(np.array([3.2, 1.1]), mindspore.float32)
-        >>> shape = (3, 3)
-        >>> output = ops.scatter_nd(indices, updates, shape)
-        >>> # To facilitate understanding, the operator's pseudo-operation is explained step by step:
-        >>> # Step 1: Generate an empty Tensor of the specified shape
-        >>> # [
-        >>> #     [0. 0. 0.]
-        >>> #     [0. 0. 0.]
-        >>> #     [0. 0. 0.]
-        >>> # ]
-        >>> # Step 2: Modify the data at the specified locations according to the indices
-        >>> # 0th row of indices is [0, 1], 0th row of updates is 3.2,
-        >>> # so the element of the empty tensor at row 0, col 1 is set to 3.2
-        >>> # [
-        >>> #     [0. 3.2. 0.]
-        >>> #     [0. 0. 0.]
-        >>> #     [0. 0. 0.]
-        >>> # ]
-        >>> # 1st row of indices is [1, 1], 1st row of updates is 1.1,
-        >>> # so the element of the empty tensor at row 1, col 1 is set to 1.1
-        >>> # [
-        >>> #     [0. 3.2. 0.]
-        >>> #     [0. 1.1 0.]
-        >>> #     [0. 0. 0.]
-        >>> # ]
-        >>> # The final result is as follows:
-        >>> print(output)
-        [[0. 3.2 0.]
-         [0. 1.1 0.]
-         [0. 0. 0.]]
-    """
-    return scatter_nd_(indices, updates, shape)
-
-
 def scatter_update(input_x, indices, updates):
     r"""
     Updates tensor values by using input indices and value.
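The removed `scatter_nd` docstring is long, but the rule it documents is short: start from zeros of the requested shape and accumulate each `updates` slice at the coordinates given by the matching row of `indices` (duplicates are summed). A NumPy sketch of that rule, not MindSpore's kernel (`scatter_nd_ref` is ours):

```python
import numpy as np

def scatter_nd_ref(indices, updates, shape):
    # Start from zeros; each row of the flattened trailing index axis picks a
    # slice of `out`, and np.add.at sums duplicate indices instead of overwriting.
    out = np.zeros(shape, dtype=updates.dtype)
    idx = indices.reshape(-1, indices.shape[-1])
    upd = updates.reshape((-1,) + updates.shape[indices.ndim - 1:])
    np.add.at(out, tuple(idx.T), upd)
    return out

out = scatter_nd_ref(np.array([[0, 1], [1, 1]]),
                     np.array([3.2, 1.1], dtype=np.float32), (3, 3))
assert out[0, 1] == np.float32(3.2) and out[1, 1] == np.float32(1.1)
```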
@@ -2946,8 +2273,7 @@ def scatter_update(input_x, indices, updates):
         [[2. 1.2 1.]
          [3. 1.2 1.]]
     """
-
-    return scatter_update_inner(input_x, indices, updates)
+    return scatter_update_(input_x, indices, updates)


 def scatter_nd_add(input_x, indices, updates, use_locking=False):
@@ -3414,8 +2740,8 @@ def sort(input_x, axis=-1, descending=False):
         are sorted in descending order, or else sorted in ascending order. Default: ``False`` .

     .. warning::
-        Currently, the data types of
-        If use
+        Currently, the data types of float16, uint8, int8, int16, int32 and int64 are well supported.
+        Using float32 may cause a loss of accuracy.

     Returns:

@@ -3438,143 +2764,33 @@ def sort(input_x, axis=-1, descending=False):
         >>> from mindspore import Tensor, ops
         >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
         >>> output = ops.sort(x)
-        >>> # The output below is based on the Ascend platform.
-        >>> print(output)
-        (Tensor(shape=[3, 3], dtype=Float16, value=
-        [[ 1.0000e+00,  2.0000e+00,  8.0000e+00],
-         [ 3.0000e+00,  5.0000e+00,  9.0000e+00],
-         [ 4.0000e+00,  6.0000e+00,  7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int32, value=
-        [[2, 1, 0],
-         [2, 0, 1],
-         [0, 1, 2]]))
-    """
-    _sort = _get_cache_prim(P.Sort)(axis, descending)
-    return _sort(input_x)
-
-
-def argsort(input, axis=-1, descending=False):
-    r"""
-    Sorts the input tensor along the given dimension in specified order and return the sorted indices.
-
-    Args:
-        input (Tensor): The input tensor to sort.
-        axis (int): The axis to sort along. Default: ``-1`` , means the last dimension.
-            The Ascend backend only supports sorting the last dimension.
-        descending (bool): The sort order. If `descending` is True then the elements
-            are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
-
-    Returns:
-        Tensor, the indices of the sorted input tensor. Data type is int32.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
-        >>> sort = ops.argsort(x)
-        >>> print(sort)
-        [[2 1 0]
-         [2 0 1]
-         [0 1 2]]
-    """
-    _sort = _get_cache_prim(P.Sort)(axis, descending)
-    _, arg_sort = _sort(input)
-    return arg_sort
-
-
-def gather(input_params, input_indices, axis, batch_dims=0):
-    r"""
-    Returns the slice of the input tensor corresponding to the elements of `input_indices` on the specified `axis`.
-
-    The following figure shows the calculation process of Gather commonly:
-
-    .. image:: Gather.png
-
-    where params represents the input `input_params`, and indices represents the index to be sliced `input_indices`.
-
-    .. note::
-        1. The value of input_indices must be in the range of `[0, input_param.shape[axis])`.
-           On CPU and GPU, an error is raised if an out-of-bound index is found. On Ascend, the results may be
-           undefined.
-
-        2. The data type of input_params cannot be
-           `bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ on Ascend
-           platform currently.
-
-    Args:
-        input_params (Tensor): The original Tensor. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-        input_indices (Tensor): Index tensor to be sliced, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
-            Specifies the indices of elements of the original Tensor. The data type can be int32 or int64.
-        axis (Union(int, Tensor[int])): Specifies the dimension index to gather indices.
-            It must be greater than or equal to `batch_dims`.
-            When `axis` is a Tensor, the size must be 1.
-        batch_dims (int): Specifies the number of batch dimensions. It must be less than or equal to the rank
-            of `input_indices`. Default: ``0`` .
-
-    Returns:
-        Tensor, the shape of tensor is
-        :math:`input\_params.shape[:axis] + input\_indices.shape[batch\_dims:] + input\_params.shape[axis + 1:]`.
-
-    Raises:
-        TypeError: If `axis` is not an int or Tensor.
-        ValueError: If `axis` is a Tensor and its size is not 1.
-        TypeError: If `input_params` is not a tensor.
-        TypeError: If `input_indices` is not a tensor of type int.
-        RuntimeError: If `input_indices` is out of range `[0, input_param.shape[axis])` on CPU or GPU.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> # case1: input_indices is a Tensor with shape (5, ).
-        >>> input_params = Tensor(np.array([1, 2, 3, 4, 5, 6, 7]), mindspore.float32)
-        >>> input_indices = Tensor(np.array([0, 2, 4, 2, 6]), mindspore.int32)
-        >>> axis = 0
-        >>> output = ops.gather(input_params, input_indices, axis)
-        >>> print(output)
-        [1. 3. 5. 3. 7.]
-        >>> # case2: input_indices is a Tensor with shape (2, 2). When the input_params has one dimension,
-        >>> # the output shape is equal to the input_indices shape.
-        >>> input_indices = Tensor(np.array([[0, 2], [2, 6]]), mindspore.int32)
-        >>> axis = 0
-        >>> output = ops.gather(input_params, input_indices, axis)
-        >>> print(output)
-        [[1. 3.]
-         [3. 7.]]
-        >>> # case3: input_indices is a Tensor with shape (2, ) and
-        >>> # input_params is a Tensor with shape (3, 4) and axis is 0.
-        >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
-        >>> input_indices = Tensor(np.array([0, 2]), mindspore.int32)
-        >>> axis = 0
-        >>> output = ops.gather(input_params, input_indices, axis)
-        >>> print(output)
-        [[ 1.  2.  3.  4.]
-         [ 9. 10. 11. 12.]]
-        >>> # case4: input_indices is a Tensor with shape (3, ) and
-        >>> # input_params is a Tensor with shape (3, 4) and axis is 1, batch_dims is 1.
-        >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
-        >>> input_indices = Tensor(np.array([0, 2, 1]), mindspore.int32)
-        >>> axis = 1
-        >>> batch_dims = 1
-        >>> output = ops.gather(input_params, input_indices, axis, batch_dims)
+        >>> # The output below is based on the Ascend platform.
         >>> print(output)
-        [
+        (Tensor(shape=[3, 3], dtype=Float16, value=
+        [[ 1.0000e+00,  2.0000e+00,  8.0000e+00],
+         [ 3.0000e+00,  5.0000e+00,  9.0000e+00],
+         [ 4.0000e+00,  6.0000e+00,  7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int32, value=
+        [[2, 1, 0],
+         [2, 0, 1],
+         [0, 1, 2]]))
     """
-
-    return
+    _sort = _get_cache_prim(P.Sort)(axis, descending)
+    return _sort(input_x)


-def gather_d(x, dim, index):
-    r"""
-
+def argsort(input, axis=-1, descending=False):
+    r"""
+    Sorts the input tensor along the given dimension in specified order and return the sorted indices.
+
+    Args:
+        input (Tensor): The input tensor to sort.
+        axis (int): The axis to sort along. Default: ``-1`` , means the last dimension.
+            The Ascend backend only supports sorting the last dimension.
+        descending (bool): The sort order. If `descending` is True then the elements
+            are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .

-
+    Returns:
+        Tensor, the indices of the sorted input tensor. Data type is int32.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
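The rewritten hunk above restores `sort`'s example under `sort` and gives `argsort` its own body; as the restored implementation shows, `argsort` is simply the index half of the `Sort` primitive's output. A NumPy cross-check of that relationship:

```python
import numpy as np

x = np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]], dtype=np.float16)
idx = np.argsort(x, axis=-1)             # indices that sort each row
vals = np.take_along_axis(x, idx, -1)    # gathering them back yields the sorted values
assert (vals == np.sort(x, axis=-1)).all()
```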
@@ -3583,15 +2799,16 @@ def gather_d(x, dim, index):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
-        >>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
-        >>> dim = 1
-        >>> output = ops.gather_d(x, dim, index)
-        >>> print(output)
-        [[1 1]
-         [4 3]]
+        >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
+        >>> sort = ops.argsort(x)
+        >>> print(sort)
+        [[2 1 0]
+         [2 0 1]
+         [0 1 2]]
     """
-
+    _sort = _get_cache_prim(P.Sort)(axis, descending)
+    _, arg_sort = _sort(input)
+    return arg_sort


 def gather_elements(input, dim, index):
@@ -3608,26 +2825,29 @@ def gather_elements(input, dim, index):

         output[i][j][k] = x[i][j][index[i][j][k]]  # if dim == 2

-    `input` and `index` have the same length of dimensions, and
-
-
-
+    `input` and `index` have the same length of dimensions, and `index.shape[axis] <= input.shape[axis]`
+    where axis goes through all dimensions of `input` except `dim`.
+
+    .. warning::
+        On Ascend, the behavior is unpredictable in the following cases:
+
+        - the value of `index` is not in the range `[-input.shape[dim], input.shape[dim])` in forward;
+        - the value of `index` is not in the range `[0, input.shape[dim])` in backward.

     Args:
         input (Tensor): The input tensor.
-        dim (int): The axis along which to index. It must be int32 or int64. The value range is [-input.ndim,
-            input.ndim)
+        dim (int): The axis along which to index. It must be int32 or int64. The value range is `[-input.ndim,
+            input.ndim)`.
         index (Tensor): The indices of elements to gather. It can be one of the following data types:
-            int32, int64. The value range of each index element is [-input.shape(dim), input.shape(dim))
+            int32, int64. The value range of each index element is `[-input.shape(dim), input.shape(dim))`.

     Returns:
-        Tensor, has the same shape as index
-        and has the same data type with `input`.
+        Tensor, has the same shape as `index` and has the same data type as `input`.

     Raises:
         TypeError: If dtype of `dim` or `index` is neither int32 nor int64.
         ValueError: If length of shape of `input` is not equal to length of shape of `index`.
-        ValueError: If the size of the dimension except `dim` is
+        ValueError: If the size of any dimension except `dim` in `input` is less than the size in `index`.
         ValueError: If the value of `dim` is not in the expected range.

     Supported Platforms:
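The tightened `gather_elements` wording above matches the contract of NumPy's `take_along_axis`: the output takes `index`'s shape, and each index value addresses axis `dim` of `input`. A small sketch:

```python
import numpy as np

x = np.array([[1, 2], [3, 4]])
index = np.array([[0, 0], [1, 0]])
# out[i][j] = x[i][index[i][j]] for dim == 1
out = np.take_along_axis(x, index, axis=1)
assert (out == np.array([[1, 1], [4, 3]])).all()
```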
@@ -3648,48 +2868,6 @@ def gather_elements(input, dim, index):
     return gather_d_(input, dim, index)


-def gather_nd(input_x, indices):
-    r"""
-    Gathers slices from a tensor by indices.
-
-    Using given indices to gather slices from a tensor with a specified shape.
-
-    `indices` is a K-dimensional integer tensor. Treating it as a (K-1)-dimensional tensor, each element of it
-    defines a slice of `input_x`:
-
-    .. math::
-        output[(i_0, ..., i_{K-2})] = input\_x[indices[(i_0, ..., i_{K-2})]]
-
-    The last dimension of `indices` can not be more than the rank of `input_x`:
-    :math:`indices.shape[-1] <= input\_x.rank`.
-
-    Args:
-        input_x (Tensor): The target tensor to gather values.
-        indices (Tensor): The index tensor, with int32 or int64 data type.
-
-    Returns:
-        Tensor, has the same type as `input_x` and the shape is
-        :math:`indices\_shape[:-1] + input\_x\_shape[indices\_shape[-1]:]`.
-
-    Raises:
-        ValueError: If length of shape of `input_x` is less than the last dimension of `indices`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
-        >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
-        >>> output = ops.gather_nd(input_x, indices)
-        >>> print(output)
-        [-0.1  0.5]
-    """
-    return gather_nd_(input_x, indices)
-
-
 def tensor_scatter_add(input_x, indices, updates):
     r"""
     Creates a new tensor by adding the values from the positions in `input_x` indicated by
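The removed `gather_nd` treats the last axis of `indices` as a coordinate vector into `input_x`. A NumPy sketch (`gather_nd_ref` is ours):

```python
import numpy as np

def gather_nd_ref(x, indices):
    # Split the trailing coordinate axis into a tuple of per-dimension index arrays.
    return x[tuple(np.moveaxis(indices, -1, 0))]

x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]], dtype=np.float32)
out = gather_nd_ref(x, np.array([[0, 0], [1, 1]]))
assert np.allclose(out, [-0.1, 0.5])
```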
@@ -3700,7 +2878,7 @@ def tensor_scatter_add(input_x, indices, updates):

     The last axis of `indices` is the depth of each index vector. For each index vector,
     there must be a corresponding value in `updates`. The shape of `updates` should be
-    equal to the shape of `input_x[indices]`. For more details, see
+    equal to the shape of `input_x[indices]`. For more details, see Examples.

     .. math::
         output\left [indices \right ] = input\_x + update
@@ -3758,7 +2936,7 @@ def tensor_scatter_sub(input_x, indices, updates):

     The last axis of `indices` is the depth of each index vector. For each index vector,
     there must be a corresponding value in `updates`. The shape of `updates` should be
-    equal to the shape of `input_x[indices]`. For more details, see
+    equal to the shape of `input_x[indices]`. For more details, see Examples.

     .. math::
         output[indices] = input\_x - update
@@ -3943,14 +3121,12 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
         nondeterministic.
         - On Ascend, the reduction only supports being set to "none" for now.
         - On Ascend, the data type of `input_x` must be float16 or float32.
+        - This is an experimental API that is subject to change or deletion.

     Note:
         If some values of the `indices` exceed the upper or lower bounds of the index of `input_x`, instead of raising
         an index error, the corresponding `updates` will not be updated to `input_x`.

-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
     Args:
         input_x (Tensor): The target tensor. The rank must be at least 1.
         indices (Tensor): The index of `input_x` to do scatter operation whose data type must be mindspore.int32 or
@@ -4065,6 +3241,66 @@ def scatter(input, axis, index, src):
     return ops.tensor_scatter_elements(input_x=input, indices=index, updates=src, axis=axis)


+def scatter_add_ext(input, dim, index, src):
+    """
+    Updates `input` with the values in `src` according to the specified index.
+
+    Args:
+        input (Tensor): The target tensor. The rank of `input` must be at least 1.
+        dim (int): Which axis to scatter. Accepted range is [-r, r) where r = rank(input).
+        index (Tensor): The index to do update operation whose data type must be mindspore.int32 or
+            mindspore.int64. Same rank as `input`, and accepted range is [-s, s) where s is the size along axis.
+        src (Tensor): The tensor doing the update operation with `input`, has the same type as `input`,
+            and the shape of `src` should be equal to the shape of `index`.
+
+    Returns:
+        Tensor, has the same shape and type as `input`.
+
+    Raises:
+        TypeError: If `index` is neither int32 nor int64.
+        ValueError: If the rank of any of `input`, `index` and `src` is less than 1.
+        ValueError: If the shape of `src` is not equal to the shape of `index`.
+        ValueError: If the rank of `src` is not equal to the rank of `input`.
+        RuntimeError: If a data type conversion between `input` and `src` is required
+            but data type conversion of Parameter is not supported.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore as ms
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
+        >>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
+        >>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
+        >>> out = ops.scatter_add_ext(input=input, dim=1, index=index, src=src)
+        >>> print(out)
+        [[1. 2. 8. 4. 8.]]
+        >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
+        >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
+        >>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64)
+        >>> out = ops.scatter_add_ext(input=input, dim=0, index=index, src=src)
+        >>> print(out)
+        [[1. 2. 3. 0. 0.]
+         [0. 0. 0. 0. 0.]
+         [4. 5. 6. 0. 0.]
+         [0. 0. 0. 0. 0.]
+         [7. 8. 9. 0. 0.]]
+        >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
+        >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
+        >>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64)
+        >>> out = ops.scatter_add_ext(input=input, dim=1, index=index, src=src)
+        >>> print(out)
+        [[1. 0. 2. 0. 3.]
+         [4. 0. 5. 0. 6.]
+         [7. 0. 8. 0. 9.]
+         [0. 0. 0. 0. 0.]
+         [0. 0. 0. 0. 0.]]
+    """
+    return scatter_add_ext_op(input, dim, index, src)
+
+
 def _get_slice_scatter_const(x_shape, axis, start, end, step):
     r"""
     Calculate the rank of input, embedded dimensions and index.
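The new `scatter_add_ext` scatters `src` into `input` along `dim`. Reproducing the third docstring example with NumPy (reference only, not the op's kernel; `np.add.at` gives the unbuffered accumulation a scatter-add implies):

```python
import numpy as np

inp = np.zeros((5, 5), dtype=np.float32)
src = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
index = np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]])
out = inp.copy()
# For dim == 1: out[i][index[i][j]] += src[i][j]
np.add.at(out, (np.arange(3)[:, None], index), src)
assert (out[0] == np.array([1, 0, 2, 0, 3], dtype=np.float32)).all()
```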
@@ -4074,7 +3310,7 @@ def _get_slice_scatter_const(x_shape, axis, start, end, step):
     start = start if start is not None else 0
     start = start if start >= 0 else start + x_rank
     end = end if end is not None else x_shape[axis]
-    end = end if end >= 0 else end +
+    end = end if end >= 0 else end + x_shape[axis]
     end = end if end < x_shape[axis] else x_shape[axis]
     index = list(builtins.range(start, end, step))
     return x_rank, index, axis
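The corrected line above is ordinary Python negative-index normalization against the axis length, e.g.:

```python
# Negative slice bounds count from the end of the axis, as in plain Python:
x_shape = (5, 6, 7)
axis, end = 1, -2
end = end if end >= 0 else end + x_shape[axis]   # -2 -> 4
assert end == 4
```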
@@ -4121,6 +3357,8 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
         [1. 0. 1. 0. 1. 0.]
         [1. 0. 1. 0. 1. 0.]]
     """
+    _check_is_tensor("input", input, "slice_scatter")
+    _check_is_tensor("src", src, "slice_scatter")
     input_shape = input.shape
     input_rank, index, axis = _get_slice_scatter_const(input_shape, axis, start, end, step)

@@ -4136,6 +3374,8 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
     for _ in builtins.range(input_rank - axis - 1):
         index_tensor = index_tensor.expand_dims(-1)
     index_tensor = index_tensor.broadcast_to(src.shape)
+    if index_tensor.dtype not in mstype.int_type:
+        index_tensor = index_tensor.astype(mstype.int64)
     return tensor_scatter_elements(input, axis=axis, indices=index_tensor, updates=src)


@@ -4174,10 +3414,12 @@ def select_scatter(input, src, axis, index):
         [1. 1. 1.]
         [0. 0. 0.]]]
     """
+    _check_is_tensor("input", input, "select_scatter")
+    _check_is_tensor("src", src, "select_scatter")
     src = src.expand_dims(axis=axis)
     x_rank = input.ndim
     axis = axis if axis >= 0 else axis + x_rank
-    index = index if index >= 0 else index +
+    index = index if index >= 0 else index + input.shape[axis]
     return slice_scatter(input, src, axis, start=index, end=index + 1)


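`select_scatter` reduces to `slice_scatter` over a single index after the same negative-index normalization; in NumPy terms it is row assignment along `axis`:

```python
import numpy as np

inp = np.zeros((2, 3, 3), dtype=np.float32)
src = np.ones((2, 3), dtype=np.float32)
axis, index = 1, -2
index = index if index >= 0 else index + inp.shape[axis]   # the fixed normalization: -2 -> 1
out = inp.copy()
out[:, index, :] = src          # embed src at position `index` of axis 1
assert (out[:, 1, :] == 1).all()
```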
@@ -4303,49 +3545,11 @@ def batch_to_space_nd(input_x, block_shape, crops):
         [3. 4.]]]]
     """
     if isinstance(block_shape, Tensor):
-
-        return _batch_to_space_ndv2(input_x, block_shape, crops)
+        return batch_to_space_nd_v2_(input_x, block_shape, crops)
     _batch_to_space_nd = _get_cache_prim(P.BatchToSpaceND)(block_shape, crops)
     return _batch_to_space_nd(input_x)


-def nonzero(input):
-    """
-    Return a Tensor of the positions of all non-zero values.
-
-    Args:
-        input (Tensor): The input Tensor, its rank should be greater than or equal to 1.
-
-    Returns:
-        Tensor, a 2-D Tensor whose data type is int64, containing the positions of all non-zero values of the input.
-
-    Raises:
-        TypeError: If `input` is not Tensor.
-        ValueError: If dim of `input` equals to 0.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor
-        >>> import mindspore.ops as ops
-        >>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
-        >>> output = ops.nonzero(x)
-        >>> print(output)
-        [[0 0 0]
-         [0 1 0]]
-        >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
-        >>> output = ops.nonzero(x)
-        >>> print(output)
-        [[0]
-         [2]
-         [4]]
-    """
-    return nonzero_(input)
-
-
 def matrix_diag(x, k=0, num_rows=-1, num_cols=-1, padding_value=0, align="RIGHT_LEFT"):
     r"""
     Returns a Tensor with the contents in `x` as k[0]-th to k[1]-th diagonals of a matrix, with everything else padded
@@ -4605,18 +3809,19 @@ def meshgrid(*inputs, indexing='xy'):

     Keyword Args:
         indexing (str, optional): Cartesian ('xy', default) or
-            matrix ('ij') indexing of output. Valid options: 'xy' or 'ij'
+            matrix ('ij') indexing of output. Valid options: ``'xy'`` or ``'ij'``. In the 2-D case with
             inputs of length `M` and `N`, the outputs are of shape :math:`(N, M)`
-            for 'xy' indexing and :math:`(M, N)` for 'ij' indexing. In the 3-D
+            for ``'xy'`` indexing and :math:`(M, N)` for ``'ij'`` indexing. In the 3-D
             case with inputs of length `M`, `N` and `P`, outputs are of shape
-            :math:`(N, M, P)` for 'xy' indexing and :math:`(M, N, P)` for 'ij' indexing.
+            :math:`(N, M, P)` for ``'xy'`` indexing and :math:`(M, N, P)` for ``'ij'`` indexing.
+            Default: ``'xy'`` .

     Returns:
         Tensors, a Tuple of N N-D Tensor objects. The data type is the same with the Inputs.

     Raises:
         TypeError: If `indexing` is not a str or `inputs` is not a tuple.
-        ValueError: If `indexing` is neither 'xy' nor 'ij'
+        ValueError: If `indexing` is neither ``'xy'`` nor ``'ij'``.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
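The clarified `indexing` text mirrors NumPy's `meshgrid`, where the 'xy' and 'ij' output shapes are transposed relative to each other in 2-D:

```python
import numpy as np

x, y = np.arange(4), np.arange(3)        # lengths M=4, N=3
gx, gy = np.meshgrid(x, y, indexing='xy')
assert gx.shape == (3, 4)                # (N, M) for 'xy'
gi, gj = np.meshgrid(x, y, indexing='ij')
assert gi.shape == (4, 3)                # (M, N) for 'ij'
```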
@@ -4723,87 +3928,6 @@ def affine_grid(theta, size, align_corners=False):
     return affine_grid_op(theta, size)


-def broadcast_to(input, shape):  # pylint: disable=redefined-outer-name
-    """
-    Broadcasts input tensor to a given shape. The dim of the input shape must be smaller
-    than or equal to that of the target shape. Suppose the input shape is :math:`(x_1, x_2, ..., x_m)` and
-    the target shape is :math:`(*, y_1, y_2, ..., y_m)`, where :math:`*` means any additional dimension.
-    The broadcast rules are as follows:
-
-    Compare the value of :math:`x_m` and :math:`y_m`, :math:`x_{m-1}` and :math:`y_{m-1}`, ...,
-    :math:`x_1` and :math:`y_1` consecutively and
-    decide whether these shapes are broadcastable and what the broadcast result is.
-
-    If the value pairs at a specific dim are equal, then that value goes right into that dim of the output shape.
-    With an input shape :math:`(2, 3)` and target shape :math:`(2, 3)`, the inferred output shape is :math:`(2, 3)`.
-
-    If the value pairs are unequal, there are three cases:
-
-    Case 1: If the value of the target shape in the dimension is -1, the value of the
-    output shape in the dimension is the value of the corresponding input shape in the dimension.
-    With an input shape :math:`(3, 3)` and target
-    shape :math:`(-1, 3)`, the output shape is :math:`(3, 3)`.
-
-    Case 2: If the value of the target shape in the dimension is not -1, but the corresponding
-    value in the input shape is 1, then the corresponding value of the output shape
-    is that of the target shape. With an input shape :math:`(1, 3)` and target
-    shape :math:`(8, 3)`, the output shape is :math:`(8, 3)`.
-
-    Case 3: If the corresponding values of the two shapes do not satisfy the above cases,
-    it means that broadcasting from the input shape to the target shape is not supported.
-
-    So far we have got the last m dims of the output shape; now focus on the first :math:`*` dims, where there are
-    two cases:
-
-    If the first :math:`*` dims of the output shape do not have -1 in them, then fill the input
-    shape with ones until their lengths are the same, and then refer to
-    Case 2 mentioned above to calculate the output shape. With target shape :math:`(3, 1, 4, 1, 5, 9)` and
-    input shape :math:`(1, 5, 9)`, the filled input shape will be :math:`(1, 1, 1, 1, 5, 9)` and thus the
-    output shape is :math:`(3, 1, 4, 1, 5, 9)`.
-
-    If the first :math:`*` dims of the output shape have -1 in them, this -1 corresponds to
-    a non-existing dim, so the shapes are not broadcastable. With target shape :math:`(3, -1, 4, 1, 5, 9)` and
-    input shape :math:`(1, 5, 9)`, instead of applying the dim-filling process first, it raises an error directly.
-
-    Args:
-        input (Tensor): The input Tensor.
-        shape (tuple): The target shape to broadcast. Can be fully specified, or have -1 in one position
-            where it will be substituted by the input tensor's shape in that position, see example.
-
-    Returns:
-        Tensor, with the given `shape` and the same data type as `input`.
-
-    Raises:
-        TypeError: If `shape` is not a tuple.
-        ValueError: If the target and input shapes are incompatible, or if a -1 in the target shape is in an invalid
-            location.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> shape = (2, 3)
-        >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
-        >>> output = ops.broadcast_to(x, shape)
-        >>> print(output)
-        [[1. 2. 3.]
-         [1. 2. 3.]]
-        >>> shape = (-1, 2)
-        >>> x = Tensor(np.array([[1], [2]]).astype(np.float32))
-        >>> output = ops.broadcast_to(x, shape)
-        >>> print(output)
-        [[1. 1.]
-         [2. 2.]]
-    """
-    if isinstance(shape, Tensor) or ops.is_sequence_value_unknown(shape):
-        _dyn_broadcast_to = _get_cache_prim(DynamicBroadcastTo)()
-        return _dyn_broadcast_to(input, shape)
-    _broadcast_to = _get_cache_prim(P.BroadcastTo)(shape)
-    return _broadcast_to(input)
-
-
 def unsorted_segment_min(x, segment_ids, num_segments):
     r"""
     Computes the minimum of a tensor along segments.
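The removed `broadcast_to` adds one wrinkle over NumPy: a `-1` in the target shape is replaced by the input's size at the aligned position (and is only legal where the input actually has a dimension). A sketch of that substitution step (`broadcast_to_ref` is ours), delegating the rest to NumPy:

```python
import numpy as np

def broadcast_to_ref(x, shape):
    lead = len(shape) - x.ndim
    if lead < 0 or any(s == -1 for s in shape[:lead]):
        raise ValueError("-1 may only appear where the input has a dimension")
    # Substitute -1 with the input's size at the aligned position.
    resolved = tuple(x.shape[i - lead] if s == -1 else s for i, s in enumerate(shape))
    return np.broadcast_to(x, resolved)

out = broadcast_to_ref(np.array([[1.0], [2.0]]), (-1, 2))
assert (out == np.array([[1.0, 1.0], [2.0, 2.0]])).all()
```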
@@ -4827,14 +3951,13 @@ def unsorted_segment_min(x, segment_ids, num_segments):
         x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
         segment_ids (Tensor): The label indicates the segment to which each element belongs.
             Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
-        num_segments (int):
+        num_segments (Union[int, Tensor], optional): Set :math:`z` as num_segments, it can be an int or 0-D Tensor.

     Returns:
-        Tensor,
+        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.

     Raises:
         TypeError: If `num_segments` is not an int.
-        ValueError: If length of shape of `segment_ids` is not equal to 1.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -4851,7 +3974,6 @@ def unsorted_segment_min(x, segment_ids, num_segments):
         [[1. 2. 3.]
          [4. 2. 1.]]
     """
-    unsorted_segment_min_ = P.UnsortedSegmentMin()
     return unsorted_segment_min_(x, segment_ids, num_segments)


@@ -4878,14 +4000,13 @@ def unsorted_segment_max(x, segment_ids, num_segments):
         x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
         segment_ids (Tensor): The label indicates the segment to which each element belongs.
             Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
-        num_segments (int):
+        num_segments (Union[int, Tensor], optional): Set :math:`z` as num_segments, it can be an int or 0-D Tensor.

     Returns:
-        Tensor,
+        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.

     Raises:
         TypeError: If `num_segments` is not an int.
-        ValueError: If length of shape of `segment_ids` is not equal to 1.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -4902,7 +4023,6 @@ def unsorted_segment_max(x, segment_ids, num_segments):
         [[1. 2. 3.]
          [4. 5. 6.]]
     """
-    unsorted_segment_max_ = P.UnsortedSegmentMax()
     return unsorted_segment_max_(x, segment_ids, num_segments)


@@ -4920,16 +4040,15 @@ def unsorted_segment_prod(x, segment_ids, num_segments):
|
|
|
4920
4040
|
|
|
4921
4041
|
Args:
|
|
4922
4042
|
x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
|
|
4923
|
-
segment_ids (Tensor):
|
|
4924
|
-
|
|
4925
|
-
num_segments (int):
|
|
4043
|
+
segment_ids (Tensor): The label that indicates the segment to which each element belongs.
|
|
4044
|
+
The shape is :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R. The data type must be int32.
|
|
4045
|
+
num_segments (Union[int, Tensor]): The number of segments :math:`z`; it can be an int or a 0-D Tensor.
|
|
4926
4046
|
|
|
4927
4047
|
Returns:
|
|
4928
|
-
Tensor,
|
|
4048
|
+
Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
|
|
4929
4049
|
|
|
4930
4050
|
Raises:
|
|
4931
4051
|
TypeError: If `num_segments` is neither an int nor a 0-D Tensor.
|
|
4932
|
-
ValueError: If length of shape of `segment_ids` is not equal to 1.
|
|
4933
4052
|
|
|
4934
4053
|
Supported Platforms:
|
|
4935
4054
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -4946,7 +4065,6 @@ def unsorted_segment_prod(x, segment_ids, num_segments):
|
|
|
4946
4065
|
[[4. 4. 3.]
|
|
4947
4066
|
[4. 5. 6.]]
|
|
4948
4067
|
"""
|
|
4949
|
-
unsorted_segment_prod_ = P.UnsortedSegmentProd()
|
|
4950
4068
|
return unsorted_segment_prod_(x, segment_ids, num_segments)
|
|
4951
4069
|
|
|
4952
4070
|
|
|
@@ -5158,33 +4276,6 @@ def is_nonzero(input):
|
|
|
5158
4276
|
return bool(out)
|
|
5159
4277
|
|
|
5160
4278
|
|
|
5161
|
-
def scalar_cast(input_x, input_y):
|
|
5162
|
-
"""
|
|
5163
|
-
Casts the input scalar to another type.
|
|
5164
|
-
|
|
5165
|
-
Args:
|
|
5166
|
-
input_x (scalar): The input scalar. Only constant value is allowed.
|
|
5167
|
-
input_y (mindspore.dtype): The type to be cast. Only constant value is allowed.
|
|
5168
|
-
|
|
5169
|
-
Returns:
|
|
5170
|
-
Scalar. The type is the same as the python type corresponding to `input_y`.
|
|
5171
|
-
|
|
5172
|
-
Raises:
|
|
5173
|
-
TypeError: If neither `input_x` nor `input_y` is a constant value.
|
|
5174
|
-
|
|
5175
|
-
Supported Platforms:
|
|
5176
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
5177
|
-
|
|
5178
|
-
Examples:
|
|
5179
|
-
>>> import mindspore
|
|
5180
|
-
>>> from mindspore import ops
|
|
5181
|
-
>>> output = ops.scalar_cast(255.0, mindspore.int32)
|
|
5182
|
-
>>> print(output)
|
|
5183
|
-
255
|
|
5184
|
-
"""
|
|
5185
|
-
return scalar_cast_(input_x, input_y)
|
|
5186
|
-
|
|
5187
|
-
|
|
5188
4279
|
def tensor_scatter_mul(input_x, indices, updates):
|
|
5189
4280
|
r"""
|
|
5190
4281
|
Creates a new tensor by multiplying the values from the positions in `input_x` indicated by
|
|
@@ -5194,10 +4285,10 @@ def tensor_scatter_mul(input_x, indices, updates):
|
|
|
5194
4285
|
|
|
5195
4286
|
The last axis of `indices` is the depth of each index vector. For each index vector,
|
|
5196
4287
|
there must be a corresponding value in `updates`. The shape of `updates` should be
|
|
5197
|
-
equal to the shape of `input_x[indices]`. For more details, see
|
|
4288
|
+
equal to the shape of `input_x[indices]`. For more details, see Examples.
|
|
5198
4289
|
|
|
5199
4290
|
.. math::
|
|
5200
|
-
output[indices] = input\_x
|
|
4291
|
+
output\left [indices \right ] = input\_x\times update
|
|
5201
4292
|
|
|
5202
4293
|
Note:
|
|
5203
4294
|
- If some values of the `indices` are out of bound, instead of raising an index error,
|
|
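The corrected formula above can be checked with a pure-numpy loop (illustrative values; the loop form mirrors the math, not the operator's implementation):

import numpy as np

input_x = np.array([1.0, 2.0, 3.0, 4.0])
indices = np.array([[0], [2]])      # last axis is the depth of each index vector
updates = np.array([10.0, 100.0])
output = input_x.copy()
for idx, upd in zip(indices.reshape(-1), updates):
    output[idx] *= upd              # multiply into place: output[indices] = input_x * update
print(output)                       # [ 10.   2. 300.   4.]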
@@ -5254,7 +4345,7 @@ def tensor_scatter_div(input_x, indices, updates):
|
|
|
5254
4345
|
|
|
5255
4346
|
The last axis of `indices` is the depth of each index vector. For each index vector,
|
|
5256
4347
|
there must be a corresponding value in `updates`. The shape of `updates` should be
|
|
5257
|
-
equal to the shape of `input_x[indices]`. For more details, see
|
|
4348
|
+
equal to the shape of `input_x[indices]`. For more details, see Examples.
|
|
5258
4349
|
|
|
5259
4350
|
.. math::
|
|
5260
4351
|
output\left [indices \right ] = input\_x \div update
|
|
@@ -5396,92 +4487,15 @@ def masked_select(input, mask):
|
|
|
5396
4487
|
|
|
5397
4488
|
Examples:
|
|
5398
4489
|
>>> import numpy as np
|
|
5399
|
-
>>> import mindspore
|
|
5400
|
-
>>> from mindspore import Tensor, ops
|
|
5401
|
-
>>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
|
|
5402
|
-
>>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
|
|
5403
|
-
>>> output = ops.masked_select(x, mask)
|
|
5404
|
-
>>> print(output)
|
|
5405
|
-
[1 3]
|
|
5406
|
-
"""
|
|
5407
|
-
return masked_select_(input, mask)
|
|
5408
|
-
|
|
5409
|
-
|
|
5410
|
-
def masked_fill(input_x, mask, value):
|
|
5411
|
-
"""
|
|
5412
|
-
Fills elements of Tensor with value where mask is True.
|
|
5413
|
-
The shapes of `input_x` and `mask` need to be the same or broadcastable.
|
|
5414
|
-
|
|
5415
|
-
Args:
|
|
5416
|
-
input_x (Tensor): The source Tensor whose data type is one of bool, uint8, int8, int16, int32,
|
|
5417
|
-
int64, float16, float32, float64, complex64, complex128.
|
|
5418
|
-
mask (Tensor[bool]): The boolean mask.
|
|
5419
|
-
value (Union[float, Tensor]): The value to fill in with, which dtype is the same as `input_x`.
|
|
5420
|
-
|
|
5421
|
-
Returns:
|
|
5422
|
-
Tensor, has the same type and shape as `input_x`.
|
|
5423
|
-
|
|
5424
|
-
Raises:
|
|
5425
|
-
TypeError: If dtype of `mask` is not bool.
|
|
5426
|
-
TypeError: If `input_x` or `mask` is not a Tensor.
|
|
5427
|
-
ValueError: If the shapes of `input_x` and `mask` could not be broadcast.
|
|
5428
|
-
TypeError: If dtype of `input_x` or `value` is not one of bool, uint8, int8, int16, int32,
|
|
5429
|
-
int64, float16, float32, float64, complex64, complex128.
|
|
5430
|
-
TypeError: If dtype of `value` is different from that of `input_x`.
|
|
5431
|
-
TypeError: If `value` is neither float number nor Tensor.
|
|
5432
|
-
|
|
5433
|
-
Supported Platforms:
|
|
5434
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
5435
|
-
|
|
5436
|
-
Examples:
|
|
5437
|
-
>>> import mindspore
|
|
5438
|
-
>>> import numpy as np
|
|
5439
|
-
>>> from mindspore import Tensor, ops
|
|
5440
|
-
>>> input_x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
|
|
5441
|
-
>>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
|
|
5442
|
-
>>> output = ops.masked_fill(input_x, mask, 0.5)
|
|
5443
|
-
>>> print(output)
|
|
5444
|
-
[0.5 0.5 3. 0.5]
|
|
5445
|
-
"""
|
|
5446
|
-
if isinstance(value, (float, int)) and isinstance(input_x, Tensor):
|
|
5447
|
-
value = scalar_to_tensor_(value, input_x.dtype)
|
|
5448
|
-
masked_fill_ = _get_cache_prim(P.MaskedFill)()
|
|
5449
|
-
return masked_fill_(input_x, mask, value)
|
|
5450
|
-
|
|
5451
|
-
|
|
5452
|
-
def diag(input):
|
|
5453
|
-
r"""
|
|
5454
|
-
Constructs a diagonal tensor with a given diagonal values.
|
|
5455
|
-
|
|
5456
|
-
Assume `input` has dimensions :math:`(D_1,... D_k)` , the output is a tensor of
|
|
5457
|
-
rank 2k with dimensions :math:`(D_1,..., D_k, D_1,..., D_k)` where:
|
|
5458
|
-
:math:`output[i_1,..., i_k, i_1,..., i_k] = input[i_1,..., i_k]` and 0 everywhere else.
|
|
5459
|
-
|
|
5460
|
-
Args:
|
|
5461
|
-
input (Tensor): The input tensor.
|
|
5462
|
-
|
|
5463
|
-
Returns:
|
|
5464
|
-
Tensor, has the same dtype as the `input`.
|
|
5465
|
-
|
|
5466
|
-
Raises:
|
|
5467
|
-
TypeError: If `input` is not a Tensor.
|
|
5468
|
-
ValueError: If rank of `input` is less than 1.
|
|
5469
|
-
|
|
5470
|
-
Supported Platforms:
|
|
5471
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
5472
|
-
|
|
5473
|
-
Examples:
|
|
5474
|
-
>>> from mindspore import Tensor
|
|
5475
|
-
>>> import mindspore.ops as ops
|
|
5476
|
-
>>> input_x = Tensor([1, 2, 3, 4]).astype('int32')
|
|
5477
|
-
>>> output = ops.diag(input_x)
|
|
4490
|
+
>>> import mindspore
|
|
4491
|
+
>>> from mindspore import Tensor, ops
|
|
4492
|
+
>>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
|
|
4493
|
+
>>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
|
|
4494
|
+
>>> output = ops.masked_select(x, mask)
|
|
5478
4495
|
>>> print(output)
|
|
5479
|
-
[
|
|
5480
|
-
[0 2 0 0]
|
|
5481
|
-
[0 0 3 0]
|
|
5482
|
-
[0 0 0 4]]
|
|
4496
|
+
[1 3]
|
|
5483
4497
|
"""
|
|
5484
|
-
return
|
|
4498
|
+
return masked_select_(input, mask)
|
|
5485
4499
|
|
|
5486
4500
|
|
|
5487
4501
|
def diagflat(input, offset=0):
|
|
@@ -5542,7 +4556,7 @@ def col2im(input_x, output_size, kernel_size, dilation, padding_value, stride):
|
|
|
5542
4556
|
Combines an array of sliding local blocks into a large containing tensor.
|
|
5543
4557
|
|
|
5544
4558
|
Args:
|
|
5545
|
-
input_x (Tensor): 4D tensor with data type float16 or
|
|
4559
|
+
input_x (Tensor): 4D tensor with data type float16 or float32.
|
|
5546
4560
|
output_size (Tensor): 1D tensor with 2 elements of data type int.
|
|
5547
4561
|
kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two int
|
|
5548
4562
|
for height and width. If type is int, it means that height equals width. Must be specified.
|
|
@@ -5598,7 +4612,7 @@ def _split_int(x, split_size_or_sections, axis):
|
|
|
5598
4612
|
num_sections = length_along_dim // split_size_or_sections
|
|
5599
4613
|
length1 = num_sections * split_size_or_sections
|
|
5600
4614
|
length2 = length_along_dim - length1
|
|
5601
|
-
start1 = _list_comprehensions(
|
|
4615
|
+
start1 = _list_comprehensions(rank_(x), 0, True)
|
|
5602
4616
|
size1 = _tuple_setitem(arr_shape, axis, length1)
|
|
5603
4617
|
start2 = _tuple_setitem(start1, axis, length1)
|
|
5604
4618
|
size2 = _tuple_setitem(arr_shape, axis, length2)
|
|
@@ -5628,7 +4642,6 @@ def _split_sub_tensors(x, split_size_or_sections, axis):
|
|
|
5628
4642
|
sub_tensors.append(sliced_tensor)
|
|
5629
4643
|
return sub_tensors
|
|
5630
4644
|
|
|
5631
|
-
|
|
5632
4645
|
def split(tensor, split_size_or_sections, axis=0):
|
|
5633
4646
|
"""
|
|
5634
4647
|
Splits the Tensor into chunks along the given axis.
|
|
@@ -5650,9 +4663,9 @@ def split(tensor, split_size_or_sections, axis=0):
|
|
|
5650
4663
|
TypeError: If argument `tensor` is not Tensor.
|
|
5651
4664
|
TypeError: If argument `axis` is not int.
|
|
5652
4665
|
ValueError: If argument `axis` is out of range of :math:`[-tensor.ndim, tensor.ndim)` .
|
|
5653
|
-
TypeError: If each element in
|
|
5654
|
-
TypeError: If argument `
|
|
5655
|
-
ValueError: The sum of
|
|
4666
|
+
TypeError: If each element in `split_size_or_sections` is not an integer.
|
|
4667
|
+
TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
|
|
4668
|
+
ValueError: If the sum of `split_size_or_sections` is not equal to `tensor.shape[axis]`.
|
|
5656
4669
|
|
|
5657
4670
|
Supported Platforms:
|
|
5658
4671
|
``Ascend`` ``GPU`` ``CPU``
|
|
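A short usage sketch of the chunking rule behind the corrected Raises entries above (values illustrative):

import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.arange(9).astype("float32"))
chunks = ops.split(x, 4)                  # 9 is not divisible by 4
print([c.shape for c in chunks])          # [(4,), (4,), (1,)]: last chunk is smaller
ragged = ops.split(x, [2, 3, 4])          # sections must sum to tensor.shape[axis]
print([c.shape for c in ragged])          # [(2,), (3,), (4,)]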
@@ -5696,127 +4709,52 @@ def split(tensor, split_size_or_sections, axis=0):
|
|
|
5696
4709
|
f"but got {type(split_size_or_sections)}")
|
|
5697
4710
|
return tuple(res)
|
|
5698
4711
|
|
|
5699
|
-
|
|
5700
|
-
def tril(input, diagonal=0): # pylint: disable=redefined-outer-name
|
|
4712
|
+
def split_ext(tensor, split_size_or_sections, axis=0):
|
|
5701
4713
|
"""
|
|
5702
|
-
|
|
5703
|
-
and set the other elements to zeros.
|
|
5704
|
-
|
|
5705
|
-
Args:
|
|
5706
|
-
input (Tensor): A Tensor with shape :math:`(x_1, x_2, ..., x_R)`. The rank must be at least 2.
|
|
5707
|
-
Supporting all number types including bool.
|
|
5708
|
-
diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: 0,
|
|
5709
|
-
indicating the main diagonal.
|
|
5710
|
-
|
|
5711
|
-
Returns:
|
|
5712
|
-
Tensor, the same shape and data type as the input `x`.
|
|
5713
|
-
|
|
5714
|
-
Raises:
|
|
5715
|
-
TypeError: If `x` is not a Tensor.
|
|
5716
|
-
TypeError: If `diagonal` is not an int.
|
|
5717
|
-
TypeError: If the type of `x` is neither number nor bool.
|
|
5718
|
-
ValueError: If the rank of `x` is less than 2.
|
|
5719
|
-
|
|
5720
|
-
Supported Platforms:
|
|
5721
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
5722
|
-
|
|
5723
|
-
Examples:
|
|
5724
|
-
>>> import numpy as np
|
|
5725
|
-
>>> from mindspore import Tensor, ops
|
|
5726
|
-
>>> x = Tensor(np.array([[ 1, 2, 3, 4],
|
|
5727
|
-
... [ 5, 6, 7, 8],
|
|
5728
|
-
... [10, 11, 12, 13],
|
|
5729
|
-
... [14, 15, 16, 17]]))
|
|
5730
|
-
>>> result = ops.tril(x)
|
|
5731
|
-
>>> print(result)
|
|
5732
|
-
[[ 1 0 0 0]
|
|
5733
|
-
[ 5 6 0 0]
|
|
5734
|
-
[10 11 12 0]
|
|
5735
|
-
[14 15 16 17]]
|
|
5736
|
-
>>> x = Tensor(np.array([[ 1, 2, 3, 4],
|
|
5737
|
-
... [ 5, 6, 7, 8],
|
|
5738
|
-
... [10, 11, 12, 13],
|
|
5739
|
-
... [14, 15, 16, 17]]))
|
|
5740
|
-
>>> result = ops.tril(x, diagonal=1)
|
|
5741
|
-
>>> print(result)
|
|
5742
|
-
[[ 1 2 0 0]
|
|
5743
|
-
[ 5 6 7 0]
|
|
5744
|
-
[10 11 12 13]
|
|
5745
|
-
[14 15 16 17]]
|
|
5746
|
-
>>> x = Tensor(np.array([[ 1, 2, 3, 4],
|
|
5747
|
-
... [ 5, 6, 7, 8],
|
|
5748
|
-
... [10, 11, 12, 13],
|
|
5749
|
-
... [14, 15, 16, 17]]))
|
|
5750
|
-
>>> result = ops.tril(x, diagonal=-1)
|
|
5751
|
-
>>> print(result)
|
|
5752
|
-
[[ 0 0 0 0]
|
|
5753
|
-
[ 5 0 0 0]
|
|
5754
|
-
[10 11 0 0]
|
|
5755
|
-
[14 15 16 0]]
|
|
5756
|
-
"""
|
|
5757
|
-
tril_ = Tril(diagonal)
|
|
5758
|
-
return tril_(input)
|
|
5759
|
-
|
|
5760
|
-
|
|
5761
|
-
def triu(input, diagonal=0): # pylint: disable=redefined-outer-name
|
|
5762
|
-
r"""
|
|
5763
|
-
Returns the upper triangle part of 'input' (elements that contain the diagonal and below),
|
|
5764
|
-
and set the other elements to zeros.
|
|
5765
|
-
|
|
5766
|
-
.. warning::
|
|
5767
|
-
This is an experimental API that is subject to change or deletion.
|
|
4714
|
+
Splits the Tensor into chunks along the given axis.
|
|
5768
4715
|
|
|
5769
4716
|
Args:
|
|
5770
|
-
|
|
5771
|
-
|
|
5772
|
-
|
|
4717
|
+
tensor (Tensor): A Tensor to be divided.
|
|
4718
|
+
split_size_or_sections (Union[int, tuple(int), list(int)]):
|
|
4719
|
+
If `split_size_or_sections` is an int type, `tensor` will be split into equally sized chunks,
|
|
4720
|
+
each chunk of size `split_size_or_sections`. The last chunk will be smaller than `split_size_or_sections`
|
|
4721
|
+
if `tensor.shape[axis]` is not divisible by `split_size_or_sections`.
|
|
4722
|
+
If `split_size_or_sections` is a list type, then `tensor` will be split into len(split_size_or_sections)
|
|
4723
|
+
chunks with sizes `split_size_or_sections` along the given `axis`.
|
|
4724
|
+
axis (int): The axis along which to split. Default: ``0`` .
|
|
5773
4725
|
|
|
5774
4726
|
Returns:
|
|
5775
|
-
|
|
4727
|
+
A tuple of sub-tensors.
|
|
5776
4728
|
|
|
5777
4729
|
Raises:
|
|
5778
|
-
TypeError: If `
|
|
5779
|
-
TypeError: If `
|
|
5780
|
-
ValueError: If
|
|
4730
|
+
TypeError: If argument `tensor` is not Tensor.
|
|
4731
|
+
TypeError: If argument `axis` is not int.
|
|
4732
|
+
ValueError: If argument `axis` is out of range of :math:`[-tensor.ndim, tensor.ndim)` .
|
|
4733
|
+
TypeError: If each element in `split_size_or_sections` is not an integer.
|
|
4734
|
+
TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
|
|
4735
|
+
ValueError: If the sum of `split_size_or_sections` is not equal to `tensor.shape[axis]`.
|
|
5781
4736
|
|
|
5782
4737
|
Supported Platforms:
|
|
5783
|
-
``Ascend``
|
|
4738
|
+
``Ascend``
|
|
5784
4739
|
|
|
5785
4740
|
Examples:
|
|
5786
4741
|
>>> import numpy as np
|
|
5787
|
-
>>> from mindspore import
|
|
5788
|
-
>>>
|
|
5789
|
-
|
|
5790
|
-
|
|
5791
|
-
|
|
5792
|
-
|
|
5793
|
-
|
|
5794
|
-
|
|
5795
|
-
|
|
5796
|
-
|
|
5797
|
-
|
|
5798
|
-
|
|
5799
|
-
|
|
5800
|
-
|
|
5801
|
-
|
|
5802
|
-
|
|
5803
|
-
>>> print(result)
|
|
5804
|
-
[[ 0 2 3 4]
|
|
5805
|
-
[ 0 0 7 8]
|
|
5806
|
-
[ 0 0 0 13]
|
|
5807
|
-
[ 0 0 0 0]]
|
|
5808
|
-
>>> x = Tensor(np.array([[ 1, 2, 3, 4],
|
|
5809
|
-
... [ 5, 6, 7, 8],
|
|
5810
|
-
... [10, 11, 12, 13],
|
|
5811
|
-
... [14, 15, 16, 17]]))
|
|
5812
|
-
>>> result = ops.triu(x, diagonal=-1)
|
|
5813
|
-
>>> print(result)
|
|
5814
|
-
[[ 1 2 3 4]
|
|
5815
|
-
[ 5 6 7 8]
|
|
5816
|
-
[ 0 11 12 13]
|
|
5817
|
-
[ 0 0 16 17]]
|
|
5818
|
-
"""
|
|
5819
|
-
return _get_cache_prim(P.Triu)(diagonal)(input)
|
|
4742
|
+
>>> from mindspore import ops, Tensor
|
|
4743
|
+
>>> input_x = np.arange(9).astype("float32")
|
|
4744
|
+
>>> output = ops.split(Tensor(input_x), 3)
|
|
4745
|
+
>>> print(output)
|
|
4746
|
+
(Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
|
|
4747
|
+
Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
|
|
4748
|
+
Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
|
|
4749
|
+
"""
|
|
4750
|
+
if isinstance(split_size_or_sections, int):
|
|
4751
|
+
res = split_tensor(tensor, split_size_or_sections, axis)
|
|
4752
|
+
elif isinstance(split_size_or_sections, (list, tuple)):
|
|
4753
|
+
res = split_with_size(tensor, split_size_or_sections, axis)
|
|
4754
|
+
else:
|
|
4755
|
+
raise TypeError(f"Type of Argument `split_size_or_sections` should be integer, tuple(int) or list(int), " \
|
|
4756
|
+
f"but got {type(split_size_or_sections)}")
|
|
4757
|
+
return res
|
|
5820
4758
|
|
|
5821
4759
|
|
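A brief sketch of the two dispatch paths in `split_ext` above (the module import path is an assumption based on the surrounding file; `split_tensor`/`split_with_size` are the generated ops it delegates to):

import numpy as np
from mindspore import Tensor
from mindspore.ops.function.array_func import split_ext  # assumed location

x = Tensor(np.arange(10).astype("float32"))
print([c.shape for c in split_ext(x, 5)])        # int -> split_tensor path: [(5,), (5,)]
print([c.shape for c in split_ext(x, [3, 7])])   # list -> split_with_size path: [(3,), (7,)]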
|
5822
4760
|
@_primexpr
|
|
@@ -5918,24 +4856,24 @@ def _tensor_split_sub_int(x, indices_or_sections, axis):
|
|
|
5918
4856
|
arr_shape = x.shape
|
|
5919
4857
|
length_along_dim = arr_shape[axis]
|
|
5920
4858
|
if indices_or_sections > length_along_dim:
|
|
5921
|
-
res = P.Split(axis, length_along_dim)(x)
|
|
4859
|
+
res = _get_cache_prim(P.Split)(axis, length_along_dim)(x)
|
|
5922
4860
|
indices_or_sections_n = [length_along_dim, length_along_dim + 1]
|
|
5923
4861
|
res2 = _tensor_split_sub_tensors(x, indices_or_sections_n, axis)
|
|
5924
4862
|
for _ in np.arange(length_along_dim, indices_or_sections):
|
|
5925
4863
|
res += tuple(res2)[1:]
|
|
5926
4864
|
elif length_along_dim % indices_or_sections == 0:
|
|
5927
|
-
res = P.Split(axis, indices_or_sections)(x)
|
|
4865
|
+
res = _get_cache_prim(P.Split)(axis, indices_or_sections)(x)
|
|
5928
4866
|
else:
|
|
5929
4867
|
num_long_tensor = length_along_dim % indices_or_sections
|
|
5930
4868
|
num_short_tensor = indices_or_sections - num_long_tensor
|
|
5931
4869
|
length1 = num_long_tensor * (length_along_dim // indices_or_sections + 1)
|
|
5932
4870
|
length2 = length_along_dim - length1
|
|
5933
|
-
start1 = _list_comprehensions(
|
|
4871
|
+
start1 = _list_comprehensions(rank_(x), 0, True)
|
|
5934
4872
|
size1 = _tuple_setitem(arr_shape, axis, length1)
|
|
5935
4873
|
start2 = _tuple_setitem(start1, axis, length1)
|
|
5936
4874
|
size2 = _tuple_setitem(arr_shape, axis, length2)
|
|
5937
|
-
res = P.Split(axis, num_long_tensor)(tensor_slice(x, start1, size1)) + \
|
|
5938
|
-
P.Split(axis, num_short_tensor)(tensor_slice(x, start2, size2))
|
|
4875
|
+
res = _get_cache_prim(P.Split)(axis, num_long_tensor)(tensor_slice(x, start1, size1)) + \
|
|
4876
|
+
_get_cache_prim(P.Split)(axis, num_short_tensor)(tensor_slice(x, start2, size2))
|
|
5939
4877
|
return res
|
|
5940
4878
|
|
|
5941
4879
|
|
|
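The hunks above replace bare `P.Split(...)` construction with `_get_cache_prim(P.Split)(...)`; a minimal sketch of what that buys (assuming the cache hands back one shared instance per distinct constructor-argument key, as its call sites imply):

from mindspore.ops import operations as P
from mindspore.ops._primitive_cache import _get_cache_prim

split_a = _get_cache_prim(P.Split)(0, 2)
split_b = _get_cache_prim(P.Split)(0, 2)
print(split_a is split_b)  # expected True: no new primitive is allocated per call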
@@ -5949,11 +4887,11 @@ def tensor_split(input, indices_or_sections, axis=0):
|
|
|
5949
4887
|
|
|
5950
4888
|
- If `indices_or_sections` is an integer n, input tensor will be split into n sections.
|
|
5951
4889
|
|
|
5952
|
-
- If :math:`input.shape
|
|
5953
|
-
:math:`input.shape
|
|
5954
|
-
- If :math:`input.shape
|
|
5955
|
-
will have size :math:`input.shape
|
|
5956
|
-
size :math:`input.shape
|
|
4890
|
+
- If :math:`input.shape[axis]` can be divisible by n, sub-sections will have equal size
|
|
4891
|
+
:math:`input.shape[axis] / n` .
|
|
4892
|
+
- If :math:`input.shape[axis]` is not divisible by n, the first :math:`input.shape[axis] \bmod n` sections
|
|
4893
|
+
will have size :math:`input.shape[axis] // n + 1` , and the rest will have
|
|
4894
|
+
size :math:`input.shape[axis] // n` .
|
|
5957
4895
|
- If `indices_or_sections` is of type tuple(int) or list(int), the input tensor will be split at the
|
|
5958
4896
|
indices in the list or tuple. For example, given parameters :math:`indices\_or\_sections=[1, 4]`
|
|
5959
4897
|
and :math:`axis=0` , the input tensor will be split into sections :math:`input[:1]` ,
|
|
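A worked instance of the size rule above (illustrative): with `input.shape[axis] = 10` and `n = 3`, the first `10 % 3 = 1` section has size `10 // 3 + 1 = 4` and the remaining two have size `10 // 3 = 3`:

import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.arange(10).astype(np.float32))
parts = ops.tensor_split(x, 3)
print([p.shape for p in parts])   # [(4,), (3,), (3,)]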
@@ -6166,7 +5104,7 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylin
|
|
|
6166
5104
|
tensor.
|
|
6167
5105
|
|
|
6168
5106
|
- values (Tensor) - The maximum value of input tensor, with the same shape as index, and same dtype as x.
|
|
6169
|
-
- index (Tensor) - The index for the maximum value of the input tensor, with dtype
|
|
5107
|
+
- index (Tensor) - The index for the maximum value of the input tensor, with dtype int64. If `keepdims`
|
|
6170
5108
|
is true, the shape of output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1, input_{axis+1},
|
|
6171
5109
|
..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1},
|
|
6172
5110
|
..., input_N)` .
|
|
@@ -6195,65 +5133,19 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylin
|
|
|
6195
5133
|
[[3.2 0.4 0.4 2.9 4. ]] [[1 1 0 1 1]]
|
|
6196
5134
|
"""
|
|
6197
5135
|
if not input.shape:
|
|
6198
|
-
return (input, Tensor(0, dtype=mstype.
|
|
5136
|
+
return (input, Tensor(0, dtype=mstype.int64))
|
|
6199
5137
|
if axis is None:
|
|
6200
|
-
|
|
6201
|
-
return (reduce_max_op(input), Tensor(0, dtype=mstype.int32))
|
|
5138
|
+
return (max_(input), Tensor(0, dtype=mstype.int64))
|
|
6202
5139
|
if initial is not None and not isinstance(initial, numbers.Number):
|
|
6203
5140
|
raise TypeError(f"For 'max', 'initial' must be a scalar, but got {type(initial)}")
|
|
6204
5141
|
if axis is not None and not isinstance(axis, int):
|
|
6205
5142
|
raise TypeError(f"For 'max', 'axis' must be int, but got {type(axis)}")
|
|
6206
5143
|
input = _init_and_select_elem(input, initial, where, ops.maximum)
|
|
6207
|
-
argmax_with_value_op = ArgMaxWithValue(axis, keepdims)
|
|
5144
|
+
argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(axis, keepdims)
|
|
6208
5145
|
indices, values = argmax_with_value_op(input)
|
|
6209
5146
|
return values, indices
|
|
6210
5147
|
|
|
6211
5148
|
|
|
6212
|
-
def argmax(input, dim=None, keepdim=False):
|
|
6213
|
-
"""
|
|
6214
|
-
Return the indices of the maximum values of a tensor across a dimension.
|
|
6215
|
-
|
|
6216
|
-
Args:
|
|
6217
|
-
input (Tensor): Input tensor.
|
|
6218
|
-
dim (Union[int, None], optional): The dimension to reduce. If `dim` is ``None`` , the indices of the maximum
|
|
6219
|
-
value within the flattened input will be returned. Default: ``None`` .
|
|
6220
|
-
keepdim (bool, optional): Whether the output tensor retains the specified
|
|
6221
|
-
dimension. Ignored if `dim` is None. Default: ``False`` .
|
|
6222
|
-
|
|
6223
|
-
Returns:
|
|
6224
|
-
Tensor, indices of the maximum values across a dimension.
|
|
6225
|
-
|
|
6226
|
-
Raises:
|
|
6227
|
-
TypeError: If `keepdim` is not bool.
|
|
6228
|
-
ValueError: If `dim` is out of range.
|
|
6229
|
-
|
|
6230
|
-
Supported Platforms:
|
|
6231
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
6232
|
-
|
|
6233
|
-
Examples:
|
|
6234
|
-
>>> import numpy as np
|
|
6235
|
-
>>> from mindspore import Tensor, ops
|
|
6236
|
-
>>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
|
|
6237
|
-
>>> output = ops.argmax(x, dim=-1)
|
|
6238
|
-
>>> print(output)
|
|
6239
|
-
[1 0 0]
|
|
6240
|
-
"""
|
|
6241
|
-
_check_attr_dtype("keepdim", keepdim, [bool], "argmax")
|
|
6242
|
-
if not input.shape:
|
|
6243
|
-
return Tensor(0)
|
|
6244
|
-
if input.dtype == mstype.bool_:
|
|
6245
|
-
input = input.astype(mstype.int32)
|
|
6246
|
-
is_dim_none = False
|
|
6247
|
-
if dim is None:
|
|
6248
|
-
input = reshape_(input, (-1,))
|
|
6249
|
-
dim = 0
|
|
6250
|
-
is_dim_none = True
|
|
6251
|
-
out = _get_cache_prim(Argmax)(dim, mstype.int64)(input)
|
|
6252
|
-
if keepdim and not is_dim_none:
|
|
6253
|
-
out = expand_dims_(out, dim)
|
|
6254
|
-
return out
|
|
6255
|
-
|
|
6256
|
-
|
|
6257
5149
|
def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pylint: disable=redefined-outer-name
|
|
6258
5150
|
"""
|
|
6259
5151
|
Calculates the minimum value along with the given axis for the input tensor. It returns the minimum values and
|
|
@@ -6312,16 +5204,16 @@ def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pylin
|
|
|
6312
5204
|
0.0 0
|
|
6313
5205
|
"""
|
|
6314
5206
|
if not input.shape:
|
|
6315
|
-
return (input, Tensor(0, dtype=mstype.
|
|
5207
|
+
return (input, Tensor(0, dtype=mstype.int64))
|
|
6316
5208
|
if axis is None:
|
|
6317
|
-
return (
|
|
5209
|
+
return (min_(input), Tensor(0, dtype=mstype.int64))
|
|
6318
5210
|
if initial is not None and not isinstance(initial, numbers.Number):
|
|
6319
5211
|
raise TypeError(f"For 'min', 'initial' must be a scalar, but got {type(initial)}")
|
|
6320
5212
|
if axis is not None and not isinstance(axis, int):
|
|
6321
5213
|
raise TypeError(f"For 'min', 'axis' must be int, but got {type(axis)}")
|
|
6322
5214
|
input = _init_and_select_elem(input, initial, where, ops.minimum)
|
|
6323
|
-
|
|
6324
|
-
indices, values =
|
|
5215
|
+
argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(axis, keepdims)
|
|
5216
|
+
indices, values = argmin_with_value_op(input)
|
|
6325
5217
|
return values, indices
|
|
6326
5218
|
|
|
6327
5219
|
|
|
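A quick sketch of the rewritten `axis=None` path above (the placeholder index is now int64; values illustrative):

import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.array([[2., 5.], [1., 7.]], np.float32))
values, index = ops.min(x)   # axis=None: global reduction with placeholder index
print(values)                # 1.0
print(index.dtype)           # Int64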
@@ -6379,8 +5271,8 @@ def aminmax(input, *, axis=0, keepdims=False):
|
|
|
6379
5271
|
output0 = ops.reshape(output0, [1] * input.ndim)
|
|
6380
5272
|
output1 = ops.reshape(output1, [1] * input.ndim)
|
|
6381
5273
|
return output0, output1
|
|
6382
|
-
argmin_with_value_op =
|
|
6383
|
-
argmax_with_value_op =
|
|
5274
|
+
argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(axis, keepdims)
|
|
5275
|
+
argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(axis, keepdims)
|
|
6384
5276
|
_, output0 = argmin_with_value_op(input)
|
|
6385
5277
|
_, output1 = argmax_with_value_op(input)
|
|
6386
5278
|
if keepdims is True and input.ndim == 0:
|
|
@@ -6435,69 +5327,101 @@ def narrow(input, axis, start, length):
|
|
|
6435
5327
|
begins[axis] = start
|
|
6436
5328
|
sizes = list(input.shape)
|
|
6437
5329
|
sizes[axis] = length
|
|
6438
|
-
return
|
|
5330
|
+
return tensor_slice(input, begins, sizes)
|
|
6439
5331
|
|
|
6440
5332
|
|
|
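The restored `tensor_slice` return above makes `narrow` a thin slice; a one-line usage check (illustrative):

import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.arange(12).reshape(3, 4).astype(np.float32))
print(ops.narrow(x, 1, 1, 2).shape)   # (3, 2): length-2 window starting at column 1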
6441
|
-
def
|
|
5333
|
+
def topk(input, k, dim=None, largest=True, sorted=True):
|
|
6442
5334
|
r"""
|
|
6443
|
-
|
|
5335
|
+
Finds values and indices of the `k` largest or smallest entries along a given dimension.
|
|
5336
|
+
|
|
5337
|
+
.. warning::
|
|
5338
|
+
- If `sorted` is set to False, it will use the aicpu operator, and the performance may be reduced. In addition, due to
|
|
5339
|
+
different memory layout and traversal methods on different platforms, the display order of calculation results
|
|
5340
|
+
may be inconsistent when `sorted` is False.
|
|
6444
5341
|
|
|
6445
|
-
|
|
6446
|
-
|
|
6447
|
-
|
|
6448
|
-
up. Segment_ids does not need to be sorted, and it does not need to cover all values in the entire valid value
|
|
6449
|
-
range.
|
|
5342
|
+
If `input` is a one-dimensional Tensor, finds the `k` largest or smallest entries in the Tensor,
|
|
5343
|
+
and outputs their values and indices as Tensors. values[`k`] is the `k`-th largest item in `input`,
|
|
5344
|
+
and its index is indices[`k`].
|
|
6450
5345
|
|
|
6451
|
-
|
|
5346
|
+
For a multi-dimensional input,
|
|
5347
|
+
calculates the first or last `k` entries along the given dimension, therefore:
|
|
6452
5348
|
|
|
6453
|
-
..
|
|
5349
|
+
.. math::
|
|
6454
5350
|
|
|
6455
|
-
|
|
6456
|
-
- If the segment_id i is absent in the segment_ids, then output[i] will be filled with 0.
|
|
6457
|
-
- On Ascend, if the value of segment_id is less than 0 or greater than the length of the input data shape, an
|
|
6458
|
-
execution error will occur.
|
|
5351
|
+
values.shape = indices.shape
|
|
6459
5352
|
|
|
6460
|
-
If the
|
|
6461
|
-
is negative, the value will be ignored. 'num_segments' must be equal to the number of different segment_ids.
|
|
5353
|
+
If the two compared elements are the same, the one with the smaller index value is returned first.
|
|
6462
5354
|
|
|
6463
5355
|
Args:
|
|
6464
|
-
|
|
6465
|
-
|
|
6466
|
-
|
|
6467
|
-
|
|
6468
|
-
|
|
5356
|
+
input (Tensor): Input to be computed, data type must be float16, float32 or int32.
|
|
5357
|
+
k (int): The number of top or bottom elements to be computed along the last dimension.
|
|
5358
|
+
dim (int, optional): The dimension to sort along. Default: ``None`` .
|
|
5359
|
+
largest (bool, optional): If largest is ``False`` then the k smallest elements are returned.
|
|
5360
|
+
Default: ``True`` .
|
|
5361
|
+
sorted (bool, optional): If ``True`` , the obtained elements will be sorted by the values in descending order.
|
|
5362
|
+
If ``False`` , the obtained elements will not be sorted. Default: ``True`` .
|
|
6469
5363
|
|
|
6470
5364
|
Returns:
|
|
6471
|
-
|
|
5365
|
+
A tuple consisting of `values` and `indices`.
|
|
5366
|
+
|
|
5367
|
+
- values (Tensor): The `k` largest or smallest elements in each slice of the given dimension.
|
|
5368
|
+
- indices (Tensor): The indices of values within the last dimension of input.
|
|
6472
5369
|
|
|
6473
5370
|
Raises:
|
|
6474
|
-
TypeError: If `
|
|
6475
|
-
|
|
5371
|
+
TypeError: If `sorted` is not a bool.
|
|
5372
|
+
TypeError: If `input` is not a Tensor.
|
|
5373
|
+
TypeError: If `k` is not an int.
|
|
5374
|
+
TypeError: If dtype of `input` is not one of the following: float16, float32 or int32.
|
|
6476
5375
|
|
|
6477
5376
|
Supported Platforms:
|
|
6478
5377
|
``Ascend`` ``GPU`` ``CPU``
|
|
6479
5378
|
|
|
6480
5379
|
Examples:
|
|
6481
|
-
>>>
|
|
5380
|
+
>>> import mindspore as ms
|
|
6482
5381
|
>>> from mindspore import ops
|
|
6483
|
-
>>>
|
|
6484
|
-
|
|
6485
|
-
|
|
6486
|
-
>>>
|
|
6487
|
-
>>> output = ops.unsorted_segment_sum(input_x, segment_ids, num_segments)
|
|
6488
|
-
>>> print(output)
|
|
6489
|
-
[3. 3. 4. 0.]
|
|
6490
|
-
>>> input_x = Tensor([1, 2, 3, 4, 2, 5], mindspore.float32)
|
|
6491
|
-
>>> segment_ids = Tensor([0, 0, 1, 2, 3, 4], mindspore.int32)
|
|
6492
|
-
>>> num_segments = 6
|
|
6493
|
-
>>> output = ops.unsorted_segment_sum(input_x, segment_ids, num_segments)
|
|
5382
|
+
>>> x = ms.Tensor([[0.5368, 0.2447, 0.4302, 0.9673],
|
|
5383
|
+
... [0.4388, 0.6525, 0.4685, 0.1868],
|
|
5384
|
+
... [0.3563, 0.5152, 0.9675, 0.8230]], dtype=ms.float32)
|
|
5385
|
+
>>> output = ops.topk(x, 2, dim=1)
|
|
6494
5386
|
>>> print(output)
|
|
6495
|
-
[3
|
|
5387
|
+
(Tensor(shape=[3, 2], dtype=Float32, value=
|
|
5388
|
+
[[ 9.67299998e-01, 5.36800027e-01],
|
|
5389
|
+
[ 6.52499974e-01, 4.68499988e-01],
|
|
5390
|
+
[ 9.67499971e-01, 8.23000014e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
|
|
5391
|
+
[[3, 0],
|
|
5392
|
+
[1, 2],
|
|
5393
|
+
[2, 3]]))
|
|
5394
|
+
>>> output2 = ops.topk(x, 2, dim=1, largest=False)
|
|
5395
|
+
>>> print(output2)
|
|
5396
|
+
(Tensor(shape=[3, 2], dtype=Float32, value=
|
|
5397
|
+
[[ 2.44700000e-01, 4.30200011e-01],
|
|
5398
|
+
[ 1.86800003e-01, 4.38800007e-01],
|
|
5399
|
+
[ 3.56299996e-01, 5.15200019e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
|
|
5400
|
+
[[1, 2],
|
|
5401
|
+
[3, 0],
|
|
5402
|
+
[0, 1]]))
|
|
6496
5403
|
"""
|
|
6497
|
-
|
|
5404
|
+
top_k_ = _get_cache_prim(P.TopK)(sorted)
|
|
5405
|
+
if not largest:
|
|
5406
|
+
input = -input
|
|
5407
|
+
if dim is None or dim == input.ndim - 1:
|
|
5408
|
+
if not largest:
|
|
5409
|
+
res = top_k_(input, k)
|
|
5410
|
+
values, indices = -res[0], res[1]
|
|
5411
|
+
return values, indices
|
|
5412
|
+
return top_k_(input, k)
|
|
5413
|
+
input = input.swapaxes(dim, input.ndim - 1)
|
|
5414
|
+
output = top_k_(input, k)
|
|
5415
|
+
values = output[0].swapaxes(dim, input.ndim - 1)
|
|
5416
|
+
indices = output[1].swapaxes(dim, input.ndim - 1)
|
|
5417
|
+
if not largest:
|
|
5418
|
+
res = (-values, indices)
|
|
5419
|
+
else:
|
|
5420
|
+
res = (values, indices)
|
|
5421
|
+
return res
|
|
6498
5422
|
|
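The wrapper above reduces every case to a last-axis largest-k via two standard tricks, negation for `largest=False` and axis swapping for a non-terminal `dim`; a standalone numpy sketch of the same reductions (illustrative, not the operator itself):

import numpy as np

def topk_along(x, k, dim, largest=True):
    y = np.swapaxes(x, dim, -1)                  # move `dim` to the last axis
    if not largest:
        y = -y                                   # bottom-k == top-k of the negation
    idx = np.argsort(-y, axis=-1)[..., :k]       # indices of the k largest of y
    vals = np.take_along_axis(y, idx, axis=-1)
    if not largest:
        vals = -vals                             # undo the negation
    return np.swapaxes(vals, -1, dim), np.swapaxes(idx, -1, dim)

x = np.array([[3., 1., 2.], [6., 5., 4.]])
print(topk_along(x, 1, dim=0, largest=False)[0])  # [[3. 1. 2.]]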
|
6499
5423
|
|
|
6500
|
-
def
|
|
5424
|
+
def topk_ext(input, k, dim=-1, largest=True, sorted=True):
|
|
6501
5425
|
r"""
|
|
6502
5426
|
Finds values and indices of the `k` largest or smallest entries along a given dimension.
|
|
6503
5427
|
|
|
@@ -6522,7 +5446,7 @@ def topk(input, k, dim=None, largest=True, sorted=True):
|
|
|
6522
5446
|
Args:
|
|
6523
5447
|
input (Tensor): Input to be computed, data type must be float16, float32 or int32.
|
|
6524
5448
|
k (int): The number of top or bottom elements to be computed along the last dimension.
|
|
6525
|
-
dim (int, optional): The dimension to sort along. Default: ``
|
|
5449
|
+
dim (int, optional): The dimension to sort along. Default: ``-1`` .
|
|
6526
5450
|
largest (bool, optional): If largest is ``False`` then the k smallest elements are returned.
|
|
6527
5451
|
Default: ``True`` .
|
|
6528
5452
|
sorted (bool, optional): If ``True`` , the obtained elements will be sorted by the values in descending order.
|
|
@@ -6549,7 +5473,7 @@ def topk(input, k, dim=None, largest=True, sorted=True):
|
|
|
6549
5473
|
>>> x = ms.Tensor([[0.5368, 0.2447, 0.4302, 0.9673],
|
|
6550
5474
|
... [0.4388, 0.6525, 0.4685, 0.1868],
|
|
6551
5475
|
... [0.3563, 0.5152, 0.9675, 0.8230]], dtype=ms.float32)
|
|
6552
|
-
>>> output = ops.
|
|
5476
|
+
>>> output = ops.topk_ext(x, 2, dim=1)
|
|
6553
5477
|
>>> print(output)
|
|
6554
5478
|
(Tensor(shape=[3, 2], dtype=Float32, value=
|
|
6555
5479
|
[[ 9.67299998e-01, 5.36800027e-01],
|
|
@@ -6568,24 +5492,7 @@ def topk(input, k, dim=None, largest=True, sorted=True):
|
|
|
6568
5492
|
[3, 0],
|
|
6569
5493
|
[0, 1]]))
|
|
6570
5494
|
"""
|
|
6571
|
-
|
|
6572
|
-
if not largest:
|
|
6573
|
-
input = -input
|
|
6574
|
-
if dim is None or dim == input.ndim - 1:
|
|
6575
|
-
if not largest:
|
|
6576
|
-
res = top_k_(input, k)
|
|
6577
|
-
values, indices = -res[0], res[1]
|
|
6578
|
-
return values, indices
|
|
6579
|
-
return top_k_(input, k)
|
|
6580
|
-
input = input.swapaxes(dim, input.ndim - 1)
|
|
6581
|
-
output = top_k_(input, k)
|
|
6582
|
-
values = output[0].swapaxes(dim, input.ndim - 1)
|
|
6583
|
-
indices = output[1].swapaxes(dim, input.ndim - 1)
|
|
6584
|
-
if not largest:
|
|
6585
|
-
res = (-values, indices)
|
|
6586
|
-
else:
|
|
6587
|
-
res = (values, indices)
|
|
6588
|
-
return res
|
|
5495
|
+
return _get_cache_prim(ops.auto_generate.TopkExt)()(input, k, dim, largest, sorted)
|
|
6589
5496
|
|
|
6590
5497
|
|
|
6591
5498
|
def expand(input_x, size):
|
|
@@ -6728,9 +5635,7 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
|
|
|
6728
5635
|
.. warning::
|
|
6729
5636
|
- The output is a 3-dimensional Tensor whose shape is
|
|
6730
5637
|
:math:`(N, C \times \prod(\text{kernel_size}), L)` .
|
|
6731
|
-
|
|
6732
|
-
.. warning::
|
|
6733
|
-
This is an experimental API that is subject to change or deletion.
|
|
5638
|
+
- This is an experimental API that is subject to change or deletion.
|
|
6734
5639
|
|
|
6735
5640
|
Args:
|
|
6736
5641
|
input (Tensor): 4-D Tensor, supported dtypes: float16, float32, float64, complex64 and complex128.
|
|
@@ -6739,10 +5644,11 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
|
|
|
6739
5644
|
dilation (Union[int, tuple[int], list[int]], optional): The dilation of the window, should be two int
|
|
6740
5645
|
for height and width. If type is int, it means that height equals width. Default: ``1`` .
|
|
6741
5646
|
padding (Union[int, tuple[int], list[int]], optional): The pad of the window, that must be
|
|
6742
|
-
a tuple/list of one or two `int` for height and width.
|
|
6743
|
-
|
|
6744
|
-
If
|
|
6745
|
-
|
|
5647
|
+
a tuple/list of one or two `int` for height and width. Default: ``0`` .
|
|
5648
|
+
|
|
5649
|
+
- If one int, pad_height = pad_width.
|
|
5650
|
+
- If two int, pad_height = padding[0], pad_width = padding[1].
|
|
5651
|
+
|
|
6746
5652
|
stride (Union[int, tuple[int], list[int]], optional): The stride of the window, should be two int
|
|
6747
5653
|
for height and width. If type is int, it means that height equals width. Default: ``1`` .
|
|
6748
5654
|
|
|
@@ -6789,98 +5695,6 @@ def _check_diagonal_axes(dim1, dim2, x_ndim):
|
|
|
6789
5695
|
return axes
|
|
6790
5696
|
|
|
6791
5697
|
|
|
6792
|
-
def diagonal(input, offset=0, dim1=0, dim2=1):
|
|
6793
|
-
"""
|
|
6794
|
-
Returns specified diagonals of `input`.
|
|
6795
|
-
|
|
6796
|
-
If `input` is 2-D, returns the diagonal of `input` with the given offset.
|
|
6797
|
-
If `input` has more than two
|
|
6798
|
-
dimensions, then the axes specified by `dim1` and `dim2` are used to determine
|
|
6799
|
-
the 2-D sub-array whose diagonal is returned. In this case, remove the `dim1` and `dim2` dimensions of `input`
|
|
6800
|
-
and insert the last dimension of `input` by the diagonal elements determined by `dim1` and `dim2`.
|
|
6801
|
-
|
|
6802
|
-
Args:
|
|
6803
|
-
input (Tensor): Array from which the diagonals are taken.
|
|
6804
|
-
offset (int, optional): Offset of the diagonal from the main diagonal.
|
|
6805
|
-
Can be positive or negative. Default: ``0`` .
|
|
6806
|
-
dim1 (int, optional): Axis to be used as the first axis of the 2-D
|
|
6807
|
-
sub-arrays from which the diagonals should be taken. Defaults to
|
|
6808
|
-
first axis (0). Default: ``0`` .
|
|
6809
|
-
dim2 (int, optional): Axis to be used as the second axis of the 2-D
|
|
6810
|
-
sub-arrays from which the diagonals should be taken. Defaults to
|
|
6811
|
-
second axis (1). Default: ``1`` .
|
|
6812
|
-
|
|
6813
|
-
Returns:
|
|
6814
|
-
Tensor, if `input` is 2-D, then `input` 1-D array containing the diagonal. If
|
|
6815
|
-
``input.ndim > 2``, then the dimensions specified by `dim1` and `dim2` are removed,
|
|
6816
|
-
and a new axis inserted at the end corresponding to the diagonal.
|
|
6817
|
-
|
|
6818
|
-
Raises:
|
|
6819
|
-
TypeError: if `dim1` or `dim2` are not an int.
|
|
6820
|
-
ValueError: if the input tensor has less than two dimensions.
|
|
6821
|
-
|
|
6822
|
-
Supported Platforms:
|
|
6823
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
6824
|
-
|
|
6825
|
-
Examples:
|
|
6826
|
-
>>> from mindspore import Tensor, ops
|
|
6827
|
-
>>> from mindspore import dtype as mstype
|
|
6828
|
-
>>> x = Tensor([[0, 1], [2, 3]], mstype.float32)
|
|
6829
|
-
>>> output = ops.diagonal(x)
|
|
6830
|
-
>>> print(output)
|
|
6831
|
-
[0 3]
|
|
6832
|
-
"""
|
|
6833
|
-
x_ndim = input.ndim
|
|
6834
|
-
if x_ndim < 2:
|
|
6835
|
-
raise ValueError(f"For 'ops.diagonal', the original tensor requires at least two dimensions, but got {x_ndim}")
|
|
6836
|
-
_check_attr_dtype("dim1", dim1, [int], "diagonal")
|
|
6837
|
-
_check_attr_dtype("dim2", dim2, [int], "diagonal")
|
|
6838
|
-
dtype = input.dtype
|
|
6839
|
-
|
|
6840
|
-
axes = _check_diagonal_axes(dim1, dim2, x_ndim)
|
|
6841
|
-
perm = ()
|
|
6842
|
-
for i in ms_arrange(x_ndim):
|
|
6843
|
-
if i not in axes:
|
|
6844
|
-
perm += (i,)
|
|
6845
|
-
perm += axes
|
|
6846
|
-
input = input.transpose(perm)
|
|
6847
|
-
|
|
6848
|
-
x_shape = input.shape
|
|
6849
|
-
n, m = x_shape[-2:]
|
|
6850
|
-
|
|
6851
|
-
e = ops.eye(n, m, dtype)
|
|
6852
|
-
if offset >= m or offset <= -n:
|
|
6853
|
-
zero_shape = x_shape[:-2] + (0,)
|
|
6854
|
-
return ops.zeros(zero_shape, dtype)
|
|
6855
|
-
if offset != 0:
|
|
6856
|
-
e = e.astype(mstype.float32)
|
|
6857
|
-
if offset > 0:
|
|
6858
|
-
e_left = ops.fill(mstype.float32, (n, offset), 0)
|
|
6859
|
-
e_right = e[..., 0:m - offset:1]
|
|
6860
|
-
e = ops.cat((e_left, e_right), 1).astype(dtype)
|
|
6861
|
-
elif offset < 0:
|
|
6862
|
-
e_upper = ops.fill(mstype.float32, (-offset, m), 0)
|
|
6863
|
-
e_lower = e[0:n + offset:1, ...]
|
|
6864
|
-
e = ops.cat((e_upper, e_lower), 0).astype(dtype)
|
|
6865
|
-
e = ops.broadcast_to(e, x_shape)
|
|
6866
|
-
|
|
6867
|
-
prod_val = ops.mul(input, e)
|
|
6868
|
-
res = ops.ReduceSum()(prod_val.astype(mstype.float32), -1)
|
|
6869
|
-
|
|
6870
|
-
begin = ()
|
|
6871
|
-
for _ in ms_arrange(x_ndim - 2):
|
|
6872
|
-
begin += (0,)
|
|
6873
|
-
last_dim_begin = builtins.max(0, -offset)
|
|
6874
|
-
begin += (last_dim_begin,)
|
|
6875
|
-
res_size = res.shape[:-1]
|
|
6876
|
-
last_dim_end = builtins.min(x_shape[-2], builtins.max(0, x_shape[-1] - offset)) - last_dim_begin
|
|
6877
|
-
if last_dim_end <= 0:
|
|
6878
|
-
return Tensor([])
|
|
6879
|
-
res_size += (last_dim_end,)
|
|
6880
|
-
res = ops.slice(res, begin, res_size)
|
|
6881
|
-
return res.astype(dtype)
|
|
6882
|
-
|
|
6883
|
-
|
|
6884
5698
|
def _check_is_tensor(param_name, input, cls_name):
|
|
6885
5699
|
"""Returns True if input is Tensor."""
|
|
6886
5700
|
if not isinstance(input, Tensor):
|
|
@@ -6900,6 +5714,9 @@ def diagonal_scatter(input, src, offset=0, dim1=0, dim2=1):
|
|
|
6900
5714
|
the elements in these two dimensions will be treated as elements of a matrix,
|
|
6901
5715
|
and `src` is embedded on the diagonal of the matrix.
|
|
6902
5716
|
|
|
5717
|
+
Note:
|
|
5718
|
+
Currently, ``inf`` values in `input` or `src` are not supported.
|
|
5719
|
+
|
|
6903
5720
|
Args:
|
|
6904
5721
|
input (Tensor): Input Tensor, whose dimension is larger than 1.
|
|
6905
5722
|
src (Tensor): The source Tensor to embed.
|
|
@@ -6936,16 +5753,39 @@ def diagonal_scatter(input, src, offset=0, dim1=0, dim2=1):
|
|
|
6936
5753
|
"""
|
|
6937
5754
|
_check_is_tensor("input", input, "diagonal_scatter")
|
|
6938
5755
|
_check_is_tensor("src", src, "diagonal_scatter")
|
|
6939
|
-
_check_is_int(offset, "offset", "diagonal_scatter")
|
|
6940
|
-
_check_is_int(dim1, "dim1", "diagonal_scatter")
|
|
6941
|
-
_check_is_int(dim2, "dim2", "diagonal_scatter")
|
|
6942
5756
|
input_diag = input.diagonal(offset, dim1, dim2)
|
|
6943
5757
|
_check_diagonal_scatter_shape(input_diag.shape, src.shape)
|
|
6944
|
-
|
|
6945
|
-
|
|
6946
|
-
|
|
5758
|
+
input_shape = input.shape
|
|
5759
|
+
zeros_shape = list(input_shape)
|
|
5760
|
+
m, n = input_shape[dim1], input_shape[dim2]
|
|
5761
|
+
if m == n:
|
|
5762
|
+
src = src - input_diag
|
|
5763
|
+
src = ops.diag_embed(src, offset, dim1, dim2)
|
|
5764
|
+
return input + src
|
|
5765
|
+
if m > n:
|
|
5766
|
+
axis = dim2
|
|
5767
|
+
zeros_shape[axis] = m - n
|
|
5768
|
+
else:
|
|
5769
|
+
axis = dim1
|
|
5770
|
+
zeros_shape[axis] = n - m
|
|
5771
|
+
zeros_tensor = zeros(zeros_shape, dtype=input.dtype)
|
|
5772
|
+
input = concat((input, zeros_tensor), axis)
|
|
5773
|
+
input_diag = input.diagonal(offset, dim1, dim2)
|
|
5774
|
+
if src.shape != input_diag.shape:
|
|
5775
|
+
zeros_shape = []
|
|
5776
|
+
for i, ax in enumerate(src.shape):
|
|
5777
|
+
if ax == input_diag.shape[i]:
|
|
5778
|
+
zeros_shape.append(ax)
|
|
5779
|
+
else:
|
|
5780
|
+
axis = i
|
|
5781
|
+
zeros_shape.append(input_diag.shape[i] - ax)
|
|
5782
|
+
zeros_tensor = zeros(zeros_shape, dtype=src.dtype)
|
|
5783
|
+
src = concat((src, zeros_tensor), axis)
|
|
5784
|
+
src = src - input_diag
|
|
6947
5785
|
src = ops.diag_embed(src, offset, dim1, dim2)
|
|
6948
|
-
|
|
5786
|
+
input = input + src
|
|
5787
|
+
begin = (0,) * input.ndim
|
|
5788
|
+
return slice(input, begin, input_shape)
|
|
6949
5789
|
|
|
6950
5790
|
|
|
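The new rectangular-dims branch above pads to a square, embeds, then slices back; a small numpy sketch of that strategy (illustrative values):

import numpy as np

a = np.zeros((2, 3))                        # rectangular: dim1 size 2, dim2 size 3
src = np.array([1., 2.])
sq = np.vstack([a, np.zeros((1, 3))])       # pad rows until the matrix is square
np.fill_diagonal(sq, [1., 2., 0.])          # embed src (zero-padded) on the diagonal
print(sq[:2, :])                            # slice back to the original shape
# [[1. 0. 0.]
#  [0. 2. 0.]]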
6951
5791
|
def lstsq(input, A):
|
|
@@ -7004,8 +5844,7 @@ def lstsq(input, A):
|
|
|
7004
5844
|
[-6.5000005 -4.500001 ]
|
|
7005
5845
|
[-3.500002 -2.5000017]]
|
|
7006
5846
|
"""
|
|
7007
|
-
|
|
7008
|
-
return lstsq_op(input, A)
|
|
5847
|
+
return lstsq_(input, A)
|
|
7009
5848
|
|
|
7010
5849
|
|
|
7011
5850
|
def mvlgamma(input, p):
|
|
@@ -7080,7 +5919,7 @@ def argwhere(input):
|
|
|
7080
5919
|
[[0 0 0]
|
|
7081
5920
|
[0 1 0]]
|
|
7082
5921
|
"""
|
|
7083
|
-
return
|
|
5922
|
+
return nonzero(input)
|
|
7084
5923
|
|
|
7085
5924
|
|
|
7086
5925
|
def column_stack(tensors):
|
|
@@ -7117,14 +5956,13 @@ def column_stack(tensors):
|
|
|
7117
5956
|
raise TypeError(f"For column_stack, the input must be list or tuple of tensors, but got {type(tensors)}.")
|
|
7118
5957
|
|
|
7119
5958
|
trans_x = ()
|
|
7120
|
-
_expand_dims = _get_cache_prim(P.ExpandDims)()
|
|
7121
5959
|
for tensor in tensors:
|
|
7122
5960
|
if not isinstance(tensor, Tensor):
|
|
7123
5961
|
raise TypeError(f"For column_stack, the input element must be tensor, but got {type(tensor)}.")
|
|
7124
5962
|
if tensor.ndim < 1:
|
|
7125
|
-
tensor =
|
|
5963
|
+
tensor = expand_dims(tensor, 0)
|
|
7126
5964
|
if tensor.ndim == 1:
|
|
7127
|
-
tensor =
|
|
5965
|
+
tensor = expand_dims(tensor, 1)
|
|
7128
5966
|
trans_x += (tensor,)
|
|
7129
5967
|
if not trans_x:
|
|
7130
5968
|
raise ValueError(f"For column_stack, the input must have at least 1 tensor, but got 0.")
|
|
@@ -7170,7 +6008,7 @@ def hstack(tensors):
|
|
|
7170
6008
|
if not isinstance(tensor, Tensor):
|
|
7171
6009
|
raise TypeError(f"For hstack, the input element must be tensor, but got {type(tensor)}.")
|
|
7172
6010
|
if tensor.ndim < 1:
|
|
7173
|
-
tensor =
|
|
6011
|
+
tensor = expand_dims(tensor, 0)
|
|
7174
6012
|
tuple_of_tensor += (tensor,)
|
|
7175
6013
|
if not tuple_of_tensor:
|
|
7176
6014
|
raise ValueError("For hstack, the input must have at least 1 tensor, but got 0.")
|
|
@@ -7270,7 +6108,7 @@ def movedim(x, source, destination):
|
|
|
7270
6108
|
f"For `source` and `destination` arguments, the number of elements must be the same, but got 'source':"
|
|
7271
6109
|
f" {len(source)} and 'destination': {len(destination)}.")
|
|
7272
6110
|
perm = _get_moved_perm(ndim, source, destination)
|
|
7273
|
-
return
|
|
6111
|
+
return transpose_(x, perm)
|
|
7274
6112
|
|
|
7275
6113
|
|
|
7276
6114
|
def moveaxis(x, source, destination):
|
|
@@ -7345,7 +6183,7 @@ def swapaxes(input, axis0, axis1):
|
|
|
7345
6183
|
new_perm = perm[0:axis0] + perm[axis1:axis1 + 1] + \
|
|
7346
6184
|
perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1]
|
|
7347
6185
|
|
|
7348
|
-
return
|
|
6186
|
+
return transpose_(input, new_perm)
|
|
7349
6187
|
|
|
7350
6188
|
|
|
7351
6189
|
def swapdims(input, dim0, dim1):
|
|
@@ -7453,9 +6291,56 @@ def repeat_interleave(input, repeats, axis=None):
|
|
|
7453
6291
|
return output
|
|
7454
6292
|
|
|
7455
6293
|
|
|
6294
|
+
def repeat_interleave_ext(tensor, repeats, axis=None, output_size=None):
|
|
6295
|
+
r"""
|
|
6296
|
+
Repeat elements of a tensor.
|
|
6297
|
+
|
|
6298
|
+
Args:
|
|
6299
|
+
tensor (Tensor): The input tensor.
|
|
6300
|
+
repeats (Union[int, list, tuple, Tensor]): The number of repetitions for each element.
|
|
6301
|
+
axis (int, optional): The axis along which to repeat. If ``None``, the input tensor is flattened and axis 0 is used. Default: ``None`` .
|
|
6302
|
+
output_size (int, optional): Calculated output size along the specified axis. Default: ``None`` .
|
|
6303
|
+
|
|
6304
|
+
Returns:
|
|
6305
|
+
Tensor, the elements of `tensor` repeated along `axis`, with the same dtype as `tensor`.
|
|
6306
|
+
|
|
6307
|
+
Supported Platforms:
|
|
6308
|
+
``Ascend``
|
|
6309
|
+
|
|
6310
|
+
Examples:
|
|
6311
|
+
>>> import mindspore
|
|
6312
|
+
>>> import numpy as np
|
|
6313
|
+
>>> from mindspore import mint
|
|
6314
|
+
>>> from mindspore import Tensor
|
|
6315
|
+
>>> tensor = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
|
|
6316
|
+
>>> repeats = 2
|
|
6317
|
+
>>> axis = 0
|
|
6318
|
+
>>> output = mint.repeat_interleave(tensor, repeats, axis)
|
|
6319
|
+
>>> print(output)
|
|
6320
|
+
[[0 1 2]
|
|
6321
|
+
[0 1 2]
|
|
6322
|
+
[3 4 5]
|
|
6323
|
+
[3 4 5]]
|
|
6324
|
+
"""
|
|
6325
|
+
if axis is None:
|
|
6326
|
+
tensor = tensor.ravel()
|
|
6327
|
+
axis = 0
|
|
6328
|
+
|
|
6329
|
+
size = tensor.shape[axis]
|
|
6330
|
+
if output_size is None:
|
|
6331
|
+
if isinstance(repeats, int):
|
|
6332
|
+
output_size = size * repeats
|
|
6333
|
+
elif len(repeats) == 1:
|
|
6334
|
+
output_size = size * repeats[0]
|
|
6335
|
+
else:
|
|
6336
|
+
output_size = sum(repeats)
|
|
6337
|
+
|
|
6338
|
+
return repeat_interleave_(tensor, repeats, axis, output_size)
|
|
6339
|
+
|
|
6340
|
+
|
|
7456
6341
|
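The docstring above describes `numpy.repeat` semantics; for reference, the equivalent numpy calls (illustrative):

import numpy as np

x = np.array([[0, 1, 2], [3, 4, 5]], dtype=np.int32)
print(np.repeat(x, 2, axis=0))                    # int repeats along an axis
# [[0 1 2]
#  [0 1 2]
#  [3 4 5]
#  [3 4 5]]
print(np.repeat(x.ravel(), [1, 0, 2, 1, 1, 1]))   # per-element repeats; axis=None flattens
# [0 2 2 3 4 5]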
def repeat_elements(x, rep, axis=0):
|
|
7457
6342
|
"""
|
|
7458
|
-
Repeat elements of a tensor along an axis, like `
|
|
6343
|
+
Repeat elements of a tensor along an axis, like `numpy.repeat` .
|
|
7459
6344
|
|
|
7460
6345
|
Args:
|
|
7461
6346
|
x (Tensor): The tensor to repeat values for. Must be of type: float16,
|
|
@@ -7493,34 +6378,19 @@ def repeat_elements(x, rep, axis=0):
|
|
|
7493
6378
|
const_utils.check_type_valid(ops.dtype(x), mstype.number_type, 'input x')
|
|
7494
6379
|
rep = _check_positive_int(rep, "rep", "repeat_elements")
|
|
7495
6380
|
axis = _check_is_int(axis, "axis", "repeat_elements")
|
|
7496
|
-
|
|
7497
|
-
rank_op = P.Rank()
|
|
7498
|
-
tile_op = P.Tile()
|
|
7499
|
-
expand_dims_op = P.ExpandDims()
|
|
7500
|
-
reshape_op = P.Reshape()
|
|
7501
|
-
x_rank = rank_op(x)
|
|
6381
|
+
x_rank = rank_(x)
|
|
7502
6382
|
axis = _check_axis_range(axis, x_rank, "axis", "repeat_elements")
|
|
6383
|
+
axis = axis + x.ndim if axis < 0 else axis
|
|
7503
6384
|
expand_axis = axis + 1
|
|
7504
|
-
x_expand =
|
|
6385
|
+
x_expand = expand_dims(x, expand_axis)
|
|
7505
6386
|
rep_dims = _cal_repeat_dims(x_rank, rep, expand_axis)
|
|
7506
|
-
x_expand =
|
|
7507
|
-
x_shape =
|
|
6387
|
+
x_expand = tile_(x_expand, rep_dims)
|
|
6388
|
+
x_shape = shape_(x)
|
|
7508
6389
|
x_reshape = _cal_reshape(x_shape, rep, axis)
|
|
7509
|
-
x_rep =
|
|
6390
|
+
x_rep = reshape_(x_expand, x_reshape)
|
|
7510
6391
|
return x_rep
|
|
7511
6392
|
|
|
7512
6393
|
|
|
7513
|
-
@_primexpr
|
|
7514
|
-
def _check_sequence_mask_input_len(input_shape, prim_name=None):
|
|
7515
|
-
msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
|
|
7516
|
-
if not input_shape:
|
|
7517
|
-
raise ValueError(f"{msg_prefix} input_shape must be greater than 0, but got {input_shape}.")
|
|
7518
|
-
# broadcast only supports 7d shape
|
|
7519
|
-
shape_size = len(input_shape)
|
|
7520
|
-
if shape_size >= 7:
|
|
7521
|
-
raise ValueError(f"{msg_prefix} dimension of input_shape must be less than 7, but got {shape_size}d.")
|
|
7522
|
-
|
|
7523
|
-
|
|
7524
6394
|
def sequence_mask(lengths, maxlen=None):
|
|
7525
6395
|
"""
|
|
7526
6396
|
Returns a mask tensor representing the first N positions of each cell.
|
|
@@ -7573,29 +6443,19 @@ def sequence_mask(lengths, maxlen=None):
|
|
|
7573
6443
|
[[ True True False False ]
|
|
7574
6444
|
[ True True True True ]]]
|
|
7575
6445
|
"""
|
|
7576
|
-
|
|
7577
|
-
argmax_op = P.ArgMaxWithValue()
|
|
7578
|
-
reshape_op = P.Reshape()
|
|
7579
|
-
range_op = P.Range()
|
|
7580
|
-
expand_op = P.ExpandDims()
|
|
7581
|
-
cast_op = P.Cast()
|
|
7582
|
-
to_tensor_op = P.ScalarToTensor()
|
|
7583
|
-
shape_op = P.Shape()
|
|
7584
|
-
|
|
7585
6446
|
const_utils.check_type_valid(ops.dtype(lengths), [mstype.int64, mstype.int32], 'lengths')
|
|
7586
|
-
_check_sequence_mask_input_len(shape_op(lengths), "sequence_mask")
|
|
7587
6447
|
|
|
7588
6448
|
if maxlen is None:
|
|
7589
|
-
flatten_data =
|
|
7590
|
-
flatten_data =
|
|
7591
|
-
_, value =
|
|
7592
|
-
maxlen =
|
|
6449
|
+
flatten_data = reshape_(lengths, (-1,))
|
|
6450
|
+
flatten_data = cast_(flatten_data, mstype.float32)
|
|
6451
|
+
_, value = arg_max_with_value_(flatten_data)
|
|
6452
|
+
maxlen = cast_(value, mstype.int32)
|
|
7593
6453
|
else:
|
|
7594
6454
|
maxlen = _check_positive_int(maxlen, "maxlen", "sequence_mask")
|
|
7595
|
-
maxlen =
|
|
6455
|
+
maxlen = scalar_to_tensor_(maxlen, mstype.int32)
|
|
7596
6456
|
|
|
7597
|
-
range_vector =
|
|
7598
|
-
mask =
|
|
6457
|
+
range_vector = range_(scalar_to_tensor_(0, mstype.int32), maxlen, scalar_to_tensor_(1, mstype.int32))
|
|
6458
|
+
mask = expand_dims(lengths, -1)
|
|
7599
6459
|
result = range_vector < mask
|
|
7600
6460
|
return result
|
|
7601
6461
|
|
|
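The rewritten body above is a broadcast comparison of a 0..maxlen range row against per-entry lengths; a compact usage check (illustrative):

import numpy as np
from mindspore import Tensor, ops

lengths = Tensor(np.array([1, 3], np.int32))
print(ops.sequence_mask(lengths, maxlen=4))
# [[ True False False False]
#  [ True  True  True False]]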
@@ -7608,35 +6468,6 @@ def top_k(input_x, k, sorted=True):
|
|
|
7608
6468
|
return top_k_(input_x, k)
|
|
7609
6469
|
|
|
7610
6470
|
|
|
7611
|
-
def deepcopy(input_x):
|
|
7612
|
-
"""
|
|
7613
|
-
Returns a deepcopy of input tensor.
|
|
7614
|
-
|
|
7615
|
-
Args:
|
|
7616
|
-
input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
|
|
7617
|
-
|
|
7618
|
-
Returns:
|
|
7619
|
-
Tensor, a deepcopy of `input_x`.
|
|
7620
|
-
|
|
7621
|
-
Raises:
|
|
7622
|
-
TypeError: If `input_x` is not a Tensor.
|
|
7623
|
-
|
|
7624
|
-
Supported Platforms:
|
|
7625
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
7626
|
-
|
|
7627
|
-
Examples:
|
|
7628
|
-
>>> import mindspore
|
|
7629
|
-
>>> from mindspore import Tensor, ops
|
|
7630
|
-
>>> input = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
|
|
7631
|
-
>>> output = ops.deepcopy(input)
|
|
7632
|
-
>>> print(output)
|
|
7633
|
-
[[0 1]
|
|
7634
|
-
[2 1]]
|
|
7635
|
-
"""
|
|
7636
|
-
_deepcopy = _get_cache_prim(P.Identity)()
|
|
7637
|
-
return _deepcopy(input_x)
|
|
7638
|
-
|
|
7639
|
-
|
|
7640
6471
|
__all__ = [
|
|
7641
6472
|
'unique',
|
|
7642
6473
|
'unique_with_pad',
|
|
@@ -7651,8 +6482,10 @@ __all__ = [
|
|
|
7651
6482
|
'ger',
|
|
7652
6483
|
'ones',
|
|
7653
6484
|
'ones_like',
|
|
6485
|
+
'ones_like_ext',
|
|
7654
6486
|
'zeros',
|
|
7655
6487
|
'zeros_like',
|
|
6488
|
+
'zeros_like_ext',
|
|
7656
6489
|
'shape',
|
|
7657
6490
|
'shape_',
|
|
7658
6491
|
'reverse',
|
|
@@ -7660,11 +6493,12 @@ __all__ = [
|
|
|
7660
6493
|
'hamming_window',
|
|
7661
6494
|
'chunk',
|
|
7662
6495
|
'full',
|
|
6496
|
+
'full_ext',
|
|
7663
6497
|
'full_like',
|
|
7664
6498
|
'dyn_shape',
|
|
7665
6499
|
'rank',
|
|
7666
|
-
'range',
|
|
7667
6500
|
'arange',
|
|
6501
|
+
'range',
|
|
7668
6502
|
'reshape',
|
|
7669
6503
|
'reshape_',
|
|
7670
6504
|
'flatten',
|
|
@@ -7718,6 +6552,7 @@ __all__ = [
|
|
|
7718
6552
|
'narrow',
|
|
7719
6553
|
'ravel',
|
|
7720
6554
|
'scatter_add',
|
|
6555
|
+
'scatter_add_ext',
|
|
7721
6556
|
'scatter_mul',
|
|
7722
6557
|
'scatter_max',
|
|
7723
6558
|
'scatter_min',
|
|
@@ -7746,7 +6581,6 @@ __all__ = [
|
|
|
7746
6581
|
'index_fill',
|
|
7747
6582
|
'index_select',
|
|
7748
6583
|
'max',
|
|
7749
|
-
'argmax',
|
|
7750
6584
|
'min',
|
|
7751
6585
|
'unsorted_segment_sum',
|
|
7752
6586
|
'population_count',
|
|
@@ -7773,6 +6607,7 @@ __all__ = [
|
|
|
7773
6607
|
'aminmax',
|
|
7774
6608
|
'sort',
|
|
7775
6609
|
'top_k',
|
|
7776
|
-
'deepcopy'
|
|
6610
|
+
'deepcopy',
|
|
6611
|
+
'flip'
|
|
7777
6612
|
]
|
|
7778
6613
|
__all__.sort()
|