mindspore-2.2.11-cp37-cp37m-manylinux1_x86_64.whl → mindspore-2.3.0rc1-cp37-cp37m-manylinux1_x86_64.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of mindspore has been flagged as potentially problematic; see the registry listing for details.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +5 -4
- mindspore/_akg/akg/composite/build_module.py +155 -11
- mindspore/_akg/akg/config/repository.json +38 -0
- mindspore/_akg/akg/ms/info_version_adapt.py +29 -0
- mindspore/_akg/akg/topi/cpp/impl.py +1 -1
- mindspore/_akg/akg/tvm/_ffi/base.py +1 -1
- mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -1
- mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +2 -1
- mindspore/_akg/akg/utils/composite_op_helper.py +4 -2
- mindspore/_akg/akg/utils/dump_ascend_meta.py +2 -2
- mindspore/_akg/akg/utils/gen_random.py +14 -8
- mindspore/_akg/akg/utils/op_dsl.py +11 -0
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +5 -5
- mindspore/_c_dataengine.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +58 -0
- mindspore/_extends/builtin_operations.py +2 -1
- mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
- mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
- mindspore/_extends/parse/__init__.py +18 -14
- mindspore/_extends/parse/compile_config.py +229 -0
- mindspore/_extends/parse/parser.py +155 -59
- mindspore/_extends/parse/resources.py +40 -7
- mindspore/_extends/parse/standard_method.py +124 -204
- mindspore/_extends/remote/kernel_build_server.py +2 -0
- mindspore/_mindspore_offline_debug.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_profiler.py +30 -0
- mindspore/amp.py +24 -18
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +3 -1
- mindspore/common/_jit_fallback_utils.py +2 -3
- mindspore/common/_register_for_adapter.py +7 -0
- mindspore/common/_stub_tensor.py +6 -1
- mindspore/common/_utils.py +5 -17
- mindspore/common/api.py +91 -48
- mindspore/common/auto_dynamic_shape.py +27 -14
- mindspore/common/dtype.py +5 -4
- mindspore/common/dump.py +5 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +20 -11
- mindspore/common/lazy_inline.py +58 -17
- mindspore/common/mindir_util.py +12 -2
- mindspore/common/mutable.py +79 -14
- mindspore/common/parameter.py +19 -4
- mindspore/common/seed.py +9 -9
- mindspore/common/sparse_tensor.py +251 -18
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +321 -435
- mindspore/communication/__init__.py +3 -3
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/management.py +56 -38
- mindspore/config/op_info.config +22 -54
- mindspore/context.py +192 -54
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +6 -6
- mindspore/dataset/audio/transforms.py +711 -158
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +2 -2
- mindspore/dataset/engine/datasets.py +95 -38
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +33 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +7 -7
- mindspore/dataset/engine/datasets_vision.py +75 -71
- mindspore/dataset/engine/offload.py +5 -7
- mindspore/dataset/engine/validators.py +1 -1
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +408 -121
- mindspore/dataset/text/utils.py +9 -9
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/transforms.py +261 -76
- mindspore/dataset/utils/browse_dataset.py +9 -9
- mindspore/dataset/vision/__init__.py +3 -3
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +2 -2
- mindspore/dataset/vision/transforms.py +2264 -514
- mindspore/dataset/vision/utils.py +40 -9
- mindspore/dataset/vision/validators.py +7 -1
- mindspore/experimental/optim/__init__.py +12 -2
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +35 -34
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +40 -16
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +65 -125
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +15 -8
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +28 -19
- mindspore/hal/__init__.py +34 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/stream.py +337 -0
- mindspore/include/api/data_type.h +2 -2
- mindspore/include/api/dual_abi_helper.h +16 -3
- mindspore/include/api/model.h +1 -3
- mindspore/include/api/status.h +14 -0
- mindspore/include/c_api/model_c.h +173 -0
- mindspore/include/c_api/ms/base/types.h +1 -0
- mindspore/include/c_api/types_c.h +19 -0
- mindspore/include/dataset/execute.h +1 -3
- mindspore/include/mindapi/base/format.h +125 -23
- mindspore/include/mindapi/base/types.h +7 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +2044 -154
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +2044 -33
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/build_tbe_kernel.py +529 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/compiler.py +56 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/custom.py +1109 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/get_file_path.py +36 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/tbe_topi.py +556 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6365 -1759
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_add_custom.h +49 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +52 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +232 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +232 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.cpp +192 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.cpp +274 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +39 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/mindrecord/__init__.py +5 -1
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +25 -0
- mindspore/mindrecord/filewriter.py +74 -56
- mindspore/mindrecord/mindpage.py +40 -6
- mindspore/mindrecord/shardutils.py +3 -2
- mindspore/mindrecord/shardwriter.py +7 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +53 -66
- mindspore/mindrecord/tools/cifar10_to_mr.py +48 -63
- mindspore/mindrecord/tools/csv_to_mr.py +7 -17
- mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +11 -21
- mindspore/mindrecord/tools/tfrecord_to_mr.py +2 -10
- mindspore/multiprocessing/__init__.py +68 -0
- mindspore/nn/cell.py +86 -133
- mindspore/nn/dynamic_lr.py +2 -2
- mindspore/nn/layer/activation.py +80 -91
- mindspore/nn/layer/basic.py +4 -80
- mindspore/nn/layer/channel_shuffle.py +3 -16
- mindspore/nn/layer/container.py +3 -3
- mindspore/nn/layer/conv.py +71 -71
- mindspore/nn/layer/embedding.py +107 -46
- mindspore/nn/layer/image.py +4 -7
- mindspore/nn/layer/normalization.py +46 -38
- mindspore/nn/layer/padding.py +26 -39
- mindspore/nn/layer/pooling.py +13 -9
- mindspore/nn/layer/rnn_cells.py +5 -15
- mindspore/nn/layer/rnns.py +6 -5
- mindspore/nn/layer/thor_layer.py +1 -2
- mindspore/nn/layer/timedistributed.py +1 -1
- mindspore/nn/layer/transformer.py +52 -50
- mindspore/nn/learning_rate_schedule.py +6 -5
- mindspore/nn/loss/loss.py +44 -65
- mindspore/nn/optim/ada_grad.py +6 -4
- mindspore/nn/optim/adadelta.py +3 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +102 -181
- mindspore/nn/optim/adamax.py +4 -2
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +4 -2
- mindspore/nn/optim/ftrl.py +31 -61
- mindspore/nn/optim/lamb.py +5 -3
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +6 -4
- mindspore/nn/optim/momentum.py +13 -25
- mindspore/nn/optim/optimizer.py +6 -3
- mindspore/nn/optim/proximal_ada_grad.py +4 -2
- mindspore/nn/optim/rmsprop.py +9 -3
- mindspore/nn/optim/rprop.py +4 -2
- mindspore/nn/optim/sgd.py +4 -2
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
- mindspore/nn/probability/distribution/beta.py +2 -2
- mindspore/nn/probability/distribution/categorical.py +4 -6
- mindspore/nn/probability/distribution/cauchy.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/poisson.py +2 -2
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +13 -1
- mindspore/nn/wrap/__init__.py +2 -1
- mindspore/nn/wrap/cell_wrapper.py +33 -12
- mindspore/nn/wrap/grad_reducer.py +148 -8
- mindspore/nn/wrap/loss_scale.py +7 -7
- mindspore/numpy/__init__.py +2 -0
- mindspore/numpy/array_creations.py +2 -0
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/fft.py +431 -0
- mindspore/numpy/math_ops.py +53 -59
- mindspore/numpy/utils.py +3 -0
- mindspore/ops/__init__.py +7 -3
- mindspore/ops/_grad_experimental/grad_array_ops.py +4 -160
- mindspore/ops/_grad_experimental/grad_comm_ops.py +14 -18
- mindspore/ops/_grad_experimental/grad_inner_ops.py +8 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +92 -287
- mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
- mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
- mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/__init__.py +0 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +1 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
- mindspore/ops/_op_impl/{cpu/concat.py → aicpu/generate_eod_mask.py} +16 -17
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
- mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -3
- mindspore/ops/_op_impl/cpu/adam.py +2 -2
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
- mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
- mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
- mindspore/ops/_vmap/vmap_array_ops.py +137 -101
- mindspore/ops/_vmap/vmap_base.py +8 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +102 -56
- mindspore/ops/_vmap/vmap_image_ops.py +70 -13
- mindspore/ops/_vmap/vmap_math_ops.py +74 -49
- mindspore/ops/_vmap/vmap_nn_ops.py +164 -89
- mindspore/ops/_vmap/vmap_other_ops.py +1 -1
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +133 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +248 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +147 -0
- mindspore/ops/auto_generate/gen_extend_func.py +130 -0
- mindspore/ops/auto_generate/gen_ops_def.py +4786 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +8335 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +77 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +118 -17
- mindspore/ops/composite/math_ops.py +9 -48
- mindspore/ops/composite/multitype_ops/_compile_utils.py +166 -601
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +15 -133
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
- mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
- mindspore/ops/deprecated.py +14 -3
- mindspore/ops/extend/__init__.py +46 -0
- mindspore/ops/extend/array_func.py +152 -0
- mindspore/ops/extend/math_func.py +76 -0
- mindspore/ops/{_op_impl/tbe/atomic_addr_clean.py → extend/nn_func.py} +5 -15
- mindspore/ops/function/__init__.py +19 -11
- mindspore/ops/function/array_func.py +248 -1436
- mindspore/ops/function/clip_func.py +12 -13
- mindspore/ops/function/debug_func.py +2 -5
- mindspore/ops/function/fft_func.py +31 -0
- mindspore/ops/function/grad/grad_func.py +24 -17
- mindspore/ops/function/image_func.py +27 -21
- mindspore/ops/function/linalg_func.py +30 -53
- mindspore/ops/function/math_func.py +450 -2356
- mindspore/ops/function/nn_func.py +470 -789
- mindspore/ops/function/other_func.py +4 -5
- mindspore/ops/function/parameter_func.py +6 -92
- mindspore/ops/function/random_func.py +24 -80
- mindspore/ops/function/sparse_unary_func.py +11 -18
- mindspore/ops/function/spectral_func.py +1 -1
- mindspore/ops/function/vmap_func.py +15 -14
- mindspore/ops/functional.py +56 -62
- mindspore/ops/op_info_register.py +22 -19
- mindspore/ops/operations/__init__.py +19 -19
- mindspore/ops/operations/_embedding_cache_ops.py +1 -1
- mindspore/ops/operations/_grad_ops.py +20 -723
- mindspore/ops/operations/_inner_ops.py +233 -286
- mindspore/ops/operations/_quant_ops.py +4 -4
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +5 -480
- mindspore/ops/operations/_sequence_ops.py +4 -34
- mindspore/ops/operations/array_ops.py +100 -2481
- mindspore/ops/operations/comm_ops.py +38 -46
- mindspore/ops/operations/custom_ops.py +9 -9
- mindspore/ops/operations/debug_ops.py +101 -32
- mindspore/ops/operations/image_ops.py +3 -219
- mindspore/ops/operations/inner_ops.py +52 -38
- mindspore/ops/operations/linalg_ops.py +1 -49
- mindspore/{rewrite/ast_transformers → ops/operations/manually_defined}/__init__.py +11 -4
- mindspore/ops/operations/manually_defined/_inner.py +61 -0
- mindspore/ops/operations/manually_defined/ops_def.py +1391 -0
- mindspore/ops/operations/math_ops.py +752 -4588
- mindspore/ops/operations/nn_ops.py +380 -1750
- mindspore/ops/operations/other_ops.py +50 -42
- mindspore/ops/operations/random_ops.py +3 -50
- mindspore/ops/operations/sparse_ops.py +4 -4
- mindspore/ops/primitive.py +196 -96
- mindspore/ops/silent_check.py +162 -0
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +248 -0
- mindspore/ops_generate/arg_handler.py +147 -0
- mindspore/ops_generate/gen_aclnn_implement.py +266 -0
- mindspore/ops_generate/gen_ops.py +1062 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +129 -0
- mindspore/ops_generate/gen_pyboost_func.py +932 -0
- mindspore/ops_generate/gen_utils.py +188 -0
- mindspore/ops_generate/op_proto.py +138 -0
- mindspore/ops_generate/pyboost_utils.py +364 -0
- mindspore/ops_generate/template.py +238 -0
- mindspore/parallel/__init__.py +6 -4
- mindspore/parallel/_auto_parallel_context.py +28 -4
- mindspore/parallel/_cell_wrapper.py +16 -9
- mindspore/parallel/_cost_model_context.py +1 -1
- mindspore/parallel/_dp_allreduce_fusion.py +159 -159
- mindspore/parallel/_parallel_serialization.py +28 -12
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +22 -8
- mindspore/parallel/_transformer/__init__.py +1 -1
- mindspore/parallel/_transformer/layers.py +1 -1
- mindspore/parallel/_transformer/loss.py +1 -1
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/op_parallel_config.py +1 -1
- mindspore/parallel/_transformer/transformer.py +9 -9
- mindspore/parallel/_utils.py +131 -6
- mindspore/parallel/algo_parameter_config.py +6 -6
- mindspore/parallel/checkpoint_transform.py +156 -26
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +345 -0
- mindspore/parallel/cluster/process_entity/_utils.py +116 -0
- mindspore/parallel/cluster/run.py +139 -0
- mindspore/parallel/mpi/__init__.py +1 -1
- mindspore/parallel/mpi/_mpi_config.py +1 -1
- mindspore/parallel/parameter_broadcast.py +152 -0
- mindspore/parallel/shard.py +99 -2
- mindspore/profiler/common/util.py +20 -0
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +66 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +77 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +146 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +108 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +80 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +52 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +104 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +59 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
- mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
- mindspore/profiler/parser/ascend_flops_generator.py +27 -5
- mindspore/profiler/parser/ascend_fpbp_generator.py +8 -2
- mindspore/profiler/parser/ascend_hccl_generator.py +27 -279
- mindspore/profiler/parser/ascend_msprof_exporter.py +122 -118
- mindspore/profiler/parser/ascend_msprof_generator.py +67 -273
- mindspore/profiler/parser/ascend_op_generator.py +68 -27
- mindspore/profiler/parser/ascend_timeline_generator.py +292 -131
- mindspore/profiler/parser/base_timeline_generator.py +17 -3
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -1
- mindspore/profiler/parser/framework_parser.py +11 -4
- mindspore/profiler/parser/integrator.py +3 -1
- mindspore/profiler/parser/memory_usage_parser.py +8 -2
- mindspore/profiler/parser/minddata_analyzer.py +8 -2
- mindspore/profiler/parser/minddata_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_analyzer.py +5 -3
- mindspore/profiler/parser/msadvisor_parser.py +10 -4
- mindspore/profiler/parser/profiler_info.py +5 -0
- mindspore/profiler/profiling.py +373 -171
- mindspore/rewrite/__init__.py +2 -13
- mindspore/rewrite/api/node.py +122 -36
- mindspore/rewrite/api/pattern_engine.py +2 -3
- mindspore/rewrite/api/scoped_value.py +16 -15
- mindspore/rewrite/api/symbol_tree.py +46 -30
- mindspore/rewrite/ast_helpers/__init__.py +3 -6
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
- mindspore/rewrite/common/__init__.py +1 -2
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
- mindspore/rewrite/{namer.py → common/namer.py} +63 -18
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/node/__init__.py +5 -5
- mindspore/rewrite/node/call_function.py +23 -7
- mindspore/rewrite/node/cell_container.py +7 -3
- mindspore/rewrite/node/control_flow.py +53 -28
- mindspore/rewrite/node/node.py +212 -196
- mindspore/rewrite/node/node_manager.py +51 -22
- mindspore/rewrite/node/node_topological_manager.py +3 -23
- mindspore/rewrite/parsers/__init__.py +12 -0
- mindspore/rewrite/parsers/arguments_parser.py +8 -9
- mindspore/rewrite/parsers/assign_parser.py +635 -413
- mindspore/rewrite/parsers/attribute_parser.py +3 -4
- mindspore/rewrite/parsers/class_def_parser.py +107 -144
- mindspore/rewrite/parsers/constant_parser.py +5 -5
- mindspore/rewrite/parsers/container_parser.py +4 -6
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +31 -98
- mindspore/rewrite/parsers/function_def_parser.py +13 -5
- mindspore/rewrite/parsers/if_parser.py +28 -10
- mindspore/rewrite/parsers/module_parser.py +8 -182
- mindspore/rewrite/parsers/parser.py +1 -5
- mindspore/rewrite/parsers/parser_register.py +1 -1
- mindspore/rewrite/parsers/return_parser.py +5 -10
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
- mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
- mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
- mindspore/run_check/_check_version.py +6 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +9 -19
- mindspore/scipy/__init__.py +2 -1
- mindspore/scipy/fft.py +133 -0
- mindspore/scipy/linalg.py +140 -55
- mindspore/scipy/ops.py +15 -71
- mindspore/scipy/ops_grad.py +5 -34
- mindspore/scipy/optimize/line_search.py +2 -2
- mindspore/scipy/optimize/minimize.py +1 -1
- mindspore/train/__init__.py +3 -2
- mindspore/train/_utils.py +178 -4
- mindspore/train/amp.py +167 -245
- mindspore/train/callback/_backup_and_restore.py +4 -4
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +47 -21
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_landscape.py +15 -10
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +13 -14
- mindspore/train/callback/_time_monitor.py +2 -2
- mindspore/train/data_sink.py +1 -1
- mindspore/train/dataset_helper.py +19 -4
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/accuracy.py +7 -7
- mindspore/train/metrics/confusion_matrix.py +8 -6
- mindspore/train/metrics/cosine_similarity.py +6 -4
- mindspore/train/metrics/error.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/perplexity.py +2 -1
- mindspore/train/metrics/topk.py +2 -2
- mindspore/train/mind_ir_pb2.py +75 -6
- mindspore/train/model.py +41 -27
- mindspore/train/serialization.py +262 -133
- mindspore/train/summary/_writer_pool.py +1 -1
- mindspore/train/summary/summary_record.py +56 -34
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/version.py +1 -1
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/METADATA +2 -2
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/RECORD +532 -1075
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/entry_points.txt +1 -0
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
- mindspore/config/super_bar_config.json +0 -544
- mindspore/gen_ops.py +0 -273
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/nn/layer/flash_attention.py +0 -189
- mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
- mindspore/ops/_op_impl/tbe/__init__.py +0 -47
- mindspore/ops/_op_impl/tbe/abs.py +0 -38
- mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/acos.py +0 -37
- mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/acosh.py +0 -37
- mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
- mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
- mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
- mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
- mindspore/ops/_op_impl/tbe/add.py +0 -42
- mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/add_n.py +0 -39
- mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
- mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
- mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
- mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
- mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
- mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
- mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
- mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/asin.py +0 -37
- mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/asinh.py +0 -37
- mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/assign.py +0 -79
- mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
- mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
- mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/atan.py +0 -37
- mindspore/ops/_op_impl/tbe/atan2.py +0 -38
- mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/atanh.py +0 -37
- mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
- mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
- mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
- mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
- mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
- mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
- mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
- mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
- mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
- mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
- mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cast.py +0 -55
- mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/cdist.py +0 -38
- mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/ceil.py +0 -37
- mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/celu.py +0 -39
- mindspore/ops/_op_impl/tbe/centralization.py +0 -39
- mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
- mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/concat.py +0 -40
- mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
- mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
- mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
- mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
- mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
- mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/cos.py +0 -37
- mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/cosh.py +0 -37
- mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
- mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cummin.py +0 -41
- mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
- mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
- mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
- mindspore/ops/_op_impl/tbe/diag.py +0 -38
- mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
- mindspore/ops/_op_impl/tbe/dilation.py +0 -40
- mindspore/ops/_op_impl/tbe/div.py +0 -41
- mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
- mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
- mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
- mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
- mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
- mindspore/ops/_op_impl/tbe/elu.py +0 -38
- mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/equal.py +0 -42
- mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/erf.py +0 -37
- mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfc.py +0 -37
- mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
- mindspore/ops/_op_impl/tbe/exp.py +0 -40
- mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
- mindspore/ops/_op_impl/tbe/expm1.py +0 -37
- mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
- mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/fill.py +0 -56
- mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/flatten.py +0 -48
- mindspore/ops/_op_impl/tbe/floor.py +0 -37
- mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
- mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
- mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
- mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
- mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
- mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
- mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/ger.py +0 -43
- mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/greater.py +0 -43
- mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
- mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
- mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
- mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
- mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
- mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
- mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
- mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/im2col.py +0 -42
- mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
- mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
- mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/inv.py +0 -38
- mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/invert.py +0 -37
- mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/iou.py +0 -38
- mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/is_close.py +0 -40
- mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
- mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
- mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
- mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
- mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
- mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
- mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/lerp.py +0 -38
- mindspore/ops/_op_impl/tbe/less.py +0 -41
- mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/log.py +0 -40
- mindspore/ops/_op_impl/tbe/log1p.py +0 -37
- mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
- mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
- mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
- mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
- mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/matmul.py +0 -53
- mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
- mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
- mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
- mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum.py +0 -39
- mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
- mindspore/ops/_op_impl/tbe/minimum.py +0 -40
- mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mish.py +0 -37
- mindspore/ops/_op_impl/tbe/mod.py +0 -41
- mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/mul.py +0 -37
- mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
- mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
- mindspore/ops/_op_impl/tbe/neg.py +0 -39
- mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
- mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
- mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
- mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
- mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
- mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/pack.py +0 -58
- mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
- mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
- mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/pdist.py +0 -36
- mindspore/ops/_op_impl/tbe/pooling.py +0 -46
- mindspore/ops/_op_impl/tbe/population_count.py +0 -38
- mindspore/ops/_op_impl/tbe/pow.py +0 -41
- mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/prelu.py +0 -37
- mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/range.py +0 -39
- mindspore/ops/_op_impl/tbe/real_div.py +0 -38
- mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
- mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
- mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
- mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
- mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6.py +0 -38
- mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/renorm.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
- mindspore/ops/_op_impl/tbe/rint.py +0 -37
- mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roll.py +0 -42
- mindspore/ops/_op_impl/tbe/round.py +0 -38
- mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
- mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
- mindspore/ops/_op_impl/tbe/select.py +0 -38
- mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/selu.py +0 -39
- mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sgd.py +0 -62
- mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sign.py +0 -38
- mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/sin.py +0 -37
- mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sinh.py +0 -37
- mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/slice.py +0 -58
- mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
- mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax.py +0 -37
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
- mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/softplus.py +0 -37
- mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softsign.py +0 -37
- mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sort.py +0 -38
- mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/split_d.py +0 -38
- mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/split_v.py +0 -39
- mindspore/ops/_op_impl/tbe/splitv.py +0 -39
- mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/square.py +0 -38
- mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
- mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
- mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
- mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
- mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
- mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
- mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
- mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
- mindspore/ops/_op_impl/tbe/sub.py +0 -39
- mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tan.py +0 -38
- mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh.py +0 -37
- mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
- mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
- mindspore/ops/_op_impl/tbe/tile.py +0 -37
- mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
- mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
- mindspore/ops/_op_impl/tbe/transpose.py +0 -60
- mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
- mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
- mindspore/ops/_op_impl/tbe/trunc.py +0 -39
- mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/unpack.py +0 -38
- mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
- mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
- mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
- mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
- mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
- mindspore/ops/_tracefunc.py +0 -241
- mindspore/ops/arg_dtype_cast.py +0 -54
- mindspore/rewrite/api/tree_node_helper.py +0 -60
- mindspore/rewrite/ast_creator_register.py +0 -37
- mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
- mindspore/rewrite/namespace.py +0 -53
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
-# Copyright 2022 Huawei Technologies Co., Ltd
+# Copyright 2022-2023 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -35,14 +35,11 @@ from mindspore.ops.operations._sequence_ops import TensorToList
 from mindspore.ops.operations.array_ops import (
     UniqueConsecutive,
     SearchSorted,
-    NonZero,
     MatrixDiagV3,
     MatrixDiagPartV3,
     MatrixSetDiagV3,
     Fills,
     Col2Im,
-    ArgMaxWithValue,
-    ArgMinWithValue,
     ScatterNdMax,
     ScatterNdMul,
     IndexFill,
@@ -52,7 +49,9 @@ from mindspore.ops.operations.array_ops import (
     Lstsq,
     Mvlgamma,
     Tril,
-    Argmax
+    Argmax,
+    ArgMaxWithValue,
+    ArgMinWithValue
 )
 from mindspore.ops.operations.array_ops import TensorScatterElements
 from mindspore.common import Tensor
@@ -61,53 +60,66 @@ from mindspore import _checkparam as validator
 from mindspore._c_expression import Tensor as Tensor_
 from mindspore.ops._utils.utils import ms_arrange
 
-
+from mindspore.ops.auto_generate import cat, range, scatter_nd, deepcopy, masked_fill, diagonal, expand_dims, \
+    nonzero, reverse, transpose, unsorted_segment_sum, diag, gather, gather_d, gather_nd, reshape, broadcast_to, \
+    strided_slice
+from mindspore.ops.operations.manually_defined import tile, rank, scalar_cast
+
+arg_max_with_value_ = ArgMaxWithValue()
+batch_to_space_nd_v2_ = P.BatchToSpaceNDV2()
+cast_ = P.Cast()
+diag_ = P.Diag()
+dynamic_broadcast_to_ = DynamicBroadcastTo()
 eye_ = P.Eye()
 fills_ = Fills()
+fillv2_ = P.FillV2()
+flatten_ = P.Flatten()
+gather_ = P.Gather()
+gather_d_ = P.GatherD()
+gather_nd_ = P.GatherNd()
+ger_ = P.Ger()
+index_fill_ = IndexFill()
+lstsq_ = Lstsq()
+masked_select_ = P.MaskedSelect()
+matrix_band_part_ = P.array_ops.MatrixBandPart()
 ones_ = P.Ones()
 ones_like_ = P.OnesLike()
-
-
-size_ = P.Size()
-shape_ = P.Shape()
+population_count_ = P.PopulationCount()
+range_ = P.Range()
 rank_ = P.Rank()
-
+reduce_max_ = P.ReduceMax()
+reduce_min_ = P.ReduceMin()
 reshape_ = P.Reshape()
-
-expand_dims_ = P.ExpandDims()
-transpose_ = P.Transpose()
+scalar_to_tensor_ = P.ScalarToTensor()
 scatter_add_ = P.ScatterAdd()
+scatter_div_ = P.ScatterDiv()
 scatter_max_ = P.ScatterMax()
 scatter_min_ = P.ScatterMin()
 scatter_mul_ = P.ScatterMul()
-scatter_div_ = P.ScatterDiv()
 scatter_nd_ = P.ScatterNd()
-
-
-
-nonzero_ = NonZero()
-scalar_cast_ = P.ScalarCast()
+scatter_update_ = P.ScatterUpdate()
+shape_ = P.Shape()
+size_ = P.Size()
 tensor_scatter_add_ = P.TensorScatterAdd()
-tensor_scatter_sub_ = P.TensorScatterSub()
-tensor_scatter_mul_ = P.TensorScatterMul()
 tensor_scatter_div_ = P.TensorScatterDiv()
-tensor_scatter_min_ = P.TensorScatterMin()
 tensor_scatter_max_ = P.TensorScatterMax()
-
-
-
-matrix_band_part_ = P.array_ops.MatrixBandPart()
-ger_ = P.Ger()
-diag_ = P.Diag()
-range_ = P.Range()
-zeros_like_ = P.ZerosLike()
-cast_ = P.Cast()
+tensor_scatter_min_ = P.TensorScatterMin()
+tensor_scatter_mul_ = P.TensorScatterMul()
+tensor_scatter_sub_ = P.TensorScatterSub()
 tensor_select_ = P.Select()
-
+tensor_shape_ = P.TensorShape()
+tensor_slice = P.Slice()
+tile_ = P.Tile()
+transpose_ = P.Transpose()
+tuple_to_array_ = P.TupleToArray()
+tuple_to_tensor_ = TupleToTensor()
+unique_ = P.Unique()
+unique_with_pad_ = P.UniqueWithPad()
+unsorted_segment_max_ = P.UnsortedSegmentMax()
+unsorted_segment_min_ = P.UnsortedSegmentMin()
+unsorted_segment_prod_ = P.UnsortedSegmentProd()
 unsorted_segment_sum_ = P.UnsortedSegmentSum()
-
-reduce_max = P.ReduceMax()
-reduce_min = P.ReduceMin()
+zeros_like_ = P.ZerosLike()
 
 
 @_primexpr
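The long run of additions above standardizes one pattern: every primitive the functional layer needs is built once at module import and then reused by name. A minimal sketch of the same idea (a hypothetical standalone module, not the MindSpore source itself):

    from mindspore.ops import operations as P

    cast_ = P.Cast()            # constructed once, at import time

    def my_cast(x, dtype):
        # every call reuses the same primitive instance instead of
        # paying for a fresh P.Cast() construction per invocation
        return cast_(x, dtype)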
@@ -187,8 +199,11 @@ def arange(start=0, end=None, step=1, *, dtype=None):
 
     Keyword Args:
         dtype (mindspore.dtype, optional): The required data type of returned Tensor. Default: ``None`` .
-
-
+            When `dtype` is not specified or ``None``:
+
+            If `start`, `end`, and `step` are all integers, the dtype of output is int64,
+
+            If `start`, `end`, and `step` contain at least one floating-point number, the dtype of output is float32.
 
     Returns:
         A 1-D Tensor, with the same type as the inputs.
@@ -225,7 +240,7 @@ def arange(start=0, end=None, step=1, *, dtype=None):
     >>> print(output)
     [12. 11. 10.  9.  8.  7.  6.  5.  4.  3.]
     >>> print(output.dtype)
-
+    Float32
     """
     if end is None:
         start, end = 0, start
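The new dtype rule documented above is easy to verify interactively; a quick check (assuming a working MindSpore 2.3.0rc1 install):

    from mindspore import ops

    print(ops.arange(0, 5, 1).dtype)     # all-integer inputs -> Int64
    print(ops.arange(0.0, 5, 1).dtype)   # any float among start/end/step -> Float32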
@@ -237,67 +252,24 @@ def arange(start=0, end=None, step=1, *, dtype=None):
     if start.shape != () or end.shape != () or step.shape != ():
         raise ValueError(f"For arange, the input args must be a TensorScalar,"
                          f" but got start shape:{start.shape}, end shape:{end.shape}, step shape:{step.shape}")
-
-    data = range_op(start, end, step)
+    data = range_(start, end, step)
     if dtype is not None:
         data = cast_(data, dtype)
     return data
 
 
-def cat(tensors, axis=0):
-
-
-
-    The input data is a tuple or a list of tensors. These tensors have the same rank :math:`R`.
-    Set the given axis as :math:`m`, and :math:`0 \le m < R`. Set the number of input tensors as :math:`N`.
-    For the :math:`i`-th tensor :math:`t_i`, it has the shape of :math:`(x_1, x_2, ..., x_{mi}, ..., x_R)`.
-    :math:`x_{mi}` is the :math:`m`-th dimension of the :math:`t_i`. Then, the shape of the output tensor is
-
-    .. math::
-
-        (x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)
-
-    Args:
-        tensors (Union[tuple, list]): A tuple or a list of input tensors.
-            Suppose there are two tensors in this tuple or list, namely t1 and t2.
-            To perform `concat` in the axis 0 direction, except for the :math:`0`-th axis,
-            all other dimensions should be equal, that is,
-            :math:`t1.shape[1] = t2.shape[1], t1.shape[2] = t2.shape[2], ..., t1.shape[R-1] = t2.shape[R-1]`,
-            where :math:`R` represents the rank of tensor.
-        axis (int): The specified axis, whose value is in range :math:`[-R, R)`. Default: ``0`` .
-
-    Returns:
-        Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
-        The data type is the same with `tensors`.
-
-    Raises:
-        TypeError: If `axis` is not an int.
-        ValueError: If `tensors` have different dimension of tensor.
-        ValueError: If `axis` not in range :math:`[-R, R)`.
-        RuntimeError: If tensor's shape in `tensors` except for `axis` are different.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
+def concat(tensors, axis=0):
+    """
+    Alias for :func:`mindspore.ops.cat()`.
 
-    Examples:
-
-
-
-
-
-        >>> output = ops.cat((input_x1, input_x2))
-        >>> print(output)
-        [[0. 1.]
-         [2. 1.]
-         [0. 1.]
-         [2. 1.]]
-        >>> output = ops.cat((input_x1, input_x2), 1)
-        >>> print(output)
-        [[0. 1. 0. 1.]
-         [2. 1. 2. 1.]]
+    Tutorial Examples:
+        - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/r2.3.q1/beginner/tensor.html#tensor-operation>`_
+        - `Vision Transformer Image Classification - Building ViT as a whole
+          <https://mindspore.cn/tutorials/application/en/r2.3.q1/cv/vit.html#building-vit-as-a-whole>`_
+        - `Sentiment Classification Implemented by RNN - Dense
+          <https://mindspore.cn/tutorials/application/en/r2.3.q1/nlp/sentiment_analysis.html#dense>`_
     """
-
-    return _concat(tensors)
+    return cat(tensors, axis)
 
 
 def eye(n, m=None, dtype=None):
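`concat` is now a thin alias that forwards to the generated `cat`, so the two calls below should be interchangeable (a usage sketch, not taken from the diff):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x1 = Tensor(np.array([[0., 1.], [2., 1.]]), ms.float32)
    x2 = Tensor(np.array([[0., 1.], [2., 1.]]), ms.float32)
    # both routes produce the same (2, 4) result
    print((ops.concat((x1, x2), 1) == ops.cat((x1, x2), 1)).all())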
@@ -305,14 +277,14 @@ def eye(n, m=None, dtype=None):
     Creates a tensor with ones on the diagonal and zeros in the rest.
 
     Note:
-
-
+        The data type of returned tensor can be float16, float32, int8, int16, int32, int64, uint8
+        or bool on Ascend platforms.
 
     Args:
         n (int): The number of rows of returned tensor. Constant value only.
-        m (int): The number of columns of returned tensor. Constant value only.
+        m (int, optional): The number of columns of returned tensor. Constant value only.
             Default: ``None`` , if ``None`` , the number of columns is as the same as n.
-        dtype (mindspore.dtype): MindSpore's dtype, the data type of the returned tensor.
+        dtype (mindspore.dtype, optional): MindSpore's dtype, the data type of the returned tensor.
             The data type can be bool or Number.
             Default: ``None`` , the data type of the returned tensor is mindspore.float32.
 
@@ -336,11 +308,11 @@ def eye(n, m=None, dtype=None):
      [0 1]]
     >>> print(output.dtype)
     Int32
-    >>> output = ops.eye(1, 2, mindspore.
+    >>> output = ops.eye(1, 2, mindspore.float32)
     >>> print(output)
     [[1. 0.]]
     >>> print(output.dtype)
-
+    Float32
     >>> output = ops.eye(2, dtype=mindspore.int32)
     >>> print(output)
     [[1 0]
@@ -472,48 +444,7 @@ def where(condition, x, y):
     condition = broadcast_to(condition, output_shape)
     x = broadcast_to(x, output_shape)
     y = broadcast_to(y, output_shape)
-
-    return _select(condition, x, y)
-
-
-def reverse(x, axis):
-    """
-    Reverses specific dimensions of a tensor.
-
-    .. warning::
-        The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "input_x".
-
-    Args:
-        x (Tensor): The target tensor.
-            The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
-        axis (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.
-
-    Outputs:
-        Tensor, has the same shape and type as `x`.
-
-    Raises:
-        TypeError: If `axis` is neither list nor tuple.
-        TypeError: If element of `axis` is not an int.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
-        >>> output = ops.reverse(input_x, axis=[1])
-        >>> print(output)
-        [[4 3 2 1]
-         [8 7 6 5]]
-        >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
-        >>> output = ops.reverse(input_x, axis=[1, 0])
-        >>> print(output)
-        [[8 7 6 5]
-         [4 3 2 1]]
-    """
-    return P.ReverseV2(axis)(x)
+    return tensor_select_(condition, x, y)
 
 
 def ravel(input):
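With the hand-written `reverse` gone (it is now imported from `auto_generate`, per the import hunk above), `where` is the only caller left here; note that it still broadcasts all three operands to a common shape before the select. A small sketch of that behavior (shapes are our own illustration):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    cond = Tensor(np.array([True, False]))            # shape (2,)
    x = Tensor(np.array([[1.0], [2.0]]), ms.float32)  # shape (2, 1)
    y = ops.zeros((2, 2))                             # shape (2, 2)
    print(ops.where(cond, x, y).shape)                # broadcast to (2, 2)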
@@ -659,8 +590,9 @@ def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
     other locations take value `off_value`.
 
     Note:
-        If the input indices
-        On Ascend, if `on_value` is
+        If the input `indices` has rank `N`, the output will have rank `N+1`.
+        The new axis is created at dimension `axis`. On Ascend, if `on_value` is int64 dtype, `indices` must be
+        int64 dtype, and the value for `on_value` and `off_value` can only be 1 and 0.
 
     Args:
         indices(Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
@@ -682,6 +614,7 @@ def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
     Raises:
         TypeError: If `axis` or `depth` is not an int.
         TypeError: If dtype of `indices` is not int32 or int64.
+        TypeError: If dtype of `on_value` is not int32, int64, float16 or float32.
        TypeError: If `indices`, `on_value` or `off_value` is not a Tensor.
         ValueError: If `axis` is not in range [-1, ndim].
         ValueError: If `depth` is less than 0.
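The rank rule in the new Note is worth a concrete check; passing Tensor-valued `on_value`/`off_value` as the Raises section requires (a sketch, not from the diff):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    indices = Tensor(np.array([0, 1, 2]), ms.int32)
    on, off = Tensor(1.0, ms.float32), Tensor(0.0, ms.float32)
    out = ops.one_hot(indices, 3, on, off)
    print(out.shape)   # rank-1 indices -> rank-2 output: (3, 3)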
@@ -715,8 +648,8 @@ def fill(type, shape, value):  # pylint: disable=redefined-outer-name
 
     Args:
         type (mindspore.dtype): The specified type of output tensor. The data type only supports
-            `bool_ <https://www.mindspore.cn/docs/en/r2.
-            `number <https://www.mindspore.cn/docs/en/r2.
+            `bool_ <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_ and
+            `number <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_ .
         shape (Union(Tensor, tuple[int])): The specified shape of output tensor.
         value (Union(Tensor, number.Number, bool)): Value to fill the returned tensor.
 
@@ -743,7 +676,7 @@ def fill(type, shape, value):  # pylint: disable=redefined-outer-name
      [0. 0. 0.]]
     """
     value = cast_(value, type)
-    return
+    return fillv2_(shape, value)
 
 
 def full(size, fill_value, *, dtype=None):  # pylint: disable=redefined-outer-name
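`fill` now casts the value and dispatches straight to the module-level `fillv2_` primitive; typical usage looks like this (a sketch):

    import mindspore as ms
    from mindspore import ops

    out = ops.fill(ms.float32, (2, 3), 0.0)
    print(out)   # a 2x3 tensor of zeros, dtype Float32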
@@ -883,21 +816,21 @@ def chunk(input, chunks, axis=0):
     length_along_dim = arr_shape[arr_axis]
 
     if chunks > length_along_dim:
-        res = P.Split(arr_axis, length_along_dim)(input)
+        res = _get_cache_prim(P.Split)(arr_axis, length_along_dim)(input)
     elif length_along_dim % chunks == 0:
-        res = P.Split(arr_axis, chunks)(input)
+        res = _get_cache_prim(P.Split)(arr_axis, chunks)(input)
     else:
         block_size = int(np.ceil(length_along_dim / chunks))
         true_chunks = int(length_along_dim // block_size)
         length1 = true_chunks * block_size
         length2 = length_along_dim - length1
-        start1 = _list_comprehensions(
+        start1 = _list_comprehensions(rank_(input), 0, True)
         size1 = _tuple_setitem(arr_shape, arr_axis, length1)
         start2 = _tuple_setitem(start1, arr_axis, length1)
         size2 = _tuple_setitem(arr_shape, arr_axis, length2)
-        res = P.Split(arr_axis, true_chunks)(tensor_slice(input, start1, size1))
+        res = _get_cache_prim(P.Split)(arr_axis, true_chunks)(tensor_slice(input, start1, size1))
         if length2:
-            res += P.Split(arr_axis, 1)(tensor_slice(input, start2, size2))
+            res += _get_cache_prim(P.Split)(arr_axis, 1)(tensor_slice(input, start2, size2))
     return res
 
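`chunk` no longer constructs a fresh `P.Split` on every call: `_get_cache_prim` memoizes primitive instances by class and constructor arguments. A minimal sketch of what such a cache does (a hypothetical simplification, not the MindSpore implementation):

    _PRIM_CACHE = {}

    def get_cache_prim(prim_cls):
        def build(*args):
            key = (prim_cls, args)
            if key not in _PRIM_CACHE:
                _PRIM_CACHE[key] = prim_cls(*args)   # construct once per distinct args
            return _PRIM_CACHE[key]                  # later calls reuse the instance
        return build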
@@ -952,15 +885,17 @@ def ones(shape, dtype=None):  # pylint: disable=redefined-outer-name
      [1. 1.]]
     """
     _dtype = mstype.float32 if dtype is None else dtype
-    ones_op = _get_cache_prim(P.FillV2)()
     value = Tensor(1, _dtype)
     if isinstance(shape, int):
         shape = tuple([shape])
     elif isinstance(shape, list):
-
+        if not shape:
+            shape = Tensor_(shape, dtype=mstype.int64)
+        else:
+            shape = Tensor(shape, dtype=mstype.int64)
     elif isinstance(shape, Tensor) and shape.ndim == 0 and shape.size == 1:
         shape = shape.reshape(1)
-    output =
+    output = fillv2_(shape, value)
     return output
@@ -993,8 +928,7 @@ def ones_like(input, *, dtype=None):
     [[1 1]
      [1 1]]
     """
-
-    output = ones_like_op(input)
+    output = ones_like_(input)
     _dtype = input.dtype if dtype is None else dtype
     output = cast_(output, _dtype)
     return output
@@ -1028,22 +962,24 @@ def zeros(size, dtype=None):  # pylint: disable=redefined-outer-name
     [[0. 0.]
      [0. 0.]]
     """
-    zero_op = _get_cache_prim(P.FillV2)()
     _dtype = mstype.float32 if dtype is None else dtype
     value = Tensor(0, _dtype)
     if isinstance(size, int):
         size = tuple([size])
     elif isinstance(size, list):
-
+        if not size:
+            size = Tensor_(size, dtype=mstype.int64)
+        else:
+            size = Tensor(size, dtype=mstype.int64)
     elif isinstance(size, Tensor) and size.ndim == 0 and size.size == 1:
         size = size.reshape(1)
-    output =
+    output = fillv2_(size, value)
     return output
 
 
 def zeros_like(input, *, dtype=None):
     r"""
-    Creates a tensor filled with 0, with the same size as
+    Creates a tensor filled with 0, with the same size as input, and the given dtype.
 
     If `dtype = None`, the tensor will have the same dtype as input `input`.
 
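Both `ones` and `zeros` now normalize their shape argument the same way: an int becomes a 1-tuple, a Python list becomes an int64 shape tensor (with a `Tensor_` fallback for the empty list), and a 0-d shape tensor is reshaped to 1-d, before everything funnels into `fillv2_`. A quick usage check (a sketch):

    import mindspore as ms
    from mindspore import ops

    print(ops.ones(2).shape)             # int shape  -> (2,)
    print(ops.zeros([2, 2], ms.int32))   # list shape -> 2x2 Int32 zeros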
@@ -1074,127 +1010,11 @@ def zeros_like(input, *, dtype=None):
      [0. 0.]]
     """
     _dtype = input.dtype if dtype is None else dtype
-
-
-    output = _zeros_like(input)
-    output = _cast(output, _dtype)
+    output = zeros_like_(input)
+    output = cast_(output, _dtype)
     return output
 
 
-def tile(input, multiples):
-    r"""
-    Replicates an input tensor with given multiples times.
-
-    Creates a new tensor by replicating `input` `multiples` times. The i'th dimension of
-    output tensor has `input.shape[i] * multiples[i]` elements, and the values of `input`
-    are replicated `multiples[i]` times along the i'th dimension.
-
-    Note:
-        The length of `multiples` must be greater or equal to the length of dimension in `input`.
-
-    Args:
-        input (Tensor): 1-D or higher dimensional Tensor. Set the shape of input tensor as
-            :math:`(x_1, x_2, ..., x_S)` .
-
-        multiples (tuple[int]): The parameter that specifies the number of replications,
-            the parameter type is tuple, and the data type is int, i.e., :math:`(y_1, y_2, ..., y_S)`.
-            The length of `multiples` cannot be smaller than the length of the shape of `input`.
-            Only constant value is allowed.
-
-    Returns:
-        Tensor, has the same data type as the `input`. Suppose the length of `multiples` is `d`,
-        the dimension of `input` is `input.dim`, and the shape of `input` is :math:`(x_1, x_2, ..., x_S)`.
-
-        - If `input.dim = d`, then the shape of their corresponding positions can be multiplied, and
-          the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_S)`.
-        - If `input.dim < d`, fill in multiple 1 in the length of the shape of `input` until their
-          lengths are consistent. Such as set the shape of `input` as :math:`(1, ..., x_1, x_2, ..., x_S)`,
-          then the shape of their corresponding positions can be multiplied, and the shape of Outputs is
-          :math:`(1*y_1, ..., x_R*y_R, x_S*y_S)`.
-
-    Raises:
-        TypeError: If `multiples` is not a tuple or its elements are not all int.
-        ValueError: If the elements of `multiples` are not all greater than 0.
-        ValueError: If the length of `multiples` are smaller than the length of dimension in `input`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
-        >>> multiples = (2, 3)
-        >>> output = ops.tile(input, multiples)
-        >>> print(output)
-        [[1. 2. 1. 2. 1. 2.]
-         [3. 4. 3. 4. 3. 4.]
-         [1. 2. 1. 2. 1. 2.]
-         [3. 4. 3. 4. 3. 4.]]
-        >>> multiples = (2, 3, 2)
-        >>> output = ops.tile(input, multiples)
-        >>> print(output)
-        [[[1. 2. 1. 2.]
-          [3. 4. 3. 4.]
-          [1. 2. 1. 2.]
-          [3. 4. 3. 4.]
-          [1. 2. 1. 2.]
-          [3. 4. 3. 4.]]
-         [[1. 2. 1. 2.]
-          [3. 4. 3. 4.]
-          [1. 2. 1. 2.]
-          [3. 4. 3. 4.]
-          [1. 2. 1. 2.]
-          [3. 4. 3. 4.]]]
-    """
-    tile_op = _get_cache_prim(P.Tile)()
-    return tile_op(input, multiples)
-
-
-def range(start, end, step):
-    r"""
-    Creates a sequence of numbers that begins at `start` and extends by increments of
-    `limit` up to but not including `end`.
-
-    The types of all 3 inputs must be the same. The type of the resulting tensor is
-    the same as the type of the inputs.
-
-    Args:
-        start (Tensor): A scalar Tensor. The first number in the sequence. Must have
-            type: int32 ,int64, float32 or float64.
-        end (Tensor): A scalar Tensor. Upper limit of the sequence, exclusive. Must
-            have type: int32 ,int64, float32 or float64.
-        step (Tensor): A scalar Tensor. Number that increments `start`. Must have
-            type: int32 ,int64, float32 or float64.
-
-    Returns:
-        A 1-D Tensor, with the same type as the inputs.
-
-    Raises:
-        TypeError: If `start`, `end` or `step` is not scalar Tensor.
-        TypeError: If datatype of `start`, `end` or `step` is not same.
-        TypeError: If datatype of `start`, `end` or `step` is not supported.
-        ValueError: If `step` = 0.
-        ValueError: If `start` >= `end` when `step` > 0.
-        ValueError: If `start` <= `end` when `step` < 0.
-
-    Supported Platforms:
-        ``GPU`` ``CPU``
-
-    Examples:
-        >>> from mindspore import Tensor, ops
-        >>> from mindspore import dtype as mstype
-        >>> start = Tensor(0, mstype.int32)
-        >>> end = Tensor(10, mstype.int32)
-        >>> step = Tensor(4, mstype.int32)
-        >>> output = ops.range(start, end, step)
-        >>> print(output)
-        [0 4 8]
-    """
-    return range_(start, end, step)
-
-
 ##############################
 # Tensor Operation Functions.
 ##############################
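`tile` and `range` disappear from this file but not from the API: `tile` is re-exported from `mindspore.ops.operations.manually_defined` and `range` from `mindspore.ops.auto_generate` (see the import hunk near the top). Call sites are unchanged (a sketch):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1, 2], [3, 4]]), ms.float32)
    print(ops.tile(x, (2, 3)).shape)   # (4, 6): each dim multiplied by its multiple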
@@ -1246,15 +1066,11 @@ def unique(input):
     >>> print(idx)
     [0 1 2 1]
     """
-
-    unique_op = _get_cache_prim(P.Unique)()
-    reshape_op = _get_cache_prim(P.Reshape)()
-
     shape_x = input.shape
     length_x = get_x_shape(shape_x)
-    input =
-    y, idx =
-    idx =
+    input = reshape_(input, length_x)
+    y, idx = unique_(input)
+    idx = reshape_(idx, shape_x)
     return y, idx
 
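`unique` keeps its reshape-call-reshape structure, now routed through the module-level `reshape_`/`unique_` singletons. The documented behavior is unchanged; a sketch matching the docstring example above:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.array([1, 2, 5, 2]), ms.int32)
    y, idx = ops.unique(x)
    print(y)     # [1 2 5]
    print(idx)   # [0 1 2 1] -- position of each input element in y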
@@ -1381,7 +1197,7 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):
 
     Returns:
         Tensor containing the indices from the innermost dimension of `sorted_sequence` such that,
-        if insert the corresponding value in the `values`
+        if insert the corresponding value in the `values` Tensor, the order of `sorted_sequence` would be preserved,
         whose datatype is int32 if out_int32 is ``True`` , otherwise int64, and shape is the same as the shape of
         `values`.
 
@@ -1457,7 +1273,7 @@ def size(input_x):
 
     Args:
         input_x (Tensor): Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
-            `number <https://www.mindspore.cn/docs/en/r2.
+            `number <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_.
 
     Returns:
         int. A scalar representing the elements' size of `input_x`, tensor is the number of elements
@@ -1538,76 +1354,6 @@ def dyn_shape(input_x):
     return tensor_shape_(input_x)
 
 
-def rank(input_x):
-    """
-    Returns the rank of a tensor.
-
-    Returns a 0-D int32 Tensor representing the rank of input; the rank of a tensor
-    is the number of indices required to uniquely select each element of the tensor.
-
-    Args:
-        input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is Number.
-
-    Returns:
-        Tensor. 0-D int32 Tensor representing the rank of input, i.e., :math:`R`. The data type is an int.
-
-    Raises:
-        TypeError: If `input_x` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
-        >>> output = ops.rank(input_tensor)
-        >>> print(output)
-        2
-        >>> print(type(output))
-        <class 'int'>
-    """
-    return rank_(input_x)
-
-
-def reshape(input, shape):
-    """
-    Rearranges the input Tensor based on the given shape.
-
-    The 'shape' can only have one -1 at most, in which case it's inferred from the remaining dimensions and
-    the number of elements in the input.
-
-    Args:
-        input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-        shape (Union[tuple[int], Tensor[int]]): Constructed by multiple
-            integers, i.e., :math:`(y_1, y_2, ..., y_S)`. Only constant value is allowed.
-
-    Returns:
-        Tensor, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
-
-    Raises:
-        ValueError: Given a shape tuple, if it has several -1; or if the product
-            of its elements is less than or equal to 0 or cannot be divided by the product
-            of the input tensor shape; or if it does not match the input's array size.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
-        >>> output = ops.reshape(input, (3, 2))
-        >>> print(output)
-        [[-0.1  0.3]
-         [ 3.6  0.4]
-         [ 0.5 -3.2]]
-    """
-    return reshape_(input, shape)
-
-
 def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
     r"""
     Reverses variable length slices.
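As with `tile` above, `rank` and `reshape` are removed here only because they now come from `manually_defined` and `auto_generate` respectively; `ops.rank` and `ops.reshape` keep the documented behavior (a sketch):

    from mindspore import ops

    x = ops.ones((2, 3))
    print(ops.rank(x))                   # 2
    print(ops.reshape(x, (3, 2)).shape)  # (3, 2)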
@@ -1672,7 +1418,7 @@ def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
     [[4. 3. 2. 1.]
      [8. 7. 6. 5.]]
     """
-    return P.ReverseSequence(seq_dim=seq_dim, batch_dim=batch_dim)(x, seq_lengths)
+    return _get_cache_prim(P.ReverseSequence)(seq_dim=seq_dim, batch_dim=batch_dim)(x, seq_lengths)
 
 
 def flatten(input, order='C', *, start_dim=1, end_dim=-1):
@@ -1696,7 +1442,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
     Raises:
         TypeError: If `input` is not a Tensor.
         TypeError: If `order` is not string type.
-        ValueError: If `order` is string type, but not 'C' or 'F'
+        ValueError: If `order` is string type, but not ``'C'`` or ``'F'``.
         TypeError: If `start_dim` or `end_dim` is not int.
         ValueError: If `start_dim` is greater than `end_dim` after canonicalized.
         ValueError: If `start_dim` or `end_dim` is not in range of [-input.dim, input.dim-1].
@@ -1741,7 +1487,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
         return reshape_(input, (-1,))
     perm = ops.make_range(0, x_rank)
     new_order = ops.tuple_reversed(perm)
-    input =
+    input = transpose_(input, new_order)
 
     # Handle the default case.
     x_shape = shape_(input)
@@ -1749,7 +1495,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
     if start_dim == 1 and end_dim == -1:
         if x_rank in (0, 1):
             return reshape_(input, (-1,))
-        return
+        return flatten_(input)
 
     # Check axis.
     start_dim = canonicalize_axis(start_dim, x_rank)
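The two `flatten` fixes above fill in previously truncated right-hand sides: the reversed-order path transposes first, and the fully-default case dispatches to the `flatten_` primitive. Expected shapes (a sketch):

    from mindspore import ops

    x = ops.ones((2, 3, 4))
    print(ops.flatten(x).shape)               # default start_dim=1 -> (2, 12)
    print(ops.flatten(x, start_dim=0).shape)  # flatten everything  -> (24,)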
@@ -1936,176 +1682,6 @@ def select(cond, x, y):
     return tensor_select_(cond, input_x, input_y)
 
 
-def strided_slice(input_x,
-                  begin,
-                  end,
-                  strides,
-                  begin_mask=0,
-                  end_mask=0,
-                  ellipsis_mask=0,
-                  new_axis_mask=0,
-                  shrink_axis_mask=0):
-    r"""
-    Extracts a strided slice of a Tensor based on `begin/end` index and `strides`.
-
-    This operation extracts a fragment of size (end-begin)/strides from the given 'input_tensor'.
-    Starting from the beginning position, the fragment continues adding strides to the index until
-    all dimensions are not less than the ending position.
-
-    Note:
-        - `begin` , `end` and `strides` must have the same shape.
-        - `begin` , `end` and `strides` are all 1-D Tensor, and their shape size
-          must not greater than the dim of `input_x`.
-
-    During the slicing process, the fragment (end-begin)/strides are extracted from each dimension.
-
-    Example: For Tensor `input_x` with shape :math:`(5, 6, 7)`,
-    set `begin`, `end` and `strides` to (1, 3, 2), (3, 5, 6),
-    (1, 1, 2) respectively, then elements from index 1 to 3 are extrected for dim 0, index 3 to 5
-    are extrected for dim 1 and index 2 to 6 with a `stirded` of 2 are extrected for dim 2, this
-    process is equivalent to a pythonic slice `input_x[1:3, 3:5, 2:6:2]`.
-
-    If the length of `begin` 、 `end` and `strides` is smaller than the dim of `input_x`,
-    then all elements are extracted from the missing dims, it behaves like all the
-    missing dims are filled with zeros, size of that missing dim and ones.
-
-    Example: For Tensor `input_x` with shape :math:`(5, 6, 7)`,
-    set `begin`, `end` and `strides` to (1, 3),
-    (3, 5), (1, 1) respectively, then elements from index 1 to 3 are extrected
-    for dim 0, index 3 to 5 are extrected for dim 1 and index 3 to 5 are extrected
-    for dim 2, this process is equivalent to a pythonic slice `input_x[1:3, 3:5, 0:7]`.
-
-    Here's how a mask works:
-    For each specific mask, it will be converted to a binary representation internally, and then
-    reverse the result to start the calculation. For Tensor `input_x` with
-    shape :math:`(5, 6, 7)`. Given mask value of 3 which
-    can be represented as 0b011. Reverse that we get 0b110, which implies the first and second dim of the
-    original Tensor will be effected by this mask. See examples below, for simplicity all mask mentioned
-    below are all in their reverted binary form:
-
-    - `begin_mask` and `end_mask`
-
-      If the ith bit of `begin_mask` is 1, `begin[i]` is ignored and the fullest
-      possible range in that dimension is used instead. `end_mask` is analogous,
-      except with the end range. For Tensor `input_x` with shape :math:`(5, 6, 7, 8)`, if `begin_mask`
-      is 0b110, `end_mask` is 0b011, the slice `input_x[0:3, 0:6, 2:7:2]` is produced.
-
-    - `ellipsis_mask`
-
-      If the ith bit of `ellipsis_mask` is 1, as many unspecified dimensions as needed
-      will be inserted between other dimensions. Only one non-zero bit is allowed
-      in `ellipsis_mask`. For Tensor `input_x` with shape :math:`(5, 6, 7, 8)`, `input_x[2:,...,:6]`
-      is equivalent to `input_x[2:5,:,:,0:6]` , `input_x[2:,...]` is equivalent
-      to `input_x[2:5,:,:,:]`.
-
-    - `new_axis_mask`
-
-      If the ith bit of `new_axis_mask` is 1, `begin`, `end` and `strides` are
-      ignored and a new length 1 dimension is added at the specified position
-      in the output Tensor. For Tensor `input_x` with shape :math:`(5, 6, 7)`, if `new_axis_mask`
-      is 0b110, a new dim is added to the second dim, which will produce
-      a Tensor with shape :math:`(5, 1, 6, 7)`.
-
-    - `shrink_axis_mask`
-
-      If the ith bit of `shrink_axis_mask` is 1, `begin`, `end` and `strides`
-      are ignored and dimension i will be shrunk to 0.
-      For Tensor `input_x` with shape :math:`(5, 6, 7)`,
-      if `shrink_axis_mask` is 0b010, it is equivalent to slice `x[:, 5, :]`
-      and results in an output shape of :math:`(5, 7)`.
-
-    Note:
-        `new_axis_mask` and `shrink_axis_mask` are not recommended to
-        use at the same time, it might incur unexpected result.
-
-    Args:
-        input_x (Tensor): The input Tensor to be extracted from.
-        begin (tuple[int]): A tuple which represents the location where to start.
-        end (tuple[int]): A tuple or which represents the maximum location where to end.
-        strides (tuple[int]): A tuple which represents the strides is continuously added
-            before reaching the maximum location. Only int is allowed, it can be negative
-            which results in reversed slicing.
-        begin_mask (int, optional): Starting index of the slice. Default: ``0`` .
-        end_mask (int, optional): Ending index of the slice. Default: ``0`` .
-        ellipsis_mask (int, optional): An int mask, ignore slicing operation when set to 1. Default: ``0`` .
-        new_axis_mask (int, optional): An int mask for adding new dims. Default: ``0`` .
-        shrink_axis_mask (int, optional): An int mask for shrinking dims. Default: ``0`` .
-
-    Returns:
-        Tensor, return the extracts a strided slice of a Tensor based on `begin/end` index and `strides`.
-
-    Raises:
-        TypeError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or
-            `shrink_axis_mask` is not an int.
-        TypeError: If `begin`, `end` or `strides` is not tuple[int].
-        ValueError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or
-            `shrink_axis_mask` is less than 0.
-        ValueError: If `begin`, `end` and `strides` have different shapes.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
-        ...                   [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
-        >>> output = ops.strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1))
-        >>> # Take this " output = strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1)) " as an example,
-        >>> # start = [1, 0, 2] , end = [3, 1, 3], strides = [1, 1, 1], Find a segment of (start, end),
-        >>> # note that end is an open interval
-        >>> # To facilitate understanding, this operator can be divided into three steps:
-        >>> # Step 1: Calculation of the first dimension:
-        >>> # start = 1, end = 3, strides = 1, So can take 1st, 2nd rows, and then gets the final output at this time.
-        >>> # output_1th =
-        >>> # [
-        >>> #     [
-        >>> #         [3,3,3]
-        >>> #         [4,4,4]
-        >>> #     ]
-        >>> #     [
-        >>> #         [5,5,5]
-        >>> #         [6,6,6]
-        >>> #     ]
-        >>> # ]
-        >>> # Step 2: Calculation of the second dimension
-        >>> # 2nd dimension, start = 0, end = 1, strides = 1. So only 0th rows
-        >>> # can be taken, and the output at this time.
-        >>> # output_2nd =
-        >>> # [
-        >>> #     [
-        >>> #         [3,3,3]
-        >>> #     ]
-        >>> #     [
-        >>> #         [5,5,5]
-        >>> #     ]
-        >>> # ]
-        >>> # Step 3: Calculation of the third dimension
-        >>> # 3nd dimension,start = 2, end = 3, strides = 1, So can take 2th cols,
-        >>> # and you get the final output at this time.
-        >>> # output_3ed =
-        >>> # [
-        >>> #     [
-        >>> #         [3]
-        >>> #     ]
-        >>> #     [
-        >>> #         [5]
-        >>> #     ]
-        >>> # ]
-        >>> # The final output after finishing is:
-        >>> print(output)
-        [[[3.]]
-         [[5.]]]
-        >>> # another example like :
-        >>> output = strided_slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
-        >>> print(output)
-        [[[3. 3. 3.]]]
-    """
-    strided_slice_ = _get_cache_prim(P.StridedSlice)(
-        begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask)
-    return strided_slice_(input_x, begin, end, strides)
-
-
 def slice(input_x, begin, size):
     r"""
     Slices a tensor in the specified shape.
@@ -2160,20 +1736,6 @@ def slice(input_x, begin, size):
     return tensor_slice(input_x, begin, size)
 
 
-def concat(tensors, axis=0):
-    """
-    Alias for :func:`mindspore.ops.cat()`.
-
-    Tutorial Examples:
-        - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/r2.2/beginner/tensor.html#tensor-operation>`_
-        - `Vision Transformer Image Classification - Building ViT as a whole
-          <https://mindspore.cn/tutorials/application/en/r2.2/cv/vit.html#building-vit-as-a-whole>`_
-        - `Sentiment Classification Implemented by RNN - Dense
-          <https://mindspore.cn/tutorials/application/en/r2.2/nlp/sentiment_analysis.html#dense>`_
-    """
-    return cat(tensors, axis)
-
-
 def stack(tensors, axis=0):
     r"""
     Stacks a list of tensors in specified axis.
@@ -2284,45 +1846,6 @@ def unbind(input, dim=0):
     return _unstack(input)
 
 
-def expand_dims(input_x, axis):
-    """
-    Adds an additional dimension to `input_x` at the given axis, the dimension
-    of `input_x` should be greater than or equal to 1.
-
-    Note:
-        If the specified axis is a negative number, the index is counted
-        backward from the end and starts at 1.
-
-    Args:
-        input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-        axis (int): Specifies the dimension index at which to expand
-            the shape of `input_x`. The value of axis must be in the range
-            `[-input_x.ndim-1, input_x.ndim]`. Only constant value is allowed.
-
-    Returns:
-        Tensor, the shape of tensor is :math:`(1, x_1, x_2, ..., x_R)` if the
-        value of `axis` is 0. It has the same data type as `input_x`.
-
-    Raises:
-        TypeError: If `axis` is not an int.
-        ValueError: If `axis` is not in the valid range :math:`[-a.ndim-1, a.ndim]`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
-        >>> output = ops.expand_dims(input_tensor, 0)
-        >>> print(output)
-        [[[2. 2.]
-          [2. 2.]]]
-    """
-    return expand_dims_(input_x, axis)
-
-
 def unsqueeze(input, dim):
     """
     Adds an additional dimension to `input` at the given dim.
@@ -2354,7 +1877,7 @@ def unsqueeze(input, dim):
     [[[2. 2.]
       [2. 2.]]]
     """
-    return
+    return expand_dims(input, dim)
 
 
 def squeeze(input, axis=None):
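`unsqueeze` now simply forwards to `expand_dims` (itself imported from `auto_generate`), so the two remain exact synonyms (a sketch):

    from mindspore import ops

    x = ops.ones((2, 2))
    print(ops.unsqueeze(x, dim=0).shape)  # (1, 2, 2)
    print(ops.expand_dims(x, 0).shape)    # identical result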
@@ -2411,57 +1934,6 @@ def squeeze(input, axis=None):
     return squeeze_(input)
 
 
-def transpose(input, input_perm):
-    """
-    Permutes the dimensions of the input tensor according to input permutation.
-
-    For a 1-D array this has no effect, as a transposed vector is simply the same vector.
-    To convert a 1-D array into a 2D column vector please refer the class: mindspore.ops.ExpandDims.
-    For a 2-D array, this is a standard matrix transpose. For an n-D array, if axes are given,
-    their order indicates how the axes are permuted (see Examples).
-    If axes are not provided and a.shape is :math:`(i[0], i[1], ... i[n-2], i[n-1])`,
-    then a.transpose().shape is :math:`(i[n-1], i[n-2], ... i[1], i[0])`.
-
-    Note:
-        On GPU and CPU, if the value of `input_perm` is negative, its actual value is `input_perm[i] + rank(input)`.
-        Negative value of `input_perm` is not supported on Ascend.
-
-    Args:
-        input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-        input_perm (tuple[int]): The permutation to be converted. The elements in `input_perm` are composed of
-            the indexes of each dimension of `input`. The length of `input_perm` and the shape of `input` must be
-            the same. Only constant value is allowed. Must be in the range [-rank(input), rank(input)).
-
-    Returns:
-        Tensor, the type of output tensor is the same as `input` and the shape of output tensor is decided by the
-        shape of `input` and the value of `input_perm`.
-
-    Raises:
-        TypeError: If `input_perm` is not a tuple.
-        ValueError: If length of shape of `input` is not equal to length of shape of `input_perm`.
-        ValueError: If the same element exists in `input_perm`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
-        >>> input_perm = (0, 2, 1)
-        >>> output = ops.transpose(input, input_perm)
-        >>> print(output)
-        [[[ 1.  4.]
-          [ 2.  5.]
-          [ 3.  6.]]
-         [[ 7. 10.]
-          [ 8. 11.]
-          [ 9. 12.]]]
-    """
-    return transpose_(input, input_perm)
-
-
 def scatter_mul(input_x, indices, updates):
     r"""
     Using given values to update tensor value through the mul operation, along with the input indices.
@@ -2792,111 +2264,6 @@ def scatter_div(input_x, indices, updates):
|
|
|
2792
2264
|
return scatter_div_(input_x, indices, updates)
|
|
2793
2265
|
|
|
2794
2266
|
|
|
2795
|
-
def scatter_nd(indices, updates, shape):
|
|
2796
|
-
r"""
|
|
2797
|
-
Scatters a tensor into a new tensor depending on the specified indices.
|
|
2798
|
-
|
|
2799
|
-
Creates an empty tensor with the given `shape`, and set values by scattering the update tensor
|
|
2800
|
-
depending on indices. The empty tensor has rank :math:`P` and `indices` has rank :math:`Q`.
|
|
2801
|
-
|
|
2802
|
-
The `shape` is :math:`(s_0, s_1, ..., s_{P-1})`, where :math:`P \ge 1`.
|
|
2803
|
-
|
|
2804
|
-
`indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)`, where :math:`Q \ge 2` and :math:`N \le P`.
|
|
2805
|
-
|
|
2806
|
-
The last dimension of `indices` (with length :math:`N` ) indicates slices along the :math:`N` th dimension of the
|
|
2807
|
-
empty tensor.
|
|
2808
|
-
|
|
2809
|
-
`updates` is a tensor of rank :math:`Q-1+P-N`, and
|
|
2810
|
-
its shape is :math:`(i_0, i_1, ..., i_{Q-2}, s_N, s_{N+1}, ..., s_{P-1})`.
|
|
2811
|
-
|
|
2812
|
-
If `indices` contains duplicates, the duplicate `updates` are summed.
|
|
2813
|
-
|
|
2814
|
-
The following figure shows the calculation process of inserting two new value matrices into the first dimension
|
|
2815
|
-
with rank-3:
|
|
2816
|
-
|
|
2817
|
-
.. image:: ScatterNd.png
|
|
2818
|
-
|
|
2819
|
-
Args:
|
|
2820
|
-
indices (Tensor): Define the index of scattering in the new tensor with int32 or int64 data type.
|
|
2821
|
-
The rank of `indices` must be at least 2 and `indices.shape[-1] <= len(shape)`.
|
|
2822
|
-
updates (Tensor): Define the source Tensor to be updated.
|
|
2823
|
-
It has shape `indices.shape[:-1] + shape[indices.shape[-1]:]`.
|
|
2824
|
-
shape (tuple[int]): Define the shape of the output tensor, has the same data type as indices.
|
|
2825
|
-
`shape` can not be empty, and the elements in `shape` must be greater than or equal to 1.
|
|
2826
|
-
|
|
2827
|
-
Returns:
|
|
2828
|
-
Tensor, the new tensor, has the same type as `update` and the same shape as `shape`.
|
|
2829
|
-
|
|
2830
|
-
Raises:
|
|
2831
|
-
TypeError: If `shape` is not a tuple.
|
|
2832
|
-
ValueError: If any element of `shape` is less than 1.
|
|
2833
|
-
|
|
2834
|
-
Supported Platforms:
|
|
2835
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
2836
|
-
|
|
2837
|
-
Examples:
|
|
2838
|
-
>>> import mindspore
|
|
2839
|
-
>>> import numpy as np
|
|
2840
|
-
>>> from mindspore import Tensor, ops
|
|
2841
|
-
>>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
|
|
2842
|
-
>>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2],
|
|
2843
|
-
... [3, 3, 3, 3], [4, 4, 4, 4]],
|
|
2844
|
-
... [[1, 1, 1, 1], [2, 2, 2, 2],
|
|
2845
|
-
... [3, 3, 3, 3], [4, 4, 4, 4]]]), mindspore.float32)
|
|
2846
|
-
>>> shape = (4, 4, 4)
|
|
2847
|
-
>>> output = ops.scatter_nd(indices, updates, shape)
|
|
2848
|
-
>>> print(output)
|
|
2849
|
-
[[[1. 1. 1. 1.]
|
|
2850
|
-
[2. 2. 2. 2.]
|
|
2851
|
-
[3. 3. 3. 3.]
|
|
2852
|
-
[4. 4. 4. 4.]]
|
|
2853
|
-
[[0. 0. 0. 0.]
|
|
2854
|
-
[0. 0. 0. 0.]
|
|
2855
|
-
[0. 0. 0. 0.]
|
|
2856
|
-
[0. 0. 0. 0.]]
|
|
2857
|
-
[[1. 1. 1. 1.]
|
|
2858
|
-
[2. 2. 2. 2.]
|
|
2859
|
-
[3. 3. 3. 3.]
|
|
2860
|
-
[4. 4. 4. 4.]]
|
|
2861
|
-
[[0. 0. 0. 0.]
|
|
2862
|
-
[0. 0. 0. 0.]
|
|
2863
|
-
[0. 0. 0. 0.]
|
|
2864
|
-
[0. 0. 0. 0.]]]
|
|
2865
|
-
>>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
|
|
2866
|
-
>>> updates = Tensor(np.array([3.2, 1.1]), mindspore.float32)
|
|
2867
|
-
>>> shape = (3, 3)
|
|
2868
|
-
>>> output = ops.scatter_nd(indices, updates, shape)
|
|
2869
|
-
>>> # In order to facilitate understanding, explain the operator pseudo-operation process step by step:
|
|
2870
|
-
>>> # Step 1: Generate an empty Tensor of the specified shape according to the shape
|
|
2871
|
-
>>> # [
|
|
2872
|
-
>>> # [0. 0. 0.]
|
|
2873
|
-
>>> # [0. 0. 0.]
|
|
2874
|
-
>>> # [0. 0. 0.]
|
|
2875
|
-
>>> # ]
|
|
2876
|
-
>>> # Step 2: Modify the data at the specified locations according to the indices
|
|
2877
|
-
>>> # 0th row of indices is [0, 1], 0th row of updates is 3.2.
|
|
2878
|
-
>>> # which means the element of the empty tensor at row 0, col 1 is set to 3.2
|
|
2879
|
-
>>> # [
|
|
2880
|
-
>>> # [0. 3.2. 0.]
|
|
2881
|
-
>>> # [0. 0. 0.]
|
|
2882
|
-
>>> # [0. 0. 0.]
|
|
2883
|
-
>>> # ]
|
|
2884
|
-
>>> # 1st row of indices is [1, 1], 1st row of updates is 1.1.
|
|
2885
|
-
>>> # which means the element of the empty tensor at row 1, col 1 is set to 1.1
|
|
2886
|
-
>>> # [
|
|
2887
|
-
>>> # [0. 3.2. 0.]
|
|
2888
|
-
>>> # [0. 1.1 0.]
|
|
2889
|
-
>>> # [0. 0. 0.]
|
|
2890
|
-
>>> # ]
|
|
2891
|
-
>>> # The final result is as follows:
|
|
2892
|
-
>>> print(output)
|
|
2893
|
-
[[0. 3.2 0.]
|
|
2894
|
-
[0. 1.1 0.]
|
|
2895
|
-
[0. 0. 0.]]
|
|
2896
|
-
"""
|
|
2897
|
-
return scatter_nd_(indices, updates, shape)
|
|
2898
|
-
|
|
2899
|
-
|
|
2900
2267
|
def scatter_update(input_x, indices, updates):
|
|
2901
2268
|
r"""
|
|
2902
2269
|
Updates tensor values by using input indices and value.
|
|
@@ -2946,8 +2313,7 @@ def scatter_update(input_x, indices, updates):
|
|
|
2946
2313
|
[[2. 1.2 1.]
|
|
2947
2314
|
[3. 1.2 1.]]
|
|
2948
2315
|
"""
|
|
2949
|
-
|
|
2950
|
-
return scatter_update_inner(input_x, indices, updates)
|
|
2316
|
+
return scatter_update_(input_x, indices, updates)
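A minimal doctest for scatter_update as wired above (assuming the exported ops.scatter_update wrapper; the first argument must be a Parameter, since the update happens in place). The expected output mirrors the context lines of this hunk:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Parameter, Tensor, ops
>>> input_x = Parameter(Tensor(np.ones((2, 3)), mindspore.float32), name="input_x")
>>> indices = Tensor(np.array([0, 1]), mindspore.int32)
>>> updates = Tensor(np.array([[2.0, 1.2, 1.0], [3.0, 1.2, 1.0]]), mindspore.float32)
>>> print(ops.scatter_update(input_x, indices, updates))
[[2. 1.2 1.]
 [3. 1.2 1.]]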
|
|
2951
2317
|
|
|
2952
2318
|
|
|
2953
2319
|
def scatter_nd_add(input_x, indices, updates, use_locking=False):
|
|
@@ -3414,8 +2780,8 @@ def sort(input_x, axis=-1, descending=False):
|
|
|
3414
2780
|
are sorted in descending order, or else sorted in ascending order. Default: ``False`` .
|
|
3415
2781
|
|
|
3416
2782
|
.. warning::
|
|
3417
|
-
Currently, the data types of
|
|
3418
|
-
If use
|
|
2783
|
+
Currently, the data types of float16, uint8, int8, int16, int32, int64 are well supported.
|
|
2784
|
+
If use float32, it may cause loss of accuracy.
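A short doctest consistent with the restored warning, staying on one of the well-supported dtypes (float16 here); the printed formatting is indicative only:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3]]), mindspore.float16)
>>> values, indices = ops.sort(x)
>>> print(values)
[[1. 2. 8.]
 [3. 5. 9.]]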
|
|
3419
2785
|
|
|
3420
2786
|
Returns:
|
|
3421
2787
|
|
|
@@ -3477,121 +2843,12 @@ def argsort(input, axis=-1, descending=False):
|
|
|
3477
2843
|
>>> sort = ops.argsort(x)
|
|
3478
2844
|
>>> print(sort)
|
|
3479
2845
|
[[2 1 0]
|
|
3480
|
-
[2 0 1]
|
|
3481
|
-
[0 1 2]]
|
|
3482
|
-
"""
|
|
3483
|
-
_sort = _get_cache_prim(P.Sort)(axis, descending)
|
|
3484
|
-
_, arg_sort = _sort(input)
|
|
3485
|
-
return arg_sort
|
|
3486
|
-
|
|
3487
|
-
|
|
3488
|
-
def gather(input_params, input_indices, axis, batch_dims=0):
|
|
3489
|
-
r"""
|
|
3490
|
-
Returns the slice of the input tensor corresponding to the elements of `input_indices` on the specified `axis`.
|
|
3491
|
-
|
|
3492
|
-
The following figure shows the calculation process of Gather commonly:
|
|
3493
|
-
|
|
3494
|
-
.. image:: Gather.png
|
|
3495
|
-
|
|
3496
|
-
where params represents the input `input_params`, and indices represents the index to be sliced `input_indices`.
|
|
3497
|
-
|
|
3498
|
-
.. note::
|
|
3499
|
-
1. The value of input_indices must be in the range of `[0, input_params.shape[axis])`.
|
|
3500
|
-
On CPU and GPU, an error is raised if an out-of-bound index is found. On Ascend, the results may be
|
|
3501
|
-
undefined.
|
|
3502
|
-
|
|
3503
|
-
2. The data type of input_params cannot be
|
|
3504
|
-
`bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ on Ascend
|
|
3505
|
-
platform currently.
|
|
3506
|
-
|
|
3507
|
-
Args:
|
|
3508
|
-
input_params (Tensor): The original Tensor. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
|
|
3509
|
-
input_indices (Tensor): Index tensor to be sliced, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
|
|
3510
|
-
Specifies the indices of elements of the original Tensor. The data type can be int32 or int64.
|
|
3511
|
-
axis (Union(int, Tensor[int])): Specifies the dimension index to gather indices.
|
|
3512
|
-
It must be greater than or equal to `batch_dims`.
|
|
3513
|
-
When `axis` is a Tensor, the size must be 1.
|
|
3514
|
-
batch_dims (int): Specifies the number of batch dimensions. It must be less than or equal to the rank
|
|
3515
|
-
of `input_indices`. Default: ``0`` .
|
|
3516
|
-
|
|
3517
|
-
Returns:
|
|
3518
|
-
Tensor, the shape of tensor is
|
|
3519
|
-
:math:`input\_params.shape[:axis] + input\_indices.shape[batch\_dims:] + input\_params.shape[axis + 1:]`.
|
|
3520
|
-
|
|
3521
|
-
Raises:
|
|
3522
|
-
TypeError: If `axis` is not an int or Tensor.
|
|
3523
|
-
ValueError: If `axis` is a Tensor and its size is not 1.
|
|
3524
|
-
TypeError: If `input_params` is not a tensor.
|
|
3525
|
-
TypeError: If `input_indices` is not a tensor of type int.
|
|
3526
|
-
RuntimeError: If `input_indices` is out of range `[0, input_params.shape[axis])` on CPU or GPU.
|
|
3527
|
-
|
|
3528
|
-
Supported Platforms:
|
|
3529
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
3530
|
-
|
|
3531
|
-
Examples:
|
|
3532
|
-
>>> import mindspore
|
|
3533
|
-
>>> import numpy as np
|
|
3534
|
-
>>> from mindspore import Tensor, ops
|
|
3535
|
-
>>> # case1: input_indices is a Tensor with shape (5, ).
|
|
3536
|
-
>>> input_params = Tensor(np.array([1, 2, 3, 4, 5, 6, 7]), mindspore.float32)
|
|
3537
|
-
>>> input_indices = Tensor(np.array([0, 2, 4, 2, 6]), mindspore.int32)
|
|
3538
|
-
>>> axis = 0
|
|
3539
|
-
>>> output = ops.gather(input_params, input_indices, axis)
|
|
3540
|
-
>>> print(output)
|
|
3541
|
-
[1. 3. 5. 3. 7.]
|
|
3542
|
-
>>> # case2: input_indices is a Tensor with shape (2, 2). When the input_params has one dimension,
|
|
3543
|
-
>>> # the output shape is equal to the input_indices shape.
|
|
3544
|
-
>>> input_indices = Tensor(np.array([[0, 2], [2, 6]]), mindspore.int32)
|
|
3545
|
-
>>> axis = 0
|
|
3546
|
-
>>> output = ops.gather(input_params, input_indices, axis)
|
|
3547
|
-
>>> print(output)
|
|
3548
|
-
[[1. 3.]
|
|
3549
|
-
[3. 7.]]
|
|
3550
|
-
>>> # case3: input_indices is a Tensor with shape (2, ) and
|
|
3551
|
-
>>> # input_params is a Tensor with shape (3, 4) and axis is 0.
|
|
3552
|
-
>>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
|
|
3553
|
-
>>> input_indices = Tensor(np.array([0, 2]), mindspore.int32)
|
|
3554
|
-
>>> axis = 0
|
|
3555
|
-
>>> output = ops.gather(input_params, input_indices, axis)
|
|
3556
|
-
>>> print(output)
|
|
3557
|
-
[[ 1. 2. 3. 4.]
|
|
3558
|
-
[ 9. 10. 11. 12.]]
|
|
3559
|
-
>>> # case4: input_indices is a Tensor with shape (2, ) and
|
|
3560
|
-
>>> # input_params is a Tensor with shape (3, 4) and axis is 1, batch_dims is 1.
|
|
3561
|
-
>>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
|
|
3562
|
-
>>> input_indices = Tensor(np.array([0, 2, 1]), mindspore.int32)
|
|
3563
|
-
>>> axis = 1
|
|
3564
|
-
>>> batch_dims = 1
|
|
3565
|
-
>>> output = ops.gather(input_params, input_indices, axis, batch_dims)
|
|
3566
|
-
>>> print(output)
|
|
3567
|
-
[ 1. 7. 10.]
|
|
3568
|
-
"""
|
|
3569
|
-
_gather = _get_cache_prim(P.Gather)(batch_dims)
|
|
3570
|
-
return _gather(input_params, input_indices, axis)
|
|
3571
|
-
|
|
3572
|
-
|
|
3573
|
-
def gather_d(x, dim, index):
|
|
3574
|
-
"""
|
|
3575
|
-
Gathers elements along an axis specified by dim.
|
|
3576
|
-
|
|
3577
|
-
Refer to :func:`mindspore.ops.gather_elements` for more detail.
|
|
3578
|
-
|
|
3579
|
-
Supported Platforms:
|
|
3580
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
3581
|
-
|
|
3582
|
-
Examples:
|
|
3583
|
-
>>> import mindspore
|
|
3584
|
-
>>> import numpy as np
|
|
3585
|
-
>>> from mindspore import Tensor, ops
|
|
3586
|
-
>>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
|
|
3587
|
-
>>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
|
|
3588
|
-
>>> dim = 1
|
|
3589
|
-
>>> output = ops.gather_d(x, dim, index)
|
|
3590
|
-
>>> print(output)
|
|
3591
|
-
[[1 1]
|
|
3592
|
-
[4 3]]
|
|
2846
|
+
[2 0 1]
|
|
2847
|
+
[0 1 2]]
|
|
3593
2848
|
"""
|
|
3594
|
-
|
|
2849
|
+
_sort = _get_cache_prim(P.Sort)(axis, descending)
|
|
2850
|
+
_, arg_sort = _sort(input)
|
|
2851
|
+
return arg_sort
|
|
3595
2852
|
|
|
3596
2853
|
|
|
3597
2854
|
def gather_elements(input, dim, index):
|
|
@@ -3608,26 +2865,29 @@ def gather_elements(input, dim, index):
|
|
|
3608
2865
|
|
|
3609
2866
|
output[i][j][k] = x[i][j][index[i][j][k]] # if dim == 2
|
|
3610
2867
|
|
|
3611
|
-
`input` and `index` have the same length of dimensions, and
|
|
3612
|
-
|
|
3613
|
-
|
|
3614
|
-
|
|
2868
|
+
`input` and `index` have the same length of dimensions, and `index.shape[axis] <= input.shape[axis]`
|
|
2869
|
+
where axis goes through all dimensions of `input` except `dim`.
|
|
2870
|
+
|
|
2871
|
+
.. warning::
|
|
2872
|
+
On Ascend, the behavior is unpredictable in the following cases:
|
|
2873
|
+
|
|
2874
|
+
- the value of `index` is not in the range `[-input.shape[dim], input.shape[dim])` in forward;
|
|
2875
|
+
- the value of `index` is not in the range `[0, input.shape[dim])` in backward.
|
|
3615
2876
|
|
|
3616
2877
|
Args:
|
|
3617
2878
|
input (Tensor): The input tensor.
|
|
3618
|
-
dim (int): The axis along which to index. It must be int32 or int64. The value range is [-input.ndim,
|
|
3619
|
-
input.ndim)
|
|
2879
|
+
dim (int): The axis along which to index. It must be int32 or int64. The value range is `[-input.ndim,
|
|
2880
|
+
input.ndim)`.
|
|
3620
2881
|
index (Tensor): The indices of elements to gather. It can be one of the following data types:
|
|
3621
|
-
int32, int64. The value range of each index element is [-input.shape(dim), input.shape(dim))
|
|
2882
|
+
int32, int64. The value range of each index element is `[-input.shape(dim), input.shape(dim))`.
|
|
3622
2883
|
|
|
3623
2884
|
Returns:
|
|
3624
|
-
Tensor, has the same shape as index tensor
|
|
3625
|
-
and has the same data type with `input`.
|
|
2885
|
+
Tensor, has the same shape as `index` tensor and has the same data type with `input`.
|
|
3626
2886
|
|
|
3627
2887
|
Raises:
|
|
3628
2888
|
TypeError: If dtype of `dim` or `index` is neither int32 nor int64.
|
|
3629
2889
|
ValueError: If length of shape of `input` is not equal to length of shape of `index`.
|
|
3630
|
-
ValueError: If the size of the dimension except `dim` is
|
|
2890
|
+
ValueError: If the size of any dimension other than `dim` in `input` is less than the corresponding size in `index`.
|
|
3631
2891
|
ValueError: If the value of `dim` is not in the expected range.
|
|
3632
2892
|
|
|
3633
2893
|
Supported Platforms:
|
|
@@ -3648,48 +2908,6 @@ def gather_elements(input, dim, index):
|
|
|
3648
2908
|
return gather_d_(input, dim, index)
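A quick doctest for gather_elements, reusing the numbers from the gather_d example deleted above (gather_d simply forwarded to this primitive):

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
>>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
>>> print(ops.gather_elements(x, 1, index))
[[1 1]
 [4 3]]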
|
|
3649
2909
|
|
|
3650
2910
|
|
|
3651
|
-
def gather_nd(input_x, indices):
|
|
3652
|
-
r"""
|
|
3653
|
-
Gathers slices from a tensor by indices.
|
|
3654
|
-
|
|
3655
|
-
Using given indices to gather slices from a tensor with a specified shape.
|
|
3656
|
-
|
|
3657
|
-
`indices` is a K-dimensional integer tensor. Suppose it as a (K-1)-dimensional tensor, where each element of it
|
|
3658
|
-
defines a slice of `input_x`:
|
|
3659
|
-
|
|
3660
|
-
.. math::
|
|
3661
|
-
output[(i_0, ..., i_{K-2})] = input\_x[indices[(i_0, ..., i_{K-2})]]
|
|
3662
|
-
|
|
3663
|
-
The last dimension of `indices` can not exceed the rank of `input_x`:
|
|
3664
|
-
:math:`indices.shape[-1] <= input\_x.rank`.
|
|
3665
|
-
|
|
3666
|
-
Args:
|
|
3667
|
-
input_x (Tensor): The target tensor to gather values.
|
|
3668
|
-
indices (Tensor): The index tensor, with int32 or int64 data type.
|
|
3669
|
-
|
|
3670
|
-
Returns:
|
|
3671
|
-
Tensor, has the same type as `input_x` and the shape is
|
|
3672
|
-
:math:`indices\_shape[:-1] + input\_x\_shape[indices\_shape[-1]:]`.
|
|
3673
|
-
|
|
3674
|
-
Raises:
|
|
3675
|
-
ValueError: If length of shape of `input_x` is less than the last dimension of `indices`.
|
|
3676
|
-
|
|
3677
|
-
Supported Platforms:
|
|
3678
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
3679
|
-
|
|
3680
|
-
Examples:
|
|
3681
|
-
>>> import mindspore
|
|
3682
|
-
>>> import numpy as np
|
|
3683
|
-
>>> from mindspore import Tensor, ops
|
|
3684
|
-
>>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
|
|
3685
|
-
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
|
|
3686
|
-
>>> output = ops.gather_nd(input_x, indices)
|
|
3687
|
-
>>> print(output)
|
|
3688
|
-
[-0.1 0.5]
|
|
3689
|
-
"""
|
|
3690
|
-
return gather_nd_(input_x, indices)
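The indexing rule the removed docstring describes can be sanity-checked with a plain NumPy sketch (illustrative only, not MindSpore code):

>>> import numpy as np
>>> x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]], dtype=np.float32)
>>> idx = np.array([[0, 0], [1, 1]])
>>> # each row of idx addresses one element (or slice) of x: out[i] = x[tuple(idx[i])]
>>> print(np.array([x[tuple(i)] for i in idx]))
[-0.1  0.5]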
|
|
3691
|
-
|
|
3692
|
-
|
|
3693
2911
|
def tensor_scatter_add(input_x, indices, updates):
|
|
3694
2912
|
r"""
|
|
3695
2913
|
Creates a new tensor by adding the values from the positions in `input_x` indicated by
|
|
@@ -3700,7 +2918,7 @@ def tensor_scatter_add(input_x, indices, updates):
|
|
|
3700
2918
|
|
|
3701
2919
|
The last axis of `indices` is the depth of each index vector. For each index vector,
|
|
3702
2920
|
there must be a corresponding value in `updates`. The shape of `updates` should be
|
|
3703
|
-
equal to the shape of `input_x[indices]`. For more details, see
|
|
2921
|
+
equal to the shape of `input_x[indices]`. For more details, see Examples.
|
|
3704
2922
|
|
|
3705
2923
|
.. math::
|
|
3706
2924
|
output\left [indices \right ] = input\_x + update
|
|
@@ -3758,7 +2976,7 @@ def tensor_scatter_sub(input_x, indices, updates):
|
|
|
3758
2976
|
|
|
3759
2977
|
The last axis of `indices` is the depth of each index vector. For each index vector,
|
|
3760
2978
|
there must be a corresponding value in `updates`. The shape of `updates` should be
|
|
3761
|
-
equal to the shape of `input_x[indices]`. For more details, see
|
|
2979
|
+
equal to the shape of `input_x[indices]`. For more details, see Examples.
|
|
3762
2980
|
|
|
3763
2981
|
.. math::
|
|
3764
2982
|
output[indices] = input\_x - update
|
|
@@ -3943,14 +3161,12 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
|
|
|
3943
3161
|
nondeterministic.
|
|
3944
3162
|
- On Ascend, the reduction only supports being set to "none" for now.
|
|
3945
3163
|
- On Ascend, the data type of `input_x` must be float16 or float32.
|
|
3164
|
+
- This is an experimental API that is subject to change or deletion.
|
|
3946
3165
|
|
|
3947
3166
|
Note:
|
|
3948
3167
|
If some values of the `indices` exceed the upper or lower bounds of the index of `input_x`, instead of raising
|
|
3949
3168
|
an index error, the corresponding `updates` will not be updated to `input_x`.
|
|
3950
3169
|
|
|
3951
|
-
.. warning::
|
|
3952
|
-
This is an experimental API that is subject to change or deletion.
|
|
3953
|
-
|
|
3954
3170
|
Args:
|
|
3955
3171
|
input_x (Tensor): The target tensor. The rank must be at least 1.
|
|
3956
3172
|
indices (Tensor): The index of `input_x` to do scatter operation whose data type must be mindspore.int32 or
|
|
@@ -4074,7 +3290,7 @@ def _get_slice_scatter_const(x_shape, axis, start, end, step):
|
|
|
4074
3290
|
start = start if start is not None else 0
|
|
4075
3291
|
start = start if start >= 0 else start + x_rank
|
|
4076
3292
|
end = end if end is not None else x_shape[axis]
|
|
4077
|
-
end = end if end >= 0 else end +
|
|
3293
|
+
end = end if end >= 0 else end + x_shape[axis]
|
|
4078
3294
|
end = end if end < x_shape[axis] else x_shape[axis]
|
|
4079
3295
|
index = list(builtins.range(start, end, step))
|
|
4080
3296
|
return x_rank, index, axis
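The restored expression normalizes a negative `end` by the length of the sliced axis, mirroring Python slice semantics; a tiny worked sketch of the chain above (plain Python, hypothetical values):

>>> x_shape, axis, step = (4, 6), 1, 1
>>> start, end = 0, -1
>>> end = end if end >= 0 else end + x_shape[axis]    # -1 -> 5
>>> end = end if end < x_shape[axis] else x_shape[axis]
>>> print(list(range(start, end, step)))
[0, 1, 2, 3, 4]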
|
|
@@ -4121,6 +3337,8 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
|
|
|
4121
3337
|
[1. 0. 1. 0. 1. 0.]
|
|
4122
3338
|
[1. 0. 1. 0. 1. 0.]]
|
|
4123
3339
|
"""
|
|
3340
|
+
_check_is_tensor("input", input, "slice_scatter")
|
|
3341
|
+
_check_is_tensor("src", src, "slice_scatter")
|
|
4124
3342
|
input_shape = input.shape
|
|
4125
3343
|
input_rank, index, axis = _get_slice_scatter_const(input_shape, axis, start, end, step)
|
|
4126
3344
|
|
|
@@ -4136,6 +3354,8 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
|
|
|
4136
3354
|
for _ in builtins.range(input_rank - axis - 1):
|
|
4137
3355
|
index_tensor = index_tensor.expand_dims(-1)
|
|
4138
3356
|
index_tensor = index_tensor.broadcast_to(src.shape)
|
|
3357
|
+
if index_tensor.dtype not in mstype.int_type:
|
|
3358
|
+
index_tensor = index_tensor.astype(mstype.int64)
|
|
4139
3359
|
return tensor_scatter_elements(input, axis=axis, indices=index_tensor, updates=src)
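With the new Tensor checks and the int64 index cast in place, a minimal doctest consistent with the expected rows shown in the hunk above (assuming the ops.zeros/ops.ones helpers):

>>> import mindspore
>>> from mindspore import ops
>>> a = ops.zeros((3, 6), mindspore.float32)
>>> src = ops.ones((3, 3), mindspore.float32)
>>> print(ops.slice_scatter(a, src, axis=1, start=0, end=6, step=2))
[[1. 0. 1. 0. 1. 0.]
 [1. 0. 1. 0. 1. 0.]
 [1. 0. 1. 0. 1. 0.]]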
|
|
4140
3360
|
|
|
4141
3361
|
|
|
@@ -4174,10 +3394,12 @@ def select_scatter(input, src, axis, index):
|
|
|
4174
3394
|
[1. 1. 1.]
|
|
4175
3395
|
[0. 0. 0.]]]
|
|
4176
3396
|
"""
|
|
3397
|
+
_check_is_tensor("input", input, "select_scatter")
|
|
3398
|
+
_check_is_tensor("src", src, "select_scatter")
|
|
4177
3399
|
src = src.expand_dims(axis=axis)
|
|
4178
3400
|
x_rank = input.ndim
|
|
4179
3401
|
axis = axis if axis >= 0 else axis + x_rank
|
|
4180
|
-
index = index if index >= 0 else index +
|
|
3402
|
+
index = index if index >= 0 else index + input.shape[axis]
|
|
4181
3403
|
return slice_scatter(input, src, axis, start=index, end=index + 1)
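The restored line makes a negative `index` count from the end of the selected axis; a small doctest of that behavior (a sketch, with index=-1 resolving to the last position):

>>> import mindspore
>>> from mindspore import ops
>>> a = ops.zeros((2, 3), mindspore.float32)
>>> src = ops.ones(2, mindspore.float32)
>>> print(ops.select_scatter(a, src, axis=1, index=-1))
[[0. 0. 1.]
 [0. 0. 1.]]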
|
|
4182
3404
|
|
|
4183
3405
|
|
|
@@ -4228,6 +3450,7 @@ def space_to_batch_nd(input_x, block_size, paddings):
|
|
|
4228
3450
|
|
|
4229
3451
|
Examples:
|
|
4230
3452
|
>>> import numpy as np
|
|
3453
|
+
>>> import mindspore
|
|
4231
3454
|
>>> from mindspore import Tensor, ops
|
|
4232
3455
|
>>> block_size = [2, 2]
|
|
4233
3456
|
>>> paddings = [[0, 0], [0, 0]]
|
|
@@ -4302,49 +3525,11 @@ def batch_to_space_nd(input_x, block_shape, crops):
|
|
|
4302
3525
|
[3. 4.]]]]
|
|
4303
3526
|
"""
|
|
4304
3527
|
if isinstance(block_shape, Tensor):
|
|
4305
|
-
|
|
4306
|
-
return _batch_to_space_ndv2(input_x, block_shape, crops)
|
|
3528
|
+
return batch_to_space_nd_v2_(input_x, block_shape, crops)
|
|
4307
3529
|
_batch_to_space_nd = _get_cache_prim(P.BatchToSpaceND)(block_shape, crops)
|
|
4308
3530
|
return _batch_to_space_nd(input_x)
|
|
4309
3531
|
|
|
4310
3532
|
|
|
4311
|
-
def nonzero(input):
|
|
4312
|
-
"""
|
|
4313
|
-
Return a Tensor of the positions of all non-zero values.
|
|
4314
|
-
|
|
4315
|
-
Args:
|
|
4316
|
-
input (Tensor): The input Tensor, its rank should be greater than or equal to 1.
|
|
4317
|
-
|
|
4318
|
-
Returns:
|
|
4319
|
-
Tensor, a 2-D Tensor whose data type is int64, containing the positions of all non-zero values of the input.
|
|
4320
|
-
|
|
4321
|
-
Raises:
|
|
4322
|
-
TypeError: If `input` is not Tensor.
|
|
4323
|
-
ValueError: If dim of `input` equals 0.
|
|
4324
|
-
|
|
4325
|
-
Supported Platforms:
|
|
4326
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
4327
|
-
|
|
4328
|
-
Examples:
|
|
4329
|
-
>>> import mindspore
|
|
4330
|
-
>>> import numpy as np
|
|
4331
|
-
>>> from mindspore import Tensor
|
|
4332
|
-
>>> import mindspore.ops as ops
|
|
4333
|
-
>>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
|
|
4334
|
-
>>> output = ops.nonzero(x)
|
|
4335
|
-
>>> print(output)
|
|
4336
|
-
[[0 0 0]
|
|
4337
|
-
[0 1 0]]
|
|
4338
|
-
>>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
|
|
4339
|
-
>>> output = ops.nonzero(x)
|
|
4340
|
-
>>> print(output)
|
|
4341
|
-
[[0]
|
|
4342
|
-
[2]
|
|
4343
|
-
[4]]
|
|
4344
|
-
"""
|
|
4345
|
-
return nonzero_(input)
|
|
4346
|
-
|
|
4347
|
-
|
|
4348
3533
|
def matrix_diag(x, k=0, num_rows=-1, num_cols=-1, padding_value=0, align="RIGHT_LEFT"):
|
|
4349
3534
|
r"""
|
|
4350
3535
|
Returns a Tensor with the contents in `x` as k[0]-th to k[1]-th diagonals of a matrix, with everything else padded
|
|
@@ -4604,18 +3789,19 @@ def meshgrid(*inputs, indexing='xy'):
|
|
|
4604
3789
|
|
|
4605
3790
|
Keyword Args:
|
|
4606
3791
|
indexing (str, optional): Cartesian ('xy', default) or
|
|
4607
|
-
matrix ('ij') indexing of output. Valid options: 'xy' or 'ij'
|
|
3792
|
+
matrix ('ij') indexing of output. Valid options: ``'xy'`` or ``'ij'``. In the 2-D case with
|
|
4608
3793
|
inputs of length `M` and `N`, the outputs are of shape :math:`(N, M)`
|
|
4609
|
-
for 'xy' indexing and :math:`(M, N)` for 'ij' indexing. In the 3-D
|
|
3794
|
+
for ``'xy'`` indexing and :math:`(M, N)` for ``'ij'`` indexing. In the 3-D
|
|
4610
3795
|
case with inputs of length `M`, `N` and `P`, outputs are of shape
|
|
4611
|
-
:math:`(N, M, P)` for 'xy' indexing and :math:`(M, N, P)` for 'ij' indexing.
|
|
3796
|
+
:math:`(N, M, P)` for ``'xy'`` indexing and :math:`(M, N, P)` for ``'ij'`` indexing.
|
|
3797
|
+
Default: ``'xy'`` .
|
|
4612
3798
|
|
|
4613
3799
|
Returns:
|
|
4614
3800
|
Tensors, a Tuple of N N-D Tensor objects. The data type is the same with the Inputs.
|
|
4615
3801
|
|
|
4616
3802
|
Raises:
|
|
4617
3803
|
TypeError: If `indexing` is not a str or `inputs` is not a tuple.
|
|
4618
|
-
ValueError: If `indexing` is neither 'xy' nor 'ij'
|
|
3804
|
+
ValueError: If `indexing` is neither ``'xy'`` nor ``'ij'``.
|
|
4619
3805
|
|
|
4620
3806
|
Supported Platforms:
|
|
4621
3807
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -4722,87 +3908,6 @@ def affine_grid(theta, size, align_corners=False):
|
|
|
4722
3908
|
return affine_grid_op(theta, size)
|
|
4723
3909
|
|
|
4724
3910
|
|
|
4725
|
-
def broadcast_to(input, shape): # pylint: disable=redefined-outer-name
|
|
4726
|
-
"""
|
|
4727
|
-
Broadcasts input tensor to a given shape. The dim of input shape must be smaller
|
|
4728
|
-
than or equal to that of target shape. Suppose input shape is :math:`(x_1, x_2, ..., x_m)`,
|
|
4729
|
-
target shape is :math:`(*, y_1, y_2, ..., y_m)`, where :math:`*` means any additional dimension.
|
|
4730
|
-
The broadcast rules are as follows:
|
|
4731
|
-
|
|
4732
|
-
Compare the value of :math:`x_m` and :math:`y_m`, :math:`x_{m-1}` and :math:`y_{m-1}`, ...,
|
|
4733
|
-
:math:`x_1` and :math:`y_1` consecutively and
|
|
4734
|
-
decide whether these shapes are broadcastable and what the broadcast result is.
|
|
4735
|
-
|
|
4736
|
-
If the value pairs at a specific dim are equal, then that value goes right into that dim of output shape.
|
|
4737
|
-
With an input shape :math:`(2, 3)`, target shape :math:`(2, 3)` , the inferred output shape is :math:`(2, 3)`.
|
|
4738
|
-
|
|
4739
|
-
If the value pairs are unequal, there are three cases:
|
|
4740
|
-
|
|
4741
|
-
Case 1: If the value of the target shape in the dimension is -1, the value of the
|
|
4742
|
-
output shape in the dimension is the value of the corresponding input shape in the dimension.
|
|
4743
|
-
With an input shape :math:`(3, 3)`, target
|
|
4744
|
-
shape :math:`(-1, 3)`, the output shape is :math:`(3, 3)`.
|
|
4745
|
-
|
|
4746
|
-
Case 2: If the value of target shape in the dimension is not -1, but the corresponding
|
|
4747
|
-
value in the input shape is 1, then the corresponding value of the output shape
|
|
4748
|
-
is that of the target shape. With an input shape :math:`(1, 3)`, target
|
|
4749
|
-
shape :math:`(8, 3)`, the output shape is :math:`(8, 3)`.
|
|
4750
|
-
|
|
4751
|
-
Case 3: If the corresponding values of the two shapes do not satisfy the above cases,
|
|
4752
|
-
it means that broadcasting from the input shape to the target shape is not supported.
|
|
4753
|
-
|
|
4754
|
-
So far we got the last m dims of the outshape, now focus on the first :math:`*` dims, there are
|
|
4755
|
-
two cases:
|
|
4756
|
-
|
|
4757
|
-
If the first :math:`*` dims of output shape does not have -1 in it, then fill the input
|
|
4758
|
-
shape with ones until their length are the same, and then refer to
|
|
4759
|
-
Case 2 mentioned above to calculate the output shape. With target shape :math:`(3, 1, 4, 1, 5, 9)`,
|
|
4760
|
-
input shape :math:`(1, 5, 9)`, the filled input shape will be :math:`(1, 1, 1, 1, 5, 9)` and thus the
|
|
4761
|
-
output shape is :math:`(3, 1, 4, 1, 5, 9)`.
|
|
4762
|
-
|
|
4763
|
-
If the first :math:`*` dims of output shape have -1 in it, it implies this -1 is corresponding to
|
|
4764
|
-
a non-existing dim so they're not broadcastable. With target shape :math:`(3, -1, 4, 1, 5, 9)`,
|
|
4765
|
-
input shape :math:`(1, 5, 9)`, instead of operating the dim-filling process first, it raises errors directly.
|
|
4766
|
-
|
|
4767
|
-
Args:
|
|
4768
|
-
input (Tensor): The input Tensor.
|
|
4769
|
-
shape (tuple): The target shape to broadcast. Can be fully specified, or have -1 in one position
|
|
4770
|
-
where it will be substituted by the input tensor's shape in that position, see example.
|
|
4771
|
-
|
|
4772
|
-
Returns:
|
|
4773
|
-
Tensor, with the given `shape` and the same data type as `input`.
|
|
4774
|
-
|
|
4775
|
-
Raises:
|
|
4776
|
-
TypeError: If `shape` is not a tuple.
|
|
4777
|
-
ValueError: If the target and input shapes are incompatible, or if a - 1 in the target shape is in an invalid
|
|
4778
|
-
location.
|
|
4779
|
-
|
|
4780
|
-
Supported Platforms:
|
|
4781
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
4782
|
-
|
|
4783
|
-
Examples:
|
|
4784
|
-
>>> import numpy as np
|
|
4785
|
-
>>> from mindspore import Tensor, ops
|
|
4786
|
-
>>> shape = (2, 3)
|
|
4787
|
-
>>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
|
|
4788
|
-
>>> output = ops.broadcast_to(x, shape)
|
|
4789
|
-
>>> print(output)
|
|
4790
|
-
[[1. 2. 3.]
|
|
4791
|
-
[1. 2. 3.]]
|
|
4792
|
-
>>> shape = (-1, 2)
|
|
4793
|
-
>>> x = Tensor(np.array([[1], [2]]).astype(np.float32))
|
|
4794
|
-
>>> output = ops.broadcast_to(x, shape)
|
|
4795
|
-
>>> print(output)
|
|
4796
|
-
[[1. 1.]
|
|
4797
|
-
[2. 2.]]
|
|
4798
|
-
"""
|
|
4799
|
-
if isinstance(shape, Tensor) or ops.is_sequence_value_unknown(shape):
|
|
4800
|
-
_dyn_broadcast_to = _get_cache_prim(DynamicBroadcastTo)()
|
|
4801
|
-
return _dyn_broadcast_to(input, shape)
|
|
4802
|
-
_broadcast_to = _get_cache_prim(P.BroadcastTo)(shape)
|
|
4803
|
-
return _broadcast_to(input)
|
|
4804
|
-
|
|
4805
|
-
|
|
4806
3911
|
def unsorted_segment_min(x, segment_ids, num_segments):
|
|
4807
3912
|
r"""
|
|
4808
3913
|
Computes the minimum of a tensor along segments.
|
|
@@ -4826,14 +3931,13 @@ def unsorted_segment_min(x, segment_ids, num_segments):
|
|
|
4826
3931
|
x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
|
|
4827
3932
|
segment_ids (Tensor): The label indicates the segment to which each element belongs.
|
|
4828
3933
|
Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
|
|
4829
|
-
num_segments (int):
|
|
3934
|
+
num_segments (Union[int, Tensor], optional): Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
|
|
4830
3935
|
|
|
4831
3936
|
Returns:
|
|
4832
|
-
Tensor,
|
|
3937
|
+
Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
|
|
4833
3938
|
|
|
4834
3939
|
Raises:
|
|
4835
3940
|
TypeError: If `num_segments` is not an int.
|
|
4836
|
-
ValueError: If length of shape of `segment_ids` is not equal to 1.
|
|
4837
3941
|
|
|
4838
3942
|
Supported Platforms:
|
|
4839
3943
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -4850,7 +3954,6 @@ def unsorted_segment_min(x, segment_ids, num_segments):
|
|
|
4850
3954
|
[[1. 2. 3.]
|
|
4851
3955
|
[4. 2. 1.]]
|
|
4852
3956
|
"""
|
|
4853
|
-
unsorted_segment_min_ = P.UnsortedSegmentMin()
|
|
4854
3957
|
return unsorted_segment_min_(x, segment_ids, num_segments)
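A doctest matching the expected output in this hunk's context (segment 1 takes the element-wise minimum of the last two rows):

>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor([[1., 2., 3.], [4., 5., 6.], [4., 2., 1.]], mindspore.float32)
>>> segment_ids = Tensor([0, 1, 1], mindspore.int32)
>>> print(ops.unsorted_segment_min(x, segment_ids, 2))
[[1. 2. 3.]
 [4. 2. 1.]]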
|
|
4855
3958
|
|
|
4856
3959
|
|
|
@@ -4877,14 +3980,13 @@ def unsorted_segment_max(x, segment_ids, num_segments):
|
|
|
4877
3980
|
x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
|
|
4878
3981
|
segment_ids (Tensor): The label indicates the segment to which each element belongs.
|
|
4879
3982
|
Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
|
|
4880
|
-
num_segments (int):
|
|
3983
|
+
num_segments (Union[int, Tensor], optional): Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
|
|
4881
3984
|
|
|
4882
3985
|
Returns:
|
|
4883
|
-
Tensor,
|
|
3986
|
+
Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
|
|
4884
3987
|
|
|
4885
3988
|
Raises:
|
|
4886
3989
|
TypeError: If `num_segments` is not an int.
|
|
4887
|
-
ValueError: If length of shape of `segment_ids` is not equal to 1.
|
|
4888
3990
|
|
|
4889
3991
|
Supported Platforms:
|
|
4890
3992
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -4901,7 +4003,6 @@ def unsorted_segment_max(x, segment_ids, num_segments):
|
|
|
4901
4003
|
[[1. 2. 3.]
|
|
4902
4004
|
[4. 5. 6.]]
|
|
4903
4005
|
"""
|
|
4904
|
-
unsorted_segment_max_ = P.UnsortedSegmentMax()
|
|
4905
4006
|
return unsorted_segment_max_(x, segment_ids, num_segments)
|
|
4906
4007
|
|
|
4907
4008
|
|
|
@@ -4919,16 +4020,15 @@ def unsorted_segment_prod(x, segment_ids, num_segments):
|
|
|
4919
4020
|
|
|
4920
4021
|
Args:
|
|
4921
4022
|
x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
|
|
4922
|
-
segment_ids (Tensor):
|
|
4923
|
-
|
|
4924
|
-
num_segments (int):
|
|
4023
|
+
segment_ids (Tensor): The label indicates the segment to which each element belongs.
|
|
4024
|
+
Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R. The data type must be int32.
|
|
4025
|
+
num_segments (Union[int, Tensor], optional): Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
|
|
4925
4026
|
|
|
4926
4027
|
Returns:
|
|
4927
|
-
Tensor,
|
|
4028
|
+
Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
|
|
4928
4029
|
|
|
4929
4030
|
Raises:
|
|
4930
4031
|
TypeError: If `num_segments` is not an int.
|
|
4931
|
-
ValueError: If length of shape of `segment_ids` is not equal to 1.
|
|
4932
4032
|
|
|
4933
4033
|
Supported Platforms:
|
|
4934
4034
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -4945,7 +4045,6 @@ def unsorted_segment_prod(x, segment_ids, num_segments):
|
|
|
4945
4045
|
[[4. 4. 3.]
|
|
4946
4046
|
[4. 5. 6.]]
|
|
4947
4047
|
"""
|
|
4948
|
-
unsorted_segment_prod_ = P.UnsortedSegmentProd()
|
|
4949
4048
|
return unsorted_segment_prod_(x, segment_ids, num_segments)
|
|
4950
4049
|
|
|
4951
4050
|
|
|
@@ -5157,33 +4256,6 @@ def is_nonzero(input):
|
|
|
5157
4256
|
return bool(out)
|
|
5158
4257
|
|
|
5159
4258
|
|
|
5160
|
-
def scalar_cast(input_x, input_y):
|
|
5161
|
-
"""
|
|
5162
|
-
Casts the input scalar to another type.
|
|
5163
|
-
|
|
5164
|
-
Args:
|
|
5165
|
-
input_x (scalar): The input scalar. Only constant value is allowed.
|
|
5166
|
-
input_y (mindspore.dtype): The type to be cast. Only constant value is allowed.
|
|
5167
|
-
|
|
5168
|
-
Returns:
|
|
5169
|
-
Scalar. The type is the same as the python type corresponding to `input_y`.
|
|
5170
|
-
|
|
5171
|
-
Raises:
|
|
5172
|
-
TypeError: If neither `input_x` nor `input_y` is a constant value.
|
|
5173
|
-
|
|
5174
|
-
Supported Platforms:
|
|
5175
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
5176
|
-
|
|
5177
|
-
Examples:
|
|
5178
|
-
>>> import mindspore
|
|
5179
|
-
>>> from mindspore import ops
|
|
5180
|
-
>>> output = ops.scalar_cast(255.0, mindspore.int32)
|
|
5181
|
-
>>> print(output)
|
|
5182
|
-
255
|
|
5183
|
-
"""
|
|
5184
|
-
return scalar_cast_(input_x, input_y)
|
|
5185
|
-
|
|
5186
|
-
|
|
5187
4259
|
def tensor_scatter_mul(input_x, indices, updates):
|
|
5188
4260
|
r"""
|
|
5189
4261
|
Creates a new tensor by multiplying the values from the positions in `input_x` indicated by
|
|
@@ -5193,10 +4265,10 @@ def tensor_scatter_mul(input_x, indices, updates):
|
|
|
5193
4265
|
|
|
5194
4266
|
The last axis of `indices` is the depth of each index vector. For each index vector,
|
|
5195
4267
|
there must be a corresponding value in `updates`. The shape of `updates` should be
|
|
5196
|
-
equal to the shape of `input_x[indices]`. For more details, see
|
|
4268
|
+
equal to the shape of `input_x[indices]`. For more details, see Examples.
|
|
5197
4269
|
|
|
5198
4270
|
.. math::
|
|
5199
|
-
output[indices] = input\_x
|
|
4271
|
+
output\left [indices \right ] = input\_x\times update
|
|
5200
4272
|
|
|
5201
4273
|
Note:
|
|
5202
4274
|
- If some values of the `indices` are out of bound, instead of raising an index error,
|
|
@@ -5253,7 +4325,7 @@ def tensor_scatter_div(input_x, indices, updates):
|
|
|
5253
4325
|
|
|
5254
4326
|
The last axis of `indices` is the depth of each index vector. For each index vector,
|
|
5255
4327
|
there must be a corresponding value in `updates`. The shape of `updates` should be
|
|
5256
|
-
equal to the shape of `input_x[indices]`. For more details, see
|
|
4328
|
+
equal to the shape of `input_x[indices]`. For more details, see Examples.
|
|
5257
4329
|
|
|
5258
4330
|
.. math::
|
|
5259
4331
|
output\left [indices \right ] = input\_x \div update
|
|
@@ -5395,8 +4467,8 @@ def masked_select(input, mask):
|
|
|
5395
4467
|
|
|
5396
4468
|
Examples:
|
|
5397
4469
|
>>> import numpy as np
|
|
5398
|
-
>>> import mindspore
|
|
5399
|
-
>>> from mindspore import Tensor
|
|
4470
|
+
>>> import mindspore
|
|
4471
|
+
>>> from mindspore import Tensor, ops
|
|
5400
4472
|
>>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
|
|
5401
4473
|
>>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
|
|
5402
4474
|
>>> output = ops.masked_select(x, mask)
|
|
@@ -5406,83 +4478,6 @@ def masked_select(input, mask):
|
|
|
5406
4478
|
return masked_select_(input, mask)
|
|
5407
4479
|
|
|
5408
4480
|
|
|
5409
|
-
def masked_fill(input_x, mask, value):
|
|
5410
|
-
"""
|
|
5411
|
-
Fills elements of Tensor with value where mask is True.
|
|
5412
|
-
The shapes of `input_x` and `mask` need to be the same or broadcastable.
|
|
5413
|
-
|
|
5414
|
-
Args:
|
|
5415
|
-
input_x (Tensor): The source Tensor whose data type is one of bool, uint8, int8, int16, int32,
|
|
5416
|
-
int64, float16, float32, float64, complex64, complex128.
|
|
5417
|
-
mask (Tensor[bool]): The boolean mask.
|
|
5418
|
-
value (Union[float, Tensor]): The value to fill in with, whose dtype is the same as `input_x`.
|
|
5419
|
-
|
|
5420
|
-
Returns:
|
|
5421
|
-
Tensor, has the same type and shape as `input_x`.
|
|
5422
|
-
|
|
5423
|
-
Raises:
|
|
5424
|
-
TypeError: If dtype of `mask` is not bool.
|
|
5425
|
-
TypeError: If `input_x` or `mask` is not a Tensor.
|
|
5426
|
-
ValueError: If the shapes of `input_x` and `mask` could not be broadcast.
|
|
5427
|
-
TypeError: If dtype of `input_x` or `value` is not one of bool, uint8, int8, int16, int32,
|
|
5428
|
-
int64, float16, float32, float64, complex64, complex128.
|
|
5429
|
-
TypeError: If dtype of `value` is different from that of `input_x`.
|
|
5430
|
-
TypeError: If `value` is neither float number nor Tensor.
|
|
5431
|
-
|
|
5432
|
-
Supported Platforms:
|
|
5433
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
5434
|
-
|
|
5435
|
-
Examples:
|
|
5436
|
-
>>> import mindspore
|
|
5437
|
-
>>> import numpy as np
|
|
5438
|
-
>>> from mindspore import Tensor, ops
|
|
5439
|
-
>>> input_x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
|
|
5440
|
-
>>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
|
|
5441
|
-
>>> output = ops.masked_fill(input_x, mask, 0.5)
|
|
5442
|
-
>>> print(output)
|
|
5443
|
-
[0.5 0.5 3. 0.5]
|
|
5444
|
-
"""
|
|
5445
|
-
if isinstance(value, (float, int)) and isinstance(input_x, Tensor):
|
|
5446
|
-
value = scalar_to_tensor_(value, input_x.dtype)
|
|
5447
|
-
masked_fill_ = _get_cache_prim(P.MaskedFill)()
|
|
5448
|
-
return masked_fill_(input_x, mask, value)
|
|
5449
|
-
|
|
5450
|
-
|
|
5451
|
-
def diag(input):
|
|
5452
|
-
r"""
|
|
5453
|
-
Constructs a diagonal tensor with the given diagonal values.
|
|
5454
|
-
|
|
5455
|
-
Assume `input` has dimensions :math:`(D_1,... D_k)` , the output is a tensor of
|
|
5456
|
-
rank 2k with dimensions :math:`(D_1,..., D_k, D_1,..., D_k)` where:
|
|
5457
|
-
:math:`output[i_1,..., i_k, i_1,..., i_k] = input[i_1,..., i_k]` and 0 everywhere else.
|
|
5458
|
-
|
|
5459
|
-
Args:
|
|
5460
|
-
input (Tensor): The input tensor.
|
|
5461
|
-
|
|
5462
|
-
Returns:
|
|
5463
|
-
Tensor, has the same dtype as the `input`.
|
|
5464
|
-
|
|
5465
|
-
Raises:
|
|
5466
|
-
TypeError: If `input` is not a Tensor.
|
|
5467
|
-
ValueError: If rank of `input` is less than 1.
|
|
5468
|
-
|
|
5469
|
-
Supported Platforms:
|
|
5470
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
5471
|
-
|
|
5472
|
-
Examples:
|
|
5473
|
-
>>> from mindspore import Tensor
|
|
5474
|
-
>>> import mindspore.ops as ops
|
|
5475
|
-
>>> input_x = Tensor([1, 2, 3, 4]).astype('int32')
|
|
5476
|
-
>>> output = ops.diag(input_x)
|
|
5477
|
-
>>> print(output)
|
|
5478
|
-
[[1 0 0 0]
|
|
5479
|
-
[0 2 0 0]
|
|
5480
|
-
[0 0 3 0]
|
|
5481
|
-
[0 0 0 4]]
|
|
5482
|
-
"""
|
|
5483
|
-
return diag_(input)
|
|
5484
|
-
|
|
5485
|
-
|
|
5486
4481
|
def diagflat(input, offset=0):
|
|
5487
4482
|
r"""
|
|
5488
4483
|
Create a 2-D Tensor which diagonal is the flattened `input` .
|
|
@@ -5541,7 +4536,7 @@ def col2im(input_x, output_size, kernel_size, dilation, padding_value, stride):
|
|
|
5541
4536
|
Combines an array of sliding local blocks into a large containing tensor.
|
|
5542
4537
|
|
|
5543
4538
|
Args:
|
|
5544
|
-
input_x (Tensor): 4D tensor with data type float16 or
|
|
4539
|
+
input_x (Tensor): 4D tensor with data type float16 or float32.
|
|
5545
4540
|
output_size (Tensor): 1D tensor with 2 elements of data type int.
|
|
5546
4541
|
kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two int
|
|
5547
4542
|
for height and width. If type is int, it means that height equal with width. Must be specified.
|
|
@@ -5597,7 +4592,7 @@ def _split_int(x, split_size_or_sections, axis):
|
|
|
5597
4592
|
num_sections = length_along_dim // split_size_or_sections
|
|
5598
4593
|
length1 = num_sections * split_size_or_sections
|
|
5599
4594
|
length2 = length_along_dim - length1
|
|
5600
|
-
start1 = _list_comprehensions(
|
|
4595
|
+
start1 = _list_comprehensions(rank_(x), 0, True)
|
|
5601
4596
|
size1 = _tuple_setitem(arr_shape, axis, length1)
|
|
5602
4597
|
start2 = _tuple_setitem(start1, axis, length1)
|
|
5603
4598
|
size2 = _tuple_setitem(arr_shape, axis, length2)
|
|
@@ -5649,9 +4644,9 @@ def split(tensor, split_size_or_sections, axis=0):
|
|
|
5649
4644
|
TypeError: If argument `tensor` is not Tensor.
|
|
5650
4645
|
TypeError: If argument `axis` is not Tensor.
|
|
5651
4646
|
ValueError: If argument `axis` is out of range of :math:`[-tensor.ndim, tensor.ndim)` .
|
|
5652
|
-
TypeError: If each element in
|
|
5653
|
-
TypeError: If argument `
|
|
5654
|
-
ValueError: The sum of
|
|
4647
|
+
TypeError: If each element in `split_size_or_sections` is not integer.
|
|
4648
|
+
TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
|
|
4649
|
+
ValueError: The sum of `split_size_or_sections` is not equal to x.shape[axis].
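A short doctest of the happy path now spelled out by these Raises entries: an int `split_size_or_sections` that divides the axis evenly (shapes chosen for illustration):

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.arange(9), mindspore.int64)
>>> for part in ops.split(x, 3):
...     print(part)
[0 1 2]
[3 4 5]
[6 7 8]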
|
|
5655
4650
|
|
|
5656
4651
|
Supported Platforms:
|
|
5657
4652
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -5917,24 +4912,24 @@ def _tensor_split_sub_int(x, indices_or_sections, axis):
|
|
|
5917
4912
|
arr_shape = x.shape
|
|
5918
4913
|
length_along_dim = arr_shape[axis]
|
|
5919
4914
|
if indices_or_sections > length_along_dim:
|
|
5920
|
-
res = P.Split(axis, length_along_dim)(x)
|
|
4915
|
+
res = _get_cache_prim(P.Split)(axis, length_along_dim)(x)
|
|
5921
4916
|
indices_or_sections_n = [length_along_dim, length_along_dim + 1]
|
|
5922
4917
|
res2 = _tensor_split_sub_tensors(x, indices_or_sections_n, axis)
|
|
5923
4918
|
for _ in np.arange(length_along_dim, indices_or_sections):
|
|
5924
4919
|
res += tuple(res2)[1:]
|
|
5925
4920
|
elif length_along_dim % indices_or_sections == 0:
|
|
5926
|
-
res = P.Split(axis, indices_or_sections)(x)
|
|
4921
|
+
res = _get_cache_prim(P.Split)(axis, indices_or_sections)(x)
|
|
5927
4922
|
else:
|
|
5928
4923
|
num_long_tensor = length_along_dim % indices_or_sections
|
|
5929
4924
|
num_short_tensor = indices_or_sections - num_long_tensor
|
|
5930
4925
|
length1 = num_long_tensor * (length_along_dim // indices_or_sections + 1)
|
|
5931
4926
|
length2 = length_along_dim - length1
|
|
5932
|
-
start1 = _list_comprehensions(
|
|
4927
|
+
start1 = _list_comprehensions(rank_(x), 0, True)
|
|
5933
4928
|
size1 = _tuple_setitem(arr_shape, axis, length1)
|
|
5934
4929
|
start2 = _tuple_setitem(start1, axis, length1)
|
|
5935
4930
|
size2 = _tuple_setitem(arr_shape, axis, length2)
|
|
5936
|
-
res = P.Split(axis, num_long_tensor)(tensor_slice(x, start1, size1)) + \
|
|
5937
|
-
P.Split(axis, num_short_tensor)(tensor_slice(x, start2, size2))
|
|
4931
|
+
res = _get_cache_prim(P.Split)(axis, num_long_tensor)(tensor_slice(x, start1, size1)) + \
|
|
4932
|
+
_get_cache_prim(P.Split)(axis, num_short_tensor)(tensor_slice(x, start2, size2))
|
|
5938
4933
|
return res
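These replacements route primitive construction through _get_cache_prim, so repeated calls reuse one P.Split instance per argument tuple instead of rebuilding it each time. A minimal sketch of the caching idea (hypothetical names; the real helper lives elsewhere in mindspore.ops):

>>> _prim_cache = {}                      # hypothetical module-level cache
>>> def get_cache_prim_sketch(prim_cls):
...     def construct(*args):
...         key = (prim_cls, args)        # one instance per (class, args) pair
...         if key not in _prim_cache:
...             _prim_cache[key] = prim_cls(*args)
...         return _prim_cache[key]
...     return construct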
|
|
5939
4934
|
|
|
5940
4935
|
|
|
@@ -5948,11 +4943,11 @@ def tensor_split(input, indices_or_sections, axis=0):
|
|
|
5948
4943
|
|
|
5949
4944
|
- If `indices_or_sections` is an integer n, input tensor will be split into n sections.
|
|
5950
4945
|
|
|
5951
|
-
- If :math:`input.shape
|
|
5952
|
-
:math:`input.shape
|
|
5953
|
-
- If :math:`input.shape
|
|
5954
|
-
will have size :math:`input.shape
|
|
5955
|
-
size :math:`input.shape
|
|
4946
|
+
- If :math:`input.shape[axis]` can be divisible by n, sub-sections will have equal size
|
|
4947
|
+
:math:`input.shape[axis] / n` .
|
|
4948
|
+
- If :math:`input.shape[axis]` is not divisible by n, the first :math:`input.shape[axis] \bmod n` sections
|
|
4949
|
+
will have size :math:`input.shape[axis] // n + 1` , and the rest will have
|
|
4950
|
+
size :math:`input.shape[axis] // n` .
|
|
5956
4951
|
- If `indices_or_sections` is of type tuple(int) or list(int), the input tensor will be split at the
|
|
5957
4952
|
indices in the list or tuple. For example, given parameters :math:`indices\_or\_sections=[1, 4]`
|
|
5958
4953
|
and :math:`axis=0` , the input tensor will be split into sections :math:`input[:1]` ,
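The repaired bullets are simple size arithmetic; a worked check for n=3 on an axis of length 7 (7 mod 3 = 1 section of size 7//3+1 = 3, then two of size 7//3 = 2):

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.arange(7), mindspore.int64)
>>> print([part.shape for part in ops.tensor_split(x, 3)])
[(3,), (2,), (2,)]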
|
|
@@ -6165,7 +5160,7 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylin
|
|
|
6165
5160
|
tensor.
|
|
6166
5161
|
|
|
6167
5162
|
- values (Tensor) - The maximum value of input tensor, with the same shape as index, and same dtype as `input`.
|
|
6168
|
-
- index (Tensor) - The index for the maximum value of the input tensor, with dtype
|
|
5163
|
+
- index (Tensor) - The index for the maximum value of the input tensor, with dtype int64. If `keepdims`
|
|
6169
5164
|
is true, the shape of output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1, input_{axis+1},
|
|
6170
5165
|
..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1},
|
|
6171
5166
|
..., input_N)` .
|
|
@@ -6194,16 +5189,15 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylin
|
|
|
6194
5189
|
[[3.2 0.4 0.4 2.9 4. ]] [[1 1 0 1 1]]
|
|
6195
5190
|
"""
|
|
6196
5191
|
if not input.shape:
|
|
6197
|
-
return (input, Tensor(0, dtype=mstype.int32))
|
|
5192
|
+
return (input, Tensor(0, dtype=mstype.int64))
|
|
6198
5193
|
if axis is None:
|
|
6199
|
-
|
|
6200
|
-
return (reduce_max_op(input), Tensor(0, dtype=mstype.int32))
|
|
5194
|
+
return (reduce_max_(input), Tensor(0, dtype=mstype.int64))
|
|
6201
5195
|
if initial is not None and not isinstance(initial, numbers.Number):
|
|
6202
5196
|
raise TypeError(f"For 'max', 'initial' must be a scalar, but got {type(initial)}")
|
|
6203
5197
|
if axis is not None and not isinstance(axis, int):
|
|
6204
5198
|
raise TypeError(f"For 'max', 'axis' must be int, but got {type(axis)}")
|
|
6205
5199
|
input = _init_and_select_elem(input, initial, where, ops.maximum)
|
|
6206
|
-
argmax_with_value_op = ArgMaxWithValue(axis, keepdims)
|
|
5200
|
+
argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(axis, keepdims)
|
|
6207
5201
|
indices, values = argmax_with_value_op(input)
|
|
6208
5202
|
return values, indices
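Per the repaired branches, the placeholder index returned for axis=None is now int64; a minimal doctest (the 0 index is a placeholder, not an argmax):

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
>>> values, index = ops.max(x)
>>> print(values, index.dtype)
0.7 Int64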
|
|
6209
5203
|
|
|
@@ -6249,7 +5243,7 @@ def argmax(input, dim=None, keepdim=False):
|
|
|
6249
5243
|
is_dim_none = True
|
|
6250
5244
|
out = _get_cache_prim(Argmax)(dim, mstype.int64)(input)
|
|
6251
5245
|
if keepdim and not is_dim_none:
|
|
6252
|
-
out =
|
|
5246
|
+
out = expand_dims(out, dim)
|
|
6253
5247
|
return out
|
|
6254
5248
|
|
|
6255
5249
|
|
|
@@ -6311,16 +5305,16 @@ def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pylin
|
|
|
6311
5305
|
0.0 0
|
|
6312
5306
|
"""
|
|
6313
5307
|
if not input.shape:
|
|
6314
|
-
return (input, Tensor(0, dtype=mstype.int32))
|
|
5308
|
+
return (input, Tensor(0, dtype=mstype.int64))
|
|
6315
5309
|
if axis is None:
|
|
6316
|
-
return (reduce_min_op(input), Tensor(0, dtype=mstype.int32))
|
|
5310
|
+
return (reduce_min_(input), Tensor(0, dtype=mstype.int64))
|
|
6317
5311
|
if initial is not None and not isinstance(initial, numbers.Number):
|
|
6318
5312
|
raise TypeError(f"For 'min', 'initial' must be a scalar, but got {type(initial)}")
|
|
6319
5313
|
if axis is not None and not isinstance(axis, int):
|
|
6320
5314
|
raise TypeError(f"For 'min', 'axis' must be int, but got {type(axis)}")
|
|
6321
5315
|
input = _init_and_select_elem(input, initial, where, ops.minimum)
|
|
6322
|
-
|
|
6323
|
-
indices, values = argmin_with_value_op(input)
|
|
5316
|
+
argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(axis, keepdims)
|
|
5317
|
+
indices, values = argmin_with_value_op(input)
|
|
6324
5318
|
return values, indices
|
|
6325
5319
|
|
|
6326
5320
|
|
|
@@ -6378,8 +5372,8 @@ def aminmax(input, *, axis=0, keepdims=False):
|
|
|
6378
5372
|
output0 = ops.reshape(output0, [1] * input.ndim)
|
|
6379
5373
|
output1 = ops.reshape(output1, [1] * input.ndim)
|
|
6380
5374
|
return output0, output1
|
|
6381
|
-
argmin_with_value_op = ArgMinWithValue(axis, keepdims)
|
|
6382
|
-
argmax_with_value_op = ArgMaxWithValue(axis, keepdims)
|
|
5375
|
+
argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(axis, keepdims)
|
|
5376
|
+
argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(axis, keepdims)
|
|
6383
5377
|
_, output0 = argmin_with_value_op(input)
|
|
6384
5378
|
_, output1 = argmax_with_value_op(input)
|
|
6385
5379
|
if keepdims is True and input.ndim == 0:
|
|
@@ -6434,66 +5428,7 @@ def narrow(input, axis, start, length):
|
|
|
6434
5428
|
begins[axis] = start
|
|
6435
5429
|
sizes = list(input.shape)
|
|
6436
5430
|
sizes[axis] = length
|
|
6437
|
-
return tensor_slice(input, begins, sizes)
|
|
6438
|
-
|
|
6439
|
-
|
|
6440
|
-
def unsorted_segment_sum(input_x, segment_ids, num_segments):
|
|
6441
|
-
r"""
|
|
6442
|
-
Computes the sum of a tensor along segments.
|
|
6443
|
-
|
|
6444
|
-
Calculates a tensor such that :math:`\text{output}[i] = \sum_{segment\_ids[j] == i} \text{data}[j, \ldots]`, where
|
|
6445
|
-
:math:`j,...` is a tuple describing the index of element in data.
|
|
6446
|
-
`segment_ids` selects which elements in data to sum
|
|
6447
|
-
up. Segment_ids does not need to be sorted, and it does not need to cover all values in the entire valid value
|
|
6448
|
-
range.
|
|
6449
|
-
|
|
6450
|
-
The following figure shows the calculation process of unsorted_segment_sum:
|
|
6451
|
-
|
|
6452
|
-
.. image:: UnsortedSegmentSum.png
|
|
6453
|
-
|
|
6454
|
-
Note:
|
|
6455
|
-
- If the segment_id i is absent in the segment_ids, then output[i] will be filled with 0.
|
|
6456
|
-
- On Ascend, if the value of segment_id is less than 0 or greater than the length of the input data shape, an
|
|
6457
|
-
execution error will occur.
|
|
6458
|
-
|
|
6459
|
-
If the sum of the given segment_ids :math:`i` is empty, then :math:`\text{output}[i] = 0`. If the given segment_ids
|
|
6460
|
-
is negative, the value will be ignored. 'num_segments' must be equal to the number of different segment_ids.
|
|
6461
|
-
|
|
6462
|
-
Args:
|
|
6463
|
-
input_x (Tensor): Input Tensor contains the data to be summed.
|
|
6464
|
-
The shape is :math:`(x_1, x_2, ..., x_R)`.
|
|
6465
|
-
segment_ids (Tensor): The label indicates the segment to which each element belongs.
|
|
6466
|
-
Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
|
|
6467
|
-
num_segments (Union[int, Tensor], optional): Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
|
|
6468
|
-
|
|
6469
|
-
Returns:
|
|
6470
|
-
Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
|
|
6471
|
-
|
|
6472
|
-
Raises:
|
|
6473
|
-
TypeError: If `num_segments` is not an int or 0-D Tensor.
|
|
6474
|
-
ValueError: If length of shape of `segment_ids` is less than 1.
|
|
6475
|
-
|
|
6476
|
-
Supported Platforms:
|
|
6477
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
6478
|
-
|
|
6479
|
-
Examples:
|
|
6480
|
-
>>> from mindspore import Tensor
|
|
6481
|
-
>>> from mindspore import ops
|
|
6482
|
-
>>> import mindspore
|
|
6483
|
-
>>> input_x = Tensor([1, 2, 3, 4], mindspore.float32)
|
|
6484
|
-
>>> segment_ids = Tensor([0, 0, 1, 2], mindspore.int32)
|
|
6485
|
-
>>> num_segments = 4
|
|
6486
|
-
>>> output = ops.unsorted_segment_sum(input_x, segment_ids, num_segments)
|
|
6487
|
-
>>> print(output)
|
|
6488
|
-
[3. 3. 4. 0.]
|
|
6489
|
-
>>> input_x = Tensor([1, 2, 3, 4, 2, 5], mindspore.float32)
|
|
6490
|
-
>>> segment_ids = Tensor([0, 0, 1, 2, 3, 4], mindspore.int32)
|
|
6491
|
-
>>> num_segments = 6
|
|
6492
|
-
>>> output = ops.unsorted_segment_sum(input_x, segment_ids, num_segments)
|
|
6493
|
-
>>> print(output)
|
|
6494
|
-
[3. 3. 4. 2. 5. 0.]
|
|
6495
|
-
"""
|
|
6496
|
-
return unsorted_segment_sum_(input_x, segment_ids, num_segments)
|
|
5431
|
+
return tensor_slice(input, begins, sizes)
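With unsorted_segment_sum relocated, narrow again ends in the tensor_slice call restored here; a quick doctest of narrow itself:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.int32)
>>> print(ops.narrow(x, 0, 0, 2))
[[1 2 3]
 [4 5 6]]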
|
|
6497
5432
|
|
|
6498
5433
|
|
|
6499
5434
|
def topk(input, k, dim=None, largest=True, sorted=True):
|
|
@@ -6727,9 +5662,7 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
|
|
|
6727
5662
|
.. warning::
|
|
6728
5663
|
- The output is a 3-dimensional Tensor whose shape is
|
|
6729
5664
|
:math:`(N, C \times \prod(\text{kernel_size}), L)` .
|
|
6730
|
-
|
|
6731
|
-
.. warning::
|
|
6732
|
-
This is an experimental API that is subject to change or deletion.
|
|
5665
|
+
- This is an experimental API that is subject to change or deletion.
|
|
6733
5666
|
|
|
6734
5667
|
Args:
|
|
6735
5668
|
input (Tensor): 4-D Tensor, supported dtypes: float16, float32, float64, complex64 and complex128.
|
|
@@ -6738,10 +5671,11 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
|
|
|
6738
5671
|
dilation (Union[int, tuple[int], list[int]], optional): The dilation of the window, should be two int
|
|
6739
5672
|
for height and width. If type is int, it means that height equal with width. Default: ``1`` .
|
|
6740
5673
|
padding (Union[int, tuple[int], list[int]], optional): The pad of the window, that must be
|
|
6741
|
-
a tuple/list of one or two `int` for height and width.
|
|
6742
|
-
|
|
6743
|
-
If
|
|
6744
|
-
|
|
5674
|
+
a tuple/list of one or two `int` for height and width. Default: ``0`` .
|
|
5675
|
+
|
|
5676
|
+
- If one int, pad_height = pad_width.
|
|
5677
|
+
- If two int, pad_height = padding[0], pad_width = padding[1].
|
|
5678
|
+
|
|
6745
5679
|
stride (Union[int, tuple[int], list[int]], optional): The stride of the window, should be two int
|
|
6746
5680
|
for height and width. If type is int, it means that height equal with width. Default: ``1`` .
|
|
6747
5681
|
|
|
@@ -6788,98 +5722,6 @@ def _check_diagonal_axes(dim1, dim2, x_ndim):
|
|
|
6788
5722
|
return axes
|
|
6789
5723
|
|
|
6790
5724
|
|
|
6791
|
-
def diagonal(input, offset=0, dim1=0, dim2=1):
|
|
6792
|
-
"""
|
|
6793
|
-
Returns specified diagonals of `input`.
|
|
6794
|
-
|
|
6795
|
-
If `input` is 2-D, returns the diagonal of `input` with the given offset.
|
|
6796
|
-
If `input` has more than two
|
|
6797
|
-
dimensions, then the axes specified by `dim1` and `dim2` are used to determine
|
|
6798
|
-
the 2-D sub-array whose diagonal is returned. In this case, remove the `dim1` and `dim2` dimensions of `input`
|
|
6799
|
-
and insert the last dimension of `input` by the diagonal elements determined by `dim1` and `dim2`.
|
|
6800
|
-
|
|
6801
|
-
Args:
|
|
6802
|
-
input (Tensor): Array from which the diagonals are taken.
|
|
6803
|
-
offset (int, optional): Offset of the diagonal from the main diagonal.
|
|
6804
|
-
Can be positive or negative. Default: ``0`` .
|
|
6805
|
-
dim1 (int, optional): Axis to be used as the first axis of the 2-D
|
|
6806
|
-
sub-arrays from which the diagonals should be taken.
|
|
6807
|
-
Default: ``0`` .
|
|
6808
|
-
dim2 (int, optional): Axis to be used as the second axis of the 2-D
|
|
6809
|
-
sub-arrays from which the diagonals should be taken.
|
|
6810
|
-
Default: ``1`` .
|
|
6811
|
-
|
|
6812
|
-
Returns:
|
|
6813
|
-
Tensor, if `input` is 2-D, then `input` 1-D array containing the diagonal. If
|
|
6814
|
-
``input.ndim > 2``, then the dimensions specified by `dim1` and `dim2` are removed,
|
|
6815
|
-
and a new axis inserted at the end corresponding to the diagonal.
|
|
6816
|
-
|
|
6817
|
-
Raises:
|
|
6818
|
-
TypeError: If `dim1` or `dim2` is not an int.
|
|
6819
|
-
ValueError: If the input tensor has less than two dimensions.
|
|
6820
|
-
|
|
6821
|
-
Supported Platforms:
|
|
6822
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
6823
|
-
|
|
6824
|
-
Examples:
|
|
6825
|
-
>>> from mindspore import Tensor, ops
|
|
6826
|
-
>>> from mindspore import dtype as mstype
|
|
6827
|
-
>>> x = Tensor([[0, 1], [2, 3]], mstype.float32)
|
|
6828
|
-
>>> output = ops.diagonal(x)
|
|
6829
|
-
>>> print(output)
|
|
6830
|
-
[0 3]
|
|
6831
|
-
"""
|
|
6832
|
-
x_ndim = input.ndim
|
|
6833
|
-
if x_ndim < 2:
|
|
6834
|
-
raise ValueError(f"For 'ops.diagonal', the original tensor requires at least two dimensions, but got {x_ndim}")
|
|
6835
|
-
_check_attr_dtype("dim1", dim1, [int], "diagonal")
|
|
6836
|
-
_check_attr_dtype("dim2", dim2, [int], "diagonal")
|
|
6837
|
-
dtype = input.dtype
|
|
6838
|
-
|
|
6839
|
-
axes = _check_diagonal_axes(dim1, dim2, x_ndim)
|
|
6840
|
-
perm = ()
|
|
6841
|
-
for i in ms_arrange(x_ndim):
|
|
6842
|
-
if i not in axes:
|
|
6843
|
-
perm += (i,)
|
|
6844
|
-
perm += axes
|
|
6845
|
-
input = input.transpose(perm)
|
|
6846
|
-
|
|
6847
|
-
x_shape = input.shape
|
|
6848
|
-
n, m = x_shape[-2:]
|
|
6849
|
-
|
|
6850
|
-
e = ops.eye(n, m, dtype)
|
|
6851
|
-
if offset >= m or offset <= -n:
|
|
6852
|
-
zero_shape = x_shape[:-2] + (0,)
|
|
6853
|
-
return ops.zeros(zero_shape, dtype)
|
|
6854
|
-
if offset != 0:
|
|
6855
|
-
e = e.astype(mstype.float32)
|
|
6856
|
-
if offset > 0:
|
|
6857
|
-
e_left = ops.fill(mstype.float32, (n, offset), 0)
|
|
6858
|
-
e_right = e[..., 0:m - offset:1]
|
|
6859
|
-
e = ops.cat((e_left, e_right), 1).astype(dtype)
|
|
6860
|
-
elif offset < 0:
|
|
6861
|
-
e_upper = ops.fill(mstype.float32, (-offset, m), 0)
|
|
6862
|
-
e_lower = e[0:n + offset:1, ...]
|
|
6863
|
-
e = ops.cat((e_upper, e_lower), 0).astype(dtype)
|
|
6864
|
-
e = ops.broadcast_to(e, x_shape)
|
|
6865
|
-
|
|
6866
|
-
prod_val = ops.mul(input, e)
|
|
6867
|
-
res = ops.ReduceSum()(prod_val.astype(mstype.float32), -1)
|
|
6868
|
-
|
|
6869
|
-
begin = ()
|
|
6870
|
-
for _ in ms_arrange(x_ndim - 2):
|
|
6871
|
-
begin += (0,)
|
|
6872
|
-
last_dim_begin = builtins.max(0, -offset)
|
|
6873
|
-
begin += (last_dim_begin,)
|
|
6874
|
-
res_size = res.shape[:-1]
|
|
6875
|
-
last_dim_end = builtins.min(x_shape[-2], builtins.max(0, x_shape[-1] - offset)) - last_dim_begin
|
|
6876
|
-
if last_dim_end <= 0:
|
|
6877
|
-
return Tensor([])
|
|
6878
|
-
res_size += (last_dim_end,)
|
|
6879
|
-
res = ops.slice(res, begin, res_size)
|
|
6880
|
-
return res.astype(dtype)
|
|
6881
|
-
|
|
6882
|
-
|
|
6883
5725
|
def _check_is_tensor(param_name, input, cls_name):
|
|
6884
5726
|
"""Returns True if input is Tensor."""
|
|
6885
5727
|
if not isinstance(input, Tensor):
|
|
@@ -6899,6 +5741,9 @@ def diagonal_scatter(input, src, offset=0, dim1=0, dim2=1):
|
|
|
6899
5741
|
the elements in these two dimensions will be treated as elements of a matrix,
|
|
6900
5742
|
and `src` is embedded on the diagonal of the matrix.
|
|
6901
5743
|
|
|
5744
|
+
Note:
|
|
5745
|
+
Currently, ``inf`` value of elements in `input` or `src` is not supported.
|
|
5746
|
+
|
|
6902
5747
|
Args:
|
|
6903
5748
|
input (Tensor): Input Tensor, whose dimension is larger than 1.
|
|
6904
5749
|
src (Tensor): The source Tensor to embed.
|
|
@@ -6935,16 +5780,39 @@ def diagonal_scatter(input, src, offset=0, dim1=0, dim2=1):
     """
     _check_is_tensor("input", input, "diagonal_scatter")
     _check_is_tensor("src", src, "diagonal_scatter")
-    _check_is_int(offset, "offset", "diagonal_scatter")
-    _check_is_int(dim1, "dim1", "diagonal_scatter")
-    _check_is_int(dim2, "dim2", "diagonal_scatter")
     input_diag = input.diagonal(offset, dim1, dim2)
     _check_diagonal_scatter_shape(input_diag.shape, src.shape)
-
-
-
+    input_shape = input.shape
+    zeros_shape = list(input_shape)
+    m, n = input_shape[dim1], input_shape[dim2]
+    if m == n:
+        src = src - input_diag
+        src = ops.diag_embed(src, offset, dim1, dim2)
+        return input + src
+    if m > n:
+        axis = dim2
+        zeros_shape[axis] = m - n
+    else:
+        axis = dim1
+        zeros_shape[axis] = n - m
+    zeros_tensor = zeros(zeros_shape, dtype=input.dtype)
+    input = concat((input, zeros_tensor), axis)
+    input_diag = input.diagonal(offset, dim1, dim2)
+    if src.shape != input_diag.shape:
+        zeros_shape = []
+        for i, ax in enumerate(src.shape):
+            if ax == input_diag.shape[i]:
+                zeros_shape.append(ax)
+            else:
+                axis = i
+                zeros_shape.append(input_diag.shape[i] - ax)
+        zeros_tensor = zeros(zeros_shape, dtype=src.dtype)
+        src = concat((src, zeros_tensor), axis)
+    src = src - input_diag
     src = ops.diag_embed(src, offset, dim1, dim2)
-
+    input = input + src
+    begin = (0,) * input.ndim
+    return slice(input, begin, input_shape)
 
 
 def lstsq(input, A):
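The rewritten `diagonal_scatter` works by cancellation: subtract the current diagonal from `src`, embed the difference with `diag_embed`, and add it back, padding rectangular inputs to square first and slicing the padding off at the end. A hypothetical 2-D NumPy analogue of the square branch (the function name is ours):

```python
import numpy as np

def diagonal_scatter_2d(x, src, offset=0):
    # Cancellation trick from the new composite: the delta embeds
    # (src - current diagonal), so adding it replaces the diagonal.
    delta = np.zeros_like(x)
    i = np.arange(src.shape[0])
    r, c = (i, i + offset) if offset >= 0 else (i - offset, i)
    delta[r, c] = src - x[r, c]
    return x + delta

x = np.arange(9.).reshape(3, 3)
print(diagonal_scatter_2d(x, np.array([-1., -2.]), offset=1))
# [[ 0. -1.  2.]
#  [ 3.  4. -2.]
#  [ 6.  7.  8.]]
```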
@@ -7003,8 +5871,7 @@ def lstsq(input, A):
     [-6.5000005 -4.500001 ]
     [-3.500002  -2.5000017]]
     """
-
-    return lstsq_op(input, A)
+    return lstsq_(input, A)
 
 
 def mvlgamma(input, p):
@@ -7079,7 +5946,7 @@ def argwhere(input):
         [[0 0 0]
         [0 1 0]]
     """
-    return
+    return nonzero(input)
 
 
 def column_stack(tensors):
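After the change, `argwhere` delegates directly to `nonzero`, returning one row of indices per nonzero element. A quick usage sketch with made-up inputs:

```python
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor([[1, 0, 2], [0, 3, 0]], ms.int32)
print(ops.argwhere(x))  # one (row, col) pair per nonzero entry
# [[0 0]
#  [0 2]
#  [1 1]]
```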
@@ -7116,14 +5983,13 @@ def column_stack(tensors):
         raise TypeError(f"For column_stack, the input must be list or tuple of tensors, but got {type(tensors)}.")
 
     trans_x = ()
-    _expand_dims = _get_cache_prim(P.ExpandDims)()
     for tensor in tensors:
         if not isinstance(tensor, Tensor):
             raise TypeError(f"For column_stack, the input element must be tensor, but got {type(tensor)}.")
         if tensor.ndim < 1:
-            tensor = _expand_dims(tensor, 0)
+            tensor = expand_dims(tensor, 0)
         if tensor.ndim == 1:
-            tensor = _expand_dims(tensor, 1)
+            tensor = expand_dims(tensor, 1)
         trans_x += (tensor,)
     if not trans_x:
         raise ValueError(f"For column_stack, the input must have at least 1 tensor, but got 0.")
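`column_stack` promotes 0-D and 1-D inputs with `expand_dims` before concatenating, so 1-D tensors become columns. Illustrative values:

```python
import mindspore as ms
from mindspore import Tensor, ops

a = Tensor([1, 2, 3], ms.int32)
b = Tensor([4, 5, 6], ms.int32)
print(ops.column_stack((a, b)))  # each 1-D input becomes one column
# [[1 4]
#  [2 5]
#  [3 6]]
```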
@@ -7169,7 +6035,7 @@ def hstack(tensors):
         if not isinstance(tensor, Tensor):
             raise TypeError(f"For hstack, the input element must be tensor, but got {type(tensor)}.")
         if tensor.ndim < 1:
-            tensor =
+            tensor = expand_dims(tensor, 0)
         tuple_of_tensor += (tensor,)
     if not tuple_of_tensor:
         raise ValueError("For hstack, the input must have at least 1 tensor, but got 0.")
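`hstack` applies the same promotion only to 0-D inputs; 1-D tensors are concatenated end to end. Illustrative values:

```python
import mindspore as ms
from mindspore import Tensor, ops

a = Tensor([1, 2], ms.int32)
b = Tensor([3, 4], ms.int32)
print(ops.hstack((a, b)))  # 1-D inputs are joined along axis 0
# [1 2 3 4]
```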
@@ -7269,7 +6135,7 @@ def movedim(x, source, destination):
             f"For `source` and `destination` arguments, the number of elements must be the same, but got 'source':"
             f" {len(source)} and 'destination': {len(destination)}.")
     perm = _get_moved_perm(ndim, source, destination)
-    return
+    return transpose_(x, perm)
 
 
 def moveaxis(x, source, destination):
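`movedim` builds a permutation with `_get_moved_perm` and now feeds it straight to the cached transpose. A usage sketch with illustrative shapes:

```python
import mindspore as ms
from mindspore import ops

x = ops.zeros((3, 4, 5), ms.float32)
print(ops.movedim(x, 0, -1).shape)  # axis 0 moved to the end
# (4, 5, 3)
```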
@@ -7344,7 +6210,7 @@ def swapaxes(input, axis0, axis1):
     new_perm = perm[0:axis0] + perm[axis1:axis1 + 1] + \
                perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1]
 
-    return
+    return transpose_(input, new_perm)
 
 
 def swapdims(input, dim0, dim1):
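`swapaxes` likewise splices a swapped permutation together and transposes with it. Usage sketch:

```python
import mindspore as ms
from mindspore import ops

x = ops.ones((2, 3, 4), ms.float32)
print(ops.swapaxes(x, 0, 2).shape)  # axes 0 and 2 exchanged
# (4, 3, 2)
```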
@@ -7454,7 +6320,7 @@ def repeat_interleave(input, repeats, axis=None):
 
 def repeat_elements(x, rep, axis=0):
     """
-    Repeat elements of a tensor along an axis, like `
+    Repeat elements of a tensor along an axis, like `numpy.repeat` .
 
     Args:
         x (Tensor): The tensor to repeat values for. Must be of type: float16,
@@ -7492,34 +6358,19 @@ def repeat_elements(x, rep, axis=0):
     const_utils.check_type_valid(ops.dtype(x), mstype.number_type, 'input x')
     rep = _check_positive_int(rep, "rep", "repeat_elements")
     axis = _check_is_int(axis, "axis", "repeat_elements")
-
-    rank_op = P.Rank()
-    tile_op = P.Tile()
-    expand_dims_op = P.ExpandDims()
-    reshape_op = P.Reshape()
-    x_rank = rank_op(x)
+    x_rank = rank_(x)
     axis = _check_axis_range(axis, x_rank, "axis", "repeat_elements")
+    axis = axis + x.ndim if axis < 0 else axis
     expand_axis = axis + 1
-    x_expand =
+    x_expand = expand_dims(x, expand_axis)
     rep_dims = _cal_repeat_dims(x_rank, rep, expand_axis)
-    x_expand =
-    x_shape =
+    x_expand = tile_(x_expand, rep_dims)
+    x_shape = shape_(x)
     x_reshape = _cal_reshape(x_shape, rep, axis)
-    x_rep =
+    x_rep = reshape_(x_expand, x_reshape)
     return x_rep
 
 
-@_primexpr
-def _check_sequence_mask_input_len(input_shape, prim_name=None):
-    msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
-    if not input_shape:
-        raise ValueError(f"{msg_prefix} input_shape must be greater than 0, but got {input_shape}.")
-    # broadcast only supports 7d shape
-    shape_size = len(input_shape)
-    if shape_size >= 7:
-        raise ValueError(f"{msg_prefix} dimension of input_shape must be less than 7, but got {shape_size}d.")
-
-
 def sequence_mask(lengths, maxlen=None):
     """
     Returns a mask tensor representing the first N positions of each cell.
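`repeat_elements` keeps the classic expand, tile, reshape pattern, now via cached functional ops instead of freshly constructed primitives. The same pattern in NumPy terms (the helper name is ours):

```python
import numpy as np

def repeat_elements_np(x, rep, axis=0):
    # Insert a new axis after `axis`, tile it `rep` times,
    # then fold the tiled axis back into `axis`.
    x = np.expand_dims(x, axis + 1)
    reps = [1] * x.ndim
    reps[axis + 1] = rep
    x = np.tile(x, reps)
    new_shape = list(x.shape[:axis]) + [-1] + list(x.shape[axis + 2:])
    return x.reshape(new_shape)

x = np.array([[1, 2], [3, 4]])
assert np.array_equal(repeat_elements_np(x, 2, axis=1),
                      np.repeat(x, 2, axis=1))
```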
@@ -7572,29 +6423,19 @@ def sequence_mask(lengths, maxlen=None):
         [[ True True False False ]
         [ True True True True ]]]
     """
-
-    argmax_op = P.ArgMaxWithValue()
-    reshape_op = P.Reshape()
-    range_op = P.Range()
-    expand_op = P.ExpandDims()
-    cast_op = P.Cast()
-    to_tensor_op = P.ScalarToTensor()
-    shape_op = P.Shape()
-
     const_utils.check_type_valid(ops.dtype(lengths), [mstype.int64, mstype.int32], 'lengths')
-    _check_sequence_mask_input_len(shape_op(lengths), "sequence_mask")
 
     if maxlen is None:
-        flatten_data =
-        flatten_data =
-        _, value =
-        maxlen =
+        flatten_data = reshape_(lengths, (-1,))
+        flatten_data = cast_(flatten_data, mstype.float32)
+        _, value = arg_max_with_value_(flatten_data)
+        maxlen = cast_(value, mstype.int32)
     else:
         maxlen = _check_positive_int(maxlen, "maxlen", "sequence_mask")
-        maxlen =
+        maxlen = scalar_to_tensor_(maxlen, mstype.int32)
 
-    range_vector =
-    mask =
+    range_vector = range_(scalar_to_tensor_(0, mstype.int32), maxlen, scalar_to_tensor_(1, mstype.int32))
+    mask = expand_dims(lengths, -1)
     result = range_vector < mask
     return result
 
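`sequence_mask` builds the mask by comparing a `[0, maxlen)` range vector against the lengths expanded with a trailing axis; broadcasting does the rest, and the old 7-D input guard is gone. Usage sketch with made-up lengths:

```python
import mindspore as ms
from mindspore import Tensor, ops

lengths = Tensor([1, 3], ms.int32)
print(ops.sequence_mask(lengths, maxlen=4))  # mask[i, j] is j < lengths[i]
# [[ True False False False]
#  [ True  True  True False]]
```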
@@ -7607,35 +6448,6 @@ def top_k(input_x, k, sorted=True):
     return top_k_(input_x, k)
 
 
-def deepcopy(input_x):
-    """
-    Returns a deepcopy of input tensor.
-
-    Args:
-        input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-
-    Returns:
-        Tensor, a deepcopy of `input_x`.
-
-    Raises:
-        TypeError: If `input_x` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
-        >>> output = ops.deepcopy(input)
-        >>> print(output)
-        [[0 1]
-         [2 1]]
-    """
-    _deepcopy = _get_cache_prim(P.Identity)()
-    return _deepcopy(input_x)
-
-
 __all__ = [
     'unique',
     'unique_with_pad',
@@ -7662,8 +6474,8 @@ __all__ = [
     'full_like',
     'dyn_shape',
     'rank',
-    'range',
     'arange',
+    'range',
     'reshape',
     'reshape_',
     'flatten',
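With `ops.deepcopy` removed above, the `Identity` primitive it wrapped still offers the same effect; a minimal sketch, assuming the caller only needs a value-identical copy (`Tensor.copy()` is another option):

```python
import mindspore as ms
from mindspore import Tensor
from mindspore.ops import operations as P

x = Tensor([[0, 1], [2, 1]], ms.int32)
y = P.Identity()(x)  # mirrors the removed helper's body
print(y)
# [[0 1]
#  [2 1]]
```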