mindspore 2.2.11-cp39-cp39-win_amd64.whl → 2.3.0-cp39-cp39-win_amd64.whl
This diff compares the contents of two publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +7 -5
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +76 -18
- mindspore/_extends/builtin_operations.py +2 -1
- mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
- mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
- mindspore/_extends/parse/__init__.py +18 -14
- mindspore/_extends/parse/compile_config.py +258 -0
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +174 -62
- mindspore/_extends/parse/resources.py +45 -14
- mindspore/_extends/parse/standard_method.py +142 -240
- mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _extends/pijit/__init__.py} +6 -16
- mindspore/_extends/pijit/pijit_func_white_list.py +343 -0
- mindspore/_extends/remote/kernel_build_server.py +2 -0
- mindspore/_profiler.py +30 -0
- mindspore/amp.py +51 -24
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/boost/less_batch_normalization.py +9 -6
- mindspore/common/__init__.py +15 -4
- mindspore/common/_jit_fallback_utils.py +2 -3
- mindspore/common/_register_for_adapter.py +7 -0
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_register_for_tensor.py +8 -9
- mindspore/common/_stub_tensor.py +7 -1
- mindspore/common/_utils.py +5 -17
- mindspore/common/api.py +411 -106
- mindspore/common/auto_dynamic_shape.py +27 -14
- mindspore/common/dtype.py +17 -10
- mindspore/common/dump.py +6 -8
- mindspore/common/file_system.py +48 -0
- mindspore/common/generator.py +260 -0
- mindspore/common/hook_handle.py +51 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +34 -14
- mindspore/common/lazy_inline.py +72 -19
- mindspore/common/mindir_util.py +12 -2
- mindspore/common/mutable.py +79 -14
- mindspore/common/no_inline.py +54 -0
- mindspore/common/np_dtype.py +25 -0
- mindspore/common/parameter.py +30 -11
- mindspore/common/recompute.py +262 -0
- mindspore/common/seed.py +9 -9
- mindspore/common/sparse_tensor.py +272 -24
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +468 -496
- mindspore/communication/__init__.py +6 -11
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/comm_func.py +1140 -0
- mindspore/communication/management.py +118 -102
- mindspore/config/op_info.config +22 -54
- mindspore/context.py +378 -65
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +6 -6
- mindspore/dataset/audio/transforms.py +711 -158
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +2 -2
- mindspore/dataset/engine/datasets.py +163 -83
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +33 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +78 -59
- mindspore/dataset/engine/datasets_vision.py +77 -73
- mindspore/dataset/engine/offload.py +5 -7
- mindspore/dataset/engine/queue.py +56 -38
- mindspore/dataset/engine/validators.py +11 -5
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +408 -121
- mindspore/dataset/text/utils.py +9 -9
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/transforms.py +261 -76
- mindspore/dataset/utils/browse_dataset.py +9 -9
- mindspore/dataset/vision/__init__.py +8 -8
- mindspore/dataset/vision/c_transforms.py +10 -10
- mindspore/dataset/vision/py_transforms_util.py +3 -3
- mindspore/dataset/vision/transforms.py +2844 -549
- mindspore/dataset/vision/utils.py +161 -10
- mindspore/dataset/vision/validators.py +14 -2
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/optim/__init__.py +12 -2
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +35 -34
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +40 -16
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +71 -127
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +15 -8
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +28 -19
- mindspore/hal/__init__.py +40 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/memory.py +326 -0
- mindspore/hal/stream.py +339 -0
- mindspore/include/api/data_type.h +2 -2
- mindspore/include/api/dual_abi_helper.h +16 -3
- mindspore/include/api/model.h +4 -3
- mindspore/include/api/status.h +14 -0
- mindspore/include/c_api/model_c.h +173 -0
- mindspore/include/c_api/ms/base/types.h +1 -0
- mindspore/include/c_api/types_c.h +19 -0
- mindspore/include/dataset/execute.h +1 -3
- mindspore/include/dataset/vision.h +54 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +2 -2
- mindspore/mindrecord/__init__.py +5 -1
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +25 -0
- mindspore/mindrecord/filewriter.py +76 -58
- mindspore/mindrecord/mindpage.py +40 -6
- mindspore/mindrecord/shardutils.py +3 -2
- mindspore/mindrecord/shardwriter.py +7 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +53 -66
- mindspore/mindrecord/tools/cifar10_to_mr.py +48 -63
- mindspore/mindrecord/tools/csv_to_mr.py +7 -17
- mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +11 -21
- mindspore/mindrecord/tools/tfrecord_to_mr.py +2 -10
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/mint/__init__.py +1137 -0
- mindspore/{rewrite/ast_transformers → mint/linalg}/__init__.py +9 -4
- mindspore/mint/nn/__init__.py +512 -0
- mindspore/mint/nn/functional.py +573 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +185 -0
- mindspore/multiprocessing/__init__.py +72 -0
- mindspore/nn/__init__.py +1 -0
- mindspore/nn/cell.py +213 -257
- mindspore/nn/dynamic_lr.py +2 -2
- mindspore/nn/extend/__init__.py +29 -0
- mindspore/nn/extend/basic.py +140 -0
- mindspore/nn/extend/embedding.py +143 -0
- mindspore/{rewrite/ast_creator_register.py → nn/extend/layer/__init__.py} +9 -19
- mindspore/nn/extend/layer/normalization.py +109 -0
- mindspore/nn/extend/pooling.py +117 -0
- mindspore/nn/layer/activation.py +84 -94
- mindspore/nn/layer/basic.py +177 -82
- mindspore/nn/layer/channel_shuffle.py +3 -16
- mindspore/nn/layer/container.py +3 -3
- mindspore/nn/layer/conv.py +75 -66
- mindspore/nn/layer/embedding.py +103 -45
- mindspore/nn/layer/embedding_service.py +531 -0
- mindspore/nn/layer/embedding_service_layer.py +393 -0
- mindspore/nn/layer/image.py +4 -7
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +52 -66
- mindspore/nn/layer/padding.py +30 -39
- mindspore/nn/layer/pooling.py +18 -9
- mindspore/nn/layer/rnn_cells.py +6 -16
- mindspore/nn/layer/rnns.py +6 -5
- mindspore/nn/layer/thor_layer.py +1 -2
- mindspore/nn/layer/timedistributed.py +1 -1
- mindspore/nn/layer/transformer.py +52 -50
- mindspore/nn/learning_rate_schedule.py +6 -5
- mindspore/nn/loss/loss.py +63 -84
- mindspore/nn/optim/ada_grad.py +6 -4
- mindspore/nn/optim/adadelta.py +3 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +102 -181
- mindspore/nn/optim/adamax.py +4 -2
- mindspore/nn/optim/adasum.py +3 -3
- mindspore/nn/optim/asgd.py +4 -2
- mindspore/nn/optim/ftrl.py +31 -61
- mindspore/nn/optim/lamb.py +5 -3
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +6 -4
- mindspore/nn/optim/momentum.py +13 -25
- mindspore/nn/optim/optimizer.py +6 -3
- mindspore/nn/optim/proximal_ada_grad.py +4 -2
- mindspore/nn/optim/rmsprop.py +9 -3
- mindspore/nn/optim/rprop.py +4 -2
- mindspore/nn/optim/sgd.py +7 -4
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
- mindspore/nn/probability/distribution/beta.py +2 -2
- mindspore/nn/probability/distribution/categorical.py +4 -6
- mindspore/nn/probability/distribution/cauchy.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +2 -2
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +13 -1
- mindspore/nn/wrap/__init__.py +2 -1
- mindspore/nn/wrap/cell_wrapper.py +58 -13
- mindspore/nn/wrap/grad_reducer.py +148 -8
- mindspore/nn/wrap/loss_scale.py +32 -9
- mindspore/numpy/__init__.py +2 -0
- mindspore/numpy/array_creations.py +2 -0
- mindspore/numpy/array_ops.py +6 -6
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/fft.py +431 -0
- mindspore/numpy/math_ops.py +61 -67
- mindspore/numpy/utils.py +3 -0
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +8 -4
- mindspore/ops/_grad_experimental/grad_array_ops.py +4 -160
- mindspore/ops/_grad_experimental/grad_comm_ops.py +93 -36
- mindspore/ops/_grad_experimental/grad_inner_ops.py +8 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +92 -287
- mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
- mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
- mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/__init__.py +0 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +1 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
- mindspore/ops/_op_impl/{cpu/concat.py → aicpu/generate_eod_mask.py} +16 -17
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
- mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -3
- mindspore/ops/_op_impl/cpu/adam.py +2 -2
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
- mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
- mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
- mindspore/ops/_vmap/vmap_array_ops.py +164 -101
- mindspore/ops/_vmap/vmap_base.py +8 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
- mindspore/ops/_vmap/vmap_image_ops.py +70 -13
- mindspore/ops/_vmap/vmap_math_ops.py +130 -58
- mindspore/ops/_vmap/vmap_nn_ops.py +249 -115
- mindspore/ops/_vmap/vmap_other_ops.py +1 -1
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +231 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +250 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
- mindspore/ops/auto_generate/gen_extend_func.py +980 -0
- mindspore/ops/auto_generate/gen_ops_def.py +6443 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +13167 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +429 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +121 -23
- mindspore/ops/composite/math_ops.py +10 -49
- mindspore/ops/composite/multitype_ops/_compile_utils.py +191 -618
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +25 -134
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
- mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
- mindspore/ops/deprecated.py +14 -3
- mindspore/ops/extend/__init__.py +53 -0
- mindspore/ops/extend/array_func.py +218 -0
- mindspore/ops/extend/math_func.py +76 -0
- mindspore/ops/extend/nn_func.py +308 -0
- mindspore/ops/function/__init__.py +31 -11
- mindspore/ops/function/array_func.py +848 -1736
- mindspore/ops/function/clip_func.py +19 -31
- mindspore/ops/function/debug_func.py +2 -5
- mindspore/ops/function/fft_func.py +31 -0
- mindspore/ops/function/grad/grad_func.py +27 -20
- mindspore/ops/function/image_func.py +27 -21
- mindspore/ops/function/linalg_func.py +30 -53
- mindspore/ops/function/math_func.py +916 -2791
- mindspore/ops/function/nn_func.py +1445 -889
- mindspore/ops/function/other_func.py +6 -7
- mindspore/ops/function/parameter_func.py +6 -92
- mindspore/ops/function/random_func.py +254 -108
- mindspore/ops/function/reshard_func.py +102 -0
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/function/sparse_unary_func.py +11 -18
- mindspore/ops/function/spectral_func.py +1 -1
- mindspore/ops/function/vmap_func.py +15 -14
- mindspore/ops/functional.py +342 -343
- mindspore/ops/op_info_register.py +16 -43
- mindspore/ops/operations/__init__.py +32 -23
- mindspore/ops/operations/_embedding_cache_ops.py +1 -1
- mindspore/ops/operations/_grad_ops.py +21 -853
- mindspore/ops/operations/_infer_ops.py +19 -0
- mindspore/ops/operations/_inner_ops.py +155 -511
- mindspore/ops/operations/_quant_ops.py +4 -4
- mindspore/ops/operations/_rl_inner_ops.py +3 -3
- mindspore/ops/operations/_scalar_ops.py +5 -480
- mindspore/ops/operations/_sequence_ops.py +6 -36
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +112 -2698
- mindspore/ops/operations/comm_ops.py +801 -118
- mindspore/ops/operations/custom_ops.py +62 -121
- mindspore/ops/operations/debug_ops.py +105 -36
- mindspore/ops/operations/image_ops.py +3 -219
- mindspore/ops/operations/inner_ops.py +54 -40
- mindspore/ops/operations/linalg_ops.py +1 -49
- mindspore/ops/operations/manually_defined/__init__.py +24 -0
- mindspore/ops/operations/manually_defined/_inner.py +61 -0
- mindspore/ops/operations/manually_defined/ops_def.py +2016 -0
- mindspore/ops/operations/math_ops.py +621 -4654
- mindspore/ops/operations/nn_ops.py +316 -2226
- mindspore/ops/operations/other_ops.py +53 -45
- mindspore/ops/operations/random_ops.py +4 -51
- mindspore/ops/operations/reshard_ops.py +53 -0
- mindspore/ops/operations/sparse_ops.py +8 -8
- mindspore/ops/primitive.py +204 -103
- mindspore/ops/silent_check.py +162 -0
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +250 -0
- mindspore/ops_generate/arg_handler.py +197 -0
- mindspore/ops_generate/gen_aclnn_implement.py +263 -0
- mindspore/ops_generate/gen_ops.py +1084 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
- mindspore/ops_generate/gen_pyboost_func.py +968 -0
- mindspore/ops_generate/gen_utils.py +209 -0
- mindspore/ops_generate/op_proto.py +138 -0
- mindspore/ops_generate/pyboost_utils.py +354 -0
- mindspore/ops_generate/template.py +239 -0
- mindspore/parallel/__init__.py +7 -4
- mindspore/parallel/_auto_parallel_context.py +155 -6
- mindspore/parallel/_cell_wrapper.py +16 -9
- mindspore/parallel/_cost_model_context.py +1 -1
- mindspore/parallel/_dp_allreduce_fusion.py +159 -159
- mindspore/parallel/_parallel_serialization.py +62 -14
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +18 -9
- mindspore/parallel/_transformer/__init__.py +1 -1
- mindspore/parallel/_transformer/layers.py +1 -1
- mindspore/parallel/_transformer/loss.py +1 -1
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/op_parallel_config.py +1 -1
- mindspore/parallel/_transformer/transformer.py +10 -10
- mindspore/parallel/_utils.py +161 -6
- mindspore/parallel/algo_parameter_config.py +6 -8
- mindspore/parallel/checkpoint_transform.py +369 -64
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +344 -0
- mindspore/parallel/cluster/process_entity/_utils.py +126 -0
- mindspore/parallel/cluster/run.py +136 -0
- mindspore/parallel/mpi/__init__.py +1 -1
- mindspore/parallel/mpi/_mpi_config.py +1 -1
- mindspore/parallel/parameter_broadcast.py +152 -0
- mindspore/parallel/shard.py +128 -17
- mindspore/profiler/__init__.py +3 -2
- mindspore/profiler/common/process_pool.py +41 -0
- mindspore/profiler/common/singleton.py +28 -0
- mindspore/profiler/common/util.py +125 -0
- mindspore/profiler/envprofiling.py +2 -2
- mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +53 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +159 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +161 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +131 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +85 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +57 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +116 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +68 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
- mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
- mindspore/profiler/parser/ascend_flops_generator.py +27 -5
- mindspore/profiler/parser/ascend_fpbp_generator.py +8 -2
- mindspore/profiler/parser/ascend_hccl_generator.py +31 -280
- mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
- mindspore/profiler/parser/ascend_memory_generator.py +185 -0
- mindspore/profiler/parser/ascend_msprof_exporter.py +151 -126
- mindspore/profiler/parser/ascend_msprof_generator.py +75 -274
- mindspore/profiler/parser/ascend_op_generator.py +94 -36
- mindspore/profiler/parser/ascend_timeline_generator.py +297 -131
- mindspore/profiler/parser/base_timeline_generator.py +17 -3
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -1
- mindspore/profiler/parser/framework_parser.py +11 -4
- mindspore/profiler/parser/integrator.py +3 -1
- mindspore/profiler/parser/memory_usage_parser.py +8 -2
- mindspore/profiler/parser/minddata_analyzer.py +8 -2
- mindspore/profiler/parser/minddata_parser.py +73 -4
- mindspore/profiler/parser/msadvisor_analyzer.py +5 -3
- mindspore/profiler/parser/msadvisor_parser.py +10 -4
- mindspore/profiler/parser/profiler_info.py +16 -1
- mindspore/profiler/profiling.py +522 -195
- mindspore/rewrite/__init__.py +2 -13
- mindspore/rewrite/api/node.py +123 -37
- mindspore/rewrite/api/pattern_engine.py +2 -3
- mindspore/rewrite/api/scoped_value.py +16 -15
- mindspore/rewrite/api/symbol_tree.py +46 -30
- mindspore/rewrite/ast_helpers/__init__.py +3 -6
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
- mindspore/rewrite/common/__init__.py +1 -2
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
- mindspore/rewrite/{namer.py → common/namer.py} +63 -18
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/node/__init__.py +5 -5
- mindspore/rewrite/node/call_function.py +23 -7
- mindspore/rewrite/node/cell_container.py +7 -3
- mindspore/rewrite/node/control_flow.py +53 -28
- mindspore/rewrite/node/node.py +212 -196
- mindspore/rewrite/node/node_manager.py +51 -22
- mindspore/rewrite/node/node_topological_manager.py +3 -23
- mindspore/rewrite/parsers/__init__.py +12 -0
- mindspore/rewrite/parsers/arguments_parser.py +8 -9
- mindspore/rewrite/parsers/assign_parser.py +637 -413
- mindspore/rewrite/parsers/attribute_parser.py +3 -4
- mindspore/rewrite/parsers/class_def_parser.py +115 -148
- mindspore/rewrite/parsers/constant_parser.py +5 -5
- mindspore/rewrite/parsers/container_parser.py +4 -6
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +31 -98
- mindspore/rewrite/parsers/function_def_parser.py +13 -5
- mindspore/rewrite/parsers/if_parser.py +28 -10
- mindspore/rewrite/parsers/module_parser.py +8 -182
- mindspore/rewrite/parsers/parser.py +1 -5
- mindspore/rewrite/parsers/parser_register.py +1 -1
- mindspore/rewrite/parsers/return_parser.py +5 -10
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
- mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
- mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
- mindspore/run_check/_check_version.py +6 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +9 -19
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +6 -5
- mindspore/train/_utils.py +178 -4
- mindspore/train/amp.py +167 -245
- mindspore/train/anf_ir_pb2.py +14 -2
- mindspore/train/callback/__init__.py +5 -2
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +151 -37
- mindspore/train/callback/_cluster_monitor.py +201 -0
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_flops_collector.py +238 -0
- mindspore/train/callback/_landscape.py +16 -11
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_mindio_ttp.py +443 -0
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +13 -14
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/data_sink.py +6 -5
- mindspore/train/dataset_helper.py +66 -21
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/accuracy.py +7 -7
- mindspore/train/metrics/confusion_matrix.py +8 -6
- mindspore/train/metrics/cosine_similarity.py +6 -4
- mindspore/train/metrics/error.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/perplexity.py +2 -1
- mindspore/train/metrics/topk.py +2 -2
- mindspore/train/mind_ir_pb2.py +89 -15
- mindspore/train/model.py +298 -56
- mindspore/train/serialization.py +501 -221
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/_writer_pool.py +1 -1
- mindspore/train/summary/summary_record.py +56 -34
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/METADATA +3 -3
- mindspore-2.3.0.dist-info/RECORD +1400 -0
- {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/entry_points.txt +1 -0
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
- mindspore/gen_ops.py +0 -273
- mindspore/nn/layer/flash_attention.py +0 -189
- mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
- mindspore/ops/_op_impl/tbe/__init__.py +0 -47
- mindspore/ops/_op_impl/tbe/abs.py +0 -38
- mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/acos.py +0 -37
- mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/acosh.py +0 -37
- mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
- mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
- mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
- mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
- mindspore/ops/_op_impl/tbe/add.py +0 -42
- mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/add_n.py +0 -39
- mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
- mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
- mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
- mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
- mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
- mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
- mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
- mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/asin.py +0 -37
- mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/asinh.py +0 -37
- mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/assign.py +0 -79
- mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
- mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
- mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/atan.py +0 -37
- mindspore/ops/_op_impl/tbe/atan2.py +0 -38
- mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/atanh.py +0 -37
- mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
- mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
- mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
- mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
- mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
- mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
- mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
- mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
- mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
- mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
- mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cast.py +0 -55
- mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/cdist.py +0 -38
- mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/ceil.py +0 -37
- mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/celu.py +0 -39
- mindspore/ops/_op_impl/tbe/centralization.py +0 -39
- mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
- mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/concat.py +0 -40
- mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
- mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
- mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
- mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
- mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
- mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/cos.py +0 -37
- mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/cosh.py +0 -37
- mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
- mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cummin.py +0 -41
- mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
- mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
- mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
- mindspore/ops/_op_impl/tbe/diag.py +0 -38
- mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
- mindspore/ops/_op_impl/tbe/dilation.py +0 -40
- mindspore/ops/_op_impl/tbe/div.py +0 -41
- mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
- mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
- mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
- mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
- mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
- mindspore/ops/_op_impl/tbe/elu.py +0 -38
- mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/equal.py +0 -42
- mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/erf.py +0 -37
- mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfc.py +0 -37
- mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
- mindspore/ops/_op_impl/tbe/exp.py +0 -40
- mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
- mindspore/ops/_op_impl/tbe/expm1.py +0 -37
- mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
- mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/fill.py +0 -56
- mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/flatten.py +0 -48
- mindspore/ops/_op_impl/tbe/floor.py +0 -37
- mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
- mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
- mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
- mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
- mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
- mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
- mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/ger.py +0 -43
- mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/greater.py +0 -43
- mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
- mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
- mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
- mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
- mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
- mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
- mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
- mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/im2col.py +0 -42
- mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
- mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
- mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/inv.py +0 -38
- mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/invert.py +0 -37
- mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/iou.py +0 -38
- mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/is_close.py +0 -40
- mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
- mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
- mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
- mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
- mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
- mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
- mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/lerp.py +0 -38
- mindspore/ops/_op_impl/tbe/less.py +0 -41
- mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/log.py +0 -40
- mindspore/ops/_op_impl/tbe/log1p.py +0 -37
- mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
- mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
- mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
- mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
- mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/matmul.py +0 -53
- mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
- mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
- mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
- mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum.py +0 -39
- mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
- mindspore/ops/_op_impl/tbe/minimum.py +0 -40
- mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mish.py +0 -37
- mindspore/ops/_op_impl/tbe/mod.py +0 -41
- mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/mul.py +0 -37
- mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
- mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
- mindspore/ops/_op_impl/tbe/neg.py +0 -39
- mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
- mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
- mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
- mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
- mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
- mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/pack.py +0 -58
- mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
- mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
- mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/pdist.py +0 -36
- mindspore/ops/_op_impl/tbe/pooling.py +0 -46
- mindspore/ops/_op_impl/tbe/population_count.py +0 -38
- mindspore/ops/_op_impl/tbe/pow.py +0 -41
- mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/prelu.py +0 -37
- mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/range.py +0 -39
- mindspore/ops/_op_impl/tbe/real_div.py +0 -38
- mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
- mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
- mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
- mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
- mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6.py +0 -38
- mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/renorm.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
- mindspore/ops/_op_impl/tbe/rint.py +0 -37
- mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roll.py +0 -42
- mindspore/ops/_op_impl/tbe/round.py +0 -38
- mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
- mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
- mindspore/ops/_op_impl/tbe/select.py +0 -38
- mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/selu.py +0 -39
- mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sgd.py +0 -62
- mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sign.py +0 -38
- mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/sin.py +0 -37
- mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sinh.py +0 -37
- mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/slice.py +0 -58
- mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
- mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax.py +0 -37
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
- mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/softplus.py +0 -37
- mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softsign.py +0 -37
- mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sort.py +0 -38
- mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/split_d.py +0 -38
- mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/split_v.py +0 -39
- mindspore/ops/_op_impl/tbe/splitv.py +0 -39
- mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/square.py +0 -38
- mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
- mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
- mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
- mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
- mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
- mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
- mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
- mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
- mindspore/ops/_op_impl/tbe/sub.py +0 -39
- mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tan.py +0 -38
- mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh.py +0 -37
- mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
- mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
- mindspore/ops/_op_impl/tbe/tile.py +0 -37
- mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
- mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
- mindspore/ops/_op_impl/tbe/transpose.py +0 -60
- mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
- mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
- mindspore/ops/_op_impl/tbe/trunc.py +0 -39
- mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/unpack.py +0 -38
- mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
- mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
- mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
- mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
- mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
- mindspore/ops/_tracefunc.py +0 -241
- mindspore/ops/arg_dtype_cast.py +0 -54
- mindspore/rewrite/api/tree_node_helper.py +0 -60
- mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
- mindspore/rewrite/namespace.py +0 -53
- mindspore-2.2.11.dist-info/RECORD +0 -1920
- {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/WHEEL +0 -0
- {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
-# Copyright 2022 Huawei Technologies Co., Ltd
+# Copyright 2022-2023 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@ import numpy as np
 import mindspore as ms
 import mindspore.common.dtype as mstype
 from mindspore.ops import operations as P
+from mindspore.ops import functional as F
 from mindspore.ops.primitive import constexpr
 from mindspore.ops.primitive import _primexpr
 import mindspore.ops as ops
@@ -31,18 +32,19 @@ from mindspore.ops.operations._inner_ops import DynamicBroadcastTo
 from mindspore.ops.operations._sequence_ops import TupleToTensor
 from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
 from mindspore.ops.operations._sequence_ops import TensorToList
+from mindspore.ops.auto_generate import OnesLikeExt, ZerosLikeExt, FillScalar, FillTensor, Arange, Chunk, UniqueDim,\
+    Unique2, SortExt, NonZero, NonZeroExt
+from mindspore.ops.auto_generate.gen_ops_prim import SplitTensor
+from mindspore.ops.auto_generate.gen_ops_prim import SplitWithSize, RepeatInterleaveInt, RepeatInterleaveTensor
 
 from mindspore.ops.operations.array_ops import (
     UniqueConsecutive,
     SearchSorted,
-    NonZero,
     MatrixDiagV3,
     MatrixDiagPartV3,
     MatrixSetDiagV3,
     Fills,
     Col2Im,
-    ArgMaxWithValue,
-    ArgMinWithValue,
     ScatterNdMax,
     ScatterNdMul,
     IndexFill,
@@ -52,7 +54,9 @@ from mindspore.ops.operations.array_ops import (
     Lstsq,
     Mvlgamma,
     Tril,
-    Argmax
+    Argmax,
+    ArgMaxWithValue,
+    ArgMinWithValue
 )
 from mindspore.ops.operations.array_ops import TensorScatterElements
 from mindspore.common import Tensor
@@ -61,53 +65,83 @@ from mindspore import _checkparam as validator
 from mindspore._c_expression import Tensor as Tensor_
 from mindspore.ops._utils.utils import ms_arrange
 
-
+from mindspore.ops.auto_generate import cat, range, scatter_nd, deepcopy, masked_fill, diagonal, expand_dims, \
+    flip, transpose, triu, unsorted_segment_sum, diag, gather, gather_d, gather_nd, reshape, \
+    broadcast_to, strided_slice, ones, zeros, max_, min_, select
+from mindspore.ops.auto_generate.gen_ops_prim import scatter_add_ext_op, slice_ext_op
+from mindspore.ops.operations.manually_defined import tile, rank, scalar_cast
+
+arg_max_with_value_ = ArgMaxWithValue()
+arg_min_with_value_ = ArgMinWithValue()
+batch_to_space_nd_v2_ = P.BatchToSpaceNDV2()
+cast_ = P.Cast()
+diag_ = P.Diag()
+dynamic_broadcast_to_ = DynamicBroadcastTo()
 eye_ = P.Eye()
 fills_ = Fills()
+fillv2_ = P.FillV2()
+flatten_ = P.Flatten()
+gather_ = P.Gather()
+gather_d_ = P.GatherD()
+gather_nd_ = P.GatherNd()
+ger_ = P.Ger()
+index_fill_ = IndexFill()
+lstsq_ = Lstsq()
+masked_select_ = P.MaskedSelect()
+matrix_band_part_ = P.array_ops.MatrixBandPart()
 ones_ = P.Ones()
-
-
-unique_with_pad_ = P.UniqueWithPad()
-size_ = P.Size()
-shape_ = P.Shape()
+population_count_ = P.PopulationCount()
+range_ = P.Range()
 rank_ = P.Rank()
-
+reduce_max_ = P.ReduceMax()
+reduce_min_ = P.ReduceMin()
 reshape_ = P.Reshape()
-
-expand_dims_ = P.ExpandDims()
-transpose_ = P.Transpose()
+scalar_to_tensor_ = P.ScalarToTensor()
 scatter_add_ = P.ScatterAdd()
+scatter_div_ = P.ScatterDiv()
 scatter_max_ = P.ScatterMax()
 scatter_min_ = P.ScatterMin()
 scatter_mul_ = P.ScatterMul()
-scatter_div_ = P.ScatterDiv()
 scatter_nd_ = P.ScatterNd()
-
-
-
-
-
+scatter_update_ = P.ScatterUpdate()
+shape_ = P.Shape()
+split_tensor = SplitTensor()
+split_with_size = SplitWithSize()
+size_ = P.Size()
 tensor_scatter_add_ = P.TensorScatterAdd()
-tensor_scatter_sub_ = P.TensorScatterSub()
-tensor_scatter_mul_ = P.TensorScatterMul()
 tensor_scatter_div_ = P.TensorScatterDiv()
-tensor_scatter_min_ = P.TensorScatterMin()
 tensor_scatter_max_ = P.TensorScatterMax()
-
-
-
-matrix_band_part_ = P.array_ops.MatrixBandPart()
-ger_ = P.Ger()
-diag_ = P.Diag()
-range_ = P.Range()
-zeros_like_ = P.ZerosLike()
-cast_ = P.Cast()
+tensor_scatter_min_ = P.TensorScatterMin()
+tensor_scatter_mul_ = P.TensorScatterMul()
+tensor_scatter_sub_ = P.TensorScatterSub()
 tensor_select_ = P.Select()
-
+tensor_shape_ = P.TensorShape()
+tensor_slice = P.Slice()
+tile_ = P.Tile()
+transpose_ = P.Transpose()
+tuple_to_array_ = P.TupleToArray()
+tuple_to_tensor_ = TupleToTensor()
+unique_ = P.Unique()
+unique_with_pad_ = P.UniqueWithPad()
+unsorted_segment_max_ = P.UnsortedSegmentMax()
+unsorted_segment_min_ = P.UnsortedSegmentMin()
+unsorted_segment_prod_ = P.UnsortedSegmentProd()
 unsorted_segment_sum_ = P.UnsortedSegmentSum()
-
-
-
+ones_like_ = P.OnesLike()
+zeros_like_ = P.ZerosLike()
+ones_like_ext_ = OnesLikeExt()
+zeros_like_ext_ = ZerosLikeExt()
+fill_scalar_ = FillScalar()
+fill_tensor_ = FillTensor()
+sort_ext_ = SortExt()
+arange_ = Arange()
+chunk_ = Chunk()
+repeat_interleave_int_ = RepeatInterleaveInt()
+repeat_interleave_tensor_ = RepeatInterleaveTensor()
+unique_dim_ = UniqueDim()
+unique2_ = Unique2()
+non_zero_ = NonZero()
+non_zero_ext_ = NonZeroExt()
 
 
 @_primexpr
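
The hunk above consolidates the module's operator table into one alphabetized block of primitive instances created once at import time. As a minimal illustrative sketch (not taken from the package; `cast_` stands in for any of the instances above), a MindSpore Primitive instance is callable like a plain function:

    import mindspore.common.dtype as mstype
    from mindspore import Tensor
    from mindspore.ops import operations as P

    cast_ = P.Cast()              # instantiated once, reused by every caller
    x = Tensor([1, 2, 3])
    y = cast_(x, mstype.float32)  # invoke the shared instance directly
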
@@ -187,8 +221,11 @@ def arange(start=0, end=None, step=1, *, dtype=None):
 
     Keyword Args:
         dtype (mindspore.dtype, optional): The required data type of returned Tensor. Default: ``None`` .
-
-
+            When `dtype` is not specified or ``None``:
+
+            If `start`, `end`, and `step` are all integers, the dtype of output is int64,
+
+            If `start`, `end`, and `step` contain at least one floating-point number, the dtype of output is float32.
 
     Returns:
         A 1-D Tensor, with the same type as the inputs.
@@ -225,7 +262,7 @@ def arange(start=0, end=None, step=1, *, dtype=None):
         >>> print(output)
         [12. 11. 10. 9. 8. 7. 6. 5. 4. 3.]
         >>> print(output.dtype)
-
+        Float32
     """
     if end is None:
         start, end = 0, start
@@ -237,67 +274,84 @@ def arange(start=0, end=None, step=1, *, dtype=None):
     if start.shape != () or end.shape != () or step.shape != ():
         raise ValueError(f"For arange, the input args must be a TensorScalar,"
                          f" but got start shape:{start.shape}, end shape:{end.shape}, step shape:{step.shape}")
-
-    data = range_op(start, end, step)
+    data = range_(start, end, step)
     if dtype is not None:
         data = cast_(data, dtype)
     return data
 
 
-def
+def arange_ext(start=0, end=None, step=1, *, dtype=None):
     r"""
-
+    Creates a sequence of numbers that begins at `start` and extends by increments of
+    `step` up to but not including `end`.
 
-
-
-
-
+    Args:
+        start (Union[float, int], optional): The start of the interval. Default: ``0`` .
+        end (Union[float, int], optional): The end of the interval, exclusive.
+            Default: ``None`` . If ``None`` , it defaults to the value of `start`, and 0 is used as the starting value.
+        step (Union[float, int], optional): The step size with which the array element increments. Default: ``1`` .
 
-
+    Keyword Args:
+        dtype (mindspore.dtype, optional): The required data type of returned Tensor. Default: ``None`` .
+            When `dtype` is not specified or ``None``:
 
-
+            If `start`, `end`, and `step` are all integers, the dtype of output is int64,
 
-
-        tensors (Union[tuple, list]): A tuple or a list of input tensors.
-            Suppose there are two tensors in this tuple or list, namely t1 and t2.
-            To perform `concat` in the axis 0 direction, except for the :math:`0`-th axis,
-            all other dimensions should be equal, that is,
-            :math:`t1.shape[1] = t2.shape[1], t1.shape[2] = t2.shape[2], ..., t1.shape[R-1] = t2.shape[R-1]`,
-            where :math:`R` represents the rank of tensor.
-        axis (int): The specified axis, whose value is in range :math:`[-R, R)`. Default: ``0`` .
+            If `start`, `end`, and `step` contain at least one floating-point number, the dtype of output is float32.
 
     Returns:
-        Tensor,
-        The data type is the same with `tensors`.
+        A 1-D Tensor, cast to `dtype` if provided, may potentially lose precision due to casting.
 
     Raises:
-        TypeError: If `
-        ValueError: If `
-        ValueError: If `
-
+        TypeError: If `start`, `end` or `step` are not of type int or float.
+        ValueError: If `step` = 0.
+        ValueError: If `start` >= `end` when `step` > 0.
+        ValueError: If `start` <= `end` when `step` < 0.
 
     Supported Platforms:
-        ``Ascend``
+        ``Ascend``
 
     Examples:
-        >>> import mindspore
-        >>> import numpy as np
+        >>> import mindspore as ms
         >>> from mindspore import Tensor, ops
-        >>>
-        >>> input_x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
-        >>> output = ops.cat((input_x1, input_x2))
+        >>> output = ops.arange_ext(1, 6)
         >>> print(output)
-        [
-
-
-
-        >>> output
+        [1 2 3 4 5]
+        >>> print(output.dtype)
+        Int64
+        >>> output = ops.arange_ext(0, 3, 1.2)
+        >>> print(output)
+        [0. 1.2 2.4]
+        >>> print(output.dtype)
+        Float32
+        >>> output = ops.arange_ext(7, 1, -2)
         >>> print(output)
-        [
-
+        [7 5 3]
+        >>> print(output.dtype)
+        Int64
+        >>> output = ops.arange_ext(12, 2, -1, dtype=ms.bfloat16)
+        >>> print(output)
+        [12. 11. 10. 9. 8. 7. 6. 5. 4. 3.]
+        >>> print(output.dtype)
+        BFloat16
+    """
+    if end is None:
+        start, end = 0, start
+    return arange_(start, end, step, dtype)
+
+
+def concat(tensors, axis=0):
+    """
+    Alias for :func:`mindspore.ops.cat()`.
+
+    Tutorial Examples:
+        - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/master/beginner/tensor.html#tensor-operation>`_
+        - `Vision Transformer Image Classification - Building ViT as a whole
+          <https://mindspore.cn/tutorials/application/en/master/cv/vit.html#building-vit-as-a-whole>`_
+        - `Sentiment Classification Implemented by RNN - Dense
+          <https://mindspore.cn/tutorials/application/en/master/nlp/sentiment_analysis.html#dense>`_
     """
-
-    return _concat(tensors)
+    return cat(tensors, axis)
 
 
 def eye(n, m=None, dtype=None):
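
The new `concat` defined above is a thin alias that forwards both arguments to `cat`. A short usage sketch (input values invented for illustration):

    import mindspore
    import numpy as np
    from mindspore import Tensor, ops

    x1 = Tensor(np.array([[0, 1], [2, 1]]), mindspore.float32)
    x2 = Tensor(np.array([[0, 1], [2, 1]]), mindspore.float32)
    out_a = ops.concat((x1, x2), axis=0)  # alias defined in the hunk above
    out_b = ops.cat((x1, x2), 0)          # target of the alias; same result
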
@@ -305,14 +359,14 @@ def eye(n, m=None, dtype=None):
     Creates a tensor with ones on the diagonal and zeros in the rest.
 
     Note:
-
-
+        The data type of returned tensor can be float16, float32, int8, int16, int32, int64, uint8
+        or bool on Ascend platforms.
 
     Args:
         n (int): The number of rows of returned tensor. Constant value only.
-        m (int): The number of columns of returned tensor. Constant value only.
+        m (int, optional): The number of columns of returned tensor. Constant value only.
             Default: ``None`` , if ``None`` , the number of columns is as the same as n.
-        dtype (mindspore.dtype): MindSpore's dtype, the data type of the returned tensor.
+        dtype (mindspore.dtype, optional): MindSpore's dtype, the data type of the returned tensor.
             The data type can be bool or Number.
             Default: ``None`` , the data type of the returned tensor is mindspore.float32.
 
@@ -336,11 +390,11 @@ def eye(n, m=None, dtype=None):
         [0 1]]
         >>> print(output.dtype)
         Int32
-        >>> output = ops.eye(1, 2, mindspore.
+        >>> output = ops.eye(1, 2, mindspore.float32)
         >>> print(output)
         [[1. 0.]]
         >>> print(output.dtype)
-
+        Float32
        >>> output = ops.eye(2, dtype=mindspore.int32)
        >>> print(output)
        [[1 0]
@@ -419,25 +473,25 @@ def hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype
     return out
 
 
-def where(condition,
+def where(condition, input, other):
     r"""
-    Selects elements from `
+    Selects elements from `input` or `other` based on `condition` and returns a tensor.
 
     .. math::
-        output_i = \begin{cases}
+        output_i = \begin{cases} input_i,\quad &if\ condition_i \\ other_i,\quad &otherwise \end{cases}
 
     Args:
-        condition (Tensor[bool]): If True, yield `
-
-
+        condition (Tensor[bool]): If True, yield `input`, otherwise yield `other`.
+        input (Union[Tensor, Scalar]): When `condition` is True, values to select from.
+        other (Union[Tensor, Scalar]): When `condition` is False, values to select from.
 
     Returns:
-        Tensor, elements are selected from `
+        Tensor, elements are selected from `input` and `other`.
 
     Raises:
         TypeError: If `condition` is not a Tensor.
-        TypeError: If both `
-        ValueError: If `condition`, `
+        TypeError: If both `input` and `other` are scalars.
+        ValueError: If `condition`, `input` and `other` can not broadcast to each other.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -454,66 +508,15 @@ def where(condition, x, y):
         [[0. 1.]
         [2. 1.]]
     """
-
-        raise TypeError(f"For 'where', 'condition' must be a Tensor, but got {type(condition)}.")
-    if isinstance(x, (int, float)):
-        if not isinstance(y, Tensor):
-            raise TypeError(
-                f"For 'where', at least one of 'x' and 'y' should be Tensor, but got x:{type(x)}, y:{type(y)}."
-            )
-        x = cast_(x, y.dtype)
-    elif isinstance(y, (int, float)):
-        if not isinstance(x, Tensor):
-            raise TypeError(
-                f"For 'where', at least one of 'x' and 'y' should be Tensor, but got x:{type(x)}, y:{type(y)}."
-            )
-        y = cast_(y, x.dtype)
-    output_shape = _calc_broadcast_shape(x.shape, y.shape, condition.shape)
-    condition = broadcast_to(condition, output_shape)
-    x = broadcast_to(x, output_shape)
-    y = broadcast_to(y, output_shape)
-    _select = P.Select()
-    return _select(condition, x, y)
+    return tensor_select_(condition, input, other)
 
 
 def reverse(x, axis):
     """
-
-
-    .. warning::
-        The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "input_x".
-
-    Args:
-        x (Tensor): The target tensor.
-            The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
-        axis (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.
-
-    Outputs:
-        Tensor, has the same shape and type as `x`.
-
-    Raises:
-        TypeError: If `axis` is neither list nor tuple.
-        TypeError: If element of `axis` is not an int.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
-        >>> output = ops.reverse(input_x, axis=[1])
-        >>> print(output)
-        [[4 3 2 1]
-        [8 7 6 5]]
-        >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
-        >>> output = ops.reverse(input_x, axis=[1, 0])
-        >>> print(output)
-        [[8 7 6 5]
-        [4 3 2 1]]
+    :func:`mindspore.ops.reverse` will be deprecated in the future.
+    Please use :func:`mindspore.ops.flip` instead.
     """
-    return
+    return flip(x, axis)
 
 
 def ravel(input):
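
The rewritten `where` drops the Python-side type checks and broadcasting shim and hands its arguments straight to the shared `Select` primitive under the new `(condition, input, other)` names. A minimal sketch of the new call shape (inputs invented for illustration):

    import mindspore
    import numpy as np
    from mindspore import Tensor, ops

    cond = Tensor(np.array([[True, False], [False, True]]))
    a = ops.ones((2, 2), mindspore.float32)   # chosen where cond is True
    b = ops.zeros((2, 2), mindspore.float32)  # chosen where cond is False
    out = ops.where(cond, a, b)               # [[1. 0.] [0. 1.]]
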
@@ -659,8 +662,9 @@ def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
     other locations take value `off_value`.
 
     Note:
-        If the input indices
-        On Ascend, if `on_value` is
+        If the input `indices` has rank `N`, the output will have rank `N+1`.
+        The new axis is created at dimension `axis`. On Ascend, if `on_value` is int64 dtype, `indices` must be
+        int64 dtype, and the value for `on_value` and `off_value` can only be 1 and 0.
 
     Args:
         indices(Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
@@ -682,6 +686,7 @@ def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
     Raises:
         TypeError: If `axis` or `depth` is not an int.
         TypeError: If dtype of `indices` is not int32 or int64.
+        TypeError: If dtype of `on_value` is not int32, int64, float16 or float32.
         TypeError: If `indices`, `on_value` or `off_value` is not a Tensor.
         ValueError: If `axis` is not in range [-1, ndim].
         ValueError: If `depth` is less than 0.
@@ -715,8 +720,8 @@ def fill(type, shape, value): # pylint: disable=redefined-outer-name
 
     Args:
         type (mindspore.dtype): The specified type of output tensor. The data type only supports
-            `bool_ <https://www.mindspore.cn/docs/en/
-            `number <https://www.mindspore.cn/docs/en/
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ and
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ .
         shape (Union(Tensor, tuple[int])): The specified shape of output tensor.
         value (Union(Tensor, number.Number, bool)): Value to fill the returned tensor.
@@ -743,7 +748,7 @@ def fill(type, shape, value): # pylint: disable=redefined-outer-name
         [0. 0. 0.]]
     """
     value = cast_(value, type)
-    return
+    return fillv2_(shape, value)
 
 
 def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-name
@@ -791,6 +796,45 @@ def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-na
     return ops.fill(dtype, size, fill_value)
 
 
+def full_ext(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-name
+    """
+    Create a Tensor of the specified shape and fill it with the specified value.
+
+    Args:
+        size (Union(tuple[int], list[int])): The specified shape of output tensor.
+        fill_value (number.Number): Value to fill the returned tensor. Complex numbers are not supported for now.
+
+    Keyword Args:
+        dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for details,
+            please refer to :class:`mindspore.dtype` . Default: ``None`` .
+
+    Returns:
+        Tensor.
+
+    Raises:
+        TypeError: If `size` is not a tuple or list.
+        ValueError: The element in `size` is less than 0.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> from mindspore import ops
+        >>> output = ops.full((2, 2), 1)
+        >>> print(output)
+        [[1. 1.]
+        [1. 1.]]
+        >>> output = ops.full((3, 3), 0)
+        >>> print(output)
+        [[0. 0. 0.]
+        [0. 0. 0.]
+        [0. 0. 0.]]
+    """
+    if isinstance(fill_value, Tensor):
+        return fill_tensor_(size, fill_value, dtype)
+    return fill_scalar_(size, fill_value, dtype)
+
+
 def full_like(input, fill_value, *, dtype=None):
     """
     Return a Tensor of the same shape as `input` and filled with `fill_value`.
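
The new `full_ext` keeps the `full` contract but dispatches on the type of `fill_value`: Tensor fill values go through the `FillTensor` primitive and Python scalars through `FillScalar`, as the hunk shows. A usage sketch against the stable `ops.full` entry point, which behaves the same way from the caller's side:

    import mindspore
    from mindspore import ops

    out = ops.full((2, 2), 1, dtype=mindspore.float32)
    # [[1. 1.]
    #  [1. 1.]]
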
@@ -883,24 +927,63 @@ def chunk(input, chunks, axis=0):
     length_along_dim = arr_shape[arr_axis]
 
     if chunks > length_along_dim:
-        res = P.Split(arr_axis, length_along_dim)(input)
+        res = _get_cache_prim(P.Split)(arr_axis, length_along_dim)(input)
     elif length_along_dim % chunks == 0:
-        res = P.Split(arr_axis, chunks)(input)
+        res = _get_cache_prim(P.Split)(arr_axis, chunks)(input)
     else:
         block_size = int(np.ceil(length_along_dim / chunks))
         true_chunks = int(length_along_dim // block_size)
         length1 = true_chunks * block_size
         length2 = length_along_dim - length1
-        start1 = _list_comprehensions(
+        start1 = _list_comprehensions(rank_(input), 0, True)
         size1 = _tuple_setitem(arr_shape, arr_axis, length1)
         start2 = _tuple_setitem(start1, arr_axis, length1)
         size2 = _tuple_setitem(arr_shape, arr_axis, length2)
-        res = P.Split(arr_axis, true_chunks)(tensor_slice(input, start1, size1))
+        res = _get_cache_prim(P.Split)(arr_axis, true_chunks)(tensor_slice(input, start1, size1))
         if length2:
-            res += P.Split(arr_axis, 1)(tensor_slice(input, start2, size2))
+            res += _get_cache_prim(P.Split)(arr_axis, 1)(tensor_slice(input, start2, size2))
     return res
 
 
+def chunk_ext(input, chunks, dim=0):
+    """
+    Cut the input Tensor into `chunks` sub-tensors along the specified axis.
+
+    Note:
+        This function may return less than the specified number of chunks!
+
+    Args:
+        input (Tensor): A Tensor to be cut.
+        chunks (int): Number of sub-tensors to cut.
+        dim (int, optional): The dimension along which to split. Default: ``0`` .
+
+    Returns:
+        A tuple of sub-tensors.
+
+    Raises:
+        TypeError: If argument `input` is not Tensor.
+        TypeError: If argument `chunks` is not int.
+        TypeError: If argument `dim` is not int.
+        ValueError: If argument `dim` is out of range of :math:`[-input.ndim, input.ndim)` .
+        ValueError: If argument `chunks` is not a positive number.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore
+        >>> from mindspore import Tensor
+        >>> input_x = np.arange(9).astype("float32")
+        >>> output = mindspore.mint.chunk(Tensor(input_x), 3)
+        >>> print(output)
+        (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
+        Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
+        Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
+    """
+    return chunk_(input, chunks, dim)
+
+
 def fills(x, value):
     """
     `fills` is deprecated, please use `ops.fill` instead.
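
The `chunk` changes above replace direct `P.Split(...)` construction with `_get_cache_prim(P.Split)(...)`, which hands back a cached primitive instance per argument combination instead of allocating a fresh operator on every call. A minimal sketch of the pattern (the import path below is the one used inside mindspore itself; treat it as internal API that may change):

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P
    from mindspore.ops._primitive_cache import _get_cache_prim

    x = Tensor(np.arange(6).astype("float32"))
    split_op = _get_cache_prim(P.Split)(0, 2)  # same args reuse one cached instance
    parts = split_op(x)                        # two tensors of length 3
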
@@ -920,50 +1003,6 @@ def fills(x, value):
     return fills_(x, value_)
 
 
-def ones(shape, dtype=None): # pylint: disable=redefined-outer-name
-    r"""
-    Creates a tensor filled with value ones.
-
-    Creates a tensor with shape described by the first argument and fills it with value ones in type of the second
-    argument.
-
-    Args:
-        shape (Union[tuple[int], int, Tensor]): The specified shape of output tensor. Only positive integer or
-            tuple or Tensor containing positive integers are allowed. If it is a Tensor,
-            it must be a 0-D or 1-D Tensor with int32 or int64 dtypes.
-        dtype (:class:`mindspore.dtype`): The specified type of output tensor. If `dtype` is ``None`` ,
-            `mindspore.float32` will be used. Default: ``None`` .
-
-    Returns:
-        Tensor, has the same type and shape as input shape value.
-
-    Raises:
-        TypeError: If `shape` is not tuple, int or Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import ops
-        >>> output = ops.ones((2, 2), mindspore.float32)
-        >>> print(output)
-        [[1. 1.]
-        [1. 1.]]
-    """
-    _dtype = mstype.float32 if dtype is None else dtype
-    ones_op = _get_cache_prim(P.FillV2)()
-    value = Tensor(1, _dtype)
-    if isinstance(shape, int):
-        shape = tuple([shape])
-    elif isinstance(shape, list):
-        shape = Tensor(shape, dtype=mstype.int64)
-    elif isinstance(shape, Tensor) and shape.ndim == 0 and shape.size == 1:
-        shape = shape.reshape(1)
-    output = ones_op(shape, value)
-    return output
-
-
 def ones_like(input, *, dtype=None):
     """
     Returns a Tensor with a value of 1 and its shape is the same as the input.
@@ -993,57 +1032,15 @@ def ones_like(input, *, dtype=None):
         [[1 1]
         [1 1]]
     """
-
-    output = ones_like_op(input)
+    output = ones_like_(input)
     _dtype = input.dtype if dtype is None else dtype
     output = cast_(output, _dtype)
     return output
 
 
-def zeros(size, dtype=None): # pylint: disable=redefined-outer-name
-    r"""
-    Creates a tensor filled with 0 with shape described by `shape` and fills it with value 0 in type of `dtype`.
-
-    Args:
-        size (Union[tuple[int], int, Tensor]): The specified shape of output tensor. Only positive integer or
-            tuple or Tensor containing positive integers are allowed. If it is a Tensor,
-            it must be a 0-D or 1-D Tensor with int32 or int64 dtypes.
-        dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor. If `dtype` is ``None`` ,
-            mindspore.float32 will be used. Default: ``None`` .
-
-    Returns:
-        Tensor, has the same dtype and size as input.
-
-    Raises:
-        TypeError: If `size` is not tuple, int or Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import ops
-        >>> output = ops.zeros((2, 2), mindspore.float32)
-        >>> print(output)
-        [[0. 0.]
-        [0. 0.]]
-    """
-    zero_op = _get_cache_prim(P.FillV2)()
-    _dtype = mstype.float32 if dtype is None else dtype
-    value = Tensor(0, _dtype)
-    if isinstance(size, int):
-        size = tuple([size])
-    elif isinstance(size, list):
-        size = Tensor(size, dtype=mstype.int64)
-    elif isinstance(size, Tensor) and size.ndim == 0 and size.size == 1:
-        size = size.reshape(1)
-    output = zero_op(size, value)
-    return output
-
-
 def zeros_like(input, *, dtype=None):
     r"""
-    Creates a tensor filled with 0, with the same size as
+    Creates a tensor filled with 0, with the same size as input, and the given dtype.
 
     If `dtype = None`, the tensor will have the same dtype as input `input`.
 
@@ -1074,125 +1071,78 @@ def zeros_like(input, *, dtype=None):
         [0. 0.]]
     """
     _dtype = input.dtype if dtype is None else dtype
-
-
-    output = _zeros_like(input)
-    output = _cast(output, _dtype)
+    output = zeros_like_(input)
+    output = cast_(output, _dtype)
     return output
 
 
-def
-
-
-
-    Creates a new tensor by replicating `input` `multiples` times. The i'th dimension of
-    output tensor has `input.shape[i] * multiples[i]` elements, and the values of `input`
-    are replicated `multiples[i]` times along the i'th dimension.
+def ones_like_ext(input, *, dtype=None):
+    """
+    Creates a tensor filled with 1, with the same shape as input, and its data type is determined by the given dtype.
 
-
-    The length of `multiples` must be greater or equal to the length of dimension in `input`.
+    If `dtype = None`, the tensor will have the same dtype as input `input`.
 
     Args:
-        input (Tensor):
-            :math:`(x_1, x_2, ..., x_S)` .
+        input (Tensor): Tensor of any dimension.
 
-
-
-
-            Only constant value is allowed.
+    Keyword Args:
+        dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is ``None`` ,
+            the dtype of the input tensor will be used. Default: ``None`` .
 
     Returns:
-        Tensor, has the same
-        the dimension of `input` is `input.dim`, and the shape of `input` is :math:`(x_1, x_2, ..., x_S)`.
-
-        - If `input.dim = d`, then the shape of their corresponding positions can be multiplied, and
-          the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_S)`.
-        - If `input.dim < d`, fill in multiple 1 in the length of the shape of `input` until their
-          lengths are consistent. Such as set the shape of `input` as :math:`(1, ..., x_1, x_2, ..., x_S)`,
-          then the shape of their corresponding positions can be multiplied, and the shape of Outputs is
-          :math:`(1*y_1, ..., x_R*y_R, x_S*y_S)`.
+        Tensor, has the same shape as `input` but filled with ones.
 
     Raises:
-        TypeError: If `
-        ValueError: If the elements of `multiples` are not all greater than 0.
-        ValueError: If the length of `multiples` are smaller than the length of dimension in `input`.
+        TypeError: If `input` is not a Tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>>
-        >>>
-        >>> output
-
-
-
-
-
-
-
-        >>> print(output)
-        [[[1. 2. 1. 2.]
-        [3. 4. 3. 4.]
-        [1. 2. 1. 2.]
-        [3. 4. 3. 4.]
-        [1. 2. 1. 2.]
-        [3. 4. 3. 4.]]
-        [[1. 2. 1. 2.]
-        [3. 4. 3. 4.]
-        [1. 2. 1. 2.]
-        [3. 4. 3. 4.]
-        [1. 2. 1. 2.]
-        [3. 4. 3. 4.]]]
-    """
-    tile_op = _get_cache_prim(P.Tile)()
-    return tile_op(input, multiples)
-
-
-def range(start, end, step):
+        >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
+        >>> output = ops.function.array_func.ones_like_ext(x)
+        >>> print(output)
+        [[1 1]
+        [1 1]]
+    """
+    return ones_like_ext_(input, dtype)
+
+
+def zeros_like_ext(input, *, dtype=None):
     r"""
-    Creates a
-    `limit` up to but not including `end`.
+    Creates a tensor filled with 0, with the same size as input. Its data type is determined by the given dtype.
 
-
-    the same as the type of the inputs.
+    If `dtype = None`, the tensor will have the same dtype as input `input`.
 
     Args:
-
-
-
-
-
-            type: int32 ,int64, float32 or float64.
+        input (Tensor): Tensor of any dimension.
+
+    Keyword Args:
+        dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is ``None`` ,
+            the dtype of the input tensor will be used. Default: ``None`` .
 
     Returns:
-
+        Tensor, filled with 0.
 
     Raises:
-        TypeError: If
-        TypeError: If datatype of `start`, `end` or `step` is not same.
-        TypeError: If datatype of `start`, `end` or `step` is not supported.
-        ValueError: If `step` = 0.
-        ValueError: If `start` >= `end` when `step` > 0.
-        ValueError: If `start` <= `end` when `step` < 0.
+        TypeError: If dtype is not a MindSpore dtype.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
+        >>> import mindspore
+        >>> import numpy as np
        >>> from mindspore import Tensor, ops
-        >>>
-        >>>
-        >>> end = Tensor(10, mstype.int32)
-        >>> step = Tensor(4, mstype.int32)
-        >>> output = ops.range(start, end, step)
+        >>> x = Tensor(np.arange(4).reshape(2, 2))
+        >>> output = ops.function.array_func.zeros_like_ext(x, dtype=mindspore.float32)
        >>> print(output)
-        [0
+        [[0. 0.]
+        [0. 0.]]
     """
-    return
+    return zeros_like_ext_(input, dtype)
 
 
 ##############################
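
With the free-standing `ones`/`zeros`/`tile`/`range` definitions removed in favor of auto-generated ops, `ones_like_ext` and `zeros_like_ext` become thin wrappers over the new primitives. The stable `*_like` entry points keep the same dtype-override behavior, e.g.:

    import mindspore
    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.arange(4).reshape(2, 2))
    z = ops.zeros_like(x, dtype=mindspore.float32)  # dtype overrides x.dtype
    # [[0. 0.]
    #  [0. 0.]]
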
@@ -1228,7 +1178,70 @@ def unique(input):
     TypeError: If `input` is not a Tensor.
 
     Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, nn
+        >>> from mindspore import ops
+        >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
+        >>> output = ops.unique(x)
+        >>> print(output)
+        (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))
+        >>> y = output[0]
+        >>> print(y)
+        [1 2 5]
+        >>> idx = output[1]
+        >>> print(idx)
+        [0 1 2 1]
+    """
+    shape_x = input.shape
+    length_x = get_x_shape(shape_x)
+    input = reshape_(input, length_x)
+    y, idx = unique_(input)
+    idx = reshape_(idx, shape_x)
+    return y, idx
+
+
+def unique_ext(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
+    """
+    Returns the unique elements of input tensor.
+
+    When `return_inverse=True`, also return a tensor containing the index of each value of input
+    tensor corresponding to the output unique tensor.
+    When `return_counts=True`, also return a tensor containing the number of occurrences for each
+    unique value or tensor.
+
+    Args:
+        input (Tensor): The input tensor.
+        sorted (bool): Whether to sort the unique elements in ascending order before returning as output.
+            Default: ``True`` .
+        return_inverse (bool): Whether to also return the indices for where elements in the original input ended up in
+            the returned unique list. Default: ``False`` .
+        return_counts (bool): Whether to also return the counts for each unique element. Default: ``False`` .
+        dim (int): The dimension to operate upon. If ``None``, the unique of the flattened input is returned.
+            Otherwise, each of the tensors indexed by the given dimension is treated as one of the elements to apply the
+            unique operation upon. Default: ``None`` .
+
+    Returns:
+        A tensor or a tuple of tensors containing some of tensor objects (`output`, `inverse_indices`, `counts`).
+
+        - output (Tensor) - The output tensor including the unique elements of input tensor, it has same dtype as input.
+        - inverse_indices (Tensor) - Return when ``return_inverse`` is True. It represents the indices for where
+          elements in the original input map to in the output. When ``dim`` is ``None``, it has same shape as input,
+          otherwise, the shape is input.shape[dim].
+        - counts (Tensor) - Return when ``return_counts`` is True. It represents the number of occurrences for each
+          unique value or tensor. When ``dim`` is ``None``, it has same shape as output, otherwise, the shape is
+          output.shape[dim].
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend``
 
     Examples:
         >>> import mindspore
@@ -1236,9 +1249,9 @@ def unique(input):
         >>> from mindspore import Tensor, nn
         >>> from mindspore import ops
         >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
-        >>> output = ops.
+        >>> output = ops.unique_ext(x, return_inverse=True)
         >>> print(output)
-        (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=
+        (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int64, value= [0, 1, 2, 1]))
         >>> y = output[0]
         >>> print(y)
         [1 2 5]
@@ -1246,16 +1259,20 @@ def unique(input):
         >>> print(idx)
         [0 1 2 1]
     """
-
-
-
-
-
-
-
-
-
-
+    if not F.isconstant(return_inverse) or not F.isconstant(return_counts):
+        raise ValueError(f"For 'unique_ext', 'return_inverse' and 'return_counts' cannot be mutable")
+    if dim is None:
+        y, inverse, counts = unique2_(input, sorted, return_inverse, return_counts)
+    else:
+        validator.check_value_type("return_counts", return_counts, [bool], "unique_ext")
+        y, inverse, counts = unique_dim_(input, sorted, return_inverse, dim)
+    if return_inverse and return_counts:
+        return y, inverse, counts
+    if return_inverse:
+        return y, inverse
+    if return_counts:
+        return y, counts
+    return y
 
 
 def unique_with_pad(x, pad_num):
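
As the body above shows, `unique_ext` always computes the triple `(y, inverse, counts)` and then trims the result to what the flags requested, so its arity varies. The stable `ops.unique` keeps its fixed two-tuple contract:

    import mindspore
    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
    y, inverse = ops.unique(x)  # always (unique values, inverse indices)
    # y:       [1 2 5]
    # inverse: [0 1 2 1]
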
@@ -1363,7 +1380,7 @@ def unique_consecutive(input, return_idx=False, return_counts=False, axis=None):
     return output
 
 
-def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):
+def searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, sorter=None):
     """
     Return the position indices such that after inserting the values into the `sorted_sequence`, the order of innermost
     dimension of the `sorted_sequence` remains unchanged.
@@ -1378,16 +1395,24 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):
             if ``False`` , the output datatype will be int64. Default: ``False`` .
         right (bool, optional): Search Strategy. If ``True`` , return the last suitable index found;
             if ``False`` , return the first such index. Default: ``False`` .
+        side (str, optional): the same as right but preferred. ``"left"`` corresponds to ``False`` for `right`
+            and ``"right"`` corresponds to ``True`` for `right`. An error will be reported if this parameter is
+            set to ``"left"`` while `right` is ``True``. Default: ``None`` .
+        sorter (Tensor, optional): if provided, a tensor matching the shape of the unsorted sorted_sequence
+            containing a sequence of indices that sort it in the ascending order on the innermost
+            dimension and type must be int64. Default: ``None`` .
 
     Returns:
         Tensor containing the indices from the innermost dimension of `sorted_sequence` such that,
-        if insert the corresponding value in the `values`
+        if insert the corresponding value in the `values` Tensor, the order of `sorted_sequence` would be preserved,
         whose datatype is int32 if out_int32 is ``True`` , otherwise int64, and shape is the same as the shape of
         `values`.
 
     Raises:
         ValueError: If the dimension of `sorted_sequence` isn't 1 and all dimensions except the last dimension of
             `sorted_sequence` and `values` are different.
+        ValueError: If `sorted_sequence` value is a scalar.
+        ValueError: If `values` is a scalar when `sorted_sequence` dimension is not 1.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1404,10 +1429,16 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False):
         [1 2 4]]
     """
 
-
-
+    validator.check_value_type("out_int32", out_int32, [bool], "search_sorted")
+    validator.check_value_type("right", right, [bool], "search_sorted")
+    dtype = mstype.int32 if bool(out_int32) else mstype.int64
+    if side == "left" and right is True:
+        raise ValueError(f"For 'searchsorted', side and right can't be set to opposites, "
+                         f"got side of left while right was True.")
+    if side == "right":
+        right = True
     search_sorted_ = SearchSorted(dtype, right)
-    return search_sorted_(sorted_sequence, values)
+    return search_sorted_(sorted_sequence, values, sorter)
 
 
 def ger(input, vec2):
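
The new `side` and `sorter` keywords map onto the existing `right` flag: `side="right"` forces `right=True`, and contradictory values raise a ValueError. A short sketch of the two search strategies (values invented for illustration):

    import mindspore
    import numpy as np
    from mindspore import Tensor, ops

    seq = Tensor(np.array([1, 3, 5, 7, 9]), mindspore.int32)
    vals = Tensor(np.array([3, 6, 9]), mindspore.int32)
    left = ops.searchsorted(seq, vals)                 # first fit:    [1 3 4]
    right = ops.searchsorted(seq, vals, side="right")  # after equals: [2 3 5]
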
@@ -1457,7 +1488,7 @@ def size(input_x):
 
     Args:
         input_x (Tensor): Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
-            `number <https://www.mindspore.cn/docs/en/
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
 
     Returns:
         int. A scalar representing the elements' size of `input_x`, tensor is the number of elements
@@ -1538,76 +1569,6 @@ def dyn_shape(input_x):
     return tensor_shape_(input_x)
 
 
-def rank(input_x):
-    """
-    Returns the rank of a tensor.
-
-    Returns a 0-D int32 Tensor representing the rank of input; the rank of a tensor
-    is the number of indices required to uniquely select each element of the tensor.
-
-    Args:
-        input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is Number.
-
-    Returns:
-        Tensor. 0-D int32 Tensor representing the rank of input, i.e., :math:`R`. The data type is an int.
-
-    Raises:
-        TypeError: If `input_x` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
-        >>> output = ops.rank(input_tensor)
-        >>> print(output)
-        2
-        >>> print(type(output))
-        <class 'int'>
-    """
-    return rank_(input_x)
-
-
-def reshape(input, shape):
-    """
-    Rearranges the input Tensor based on the given shape.
-
-    The 'shape' can only have one -1 at most, in which case it's inferred from the remaining dimensions and
-    the number of elements in the input.
-
-    Args:
-        input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-        shape (Union[tuple[int], Tensor[int]]): Constructed by multiple
-            integers, i.e., :math:`(y_1, y_2, ..., y_S)`. Only constant value is allowed.
-
-    Returns:
-        Tensor, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
-
-    Raises:
-        ValueError: Given a shape tuple, if it has several -1; or if the product
-            of its elements is less than or equal to 0 or cannot be divided by the product
-            of the input tensor shape; or if it does not match the input's array size.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
-        >>> output = ops.reshape(input, (3, 2))
-        >>> print(output)
-        [[-0.1 0.3]
-        [ 3.6 0.4]
-        [ 0.5 -3.2]]
-    """
-    return reshape_(input, shape)
-
-
 def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
     r"""
     Reverses variable length slices.
@@ -1672,7 +1633,7 @@ def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
     [[4. 3. 2. 1.]
     [8. 7. 6. 5.]]
     """
-    return P.ReverseSequence(seq_dim=seq_dim, batch_dim=batch_dim)(x, seq_lengths)
+    return _get_cache_prim(P.ReverseSequence)(seq_dim=seq_dim, batch_dim=batch_dim)(x, seq_lengths)
 
 
 def flatten(input, order='C', *, start_dim=1, end_dim=-1):
@@ -1696,7 +1657,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
     Raises:
         TypeError: If `input` is not a Tensor.
         TypeError: If `order` is not string type.
-        ValueError: If `order` is string type, but not 'C' or 'F'
+        ValueError: If `order` is string type, but not ``'C'`` or ``'F'``.
         TypeError: If `start_dim` or `end_dim` is not int.
         ValueError: If `start_dim` is greater than `end_dim` after canonicalized.
         ValueError: If `start_dim` or `end_dim` is not in range of [-input.dim, input.dim-1].
@@ -1741,7 +1702,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
         return reshape_(input, (-1,))
     perm = ops.make_range(0, x_rank)
     new_order = ops.tuple_reversed(perm)
-    input =
+    input = transpose_(input, new_order)
 
     # Handle the default case.
     x_shape = shape_(input)
@@ -1749,7 +1710,7 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
     if start_dim == 1 and end_dim == -1:
         if x_rank in (0, 1):
             return reshape_(input, (-1,))
-        return
+        return flatten_(input)
 
     # Check axis.
     start_dim = canonicalize_axis(start_dim, x_rank)
@@ -1771,341 +1732,6 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
|
|
|
1771
1732
|
return reshape_(input, new_shape)
|
|
1772
1733
|
|
|
1773
1734
|
|
|
1774
|
-
@constexpr
|
|
1775
|
-
def _check_select_type_match(scalar, tensor_type, scalar_name, tensor_name):
|
|
1776
|
-
if isinstance(scalar, int) and tensor_type != mstype.int32:
|
|
1777
|
-
raise TypeError(f"For functional operator[select], the input[{scalar_name}] is int, "
|
|
1778
|
-
f"then the input[{tensor_name}] must be a Tensor of int32.")
|
|
1779
|
-
if isinstance(scalar, float) and tensor_type != mstype.float32:
|
|
1780
|
-
raise TypeError(f"For functional operator[select], the input[{scalar_name}] is float, "
|
|
1781
|
-
f"then the input[{tensor_name}] must be a Tensor of float32.")
|
|
1782
|
-
|
|
1783
|
-
|
|
1784
|
-
@_primexpr
|
|
1785
|
-
def _check_select_shape_match(input_shape, cond_shape, tensor_name):
|
|
1786
|
-
if input_shape != cond_shape:
|
|
1787
|
-
raise ValueError(f"For functional operator[select], the cond shape must be same as {tensor_name} shape.")
|
|
1788
|
-
|
|
1789
|
-
|
|
1790
|
-
@constexpr
|
|
1791
|
-
def _check_select_type(is_cond_tensor, is_x_scalar, is_y_scalar, is_x_tensor, is_y_tensor):
|
|
1792
|
-
if not is_cond_tensor:
|
|
1793
|
-
raise TypeError(f"For functional operator[select], the input[cond] must be a Tensor.")
|
|
1794
|
-
if is_x_scalar and not is_y_tensor:
|
|
1795
|
-
raise TypeError(f"For functional operator[select], the input[x] is int or float, "
|
|
1796
|
-
f"then the input[y] must be a Tensor.")
|
|
1797
|
-
if is_y_scalar and not is_x_tensor:
|
|
1798
|
-
raise TypeError(f"For functional operator[select], the input[y] is int or float, "
|
|
1799
|
-
f"then the input[x] must be a Tensor.")
|
|
1800
|
-
|
|
1801
|
-
|
|
1802
|
-
@constexpr
|
|
1803
|
-
def _check_select_shape_same(cond_shape, x_shape, y_shape):
|
|
1804
|
-
"""Check if input of select has same shape."""
|
|
1805
|
-
return cond_shape == x_shape and x_shape == y_shape and cond_shape == y_shape
|
|
1806
|
-
|
|
1807
|
-
|
|
1808
|
-
@constexpr
|
|
1809
|
-
def get_max_value(x, y, z):
|
|
1810
|
-
"""Get the maximum value of x, y and z."""
|
|
1811
|
-
if x >= y and x >= z:
|
|
1812
|
-
return x
|
|
1813
|
-
if y >= x and y >= z:
|
|
1814
|
-
return y
|
|
1815
|
-
return z
|
|
1816
|
-
|
|
1817
|
-
|
|
1818
|
-
@constexpr
|
|
1819
|
-
def _calc_broadcast_shape(cond_shape, x_shape, y_shape):
|
|
1820
|
-
"""Calculate broadcast shape for select"""
|
|
1821
|
-
converted_shape = []
|
|
1822
|
-
cond_reverse = cond_shape[::-1]
|
|
1823
|
-
x_reverse = x_shape[::-1]
|
|
1824
|
-
y_reverse = y_shape[::-1]
|
|
1825
|
-
max_len = get_max_value(len(cond_reverse), len(x_reverse), len(y_reverse))
|
|
1826
|
-
i = 0
|
|
1827
|
-
while i < max_len:
|
|
1828
|
-
cond_element = 1 if i >= len(cond_reverse) else cond_reverse[i]
|
|
1829
|
-
x_element = 1 if i >= len(x_reverse) else x_reverse[i]
|
|
1830
|
-
y_element = 1 if i >= len(y_reverse) else y_reverse[i]
|
|
1831
|
-
broadcast_element = get_max_value(cond_element, x_element, y_element)
|
|
1832
|
-
if cond_element not in (1, broadcast_element):
|
|
1833
|
-
raise ValueError(f"For select, condition input can not broadcast at index {i}")
|
|
1834
|
-
if x_element not in (1, broadcast_element):
|
|
1835
|
-
raise ValueError(f"For select, x input can not broadcast at index {i}")
|
|
1836
|
-
if y_element not in (1, broadcast_element):
|
|
1837
|
-
raise ValueError(f"For select, y input can not broadcast at index {i}")
|
|
1838
|
-
converted_shape.append(broadcast_element)
|
|
1839
|
-
i = i + 1
|
|
1840
|
-
converted_shape.reverse()
|
|
1841
|
-
return tuple(converted_shape)
|
|
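For comparison, the three-way broadcast shape computed by the removed helper above agrees with NumPy's standard broadcasting rules; a quick cross-check (a sketch, not the removed implementation):

```python
import numpy as np

# np.broadcast_shapes applies the same per-dimension rule and raises
# ValueError when the shapes are incompatible.
print(np.broadcast_shapes((2, 1, 3), (1, 4, 3), (4, 1)))  # (2, 4, 3)
```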
1842
|
-
|
|
1843
|
-
|
|
1844
|
-
def select(cond, x, y):
|
|
1845
|
-
r"""
|
|
1846
|
-
The conditional tensor determines whether the corresponding element in the output must be
|
|
1847
|
-
selected from `x` (if true) or `y` (if false) based on the value of each element.
|
|
1848
|
-
|
|
1849
|
-
It can be defined as:
|
|
1850
|
-
|
|
1851
|
-
.. math::
|
|
1852
|
-
out_i = \begin{cases}
|
|
1853
|
-
x_i, & \text{if } cond_i \\
|
|
1854
|
-
y_i, & \text{otherwise}
|
|
1855
|
-
\end{cases}
|
|
1856
|
-
|
|
1857
|
-
Args:
|
|
1858
|
-
cond (Tensor[bool]): The condition tensor, decides which element is chosen.
|
|
1859
|
-
The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
|
|
1860
|
-
x (Union[Tensor, int, float]): The first Tensor or number to be selected.
|
|
1861
|
-
If x is a Tensor, the shape is or can be broadcast to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
|
|
1862
|
-
If x is an int or a float, it will be cast to the type of int32 or float32,
|
|
1863
|
-
and broadcast to the same shape as y. One of x and y must be a Tensor.
|
|
1864
|
-
y (Union[Tensor, int, float]): The second Tensor or number to be selected.
|
|
1865
|
-
If y is a Tensor, the shape is or can be broadcast to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
|
|
1866
|
-
If y is an int or a float, it will be cast to the type of int32 or float32,
|
|
1867
|
-
and broadcast to the same shape as x. One of x and y must be a Tensor.
|
|
1868
|
-
|
|
1869
|
-
Returns:
|
|
1870
|
-
Tensor, has the same shape as `cond`.
|
|
1871
|
-
|
|
1872
|
-
Raises:
|
|
1873
|
-
TypeError: If `x` or `y` is not a Tensor, int or float.
|
|
1874
|
-
ValueError: The shapes of inputs can not be broadcast.
|
|
1875
|
-
|
|
1876
|
-
Supported Platforms:
|
|
1877
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
1878
|
-
|
|
1879
|
-
Examples:
|
|
1880
|
-
>>> import mindspore
|
|
1881
|
-
>>> from mindspore import Tensor, ops
|
|
1882
|
-
>>> # 1) Both inputs are Tensor
|
|
1883
|
-
>>>
|
|
1884
|
-
>>> cond = Tensor([True, False])
|
|
1885
|
-
>>> x = Tensor([2,3], mindspore.float32)
|
|
1886
|
-
>>> y = Tensor([1,2], mindspore.float32)
|
|
1887
|
-
>>> output = ops.select(cond, x, y)
|
|
1888
|
-
>>> print(output)
|
|
1889
|
-
[2. 2.]
|
|
1890
|
-
>>> # 2) y is a float
|
|
1891
|
-
>>> cond = Tensor([True, False])
|
|
1892
|
-
>>> x = Tensor([2,3], mindspore.float32)
|
|
1893
|
-
>>> y = 2.0
|
|
1894
|
-
>>> output = ops.select(cond, x, y)
|
|
1895
|
-
>>> print(output)
|
|
1896
|
-
[2. 2.]
|
|
1897
|
-
"""
|
|
1898
|
-
is_x_scalar = isinstance(x, (int, float))
|
|
1899
|
-
is_y_scalar = isinstance(y, (int, float))
|
|
1900
|
-
is_x_tensor = isinstance(x, Tensor)
|
|
1901
|
-
is_y_tensor = isinstance(y, Tensor)
|
|
1902
|
-
is_cond_tensor = isinstance(cond, Tensor)
|
|
1903
|
-
_check_select_type(is_cond_tensor, is_x_scalar, is_y_scalar, is_x_tensor, is_y_tensor)
|
|
1904
|
-
input_x = x
|
|
1905
|
-
input_y = y
|
|
1906
|
-
if is_x_scalar:
|
|
1907
|
-
_check_select_shape_match(y.shape, cond.shape, "y")
|
|
1908
|
-
_check_select_type_match(x, y.dtype, "x", "y")
|
|
1909
|
-
input_x = zeros_like_(y) + x
|
|
1910
|
-
if isinstance(x, int):
|
|
1911
|
-
input_x = cast_(input_x, mstype.int32)
|
|
1912
|
-
else:
|
|
1913
|
-
input_x = cast_(input_x, mstype.float32)
|
|
1914
|
-
|
|
1915
|
-
if is_y_scalar:
|
|
1916
|
-
_check_select_shape_match(x.shape, cond.shape, "x")
|
|
1917
|
-
_check_select_type_match(y, x.dtype, "y", "x")
|
|
1918
|
-
input_y = zeros_like_(x) + y
|
|
1919
|
-
if isinstance(y, int):
|
|
1920
|
-
input_y = cast_(input_y, mstype.int32)
|
|
1921
|
-
else:
|
|
1922
|
-
input_y = cast_(input_y, mstype.float32)
|
|
1923
|
-
|
|
1924
|
-
if is_x_tensor and is_y_tensor and is_cond_tensor:
|
|
1925
|
-
x_shape = ops.shape(x)
|
|
1926
|
-
y_shape = ops.shape(y)
|
|
1927
|
-
cond_shape = ops.shape(cond)
|
|
1928
|
-
all_constant = ops.isconstant(cond_shape) and ops.isconstant(x_shape) and ops.isconstant(y_shape)
|
|
1929
|
-
if all_constant and not _check_select_shape_same(cond_shape, x_shape, y_shape):
|
|
1930
|
-
broadcast_shape = _calc_broadcast_shape(cond_shape, x_shape, y_shape)
|
|
1931
|
-
new_cond = ops.broadcast_to(cond, broadcast_shape)
|
|
1932
|
-
new_x = ops.broadcast_to(x, broadcast_shape)
|
|
1933
|
-
new_y = ops.broadcast_to(y, broadcast_shape)
|
|
1934
|
-
return tensor_select_(new_cond, new_x, new_y)
|
|
1935
|
-
|
|
1936
|
-
return tensor_select_(cond, input_x, input_y)
|
|
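The removed `select` follows the elementwise rule in the formula above, which matches `numpy.where` semantics; a minimal cross-check of the first docstring example:

```python
import numpy as np

cond = np.array([True, False])
x = np.array([2.0, 3.0], dtype=np.float32)
y = np.array([1.0, 2.0], dtype=np.float32)
# out_i = x_i if cond_i else y_i, per the formula in the docstring above.
print(np.where(cond, x, y))  # [2. 2.]
```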
1937
|
-
|
|
1938
|
-
|
|
1939
|
-
def strided_slice(input_x,
|
|
1940
|
-
begin,
|
|
1941
|
-
end,
|
|
1942
|
-
strides,
|
|
1943
|
-
begin_mask=0,
|
|
1944
|
-
end_mask=0,
|
|
1945
|
-
ellipsis_mask=0,
|
|
1946
|
-
new_axis_mask=0,
|
|
1947
|
-
shrink_axis_mask=0):
|
|
1948
|
-
r"""
|
|
1949
|
-
Extracts a strided slice of a Tensor based on `begin/end` index and `strides`.
|
|
1950
|
-
|
|
1951
|
-
This operation extracts a fragment of size (end-begin)/strides from the given 'input_tensor'.
|
|
1952
|
-
Starting from the beginning position, the fragment continues adding strides to the index until
|
|
1953
|
-
all dimensions are not less than the ending position.
|
|
1954
|
-
|
|
1955
|
-
Note:
|
|
1956
|
-
- `begin` , `end` and `strides` must have the same shape.
|
|
1957
|
-
- `begin` , `end` and `strides` are all 1-D Tensor, and their shape size
|
|
1958
|
-
must not greater than the dim of `input_x`.
|
|
1959
|
-
|
|
1960
|
-
During the slicing process, the fragment (end-begin)/strides are extracted from each dimension.
|
|
1961
|
-
|
|
1962
|
-
Example: For Tensor `input_x` with shape :math:`(5, 6, 7)`,
|
|
1963
|
-
set `begin`, `end` and `strides` to (1, 3, 2), (3, 5, 6),
|
|
1964
|
-
(1, 1, 2) respectively, then elements from index 1 to 3 are extracted for dim 0, index 3 to 5
|
|
1965
|
-
are extracted for dim 1, and index 2 to 6 with a stride of 2 are extracted for dim 2; this
|
|
1966
|
-
process is equivalent to a pythonic slice `input_x[1:3, 3:5, 2:6:2]`.
|
|
1967
|
-
|
|
1968
|
-
If the length of `begin`, `end` and `strides` is smaller than the dim of `input_x`,
|
|
1969
|
-
then all elements are extracted from the missing dims; it behaves as if the missing
|
|
1970
|
-
entries of `begin`, `end` and `strides` were filled with zeros, the size of that dim, and ones, respectively.
|
|
1971
|
-
|
|
1972
|
-
Example: For Tensor `input_x` with shape :math:`(5, 6, 7)`,
|
|
1973
|
-
set `begin`, `end` and `strides` to (1, 3),
|
|
1974
|
-
(3, 5), (1, 1) respectively, then elements from index 1 to 3 are extracted
|
|
1975
|
-
for dim 0, index 3 to 5 are extracted for dim 1, and index 0 to 7 are extracted
|
|
1976
|
-
for dim 2, this process is equivalent to a pythonic slice `input_x[1:3, 3:5, 0:7]`.
|
|
1977
|
-
|
|
1978
|
-
Here's how a mask works:
|
|
1979
|
-
For each specific mask, it will be converted to a binary representation internally, and then
|
|
1980
|
-
reverse the result to start the calculation. For Tensor `input_x` with
|
|
1981
|
-
shape :math:`(5, 6, 7)`, take a mask value of 3, which
|
|
1982
|
-
can be represented as 0b011. Reversing it gives 0b110, which implies the first and second dims of the
|
|
1983
|
-
original Tensor will be affected by this mask. See the examples below; for simplicity, all masks mentioned
|
|
1984
|
-
below are in their reversed binary form:
|
|
1985
|
-
|
|
1986
|
-
- `begin_mask` and `end_mask`
|
|
1987
|
-
|
|
1988
|
-
If the ith bit of `begin_mask` is 1, `begin[i]` is ignored and the fullest
|
|
1989
|
-
possible range in that dimension is used instead. `end_mask` is analogous,
|
|
1990
|
-
except with the end range. For Tensor `input_x` with shape :math:`(5, 6, 7, 8)`, if `begin_mask`
|
|
1991
|
-
is 0b110, `end_mask` is 0b011, the slice `input_x[0:3, 0:6, 2:7:2]` is produced.
|
|
1992
|
-
|
|
1993
|
-
- `ellipsis_mask`
|
|
1994
|
-
|
|
1995
|
-
If the ith bit of `ellipsis_mask` is 1, as many unspecified dimensions as needed
|
|
1996
|
-
will be inserted between other dimensions. Only one non-zero bit is allowed
|
|
1997
|
-
in `ellipsis_mask`. For Tensor `input_x` with shape :math:`(5, 6, 7, 8)`, `input_x[2:,...,:6]`
|
|
1998
|
-
is equivalent to `input_x[2:5,:,:,0:6]` , `input_x[2:,...]` is equivalent
|
|
1999
|
-
to `input_x[2:5,:,:,:]`.
|
|
2000
|
-
|
|
2001
|
-
- `new_axis_mask`
|
|
2002
|
-
|
|
2003
|
-
If the ith bit of `new_axis_mask` is 1, `begin`, `end` and `strides` are
|
|
2004
|
-
ignored and a new length 1 dimension is added at the specified position
|
|
2005
|
-
in the output Tensor. For Tensor `input_x` with shape :math:`(5, 6, 7)`, if `new_axis_mask`
|
|
2006
|
-
is 0b110, a new dim is added to the second dim, which will produce
|
|
2007
|
-
a Tensor with shape :math:`(5, 1, 6, 7)`.
|
|
2008
|
-
|
|
2009
|
-
- `shrink_axis_mask`
|
|
2010
|
-
|
|
2011
|
-
If the ith bit of `shrink_axis_mask` is 1, `begin`, `end` and `strides`
|
|
2012
|
-
are ignored and dimension i will be shrunk to 0.
|
|
2013
|
-
For Tensor `input_x` with shape :math:`(5, 6, 7)`,
|
|
2014
|
-
if `shrink_axis_mask` is 0b010, it is equivalent to slice `x[:, 5, :]`
|
|
2015
|
-
and results in an output shape of :math:`(5, 7)`.
|
|
2016
|
-
|
|
2017
|
-
Note:
|
|
2018
|
-
`new_axis_mask` and `shrink_axis_mask` are not recommended to be
|
|
2019
|
-
used at the same time; doing so might produce an unexpected result.
|
|
2020
|
-
|
|
2021
|
-
Args:
|
|
2022
|
-
input_x (Tensor): The input Tensor to be extracted from.
|
|
2023
|
-
begin (tuple[int]): A tuple which represents the location where to start.
|
|
2024
|
-
end (tuple[int]): A tuple which represents the maximum location where to end.
|
|
2025
|
-
strides (tuple[int]): A tuple which represents the stride that is continuously added
|
|
2026
|
-
before reaching the maximum location. Only int is allowed, it can be negative
|
|
2027
|
-
which results in reversed slicing.
|
|
2028
|
-
begin_mask (int, optional): Starting index of the slice. Default: ``0`` .
|
|
2029
|
-
end_mask (int, optional): Ending index of the slice. Default: ``0`` .
|
|
2030
|
-
ellipsis_mask (int, optional): An int mask, ignore slicing operation when set to 1. Default: ``0`` .
|
|
2031
|
-
new_axis_mask (int, optional): An int mask for adding new dims. Default: ``0`` .
|
|
2032
|
-
shrink_axis_mask (int, optional): An int mask for shrinking dims. Default: ``0`` .
|
|
2033
|
-
|
|
2034
|
-
Returns:
|
|
2035
|
-
Tensor, the extracted strided slice of the Tensor based on `begin/end` index and `strides`.
|
|
2036
|
-
|
|
2037
|
-
Raises:
|
|
2038
|
-
TypeError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or
|
|
2039
|
-
`shrink_axis_mask` is not an int.
|
|
2040
|
-
TypeError: If `begin`, `end` or `strides` is not tuple[int].
|
|
2041
|
-
ValueError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or
|
|
2042
|
-
`shrink_axis_mask` is less than 0.
|
|
2043
|
-
ValueError: If `begin`, `end` and `strides` have different shapes.
|
|
2044
|
-
|
|
2045
|
-
Supported Platforms:
|
|
2046
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
2047
|
-
|
|
2048
|
-
Examples:
|
|
2049
|
-
>>> import mindspore
|
|
2050
|
-
>>> from mindspore import Tensor, ops
|
|
2051
|
-
>>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
|
|
2052
|
-
... [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
|
|
2053
|
-
>>> output = ops.strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1))
|
|
2054
|
-
>>> # Take this " output = strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1)) " as an example,
|
|
2055
|
-
>>> # start = [1, 0, 2] , end = [3, 1, 3], strides = [1, 1, 1], Find a segment of (start, end),
|
|
2056
|
-
>>> # note that end is an open interval
|
|
2057
|
-
>>> # To facilitate understanding, this operator can be divided into three steps:
|
|
2058
|
-
>>> # Step 1: Calculation of the first dimension:
|
|
2059
|
-
>>> # start = 1, end = 3, strides = 1, so rows 1 and 2 are taken, which gives the output at this step.
|
|
2060
|
-
>>> # output_1th =
|
|
2061
|
-
>>> # [
|
|
2062
|
-
>>> # [
|
|
2063
|
-
>>> # [3,3,3]
|
|
2064
|
-
>>> # [4,4,4]
|
|
2065
|
-
>>> # ]
|
|
2066
|
-
>>> # [
|
|
2067
|
-
>>> # [5,5,5]
|
|
2068
|
-
>>> # [6,6,6]
|
|
2069
|
-
>>> # ]
|
|
2070
|
-
>>> # ]
|
|
2071
|
-
>>> # Step 2: Calculation of the second dimension
|
|
2072
|
-
>>> # 2nd dimension: start = 0, end = 1, strides = 1. So only row 0
|
|
2073
|
-
>>> # can be taken, which gives the output at this step.
|
|
2074
|
-
>>> # output_2nd =
|
|
2075
|
-
>>> # [
|
|
2076
|
-
>>> # [
|
|
2077
|
-
>>> # [3,3,3]
|
|
2078
|
-
>>> # ]
|
|
2079
|
-
>>> # [
|
|
2080
|
-
>>> # [5,5,5]
|
|
2081
|
-
>>> # ]
|
|
2082
|
-
>>> # ]
|
|
2083
|
-
>>> # Step 3: Calculation of the third dimension
|
|
2084
|
-
>>> # 3rd dimension: start = 2, end = 3, strides = 1. So only column 2 can be taken,
|
|
2085
|
-
>>> # and you get the final output.
|
|
2086
|
-
>>> # output_3rd =
|
|
2087
|
-
>>> # [
|
|
2088
|
-
>>> # [
|
|
2089
|
-
>>> # [3]
|
|
2090
|
-
>>> # ]
|
|
2091
|
-
>>> # [
|
|
2092
|
-
>>> # [5]
|
|
2093
|
-
>>> # ]
|
|
2094
|
-
>>> # ]
|
|
2095
|
-
>>> # The final output after finishing is:
|
|
2096
|
-
>>> print(output)
|
|
2097
|
-
[[[3.]]
|
|
2098
|
-
[[5.]]]
|
|
2099
|
-
>>> # Another example:
|
|
2100
|
-
>>> output = ops.strided_slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
|
|
2101
|
-
>>> print(output)
|
|
2102
|
-
[[[3. 3. 3.]]]
|
|
2103
|
-
"""
|
|
2104
|
-
strided_slice_ = _get_cache_prim(P.StridedSlice)(
|
|
2105
|
-
begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask)
|
|
2106
|
-
return strided_slice_(input_x, begin, end, strides)
|
|
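The docstring above states an equivalence with pythonic slicing; a NumPy check of the first worked example:

```python
import numpy as np

x = np.arange(5 * 6 * 7).reshape(5, 6, 7)
# strided_slice(x, (1, 3, 2), (3, 5, 6), (1, 1, 2)) equals the pythonic
# slice x[1:3, 3:5, 2:6:2], as described above.
print(x[1:3, 3:5, 2:6:2].shape)  # (2, 2, 2)
```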
2107
|
-
|
|
2108
|
-
|
|
2109
1735
|
def slice(input_x, begin, size):
|
|
2110
1736
|
r"""
|
|
2111
1737
|
Slices a tensor in the specified shape.
|
|
@@ -2160,20 +1786,6 @@ def slice(input_x, begin, size):
|
|
|
2160
1786
|
return tensor_slice(input_x, begin, size)
|
|
2161
1787
|
|
|
2162
1788
|
|
|
2163
|
-
def concat(tensors, axis=0):
|
|
2164
|
-
"""
|
|
2165
|
-
Alias for :func:`mindspore.ops.cat()`.
|
|
2166
|
-
|
|
2167
|
-
Tutorial Examples:
|
|
2168
|
-
- `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/r2.2/beginner/tensor.html#tensor-operation>`_
|
|
2169
|
-
- `Vision Transformer Image Classification - Building ViT as a whole
|
|
2170
|
-
<https://mindspore.cn/tutorials/application/en/r2.2/cv/vit.html#building-vit-as-a-whole>`_
|
|
2171
|
-
- `Sentiment Classification Implemented by RNN - Dense
|
|
2172
|
-
<https://mindspore.cn/tutorials/application/en/r2.2/nlp/sentiment_analysis.html#dense>`_
|
|
2173
|
-
"""
|
|
2174
|
-
return cat(tensors, axis)
|
|
2175
|
-
|
|
2176
|
-
|
|
2177
1789
|
def stack(tensors, axis=0):
|
|
2178
1790
|
r"""
|
|
2179
1791
|
Stacks a list of tensors in specified axis.
|
|
@@ -2284,45 +1896,6 @@ def unbind(input, dim=0):
|
|
|
2284
1896
|
return _unstack(input)
|
|
2285
1897
|
|
|
2286
1898
|
|
|
2287
|
-
def expand_dims(input_x, axis):
|
|
2288
|
-
"""
|
|
2289
|
-
Adds an additional dimension to `input_x` at the given axis, the dimension
|
|
2290
|
-
of `input_x` should be greater than or equal to 1.
|
|
2291
|
-
|
|
2292
|
-
Note:
|
|
2293
|
-
If the specified axis is a negative number, the index is counted
|
|
2294
|
-
backward from the end and starts at 1.
|
|
2295
|
-
|
|
2296
|
-
Args:
|
|
2297
|
-
input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
|
|
2298
|
-
axis (int): Specifies the dimension index at which to expand
|
|
2299
|
-
the shape of `input_x`. The value of axis must be in the range
|
|
2300
|
-
`[-input_x.ndim-1, input_x.ndim]`. Only constant value is allowed.
|
|
2301
|
-
|
|
2302
|
-
Returns:
|
|
2303
|
-
Tensor, the shape of tensor is :math:`(1, x_1, x_2, ..., x_R)` if the
|
|
2304
|
-
value of `axis` is 0. It has the same data type as `input_x`.
|
|
2305
|
-
|
|
2306
|
-
Raises:
|
|
2307
|
-
TypeError: If `axis` is not an int.
|
|
2308
|
-
ValueError: If `axis` is not in the valid range :math:`[-a.ndim-1, a.ndim]`.
|
|
2309
|
-
|
|
2310
|
-
Supported Platforms:
|
|
2311
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
2312
|
-
|
|
2313
|
-
Examples:
|
|
2314
|
-
>>> import mindspore
|
|
2315
|
-
>>> import numpy as np
|
|
2316
|
-
>>> from mindspore import Tensor, ops
|
|
2317
|
-
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
|
|
2318
|
-
>>> output = ops.expand_dims(input_tensor, 0)
|
|
2319
|
-
>>> print(output)
|
|
2320
|
-
[[[2. 2.]
|
|
2321
|
-
[2. 2.]]]
|
|
2322
|
-
"""
|
|
2323
|
-
return expand_dims_(input_x, axis)
|
|
2324
|
-
|
|
2325
|
-
|
|
2326
1899
|
def unsqueeze(input, dim):
|
|
2327
1900
|
"""
|
|
2328
1901
|
Adds an additional dimension to `input` at the given dim.
|
|
@@ -2354,7 +1927,7 @@ def unsqueeze(input, dim):
|
|
|
2354
1927
|
[[[2. 2.]
|
|
2355
1928
|
[2. 2.]]]
|
|
2356
1929
|
"""
|
|
2357
|
-
return
|
|
1930
|
+
return expand_dims(input, dim)
|
|
2358
1931
|
|
|
2359
1932
|
|
|
2360
1933
|
def squeeze(input, axis=None):
|
|
@@ -2411,57 +1984,6 @@ def squeeze(input, axis=None):
|
|
|
2411
1984
|
return squeeze_(input)
|
|
2412
1985
|
|
|
2413
1986
|
|
|
2414
|
-
def transpose(input, input_perm):
|
|
2415
|
-
"""
|
|
2416
|
-
Permutes the dimensions of the input tensor according to input permutation.
|
|
2417
|
-
|
|
2418
|
-
For a 1-D array this has no effect, as a transposed vector is simply the same vector.
|
|
2419
|
-
To convert a 1-D array into a 2-D column vector, please refer to the class mindspore.ops.ExpandDims.
|
|
2420
|
-
For a 2-D array, this is a standard matrix transpose. For an n-D array, if axes are given,
|
|
2421
|
-
their order indicates how the axes are permuted (see Examples).
|
|
2422
|
-
If axes are not provided and a.shape is :math:`(i[0], i[1], ... i[n-2], i[n-1])`,
|
|
2423
|
-
then a.transpose().shape is :math:`(i[n-1], i[n-2], ... i[1], i[0])`.
|
|
2424
|
-
|
|
2425
|
-
Note:
|
|
2426
|
-
On GPU and CPU, if the value of `input_perm` is negative, its actual value is `input_perm[i] + rank(input)`.
|
|
2427
|
-
Negative value of `input_perm` is not supported on Ascend.
|
|
2428
|
-
|
|
2429
|
-
Args:
|
|
2430
|
-
input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
|
|
2431
|
-
input_perm (tuple[int]): The permutation to be converted. The elements in `input_perm` are composed of
|
|
2432
|
-
the indexes of each dimension of `input`. The length of `input_perm` and the shape of `input` must be
|
|
2433
|
-
the same. Only constant value is allowed. Must be in the range [-rank(input), rank(input)).
|
|
2434
|
-
|
|
2435
|
-
Returns:
|
|
2436
|
-
Tensor, the type of output tensor is the same as `input` and the shape of output tensor is decided by the
|
|
2437
|
-
shape of `input` and the value of `input_perm`.
|
|
2438
|
-
|
|
2439
|
-
Raises:
|
|
2440
|
-
TypeError: If `input_perm` is not a tuple.
|
|
2441
|
-
ValueError: If length of shape of `input` is not equal to length of shape of `input_perm`.
|
|
2442
|
-
ValueError: If the same element exists in `input_perm`.
|
|
2443
|
-
|
|
2444
|
-
Supported Platforms:
|
|
2445
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
2446
|
-
|
|
2447
|
-
Examples:
|
|
2448
|
-
>>> import mindspore
|
|
2449
|
-
>>> import numpy as np
|
|
2450
|
-
>>> from mindspore import Tensor, ops
|
|
2451
|
-
>>> input = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
|
|
2452
|
-
>>> input_perm = (0, 2, 1)
|
|
2453
|
-
>>> output = ops.transpose(input, input_perm)
|
|
2454
|
-
>>> print(output)
|
|
2455
|
-
[[[ 1. 4.]
|
|
2456
|
-
[ 2. 5.]
|
|
2457
|
-
[ 3. 6.]]
|
|
2458
|
-
[[ 7. 10.]
|
|
2459
|
-
[ 8. 11.]
|
|
2460
|
-
[ 9. 12.]]]
|
|
2461
|
-
"""
|
|
2462
|
-
return transpose_(input, input_perm)
|
|
2463
|
-
|
|
2464
|
-
|
|
2465
1987
|
def scatter_mul(input_x, indices, updates):
|
|
2466
1988
|
r"""
|
|
2467
1989
|
Using given values to update tensor value through the mul operation, along with the input indices.
|
|
@@ -2792,111 +2314,6 @@ def scatter_div(input_x, indices, updates):
|
|
|
2792
2314
|
return scatter_div_(input_x, indices, updates)
|
|
2793
2315
|
|
|
2794
2316
|
|
|
2795
|
-
def scatter_nd(indices, updates, shape):
|
|
2796
|
-
r"""
|
|
2797
|
-
Scatters a tensor into a new tensor depending on the specified indices.
|
|
2798
|
-
|
|
2799
|
-
Creates an empty tensor with the given `shape`, and set values by scattering the update tensor
|
|
2800
|
-
depending on indices. The empty tensor has rank :math:`P` and `indices` has rank :math:`Q`.
|
|
2801
|
-
|
|
2802
|
-
The `shape` is :math:`(s_0, s_1, ..., s_{P-1})`, where :math:`P \ge 1`.
|
|
2803
|
-
|
|
2804
|
-
`indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)`, where :math:`Q \ge 2` and :math:`N \le P`.
|
|
2805
|
-
|
|
2806
|
-
The last dimension of `indices` (with length :math:`N` ) indicates slices along the :math:`N` th dimension of the
|
|
2807
|
-
empty tensor.
|
|
2808
|
-
|
|
2809
|
-
`updates` is a tensor of rank :math:`Q-1+P-N`, and
|
|
2810
|
-
its shape is :math:`(i_0, i_1, ..., i_{Q-2}, s_N, s_{N+1}, ..., s_{P-1})`.
|
|
2811
|
-
|
|
2812
|
-
If `indices` contains duplicates, the duplicate `updates` are summed.
|
|
2813
|
-
|
|
2814
|
-
The following figure shows the calculation process of inserting two new value matrices into the first dimension
|
|
2815
|
-
with rank-3:
|
|
2816
|
-
|
|
2817
|
-
.. image:: ScatterNd.png
|
|
2818
|
-
|
|
2819
|
-
Args:
|
|
2820
|
-
indices (Tensor): Define the index of scattering in the new tensor with int32 or int64 data type.
|
|
2821
|
-
The rank of `indices` must be at least 2 and `indices.shape[-1] <= len(shape)`.
|
|
2822
|
-
updates (Tensor): Define the source Tensor to be updated.
|
|
2823
|
-
It has shape `indices.shape[:-1] + shape[indices.shape[-1]:]`.
|
|
2824
|
-
shape (tuple[int]): Define the shape of the output tensor, has the same data type as indices.
|
|
2825
|
-
`shape` can not be empty, and the elements in `shape` must be greater than or equal to 1.
|
|
2826
|
-
|
|
2827
|
-
Returns:
|
|
2828
|
-
Tensor, the new tensor, has the same type as `updates` and the same shape as `shape`.
|
|
2829
|
-
|
|
2830
|
-
Raises:
|
|
2831
|
-
TypeError: If `shape` is not a tuple.
|
|
2832
|
-
ValueError: If any element of `shape` is less than 1.
|
|
2833
|
-
|
|
2834
|
-
Supported Platforms:
|
|
2835
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
2836
|
-
|
|
2837
|
-
Examples:
|
|
2838
|
-
>>> import mindspore
|
|
2839
|
-
>>> import numpy as np
|
|
2840
|
-
>>> from mindspore import Tensor, ops
|
|
2841
|
-
>>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
|
|
2842
|
-
>>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2],
|
|
2843
|
-
... [3, 3, 3, 3], [4, 4, 4, 4]],
|
|
2844
|
-
... [[1, 1, 1, 1], [2, 2, 2, 2],
|
|
2845
|
-
... [3, 3, 3, 3], [4, 4, 4, 4]]]), mindspore.float32)
|
|
2846
|
-
>>> shape = (4, 4, 4)
|
|
2847
|
-
>>> output = ops.scatter_nd(indices, updates, shape)
|
|
2848
|
-
>>> print(output)
|
|
2849
|
-
[[[1. 1. 1. 1.]
|
|
2850
|
-
[2. 2. 2. 2.]
|
|
2851
|
-
[3. 3. 3. 3.]
|
|
2852
|
-
[4. 4. 4. 4.]]
|
|
2853
|
-
[[0. 0. 0. 0.]
|
|
2854
|
-
[0. 0. 0. 0.]
|
|
2855
|
-
[0. 0. 0. 0.]
|
|
2856
|
-
[0. 0. 0. 0.]]
|
|
2857
|
-
[[1. 1. 1. 1.]
|
|
2858
|
-
[2. 2. 2. 2.]
|
|
2859
|
-
[3. 3. 3. 3.]
|
|
2860
|
-
[4. 4. 4. 4.]]
|
|
2861
|
-
[[0. 0. 0. 0.]
|
|
2862
|
-
[0. 0. 0. 0.]
|
|
2863
|
-
[0. 0. 0. 0.]
|
|
2864
|
-
[0. 0. 0. 0.]]]
|
|
2865
|
-
>>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
|
|
2866
|
-
>>> updates = Tensor(np.array([3.2, 1.1]), mindspore.float32)
|
|
2867
|
-
>>> shape = (3, 3)
|
|
2868
|
-
>>> output = ops.scatter_nd(indices, updates, shape)
|
|
2869
|
-
>>> # In order to facilitate understanding, explain the operator pseudo-operation process step by step:
|
|
2870
|
-
>>> # Step 1: Generate an empty Tensor of the specified shape according to the shape
|
|
2871
|
-
>>> # [
|
|
2872
|
-
>>> # [0. 0. 0.]
|
|
2873
|
-
>>> # [0. 0. 0.]
|
|
2874
|
-
>>> # [0. 0. 0.]
|
|
2875
|
-
>>> # ]
|
|
2876
|
-
>>> # Step 2: Modify the data at the specified location according to the indicators
|
|
2877
|
-
>>> # 0th row of indices is [0, 1], 0th row of updates is 3.2.
|
|
2878
|
-
>>> # means that the element of the empty tensor at row 0, col 1 is set to 3.2
|
|
2879
|
-
>>> # [
|
|
2880
|
-
>>> # [0. 3.2. 0.]
|
|
2881
|
-
>>> # [0. 0. 0.]
|
|
2882
|
-
>>> # [0. 0. 0.]
|
|
2883
|
-
>>> # ]
|
|
2884
|
-
>>> # 1st row of indices is [1, 1], 1st row of updates is 1.1.
|
|
2885
|
-
>>> # means that the element of the empty tensor at row 1, col 1 is set to 1.1
|
|
2886
|
-
>>> # [
|
|
2887
|
-
>>> # [0. 3.2. 0.]
|
|
2888
|
-
>>> # [0. 1.1 0.]
|
|
2889
|
-
>>> # [0. 0. 0.]
|
|
2890
|
-
>>> # ]
|
|
2891
|
-
>>> # The final result is as follows:
|
|
2892
|
-
>>> print(output)
|
|
2893
|
-
[[0. 3.2 0.]
|
|
2894
|
-
[0. 1.1 0.]
|
|
2895
|
-
[0. 0. 0.]]
|
|
2896
|
-
"""
|
|
2897
|
-
return scatter_nd_(indices, updates, shape)
|
|
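The duplicate-summing behaviour described above can be reproduced with NumPy's `np.add.at`; a sketch of the removed docstring's second example (not the operator's kernel):

```python
import numpy as np

indices = np.array([[0, 1], [1, 1]])
updates = np.array([3.2, 1.1], dtype=np.float32)
out = np.zeros((3, 3), dtype=np.float32)
# np.add.at accumulates unbuffered, so duplicate indices are summed.
np.add.at(out, tuple(indices.T), updates)
print(out)  # [[0.  3.2 0. ] [0.  1.1 0. ] [0.  0.  0. ]]
```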
2898
|
-
|
|
2899
|
-
|
|
2900
2317
|
def scatter_update(input_x, indices, updates):
|
|
2901
2318
|
r"""
|
|
2902
2319
|
Updates tensor values by using input indices and value.
|
|
@@ -2946,8 +2363,7 @@ def scatter_update(input_x, indices, updates):
|
|
|
2946
2363
|
[[2. 1.2 1.]
|
|
2947
2364
|
[3. 1.2 1.]]
|
|
2948
2365
|
"""
|
|
2949
|
-
|
|
2950
|
-
return scatter_update_inner(input_x, indices, updates)
|
|
2366
|
+
return scatter_update_(input_x, indices, updates)
|
|
2951
2367
|
|
|
2952
2368
|
|
|
2953
2369
|
def scatter_nd_add(input_x, indices, updates, use_locking=False):
|
|
@@ -3414,8 +2830,8 @@ def sort(input_x, axis=-1, descending=False):
|
|
|
3414
2830
|
are sorted in descending order, or else sorted in ascending order. Default: ``False`` .
|
|
3415
2831
|
|
|
3416
2832
|
.. warning::
|
|
3417
|
-
Currently, the data types of
|
|
3418
|
-
If use
|
|
2833
|
+
Currently, the data types of float16, uint8, int8, int16, int32, int64 are well supported.
|
|
2834
|
+
If float32 is used, it may cause a loss of accuracy.
|
|
3419
2835
|
|
|
3420
2836
|
Returns:
|
|
3421
2837
|
|
|
@@ -3452,129 +2868,72 @@ def sort(input_x, axis=-1, descending=False):
|
|
|
3452
2868
|
return _sort(input_x)
|
|
3453
2869
|
|
|
3454
2870
|
|
|
3455
|
-
def
|
|
2871
|
+
def sort_ext(input, *, dim=-1, descending=False, stable=False):
|
|
3456
2872
|
r"""
|
|
3457
|
-
Sorts the input tensor along the given dimension in specified order
|
|
2873
|
+
Sorts the elements of the input tensor along the given dimension in the specified order.
|
|
2874
|
+
|
|
2875
|
+
.. warning::
|
|
2876
|
+
Currently, the data types of float16, uint8, int8, int16, int32, int64 are well supported.
|
|
2877
|
+
If float32 is used, it may cause a loss of accuracy.
|
|
3458
2878
|
|
|
3459
2879
|
Args:
|
|
3460
2880
|
input(Tensor): The input tensor to sort.
|
|
3461
|
-
|
|
3462
|
-
The Ascend backend only supports sorting the last dimension.
|
|
3463
|
-
descending (bool): The sort order. If `descending` is True then the elements
|
|
3464
|
-
are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
|
|
3465
|
-
|
|
3466
|
-
Returns:
|
|
3467
|
-
Tensor, the indices of sorted input tensor. Data type is int32.
|
|
3468
|
-
|
|
3469
|
-
Supported Platforms:
|
|
3470
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
3471
|
-
|
|
3472
|
-
Examples:
|
|
3473
|
-
>>> import mindspore
|
|
3474
|
-
>>> import numpy as np
|
|
3475
|
-
>>> from mindspore import Tensor, ops
|
|
3476
|
-
>>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
|
|
3477
|
-
>>> sort = ops.argsort(x)
|
|
3478
|
-
>>> print(sort)
|
|
3479
|
-
[[2 1 0]
|
|
3480
|
-
[2 0 1]
|
|
3481
|
-
[0 1 2]]
|
|
3482
|
-
"""
|
|
3483
|
-
_sort = _get_cache_prim(P.Sort)(axis, descending)
|
|
3484
|
-
_, arg_sort = _sort(input)
|
|
3485
|
-
return arg_sort
|
|
3486
|
-
|
|
3487
|
-
|
|
3488
|
-
def gather(input_params, input_indices, axis, batch_dims=0):
|
|
3489
|
-
r"""
|
|
3490
|
-
Returns the slice of the input tensor corresponding to the elements of `input_indices` on the specified `axis`.
|
|
3491
|
-
|
|
3492
|
-
The following figure shows the calculation process of Gather commonly:
|
|
3493
|
-
|
|
3494
|
-
.. image:: Gather.png
|
|
3495
|
-
|
|
3496
|
-
where params represents the input `input_params`, and indices represents the index to be sliced `input_indices`.
|
|
3497
|
-
|
|
3498
|
-
.. note::
|
|
3499
|
-
1. The value of input_indices must be in the range of `[0, input_param.shape[axis])`.
|
|
3500
|
-
On CPU and GPU, an error is raised if an out-of-bound index is found. On Ascend, the results may be
|
|
3501
|
-
undefined.
|
|
3502
|
-
|
|
3503
|
-
2. The data type of input_params cannot be
|
|
3504
|
-
`bool_ <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.html#mindspore.dtype>`_ on Ascend
|
|
3505
|
-
platform currently.
|
|
2881
|
+
The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.
|
|
3506
2882
|
|
|
3507
|
-
Args:
|
|
3508
|
-
|
|
3509
|
-
|
|
3510
|
-
|
|
3511
|
-
|
|
3512
|
-
|
|
3513
|
-
When `axis` is a Tensor, the size must be 1.
|
|
3514
|
-
batch_dims (int): Specifies the number of batch dimensions. It must be less than or equal to the rank
|
|
3515
|
-
of `input_indices`. Default: ``0`` .
|
|
2883
|
+
Keyword Args:
|
|
2884
|
+
dim (int, optional): The dimension to sort along. Default: ``-1``, means the last dimension.
|
|
2885
|
+
descending (bool, optional): Controls the sort order. If `descending` is True, the elements
|
|
2886
|
+
are sorted in descending order, or else sorted in ascending order. Default: ``False`` .
|
|
2887
|
+
stable (bool, optional): Controls the sort order. If stable is True then the sorting routine
|
|
2888
|
+
becomes stable, preserving the order of equivalent elements. Default: ``False`` .
|
|
3516
2889
|
|
|
3517
2890
|
Returns:
|
|
3518
|
-
|
|
3519
|
-
|
|
2891
|
+
- y1, a tensor whose values are the sorted values, with the same shape and data type as input.
|
|
2892
|
+
- y2, a tensor that consists of the indices of the elements in the original input tensor.
|
|
2893
|
+
Data type is int64.
|
|
3520
2894
|
|
|
3521
2895
|
Raises:
|
|
3522
|
-
TypeError:
|
|
3523
|
-
|
|
3524
|
-
TypeError:
|
|
3525
|
-
TypeError:
|
|
3526
|
-
|
|
2896
|
+
TypeError: If `dim` is not an int.
|
|
2897
|
+
TypeError: If `descending` is not a bool.
|
|
2898
|
+
TypeError: If the dtype of `input` is not one of float16, float32, uint8, int8, int16, int32, int64, bfloat16.
|
|
2899
|
+
TypeError: If `stable` is not a bool.
|
|
2900
|
+
ValueError: If `dim` is not in the range of [-len(input.shape), len(input.shape)).
|
|
3527
2901
|
|
|
3528
2902
|
Supported Platforms:
|
|
3529
|
-
``Ascend``
|
|
2903
|
+
``Ascend``
|
|
3530
2904
|
|
|
3531
|
-
Examples:
|
|
3532
|
-
>>> import mindspore
|
|
3533
|
-
>>> import numpy as np
|
|
3534
|
-
>>> from mindspore import Tensor, ops
|
|
3535
|
-
>>>
|
|
3536
|
-
>>>
|
|
3537
|
-
>>>
|
|
3538
|
-
>>> axis = 0
|
|
3539
|
-
>>> output = ops.gather(input_params, input_indices, axis)
|
|
3540
|
-
>>> print(output)
|
|
3541
|
-
[1. 3. 5. 3. 7.]
|
|
3542
|
-
>>> # case2: input_indices is a Tensor with shape (2, 2). When the input_params has one dimension,
|
|
3543
|
-
>>> # the output shape is equal to the input_indices shape.
|
|
3544
|
-
>>> input_indices = Tensor(np.array([[0, 2], [2, 6]]), mindspore.int32)
|
|
3545
|
-
>>> axis = 0
|
|
3546
|
-
>>> output = ops.gather(input_params, input_indices, axis)
|
|
3547
|
-
>>> print(output)
|
|
3548
|
-
[[1. 3.]
|
|
3549
|
-
[3. 7.]]
|
|
3550
|
-
>>> # case3: input_indices is a Tensor with shape (2, ) and
|
|
3551
|
-
>>> # input_params is a Tensor with shape (3, 4) and axis is 0.
|
|
3552
|
-
>>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
|
|
3553
|
-
>>> input_indices = Tensor(np.array([0, 2]), mindspore.int32)
|
|
3554
|
-
>>> axis = 0
|
|
3555
|
-
>>> output = ops.gather(input_params, input_indices, axis)
|
|
3556
|
-
>>> print(output)
|
|
3557
|
-
[[ 1. 2. 3. 4.]
|
|
3558
|
-
[ 9. 10. 11. 12.]]
|
|
3559
|
-
>>> # case4: input_indices is a Tensor with shape (2, ) and
|
|
3560
|
-
>>> # input_params is a Tensor with shape (3, 4) and axis is 1, batch_dims is 1.
|
|
3561
|
-
>>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
|
|
3562
|
-
>>> input_indices = Tensor(np.array([0, 2, 1]), mindspore.int32)
|
|
3563
|
-
>>> axis = 1
|
|
3564
|
-
>>> batch_dims = 1
|
|
3565
|
-
>>> output = ops.gather(input_params, input_indices, axis, batch_dims)
|
|
2905
|
+
Examples:
|
|
2906
|
+
>>> import mindspore
|
|
2907
|
+
>>> import numpy as np
|
|
2908
|
+
>>> from mindspore import Tensor, ops
|
|
2909
|
+
>>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
|
|
2910
|
+
>>> output = ops.function.array_func.sort_ext(x)
|
|
2911
|
+
>>> # The output below is based on the Ascend platform.
|
|
3566
2912
|
>>> print(output)
|
|
3567
|
-
[
|
|
2913
|
+
(Tensor(shape=[3, 3], dtype=Float16, value=
|
|
2914
|
+
[[ 1.0000e+00, 2.0000e+00, 8.0000e+00],
|
|
2915
|
+
[ 3.0000e+00, 5.0000e+00, 9.0000e+00],
|
|
2916
|
+
[ 4.0000e+00, 6.0000e+00, 7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int64, value=
|
|
2917
|
+
[[2, 1, 0],
|
|
2918
|
+
[2, 0, 1],
|
|
2919
|
+
[0, 1, 2]]))
|
|
3568
2920
|
"""
|
|
3569
|
-
|
|
3570
|
-
return _gather(input_params, input_indices, axis)
|
|
2921
|
+
return sort_ext_(input, dim, descending, stable)
|
|
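The `stable` flag added above preserves the order of equivalent elements; a quick NumPy illustration of what stability means (a sketch of the concept, not this operator's kernel):

```python
import numpy as np

keys = np.array([1, 0, 1, 0])
# A stable sort keeps equal elements in their original order: the two
# zeros keep indices (1, 3) and the two ones keep indices (0, 2).
print(np.argsort(keys, kind="stable"))  # [1 3 0 2]
```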
3571
2922
|
|
|
3572
2923
|
|
|
3573
|
-
def
|
|
3574
|
-
"""
|
|
3575
|
-
|
|
2924
|
+
def argsort(input, axis=-1, descending=False):
|
|
2925
|
+
r"""
|
|
2926
|
+
Sorts the input tensor along the given dimension in the specified order and returns the sorted indices.
|
|
2927
|
+
|
|
2928
|
+
Args:
|
|
2929
|
+
input(Tensor): The input tensor to sort.
|
|
2930
|
+
axis (int): The axis to sort along. Default: ``-1`` , means the last dimension.
|
|
2931
|
+
The Ascend backend only supports sorting the last dimension.
|
|
2932
|
+
descending (bool): The sort order. If `descending` is True then the elements
|
|
2933
|
+
are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
|
|
3576
2934
|
|
|
3577
|
-
|
|
2935
|
+
Returns:
|
|
2936
|
+
Tensor, the indices of sorted input tensor. Data type is int32.
|
|
3578
2937
|
|
|
3579
2938
|
Supported Platforms:
|
|
3580
2939
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -3583,15 +2942,16 @@ def gather_d(x, dim, index):
|
|
|
3583
2942
|
>>> import mindspore
|
|
3584
2943
|
>>> import numpy as np
|
|
3585
2944
|
>>> from mindspore import Tensor, ops
|
|
3586
|
-
>>> x = Tensor(np.array([[
|
|
3587
|
-
>>>
|
|
3588
|
-
>>>
|
|
3589
|
-
|
|
3590
|
-
|
|
3591
|
-
|
|
3592
|
-
[4 3]]
|
|
2945
|
+
>>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
|
|
2946
|
+
>>> sort = ops.argsort(x)
|
|
2947
|
+
>>> print(sort)
|
|
2948
|
+
[[2 1 0]
|
|
2949
|
+
[2 0 1]
|
|
2950
|
+
[0 1 2]]
|
|
3593
2951
|
"""
|
|
3594
|
-
|
|
2952
|
+
_sort = _get_cache_prim(P.Sort)(axis, descending)
|
|
2953
|
+
_, arg_sort = _sort(input)
|
|
2954
|
+
return arg_sort
|
|
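The ascending result above matches NumPy's `argsort`; NumPy has no direct `descending` flag, but on numeric inputs it can be emulated by negating the keys (a sketch, assuming no NaNs):

```python
import numpy as np

x = np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]], dtype=np.float16)
print(np.argsort(x, axis=-1))   # [[2 1 0] [2 0 1] [0 1 2]], as above
print(np.argsort(-x, axis=-1))  # descending-order indices
```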
3595
2955
|
|
|
3596
2956
|
|
|
3597
2957
|
def gather_elements(input, dim, index):
|
|
@@ -3608,26 +2968,29 @@ def gather_elements(input, dim, index):
|
|
|
3608
2968
|
|
|
3609
2969
|
output[i][j][k] = x[i][j][index[i][j][k]] # if dim == 2
|
|
3610
2970
|
|
|
3611
|
-
`input` and `index` have the same length of dimensions, and
|
|
3612
|
-
|
|
3613
|
-
|
|
3614
|
-
|
|
2971
|
+
`input` and `index` have the same length of dimensions, and `index.shape[axis] <= input.shape[axis]`
|
|
2972
|
+
where axis goes through all dimensions of `input` except `dim`.
|
|
2973
|
+
|
|
2974
|
+
.. warning::
|
|
2975
|
+
On Ascend, the behavior is unpredictable in the following cases:
|
|
2976
|
+
|
|
2977
|
+
- the value of `index` is not in the range `[-input.shape[dim], input.shape[dim])` in forward;
|
|
2978
|
+
- the value of `index` is not in the range `[0, input.shape[dim])` in backward.
|
|
3615
2979
|
|
|
3616
2980
|
Args:
|
|
3617
2981
|
input (Tensor): The input tensor.
|
|
3618
|
-
dim (int): The axis along which to index. It must be int32 or int64. The value range is [-input.ndim,
|
|
3619
|
-
input.ndim)
|
|
2982
|
+
dim (int): The axis along which to index. It must be int32 or int64. The value range is `[-input.ndim,
|
|
2983
|
+
input.ndim)`.
|
|
3620
2984
|
index (Tensor): The indices of elements to gather. It can be one of the following data types:
|
|
3621
|
-
int32, int64. The value range of each index element is [-input.shape(dim), input.shape(dim))
|
|
2985
|
+
int32, int64. The value range of each index element is `[-input.shape(dim), input.shape(dim))`.
|
|
3622
2986
|
|
|
3623
2987
|
Returns:
|
|
3624
|
-
Tensor, has the same shape as index
|
|
3625
|
-
and has the same data type with `input`.
|
|
2988
|
+
Tensor, has the same shape as `index` and the same data type as `input`.
|
|
3626
2989
|
|
|
3627
2990
|
Raises:
|
|
3628
2991
|
TypeError: If dtype of `dim` or `index` is neither int32 nor int64.
|
|
3629
2992
|
ValueError: If length of shape of `input` is not equal to length of shape of `index`.
|
|
3630
|
-
ValueError: If the size of the dimension except `dim` is
|
|
2993
|
+
ValueError: If the size of any dimension except `dim` of `input` is less than the size of the corresponding dimension of `index`.
|
|
3631
2994
|
ValueError: If the value of `dim` is not in the expected range.
|
|
3632
2995
|
|
|
3633
2996
|
Supported Platforms:
|
|
@@ -3648,48 +3011,6 @@ def gather_elements(input, dim, index):
|
|
|
3648
3011
|
return gather_d_(input, dim, index)
|
|
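The indexing rule above matches NumPy's `take_along_axis`; a small sketch for dim == 1 (assuming non-negative indices):

```python
import numpy as np

x = np.array([[1, 2], [3, 4]])
index = np.array([[0, 0], [1, 0]])
# output[i][j] = x[i][index[i][j]] for dim == 1, per the rule above.
print(np.take_along_axis(x, index, axis=1))  # [[1 1] [4 3]]
```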
3649
3012
|
|
|
3650
3013
|
|
|
3651
|
-
def gather_nd(input_x, indices):
|
|
3652
|
-
r"""
|
|
3653
|
-
Gathers slices from a tensor by indices.
|
|
3654
|
-
|
|
3655
|
-
Using given indices to gather slices from a tensor with a specified shape.
|
|
3656
|
-
|
|
3657
|
-
`indices` is a K-dimensional integer tensor. Suppose it is a (K-1)-dimensional tensor, each element of which
|
|
3658
|
-
defines a slice of `input_x`:
|
|
3659
|
-
|
|
3660
|
-
.. math::
|
|
3661
|
-
output[(i_0, ..., i_{K-2})] = input\_x[indices[(i_0, ..., i_{K-2})]]
|
|
3662
|
-
|
|
3663
|
-
The last dimension of `indices` can not be greater than the rank of `input_x`:
|
|
3664
|
-
:math:`indices.shape[-1] <= input\_x.rank`.
|
|
3665
|
-
|
|
3666
|
-
Args:
|
|
3667
|
-
input_x (Tensor): The target tensor to gather values.
|
|
3668
|
-
indices (Tensor): The index tensor, with int32 or int64 data type.
|
|
3669
|
-
|
|
3670
|
-
Returns:
|
|
3671
|
-
Tensor, has the same type as `input_x` and the shape is
|
|
3672
|
-
:math:`indices\_shape[:-1] + input\_x\_shape[indices\_shape[-1]:]`.
|
|
3673
|
-
|
|
3674
|
-
Raises:
|
|
3675
|
-
ValueError: If length of shape of `input_x` is less than the last dimension of `indices`.
|
|
3676
|
-
|
|
3677
|
-
Supported Platforms:
|
|
3678
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
3679
|
-
|
|
3680
|
-
Examples:
|
|
3681
|
-
>>> import mindspore
|
|
3682
|
-
>>> import numpy as np
|
|
3683
|
-
>>> from mindspore import Tensor, ops
|
|
3684
|
-
>>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
|
|
3685
|
-
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
|
|
3686
|
-
>>> output = ops.gather_nd(input_x, indices)
|
|
3687
|
-
>>> print(output)
|
|
3688
|
-
[-0.1 0.5]
|
|
3689
|
-
"""
|
|
3690
|
-
return gather_nd_(input_x, indices)
|
|
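The gather rule in the removed docstring's formula can be checked with plain NumPy fancy indexing; a sketch of its example:

```python
import numpy as np

# Each row of `indices` addresses one element/slice of x.
x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]], dtype=np.float32)
indices = np.array([[0, 0], [1, 1]])
print(x[tuple(indices.T)])  # [-0.1  0.5], matching the removed example
```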
3691
|
-
|
|
3692
|
-
|
|
3693
3014
|
def tensor_scatter_add(input_x, indices, updates):
|
|
3694
3015
|
r"""
|
|
3695
3016
|
Creates a new tensor by adding the values from the positions in `input_x` indicated by
|
|
@@ -3700,7 +3021,7 @@ def tensor_scatter_add(input_x, indices, updates):
|
|
|
3700
3021
|
|
|
3701
3022
|
The last axis of `indices` is the depth of each index vector. For each index vector,
|
|
3702
3023
|
there must be a corresponding value in `updates`. The shape of `updates` should be
|
|
3703
|
-
equal to the shape of `input_x[indices]`. For more details, see
|
|
3024
|
+
equal to the shape of `input_x[indices]`. For more details, see Examples.
|
|
3704
3025
|
|
|
3705
3026
|
.. math::
|
|
3706
3027
|
output\left [indices \right ] = input\_x + update
|
|
@@ -3758,7 +3079,7 @@ def tensor_scatter_sub(input_x, indices, updates):
|
|
|
3758
3079
|
|
|
3759
3080
|
The last axis of `indices` is the depth of each index vector. For each index vector,
|
|
3760
3081
|
there must be a corresponding value in `updates`. The shape of `updates` should be
|
|
3761
|
-
equal to the shape of `input_x[indices]`. For more details, see
|
|
3082
|
+
equal to the shape of `input_x[indices]`. For more details, see Examples.
|
|
3762
3083
|
|
|
3763
3084
|
.. math::
|
|
3764
3085
|
output[indices] = input\_x - update
|
|
@@ -3943,14 +3264,12 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
|
|
|
3943
3264
|
nondeterministic.
|
|
3944
3265
|
- On Ascend, `reduction` only supports being set to "none" for now.
|
|
3945
3266
|
- On Ascend, the data type of `input_x` must be float16 or float32.
|
|
3267
|
+
- This is an experimental API that is subject to change or deletion.
|
|
3946
3268
|
|
|
3947
3269
|
Note:
|
|
3948
3270
|
If some values of the `indices` exceed the upper or lower bounds of the index of `input_x`, instead of raising
|
|
3949
3271
|
an index error, the corresponding `updates` will not be updated to `input_x`.
|
|
3950
3272
|
|
|
3951
|
-
.. warning::
|
|
3952
|
-
This is an experimental API that is subject to change or deletion.
|
|
3953
|
-
|
|
3954
3273
|
Args:
|
|
3955
3274
|
input_x (Tensor): The target tensor. The rank must be at least 1.
|
|
3956
3275
|
indices (Tensor): The index of `input_x` to do scatter operation whose data type must be mindspore.int32 or
|
|
@@ -4065,6 +3384,79 @@ def scatter(input, axis, index, src):
|
|
|
4065
3384
|
return ops.tensor_scatter_elements(input_x=input, indices=index, updates=src, axis=axis)
|
|
4066
3385
|
|
|
4067
3386
|
|
|
3387
|
+
def scatter_add_ext(input, dim, index, src):
|
|
3388
|
+
"""
|
|
3389
|
+
Adds all elements in `src` to `input` at the indices specified by `index`, along the dimension specified by `dim`.
|
|
3390
|
+
It takes three inputs `input`, `src` and `index` of the same rank r >= 1.
|
|
3391
|
+
|
|
3392
|
+
For a 3-D tensor, the operation updates input as follows:
|
|
3393
|
+
|
|
3394
|
+
.. code-block::
|
|
3395
|
+
|
|
3396
|
+
input[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
|
|
3397
|
+
|
|
3398
|
+
input[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
|
|
3399
|
+
|
|
3400
|
+
input[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
|
|
3401
|
+
|
|
3402
|
+
Args:
|
|
3403
|
+
input (Tensor): The target tensor. The rank must be at least 1.
|
|
3404
|
+
dim (int): Which dim to scatter. Accepted range is [-r, r) where r = rank(`input`). Default: ``0``.
|
|
3405
|
+
index (Tensor): The index of `input` to do scatter operation whose data type must be mindspore.int32 or
|
|
3406
|
+
mindspore.int64. Same rank as `input`. Except for the dimension specified by `dim`,
|
|
3407
|
+
the size of each dimension of `index` must be less than or equal to the size of
|
|
3408
|
+
the corresponding dimension of `input`.
|
|
3409
|
+
src (Tensor): The tensor doing the scatter operation with `input`, has the same type as `input` and
|
|
3410
|
+
the size of each dimension must be greater than or equal to that of `index`.
|
|
3411
|
+
|
|
3412
|
+
Returns:
|
|
3413
|
+
Tensor, has the same shape and type as `input`.
|
|
3414
|
+
|
|
3415
|
+
Raises:
|
|
3416
|
+
TypeError: If `index` is neither int32 nor int64.
|
|
3417
|
+
ValueError: If the rank of any one of `input`, `index` and `src` is less than 1.
|
|
3418
|
+
ValueError: If the rank of `input`, `index` and `src` is not the same.
|
|
3419
|
+
ValueError: If, outside dimension `dim`, the size of any dimension of `index` is greater than the size of
|
|
3420
|
+
the corresponding dimension of `input` .
|
|
3421
|
+
ValueError: If the size of any dimension of `src` is less than that of `index`.
|
|
3422
|
+
|
|
3423
|
+
Supported Platforms:
|
|
3424
|
+
``Ascend``
|
|
3425
|
+
|
|
3426
|
+
Examples:
|
|
3427
|
+
>>> import numpy as np
|
|
3428
|
+
>>> import mindspore as ms
|
|
3429
|
+
>>> from mindspore import Tensor, ops
|
|
3430
|
+
>>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
|
|
3431
|
+
>>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
|
|
3432
|
+
>>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
|
|
3433
|
+
>>> out = ops.function.array_func.scatter_add_ext(input=input, dim=1, index=index, src=src)
|
|
3434
|
+
>>> print(out)
|
|
3435
|
+
[[1. 2. 11. 4. 13.]]
|
|
3436
|
+
>>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
|
|
3437
|
+
>>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
|
|
3438
|
+
>>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64)
|
|
3439
|
+
>>> out = ops.function.array_func.scatter_add_ext(input=input, dim=0, index=index, src=src)
|
|
3440
|
+
>>> print(out)
|
|
3441
|
+
[[1. 2. 3. 0. 0.]
|
|
3442
|
+
[0. 0. 0. 0. 0.]
|
|
3443
|
+
[4. 5. 6. 0. 0.]
|
|
3444
|
+
[0. 0. 0. 0. 0.]
|
|
3445
|
+
[7. 8. 9. 0. 0.]]
|
|
3446
|
+
>>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
|
|
3447
|
+
>>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
|
|
3448
|
+
>>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64)
|
|
3449
|
+
>>> out = ops.function.array_func.scatter_add_ext(input=input, dim=1, index=index, src=src)
|
|
3450
|
+
>>> print(out)
|
|
3451
|
+
[[1. 0. 2. 0. 3.]
|
|
3452
|
+
[4. 0. 5. 0. 6.]
|
|
3453
|
+
[7. 0. 8. 0. 9.]
|
|
3454
|
+
[0. 0. 0. 0. 0.]
|
|
3455
|
+
[0. 0. 0. 0. 0.]]
|
|
3456
|
+
"""
|
|
3457
|
+
return scatter_add_ext_op(input, dim, index, src)
|
|
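A sketch of the dim == 1 update rule from the code block above, using `np.add.at`; the row-index broadcasting here is illustrative, not the operator's kernel, and assumes in-range indices:

```python
import numpy as np

input_ = np.array([[1., 2., 3., 4., 5.]])
src = np.array([[8., 8.]])
index = np.array([[2, 4]])
# input[i][index[i][j]] += src[i][j] for dim == 1:
rows = np.arange(input_.shape[0])[:, None]
np.add.at(input_, (np.broadcast_to(rows, index.shape), index), src)
print(input_)  # [[ 1.  2. 11.  4. 13.]]
```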
3458
|
+
|
|
3459
|
+
|
|
4068
3460
|
def _get_slice_scatter_const(x_shape, axis, start, end, step):
|
|
4069
3461
|
r"""
|
|
4070
3462
|
Calculate the rank of input, embedded dimensions and index.
|
|
@@ -4074,7 +3466,7 @@ def _get_slice_scatter_const(x_shape, axis, start, end, step):
|
|
|
4074
3466
|
start = start if start is not None else 0
|
|
4075
3467
|
start = start if start >= 0 else start + x_rank
|
|
4076
3468
|
end = end if end is not None else x_shape[axis]
|
|
4077
|
-
end = end if end >= 0 else end +
|
|
3469
|
+
end = end if end >= 0 else end + x_shape[axis]
|
|
4078
3470
|
end = end if end < x_shape[axis] else x_shape[axis]
|
|
4079
3471
|
index = list(builtins.range(start, end, step))
|
|
4080
3472
|
return x_rank, index, axis
|
|
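A self-contained sketch of the start/end normalization above, turning (start, end, step) into an explicit index list along one axis; `slice_index` is a hypothetical helper and assumes step > 0, as in the surrounding code:

```python
def slice_index(dim_size, start=None, end=None, step=1):
    # Normalize negatives by the dimension size and clamp end, as above.
    start = 0 if start is None else (start + dim_size if start < 0 else start)
    end = dim_size if end is None else (end + dim_size if end < 0 else end)
    end = min(end, dim_size)
    return list(range(start, end, step))

print(slice_index(6, start=-4, end=None, step=2))  # [2, 4]
```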
@@ -4121,6 +3513,8 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
|
|
|
4121
3513
|
[1. 0. 1. 0. 1. 0.]
|
|
4122
3514
|
[1. 0. 1. 0. 1. 0.]]
|
|
4123
3515
|
"""
|
|
3516
|
+
_check_is_tensor("input", input, "slice_scatter")
|
|
3517
|
+
_check_is_tensor("src", src, "slice_scatter")
|
|
4124
3518
|
input_shape = input.shape
|
|
4125
3519
|
input_rank, index, axis = _get_slice_scatter_const(input_shape, axis, start, end, step)
|
|
4126
3520
|
|
|
@@ -4136,6 +3530,8 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
|
|
|
4136
3530
|
for _ in builtins.range(input_rank - axis - 1):
|
|
4137
3531
|
index_tensor = index_tensor.expand_dims(-1)
|
|
4138
3532
|
index_tensor = index_tensor.broadcast_to(src.shape)
|
|
3533
|
+
if index_tensor.dtype not in mstype.int_type:
|
|
3534
|
+
index_tensor = index_tensor.astype(mstype.int64)
|
|
4139
3535
|
return tensor_scatter_elements(input, axis=axis, indices=index_tensor, updates=src)
|
|
4140
3536
|
|
|
4141
3537
|
|
|
@@ -4174,10 +3570,12 @@ def select_scatter(input, src, axis, index):
|
|
|
4174
3570
|
[1. 1. 1.]
|
|
4175
3571
|
[0. 0. 0.]]]
|
|
4176
3572
|
"""
|
|
3573
|
+
_check_is_tensor("input", input, "select_scatter")
|
|
3574
|
+
_check_is_tensor("src", src, "select_scatter")
|
|
4177
3575
|
src = src.expand_dims(axis=axis)
|
|
4178
3576
|
x_rank = input.ndim
|
|
4179
3577
|
axis = axis if axis >= 0 else axis + x_rank
|
|
4180
|
-
index = index if index >= 0 else index +
|
|
3578
|
+
index = index if index >= 0 else index + input.shape[axis]
|
|
4181
3579
|
return slice_scatter(input, src, axis, start=index, end=index + 1)
|
|
4182
3580
|
|
|
4183
3581
|
|
|
@@ -4228,6 +3626,7 @@ def space_to_batch_nd(input_x, block_size, paddings):
|
|
|
4228
3626
|
|
|
4229
3627
|
Examples:
|
|
4230
3628
|
>>> import numpy as np
|
|
3629
|
+
>>> import mindspore
|
|
4231
3630
|
>>> from mindspore import Tensor, ops
|
|
4232
3631
|
>>> block_size = [2, 2]
|
|
4233
3632
|
>>> paddings = [[0, 0], [0, 0]]
|
|
@@ -4302,49 +3701,11 @@ def batch_to_space_nd(input_x, block_shape, crops):
|
|
|
4302
3701
|
[3. 4.]]]]
|
|
4303
3702
|
"""
|
|
4304
3703
|
if isinstance(block_shape, Tensor):
|
|
4305
|
-
|
|
4306
|
-
return _batch_to_space_ndv2(input_x, block_shape, crops)
|
|
3704
|
+
return batch_to_space_nd_v2_(input_x, block_shape, crops)
|
|
4307
3705
|
_batch_to_space_nd = _get_cache_prim(P.BatchToSpaceND)(block_shape, crops)
|
|
4308
3706
|
return _batch_to_space_nd(input_x)
|
|
4309
3707
|
|
|
4310
3708
|
|
|
4311
|
-
def nonzero(input):
|
|
4312
|
-
"""
|
|
4313
|
-
Returns a Tensor of the positions of all non-zero values.
|
|
4314
|
-
|
|
4315
|
-
Args:
|
|
4316
|
-
input (Tensor): The input Tensor; its rank should be greater than or equal to 1.
|
|
4317
|
-
|
|
4318
|
-
Returns:
|
|
4319
|
-
Tensor, a 2-D Tensor whose data type is int64, containing the positions of all non-zero values of the input.
|
|
4320
|
-
|
|
4321
|
-
Raises:
|
|
4322
|
-
TypeError: If `input` is not a Tensor.
|
|
4323
|
-
ValueError: If the dim of `input` equals 0.
|
|
4324
|
-
|
|
4325
|
-
Supported Platforms:
|
|
4326
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
4327
|
-
|
|
4328
|
-
Examples:
|
|
4329
|
-
>>> import mindspore
|
|
4330
|
-
>>> import numpy as np
|
|
4331
|
-
>>> from mindspore import Tensor
|
|
4332
|
-
>>> import mindspore.ops as ops
|
|
4333
|
-
>>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
|
|
4334
|
-
>>> output = ops.nonzero(x)
|
|
4335
|
-
>>> print(output)
|
|
4336
|
-
[[0 0 0]
|
|
4337
|
-
[0 1 0]]
|
|
4338
|
-
>>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
|
|
4339
|
-
>>> output = ops.nonzero(x)
|
|
4340
|
-
>>> print(output)
|
|
4341
|
-
[[0]
|
|
4342
|
-
[2]
|
|
4343
|
-
[4]]
|
|
4344
|
-
"""
|
|
4345
|
-
return nonzero_(input)
|
|
4346
|
-
|
|
4347
|
-
|
|
4348
3709
|
def matrix_diag(x, k=0, num_rows=-1, num_cols=-1, padding_value=0, align="RIGHT_LEFT"):
|
|
4349
3710
|
r"""
|
|
4350
3711
|
Returns a Tensor with the contents in `x` as k[0]-th to k[1]-th diagonals of a matrix, with everything else padded
|
|
@@ -4604,18 +3965,19 @@ def meshgrid(*inputs, indexing='xy'):
|
|
|
4604
3965
|
|
|
4605
3966
|
Keyword Args:
|
|
4606
3967
|
indexing (str, optional): Cartesian ('xy', default) or
|
|
4607
|
-
matrix ('ij') indexing of output. Valid options: xy' or 'ij'
|
|
3968
|
+
matrix ('ij') indexing of output. Valid options: ``'xy'`` or ``'ij'``. In the 2-D case with
|
|
4608
3969
|
inputs of length `M` and `N`, the outputs are of shape :math:`(N, M)`
|
|
4609
|
-
for 'xy' indexing and :math:`(M, N)` for 'ij' indexing. In the 3-D
|
|
3970
|
+
for ``'xy'`` indexing and :math:`(M, N)` for ``'ij'`` indexing. In the 3-D
|
|
4610
3971
|
case with inputs of length `M`, `N` and `P`, outputs are of shape
|
|
4611
|
-
:math:`(N, M, P)` for 'xy' indexing and :math:`(M, N, P)` for 'ij' indexing.
|
|
3972
|
+
:math:`(N, M, P)` for ``'xy'`` indexing and :math:`(M, N, P)` for ``'ij'`` indexing.
|
|
3973
|
+
Default: ``'xy'`` .
|
|
4612
3974
|
|
|
4613
3975
|
Returns:
|
|
4614
3976
|
Tensors, a Tuple of N N-D Tensor objects. The data type is the same with the Inputs.
|
|
4615
3977
|
|
|
4616
3978
|
Raises:
|
|
4617
3979
|
TypeError: If `indexing` is not a str or `inputs` is not a tuple.
|
|
4618
|
-
ValueError: If `indexing` is neither 'xy' nor 'ij'
|
|
3980
|
+
ValueError: If `indexing` is neither ``'xy'`` nor ``'ij'``.
|
|
4619
3981
|
|
|
4620
3982
|
Supported Platforms:
|
|
4621
3983
|
``Ascend`` ``GPU`` ``CPU``
|
|
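The shape rules for the two indexing modes described above can be seen directly with NumPy's `meshgrid`, which uses the same convention (a sketch for intuition):

```python
import numpy as np

# With inputs of length M=4 and N=3, 'xy' yields outputs of shape (N, M)
# and 'ij' yields (M, N), as stated above.
x, y = np.arange(4), np.arange(3)
print(np.meshgrid(x, y, indexing="xy")[0].shape)  # (3, 4)
print(np.meshgrid(x, y, indexing="ij")[0].shape)  # (4, 3)
```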
@@ -4623,7 +3985,7 @@ def meshgrid(*inputs, indexing='xy'):
|
|
|
4623
3985
|
Examples:
|
|
4624
3986
|
>>> import numpy as np
|
|
4625
3987
|
>>> from mindspore import Tensor
|
|
4626
|
-
>>>
|
|
3988
|
+
>>> from mindspore import ops
|
|
4627
3989
|
>>> x = Tensor(np.array([1, 2, 3, 4]).astype(np.int32))
|
|
4628
3990
|
>>> y = Tensor(np.array([5, 6, 7]).astype(np.int32))
|
|
4629
3991
|
>>> z = Tensor(np.array([8, 9, 0, 1, 2]).astype(np.int32))
|
|
@@ -4706,7 +4068,7 @@ def affine_grid(theta, size, align_corners=False):
|
|
|
4706
4068
|
Examples:
|
|
4707
4069
|
>>> import mindspore
|
|
4708
4070
|
>>> from mindspore import Tensor
|
|
4709
|
-
>>>
|
|
4071
|
+
>>> from mindspore import ops
|
|
4710
4072
|
>>> theta = Tensor([[[0.8, 0.5, 0],[-0.5, 0.8, 0]]], mindspore.float32)
|
|
4711
4073
|
>>> out_size = (1, 3, 2, 3)
|
|
4712
4074
|
>>> output = ops.affine_grid(theta, out_size, False)
|
|
@@ -4722,87 +4084,6 @@ def affine_grid(theta, size, align_corners=False):
|
|
|
4722
4084
|
return affine_grid_op(theta, size)
|
|
4723
4085
|
|
|
4724
4086
|
|
|
4725
|
-
def broadcast_to(input, shape): # pylint: disable=redefined-outer-name
|
|
4726
|
-
"""
|
|
4727
|
-
Broadcasts input tensor to a given shape. The dim of input shape must be smaller
|
|
4728
|
-
than or equal to that of target shape. Suppose input shape is :math:`(x_1, x_2, ..., x_m)`,
|
|
4729
|
-
target shape is :math:`(*, y_1, y_2, ..., y_m)`, where :math:`*` means any additional dimension.
|
|
4730
|
-
The broadcast rules are as follows:
|
|
4731
|
-
|
|
4732
|
-
Compare the value of :math:`x_m` and :math:`y_m`, :math:`x_{m-1}` and :math:`y_{m-1}`, ...,
|
|
4733
|
-
:math:`x_1` and :math:`y_1` consecutively and
|
|
4734
|
-
decide whether these shapes are broadcastable and what the broadcast result is.
|
|
4735
|
-
|
|
4736
|
-
If the value pairs at a specific dim are equal, then that value goes right into that dim of output shape.
|
|
4737
|
-
With an input shape :math:`(2, 3)`, target shape :math:`(2, 3)` , the inferred output shape is :math:`(2, 3)`.
|
|
4738
|
-
|
|
4739
|
-
If the value pairs are unequal, there are three cases:
|
|
4740
|
-
|
|
4741
|
-
Case 1: If the value of the target shape in the dimension is -1, the value of the
|
|
4742
|
-
output shape in the dimension is the value of the corresponding input shape in the dimension.
|
|
4743
|
-
With an input shape :math:`(3, 3)`, target
|
|
4744
|
-
shape :math:`(-1, 3)`, the output shape is :math:`(3, 3)`.
|
|
4745
|
-
|
|
4746
|
-
Case 2: If the value of target shape in the dimension is not -1, but the corresponding
|
|
4747
|
-
value in the input shape is 1, then the corresponding value of the output shape
|
|
4748
|
-
is that of the target shape. With an input shape :math:`(1, 3)`, target
|
|
4749
|
-
shape :math:`(8, 3)`, the output shape is :math:`(8, 3)`.
|
|
4750
|
-
|
|
4751
|
-
Case 3: If the corresponding values of the two shapes do not satisfy the above cases,
|
|
4752
|
-
it means that broadcasting from the input shape to the target shape is not supported.
|
|
4753
|
-
|
|
4754
|
-
So far we got the last m dims of the outshape, now focus on the first :math:`*` dims, there are
|
|
4755
|
-
two cases:
|
|
4756
|
-
|
|
4757
|
-
If the first :math:`*` dims of output shape does not have -1 in it, then fill the input
|
|
4758
|
-
shape with ones until their length are the same, and then refer to
|
|
4759
|
-
Case 2 mentioned above to calculate the output shape. With target shape :math:`(3, 1, 4, 1, 5, 9)`,
|
|
4760
|
-
input shape :math:`(1, 5, 9)`, the filled input shape will be :math:`(1, 1, 1, 1, 5, 9)` and thus the
|
|
4761
|
-
output shape is :math:`(3, 1, 4, 1, 5, 9)`.
|
|
4762
|
-
|
|
4763
|
-
If the first :math:`*` dims of output shape have -1 in it, it implies this -1 is corresponding to
|
|
4764
|
-
a non-existing dim, so the shapes are not broadcastable. With target shape :math:`(3, -1, 4, 1, 5, 9)`,
|
|
4765
|
-
input shape :math:`(1, 5, 9)`, instead of operating the dim-filling process first, it raises errors directly.
|
|
4766
|
-
|
|
4767
|
-
Args:
|
|
4768
|
-
input (Tensor): The input Tensor.
|
|
4769
|
-
shape (tuple): The target shape to broadcast. Can be fully specified, or have -1 in one position
|
|
4770
|
-
where it will be substituted by the input tensor's shape in that position, see example.
|
|
4771
|
-
|
|
4772
|
-
Returns:
|
|
4773
|
-
Tensor, with the given `shape` and the same data type as `input`.
|
|
4774
|
-
|
|
4775
|
-
Raises:
|
|
4776
|
-
TypeError: If `shape` is not a tuple.
|
|
4777
|
-
ValueError: If the target and input shapes are incompatible, or if a -1 in the target shape is in an invalid
|
|
4778
|
-
location.
|
|
4779
|
-
|
|
4780
|
-
Supported Platforms:
|
|
4781
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
4782
|
-
|
|
4783
|
-
Examples:
|
|
4784
|
-
>>> import numpy as np
|
|
4785
|
-
>>> from mindspore import Tensor, ops
|
|
4786
|
-
>>> shape = (2, 3)
|
|
4787
|
-
>>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
|
|
4788
|
-
>>> output = ops.broadcast_to(x, shape)
|
|
4789
|
-
>>> print(output)
|
|
4790
|
-
[[1. 2. 3.]
|
|
4791
|
-
[1. 2. 3.]]
|
|
4792
|
-
>>> shape = (-1, 2)
|
|
4793
|
-
>>> x = Tensor(np.array([[1], [2]]).astype(np.float32))
|
|
4794
|
-
>>> output = ops.broadcast_to(x, shape)
|
|
4795
|
-
>>> print(output)
|
|
4796
|
-
[[1. 1.]
|
|
4797
|
-
[2. 2.]]
|
|
4798
|
-
"""
|
|
4799
|
-
if isinstance(shape, Tensor) or ops.is_sequence_value_unknown(shape):
|
|
4800
|
-
_dyn_broadcast_to = _get_cache_prim(DynamicBroadcastTo)()
|
|
4801
|
-
return _dyn_broadcast_to(input, shape)
|
|
4802
|
-
_broadcast_to = _get_cache_prim(P.BroadcastTo)(shape)
|
|
4803
|
-
return _broadcast_to(input)
|
|
4804
|
-
|
|
4805
|
-
|
|
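The broadcast rules spelled out above can be condensed into a few lines of plain Python. The sketch below is illustrative only (the helper name `infer_broadcast_shape` is hypothetical, not part of the package); it mirrors the documented cases, including the -1 substitution:

>>> def infer_broadcast_shape(input_shape, target_shape):
...     # Right-align the shapes by padding the input with leading ones.
...     pad = len(target_shape) - len(input_shape)
...     if pad < 0:
...         raise ValueError("input rank exceeds target rank")
...     if -1 in target_shape[:pad]:
...         raise ValueError("-1 maps to a non-existing input dim")
...     padded = (1,) * pad + tuple(input_shape)
...     out = []
...     for x, y in zip(padded, target_shape):
...         if y == -1:            # Case 1: -1 keeps the input value.
...             out.append(x)
...         elif x in (1, y):      # equal values, or Case 2: input dim is 1.
...             out.append(y)
...         else:                  # Case 3: not broadcastable.
...             raise ValueError(f"cannot broadcast {x} to {y}")
...     return tuple(out)
>>> infer_broadcast_shape((1, 5, 9), (3, 1, 4, 1, 5, 9))
(3, 1, 4, 1, 5, 9)
>>> infer_broadcast_shape((3, 3), (-1, 3))
(3, 3)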
4806
4087
|
def unsorted_segment_min(x, segment_ids, num_segments):
|
|
4807
4088
|
r"""
|
|
4808
4089
|
Computes the minimum of a tensor along segments.
|
|
@@ -4826,14 +4107,13 @@ def unsorted_segment_min(x, segment_ids, num_segments):
|
|
|
4826
4107
|
x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
|
|
4827
4108
|
segment_ids (Tensor): The label indicating the segment to which each element belongs.
|
|
4828
4109
|
Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
|
|
4829
|
-
num_segments (int):
|
|
4110
|
+
num_segments (Union[int, Tensor], optional): Set :math:`z` as `num_segments`; it can be an int or a 0-D Tensor.
|
|
4830
4111
|
|
|
4831
4112
|
Returns:
|
|
4832
|
-
Tensor,
|
|
4113
|
+
Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
|
|
4833
4114
|
|
|
4834
4115
|
Raises:
|
|
4835
4116
|
TypeError: If `num_segments` is neither an int nor a 0-D Tensor.
|
|
4836
|
-
ValueError: If length of shape of `segment_ids` is not equal to 1.
|
|
4837
4117
|
|
|
4838
4118
|
Supported Platforms:
|
|
4839
4119
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -4850,7 +4130,6 @@ def unsorted_segment_min(x, segment_ids, num_segments):
|
|
|
4850
4130
|
[[1. 2. 3.]
|
|
4851
4131
|
[4. 2. 1.]]
|
|
4852
4132
|
"""
|
|
4853
|
-
unsorted_segment_min_ = P.UnsortedSegmentMin()
|
|
4854
4133
|
return unsorted_segment_min_(x, segment_ids, num_segments)
|
|
4855
4134
|
|
|
4856
4135
|
|
|
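For reference, the segment reduction above computes, for each segment id :math:`i` in :math:`[0, z)`, the minimum over all rows of `x` whose `segment_ids` entry equals :math:`i`. A rough NumPy equivalent for the 1-D `segment_ids` case (illustrative only; it assumes empty segments are filled with the dtype maximum, the usual convention for this op family):

>>> import numpy as np
>>> def np_unsorted_segment_min(x, segment_ids, num_segments):
...     # Empty segments keep the dtype's maximum value.
...     out = np.full((num_segments,) + x.shape[1:],
...                   np.finfo(x.dtype).max, dtype=x.dtype)
...     for row, sid in zip(x, segment_ids):
...         out[sid] = np.minimum(out[sid], row)
...     return out
>>> x = np.array([[1., 2., 3.], [4., 5., 6.], [4., 2., 1.]], dtype=np.float32)
>>> np_unsorted_segment_min(x, np.array([0, 1, 1]), 2)
array([[1., 2., 3.],
       [4., 2., 1.]], dtype=float32)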
@@ -4877,14 +4156,13 @@ def unsorted_segment_max(x, segment_ids, num_segments):
|
|
|
4877
4156
|
x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
|
|
4878
4157
|
segment_ids (Tensor): The label indicating the segment to which each element belongs.
|
|
4879
4158
|
Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
|
|
4880
|
-
num_segments (int):
|
|
4159
|
+
num_segments (Union[int, Tensor], optional): Set :math:`z` as `num_segments`; it can be an int or a 0-D Tensor.
|
|
4881
4160
|
|
|
4882
4161
|
Returns:
|
|
4883
|
-
Tensor,
|
|
4162
|
+
Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
|
|
4884
4163
|
|
|
4885
4164
|
Raises:
|
|
4886
4165
|
TypeError: If `num_segments` is neither an int nor a 0-D Tensor.
|
|
4887
|
-
ValueError: If length of shape of `segment_ids` is not equal to 1.
|
|
4888
4166
|
|
|
4889
4167
|
Supported Platforms:
|
|
4890
4168
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -4901,7 +4179,6 @@ def unsorted_segment_max(x, segment_ids, num_segments):
|
|
|
4901
4179
|
[[1. 2. 3.]
|
|
4902
4180
|
[4. 5. 6.]]
|
|
4903
4181
|
"""
|
|
4904
|
-
unsorted_segment_max_ = P.UnsortedSegmentMax()
|
|
4905
4182
|
return unsorted_segment_max_(x, segment_ids, num_segments)
|
|
4906
4183
|
|
|
4907
4184
|
|
|
@@ -4919,16 +4196,15 @@ def unsorted_segment_prod(x, segment_ids, num_segments):
|
|
|
4919
4196
|
|
|
4920
4197
|
Args:
|
|
4921
4198
|
x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
|
|
4922
|
-
segment_ids (Tensor):
|
|
4923
|
-
|
|
4924
|
-
num_segments (int):
|
|
4199
|
+
segment_ids (Tensor): The label indicating the segment to which each element belongs.
|
|
4200
|
+
Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R. The data type must be int32.
|
|
4201
|
+
num_segments (Union[int, Tensor], optional): Set :math:`z` as `num_segments`; it can be an int or a 0-D Tensor.
|
|
4925
4202
|
|
|
4926
4203
|
Returns:
|
|
4927
|
-
Tensor,
|
|
4204
|
+
Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
|
|
4928
4205
|
|
|
4929
4206
|
Raises:
|
|
4930
4207
|
TypeError: If `num_segments` is neither an int nor a 0-D Tensor.
|
|
4931
|
-
ValueError: If length of shape of `segment_ids` is not equal to 1.
|
|
4932
4208
|
|
|
4933
4209
|
Supported Platforms:
|
|
4934
4210
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -4945,7 +4221,6 @@ def unsorted_segment_prod(x, segment_ids, num_segments):
|
|
|
4945
4221
|
[[4. 4. 3.]
|
|
4946
4222
|
[4. 5. 6.]]
|
|
4947
4223
|
"""
|
|
4948
|
-
unsorted_segment_prod_ = P.UnsortedSegmentProd()
|
|
4949
4224
|
return unsorted_segment_prod_(x, segment_ids, num_segments)
|
|
4950
4225
|
|
|
4951
4226
|
|
|
@@ -4986,7 +4261,7 @@ def index_fill(x, axis, index, value):
|
|
|
4986
4261
|
Examples:
|
|
4987
4262
|
>>> import mindspore
|
|
4988
4263
|
>>> import numpy as np
|
|
4989
|
-
>>>
|
|
4264
|
+
>>> from mindspore import ops
|
|
4990
4265
|
>>> from mindspore import Tensor
|
|
4991
4266
|
>>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32))
|
|
4992
4267
|
>>> index = Tensor([0, 2], mindspore.int32)
|
|
@@ -5157,33 +4432,6 @@ def is_nonzero(input):
|
|
|
5157
4432
|
return bool(out)
|
|
5158
4433
|
|
|
5159
4434
|
|
|
5160
|
-
def scalar_cast(input_x, input_y):
|
|
5161
|
-
"""
|
|
5162
|
-
Casts the input scalar to another type.
|
|
5163
|
-
|
|
5164
|
-
Args:
|
|
5165
|
-
input_x (scalar): The input scalar. Only constant value is allowed.
|
|
5166
|
-
input_y (mindspore.dtype): The type to be cast. Only constant value is allowed.
|
|
5167
|
-
|
|
5168
|
-
Returns:
|
|
5169
|
-
Scalar. The type is the same as the python type corresponding to `input_y`.
|
|
5170
|
-
|
|
5171
|
-
Raises:
|
|
5172
|
-
TypeError: If neither `input_x` nor `input_y` is a constant value.
|
|
5173
|
-
|
|
5174
|
-
Supported Platforms:
|
|
5175
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
5176
|
-
|
|
5177
|
-
Examples:
|
|
5178
|
-
>>> import mindspore
|
|
5179
|
-
>>> from mindspore import ops
|
|
5180
|
-
>>> output = ops.scalar_cast(255.0, mindspore.int32)
|
|
5181
|
-
>>> print(output)
|
|
5182
|
-
255
|
|
5183
|
-
"""
|
|
5184
|
-
return scalar_cast_(input_x, input_y)
|
|
5185
|
-
|
|
5186
|
-
|
|
5187
4435
|
def tensor_scatter_mul(input_x, indices, updates):
|
|
5188
4436
|
r"""
|
|
5189
4437
|
Creates a new tensor by multiplying the values from the positions in `input_x` indicated by
|
|
@@ -5193,10 +4441,10 @@ def tensor_scatter_mul(input_x, indices, updates):
|
|
|
5193
4441
|
|
|
5194
4442
|
The last axis of `indices` is the depth of each index vectors. For each index vector,
|
|
5195
4443
|
there must be a corresponding value in `updates`. The shape of `updates` should be
|
|
5196
|
-
equal to the shape of `input_x[indices]`. For more details, see
|
|
4444
|
+
equal to the shape of `input_x[indices]`. For more details, see Examples.
|
|
5197
4445
|
|
|
5198
4446
|
.. math::
|
|
5199
|
-
output[indices] = input\_x
|
|
4447
|
+
output\left [indices \right ] = input\_x \times update
|
|
5200
4448
|
|
|
5201
4449
|
Note:
|
|
5202
4450
|
- If some values of the `indices` are out of bound, instead of raising an index error,
|
|
@@ -5253,7 +4501,7 @@ def tensor_scatter_div(input_x, indices, updates):
|
|
|
5253
4501
|
|
|
5254
4502
|
The last axis of `indices` is the depth of each index vectors. For each index vector,
|
|
5255
4503
|
there must be a corresponding value in `updates`. The shape of `updates` should be
|
|
5256
|
-
equal to the shape of `input_x[indices]`. For more details, see
|
|
4504
|
+
equal to the shape of `input_x[indices]`. For more details, see Examples.
|
|
5257
4505
|
|
|
5258
4506
|
.. math::
|
|
5259
4507
|
output\left [indices \right ] = input\_x \div update
|
|
@@ -5374,113 +4622,36 @@ def tuple_to_array(input_x):
|
|
|
5374
4622
|
return tuple_to_tensor_(input_x, dtype)
|
|
5375
4623
|
|
|
5376
4624
|
|
|
5377
|
-
def masked_select(input, mask):
|
|
5378
|
-
"""
|
|
5379
|
-
Returns a new 1-D Tensor which indexes the `x` tensor according to the boolean `mask`.
|
|
5380
|
-
The shapes of the `mask` tensor and the `x` tensor don't need to match, but they must be broadcastable.
|
|
5381
|
-
|
|
5382
|
-
Args:
|
|
5383
|
-
input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
|
|
5384
|
-
mask (Tensor[bool]): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
|
|
5385
|
-
|
|
5386
|
-
Returns:
|
|
5387
|
-
A 1-D Tensor, with the same type as `input`.
|
|
5388
|
-
|
|
5389
|
-
Raises:
|
|
5390
|
-
TypeError: If `input` or `mask` is not a Tensor.
|
|
5391
|
-
TypeError: If dtype of `mask` is not bool.
|
|
5392
|
-
|
|
5393
|
-
Supported Platforms:
|
|
5394
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
5395
|
-
|
|
5396
|
-
Examples:
|
|
5397
|
-
>>> import numpy as np
|
|
5398
|
-
>>> import mindspore.ops as ops
|
|
5399
|
-
>>> from mindspore import Tensor
|
|
5400
|
-
>>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
|
|
5401
|
-
>>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
|
|
5402
|
-
>>> output = ops.masked_select(x, mask)
|
|
5403
|
-
>>> print(output)
|
|
5404
|
-
[1 3]
|
|
5405
|
-
"""
|
|
5406
|
-
return masked_select_(input, mask)
|
|
5407
|
-
|
|
5408
|
-
|
|
5409
|
-
def masked_fill(input_x, mask, value):
|
|
5410
|
-
"""
|
|
5411
|
-
Fills elements of Tensor with value where mask is True.
|
|
5412
|
-
The shapes of `input_x` and `mask` need to be the same or broadcastable.
|
|
5413
|
-
|
|
5414
|
-
Args:
|
|
5415
|
-
input_x (Tensor): The source Tensor whose data type is one of bool, uint8, int8, int16, int32,
|
|
5416
|
-
int64, float16, float32, float64, complex64, complex128.
|
|
5417
|
-
mask (Tensor[bool]): The boolean mask.
|
|
5418
|
-
value (Union[float, Tensor]): The value to fill in with, which dtype is the same as `input_x`.
|
|
5419
|
-
|
|
5420
|
-
Returns:
|
|
5421
|
-
Tensor, has the same type and shape as `input_x`.
|
|
5422
|
-
|
|
5423
|
-
Raises:
|
|
5424
|
-
TypeError: If dtype of `mask` is not bool.
|
|
5425
|
-
TypeError: If `input_x` or `mask` is not a Tensor.
|
|
5426
|
-
ValueError: If the shapes of `input_x` and `mask` could not be broadcast.
|
|
5427
|
-
TypeError: If dtype of `input_x` or `value` is not one of bool, uint8, int8, int16, int32,
|
|
5428
|
-
int64, float16, float32, float64, complex64, complex128.
|
|
5429
|
-
TypeError: If dtype of `value` is different from that of `input_x`.
|
|
5430
|
-
TypeError: If `value` is neither float number nor Tensor.
|
|
5431
|
-
|
|
5432
|
-
Supported Platforms:
|
|
5433
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
5434
|
-
|
|
5435
|
-
Examples:
|
|
5436
|
-
>>> import mindspore
|
|
5437
|
-
>>> import numpy as np
|
|
5438
|
-
>>> from mindspore import Tensor, ops
|
|
5439
|
-
>>> input_x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
|
|
5440
|
-
>>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
|
|
5441
|
-
>>> output = ops.masked_fill(input_x, mask, 0.5)
|
|
5442
|
-
>>> print(output)
|
|
5443
|
-
[0.5 0.5 3. 0.5]
|
|
5444
|
-
"""
|
|
5445
|
-
if isinstance(value, (float, int)) and isinstance(input_x, Tensor):
|
|
5446
|
-
value = scalar_to_tensor_(value, input_x.dtype)
|
|
5447
|
-
masked_fill_ = _get_cache_prim(P.MaskedFill)()
|
|
5448
|
-
return masked_fill_(input_x, mask, value)
|
|
5449
|
-
|
|
5450
|
-
|
|
5451
|
-
def diag(input):
|
|
5452
|
-
r"""
|
|
5453
|
-
Constructs a diagonal tensor with a given diagonal values.
|
|
5454
|
-
|
|
5455
|
-
Assume `input` has dimensions :math:`(D_1,... D_k)` , the output is a tensor of
|
|
5456
|
-
rank 2k with dimensions :math:`(D_1,..., D_k, D_1,..., D_k)` where:
|
|
5457
|
-
:math:`output[i_1,..., i_k, i_1,..., i_k] = input[i_1,..., i_k]` and 0 everywhere else.
|
|
4625
|
+
def masked_select(input, mask):
|
|
4626
|
+
"""
|
|
4627
|
+
Returns a new 1-D Tensor which indexes the `input` tensor according to the boolean `mask`.
|
|
4628
|
+
The shapes of the `mask` tensor and the `input` tensor don't need to match, but they must be broadcastable.
|
|
5458
4629
|
|
|
5459
4630
|
Args:
|
|
5460
|
-
input (Tensor): The
|
|
4631
|
+
input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
|
|
4632
|
+
mask (Tensor[bool]): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
|
|
5461
4633
|
|
|
5462
4634
|
Returns:
|
|
5463
|
-
Tensor,
|
|
4635
|
+
A 1-D Tensor, with the same type as `input`.
|
|
5464
4636
|
|
|
5465
4637
|
Raises:
|
|
5466
|
-
TypeError: If `input` is not a Tensor.
|
|
5467
|
-
|
|
4638
|
+
TypeError: If `input` or `mask` is not a Tensor.
|
|
4639
|
+
TypeError: If dtype of `mask` is not bool.
|
|
5468
4640
|
|
|
5469
4641
|
Supported Platforms:
|
|
5470
4642
|
``Ascend`` ``GPU`` ``CPU``
|
|
5471
4643
|
|
|
5472
4644
|
Examples:
|
|
5473
|
-
>>>
|
|
5474
|
-
>>> import mindspore
|
|
5475
|
-
>>>
|
|
5476
|
-
>>>
|
|
4645
|
+
>>> import numpy as np
|
|
4646
|
+
>>> import mindspore
|
|
4647
|
+
>>> from mindspore import Tensor, ops
|
|
4648
|
+
>>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
|
|
4649
|
+
>>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
|
|
4650
|
+
>>> output = ops.masked_select(x, mask)
|
|
5477
4651
|
>>> print(output)
|
|
5478
|
-
[
|
|
5479
|
-
[0 2 0 0]
|
|
5480
|
-
[0 0 3 0]
|
|
5481
|
-
[0 0 0 4]]
|
|
4652
|
+
[1 3]
|
|
5482
4653
|
"""
|
|
5483
|
-
return
|
|
4654
|
+
return masked_select_(input, mask)
|
|
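For intuition, once `mask` has been broadcast against `input`, the result matches plain NumPy boolean indexing (illustrative sketch, not the actual kernel):

>>> import numpy as np
>>> x = np.array([1, 2, 3, 4])
>>> mask = np.array([True, False, True, False])
>>> x[np.broadcast_to(mask, x.shape)]
array([1, 3])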
5484
4655
|
|
|
5485
4656
|
|
|
5486
4657
|
def diagflat(input, offset=0):
|
|
@@ -5541,7 +4712,7 @@ def col2im(input_x, output_size, kernel_size, dilation, padding_value, stride):
|
|
|
5541
4712
|
Combines an array of sliding local blocks into a large containing tensor.
|
|
5542
4713
|
|
|
5543
4714
|
Args:
|
|
5544
|
-
input_x (Tensor): 4D tensor with data type float16 or
|
|
4715
|
+
input_x (Tensor): 4D tensor with data type float16 or float32.
|
|
5545
4716
|
output_size (Tensor): 1D tensor with 2 elements of data type int.
|
|
5546
4717
|
kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two int
|
|
5547
4718
|
for height and width. If type is int, height equals width. Must be specified.
|
|
@@ -5597,7 +4768,7 @@ def _split_int(x, split_size_or_sections, axis):
|
|
|
5597
4768
|
num_sections = length_along_dim // split_size_or_sections
|
|
5598
4769
|
length1 = num_sections * split_size_or_sections
|
|
5599
4770
|
length2 = length_along_dim - length1
|
|
5600
|
-
start1 = _list_comprehensions(
|
|
4771
|
+
start1 = _list_comprehensions(rank_(x), 0, True)
|
|
5601
4772
|
size1 = _tuple_setitem(arr_shape, axis, length1)
|
|
5602
4773
|
start2 = _tuple_setitem(start1, axis, length1)
|
|
5603
4774
|
size2 = _tuple_setitem(arr_shape, axis, length2)
|
|
@@ -5627,7 +4798,6 @@ def _split_sub_tensors(x, split_size_or_sections, axis):
|
|
|
5627
4798
|
sub_tensors.append(sliced_tensor)
|
|
5628
4799
|
return sub_tensors
|
|
5629
4800
|
|
|
5630
|
-
|
|
5631
4801
|
def split(tensor, split_size_or_sections, axis=0):
|
|
5632
4802
|
"""
|
|
5633
4803
|
Splits the Tensor into chunks along the given axis.
|
|
@@ -5649,9 +4819,9 @@ def split(tensor, split_size_or_sections, axis=0):
|
|
|
5649
4819
|
TypeError: If argument `tensor` is not Tensor.
|
|
5650
4820
|
TypeError: If argument `axis` is not Tensor.
|
|
5651
4821
|
ValueError: If argument `axis` is out of range of :math:`[-tensor.ndim, tensor.ndim)` .
|
|
5652
|
-
TypeError: If each element in
|
|
5653
|
-
TypeError: If argument `
|
|
5654
|
-
ValueError: The sum of
|
|
4822
|
+
TypeError: If each element in `split_size_or_sections` is not an integer.
|
|
4823
|
+
TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
|
|
4824
|
+
ValueError: If the sum of `split_size_or_sections` is not equal to `tensor.shape[axis]`.
|
|
5655
4825
|
|
|
5656
4826
|
Supported Platforms:
|
|
5657
4827
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -5695,6 +4865,53 @@ def split(tensor, split_size_or_sections, axis=0):
|
|
|
5695
4865
|
f"but got {type(split_size_or_sections)}")
|
|
5696
4866
|
return tuple(res)
|
|
5697
4867
|
|
|
4868
|
+
def split_ext(tensor, split_size_or_sections, axis=0):
|
|
4869
|
+
"""
|
|
4870
|
+
Splits the Tensor into chunks along the given axis.
|
|
4871
|
+
|
|
4872
|
+
Args:
|
|
4873
|
+
tensor (Tensor): A Tensor to be divided.
|
|
4874
|
+
split_size_or_sections (Union[int, tuple(int), list(int)]):
|
|
4875
|
+
If `split_size_or_sections` is an int type, `tensor` will be split into equally sized chunks,
|
|
4876
|
+
each chunk with size `split_size_or_sections`. The last chunk will be smaller than `split_size_or_sections`
|
|
4877
|
+
if `tensor.shape[axis]` is not divisible by `split_size_or_sections`.
|
|
4878
|
+
If `split_size_or_sections` is a list type, then `tensor` will be split into len(split_size_or_sections)
|
|
4879
|
+
chunks with sizes `split_size_or_sections` along the given `axis`.
|
|
4880
|
+
axis (int): The axis along which to split. Default: ``0`` .
|
|
4881
|
+
|
|
4882
|
+
Returns:
|
|
4883
|
+
A tuple of sub-tensors.
|
|
4884
|
+
|
|
4885
|
+
Raises:
|
|
4886
|
+
TypeError: If argument `tensor` is not Tensor.
|
|
4887
|
+
TypeError: If argument `axis` is not int.
|
|
4888
|
+
ValueError: If argument `axis` is out of range of :math:`[-tensor.ndim, tensor.ndim)` .
|
|
4889
|
+
TypeError: If each element in `split_size_or_sections` is not an integer.
|
|
4890
|
+
TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
|
|
4891
|
+
ValueError: If the sum of `split_size_or_sections` is not equal to `tensor.shape[axis]`.
|
|
4892
|
+
|
|
4893
|
+
Supported Platforms:
|
|
4894
|
+
``Ascend``
|
|
4895
|
+
|
|
4896
|
+
Examples:
|
|
4897
|
+
>>> import numpy as np
|
|
4898
|
+
>>> from mindspore import ops, Tensor
|
|
4899
|
+
>>> input_x = np.arange(9).astype("float32")
|
|
4900
|
+
>>> output = ops.split_ext(Tensor(input_x), 3)
|
|
4901
|
+
>>> print(output)
|
|
4902
|
+
(Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
|
|
4903
|
+
Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
|
|
4904
|
+
Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
|
|
4905
|
+
"""
|
|
4906
|
+
if isinstance(split_size_or_sections, int):
|
|
4907
|
+
res = split_tensor(tensor, split_size_or_sections, axis)
|
|
4908
|
+
elif isinstance(split_size_or_sections, (list, tuple)):
|
|
4909
|
+
res = split_with_size(tensor, split_size_or_sections, axis)
|
|
4910
|
+
else:
|
|
4911
|
+
raise TypeError(f"Type of Argument `split_size_or_sections` should be integer, tuple(int) or list(int), " \
|
|
4912
|
+
f"but got {type(split_size_or_sections)}")
|
|
4913
|
+
return res
|
|
4914
|
+
|
|
5698
4915
|
|
|
5699
4916
|
def tril(input, diagonal=0): # pylint: disable=redefined-outer-name
|
|
5700
4917
|
"""
|
|
@@ -5757,67 +4974,6 @@ def tril(input, diagonal=0): # pylint: disable=redefined-outer-name
|
|
|
5757
4974
|
return tril_(input)
|
|
5758
4975
|
|
|
5759
4976
|
|
|
5760
|
-
def triu(input, diagonal=0): # pylint: disable=redefined-outer-name
|
|
5761
|
-
r"""
|
|
5762
|
-
Returns the upper triangular part of 'input' (elements on and above the diagonal),
|
|
5763
|
-
and sets the other elements to zero.
|
|
5764
|
-
|
|
5765
|
-
.. warning::
|
|
5766
|
-
This is an experimental API that is subject to change or deletion.
|
|
5767
|
-
|
|
5768
|
-
Args:
|
|
5769
|
-
input (Tensor): The input tensor with shape :math:`(M, N, *)` where * means any number of additional dimensions.
|
|
5770
|
-
diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: 0,
|
|
5771
|
-
indicating the main diagonal.
|
|
5772
|
-
|
|
5773
|
-
Returns:
|
|
5774
|
-
Tensor, a tensor has the same shape and data type as input.
|
|
5775
|
-
|
|
5776
|
-
Raises:
|
|
5777
|
-
TypeError: If `diagonal` is not an int.
|
|
5778
|
-
TypeError: If `input` is not a Tensor.
|
|
5779
|
-
ValueError: If the dimension of `input` is less than 2.
|
|
5780
|
-
|
|
5781
|
-
Supported Platforms:
|
|
5782
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
5783
|
-
|
|
5784
|
-
Examples:
|
|
5785
|
-
>>> import numpy as np
|
|
5786
|
-
>>> from mindspore import Tensor, ops
|
|
5787
|
-
>>> x = Tensor(np.array([[ 1, 2, 3, 4],
|
|
5788
|
-
... [ 5, 6, 7, 8],
|
|
5789
|
-
... [10, 11, 12, 13],
|
|
5790
|
-
... [14, 15, 16, 17]]))
|
|
5791
|
-
>>> result = ops.triu(x)
|
|
5792
|
-
>>> print(result)
|
|
5793
|
-
[[ 1 2 3 4]
|
|
5794
|
-
[ 0 6 7 8]
|
|
5795
|
-
[ 0 0 12 13]
|
|
5796
|
-
[ 0 0 0 17]]
|
|
5797
|
-
>>> x = Tensor(np.array([[ 1, 2, 3, 4],
|
|
5798
|
-
... [ 5, 6, 7, 8],
|
|
5799
|
-
... [10, 11, 12, 13],
|
|
5800
|
-
... [14, 15, 16, 17]]))
|
|
5801
|
-
>>> result = ops.triu(x, diagonal=1)
|
|
5802
|
-
>>> print(result)
|
|
5803
|
-
[[ 0 2 3 4]
|
|
5804
|
-
[ 0 0 7 8]
|
|
5805
|
-
[ 0 0 0 13]
|
|
5806
|
-
[ 0 0 0 0]]
|
|
5807
|
-
>>> x = Tensor(np.array([[ 1, 2, 3, 4],
|
|
5808
|
-
... [ 5, 6, 7, 8],
|
|
5809
|
-
... [10, 11, 12, 13],
|
|
5810
|
-
... [14, 15, 16, 17]]))
|
|
5811
|
-
>>> result = ops.triu(x, diagonal=-1)
|
|
5812
|
-
>>> print(result)
|
|
5813
|
-
[[ 1 2 3 4]
|
|
5814
|
-
[ 5 6 7 8]
|
|
5815
|
-
[ 0 11 12 13]
|
|
5816
|
-
[ 0 0 16 17]]
|
|
5817
|
-
"""
|
|
5818
|
-
return _get_cache_prim(P.Triu)(diagonal)(input)
|
|
5819
|
-
|
|
5820
|
-
|
|
5821
4977
|
@_primexpr
|
|
5822
4978
|
def _canonicalize_axis(axis, ndim):
|
|
5823
4979
|
"""
|
|
@@ -5917,24 +5073,24 @@ def _tensor_split_sub_int(x, indices_or_sections, axis):
|
|
|
5917
5073
|
arr_shape = x.shape
|
|
5918
5074
|
length_along_dim = arr_shape[axis]
|
|
5919
5075
|
if indices_or_sections > length_along_dim:
|
|
5920
|
-
res = P.Split(axis, length_along_dim)(x)
|
|
5076
|
+
res = _get_cache_prim(P.Split)(axis, length_along_dim)(x)
|
|
5921
5077
|
indices_or_sections_n = [length_along_dim, length_along_dim + 1]
|
|
5922
5078
|
res2 = _tensor_split_sub_tensors(x, indices_or_sections_n, axis)
|
|
5923
5079
|
for _ in np.arange(length_along_dim, indices_or_sections):
|
|
5924
5080
|
res += tuple(res2)[1:]
|
|
5925
5081
|
elif length_along_dim % indices_or_sections == 0:
|
|
5926
|
-
res = P.Split(axis, indices_or_sections)(x)
|
|
5082
|
+
res = _get_cache_prim(P.Split)(axis, indices_or_sections)(x)
|
|
5927
5083
|
else:
|
|
5928
5084
|
num_long_tensor = length_along_dim % indices_or_sections
|
|
5929
5085
|
num_short_tensor = indices_or_sections - num_long_tensor
|
|
5930
5086
|
length1 = num_long_tensor * (length_along_dim // indices_or_sections + 1)
|
|
5931
5087
|
length2 = length_along_dim - length1
|
|
5932
|
-
start1 = _list_comprehensions(
|
|
5088
|
+
start1 = _list_comprehensions(rank_(x), 0, True)
|
|
5933
5089
|
size1 = _tuple_setitem(arr_shape, axis, length1)
|
|
5934
5090
|
start2 = _tuple_setitem(start1, axis, length1)
|
|
5935
5091
|
size2 = _tuple_setitem(arr_shape, axis, length2)
|
|
5936
|
-
res = P.Split(axis, num_long_tensor)(tensor_slice(x, start1, size1)) + \
|
|
5937
|
-
P.Split(axis, num_short_tensor)(tensor_slice(x, start2, size2))
|
|
5092
|
+
res = _get_cache_prim(P.Split)(axis, num_long_tensor)(tensor_slice(x, start1, size1)) + \
|
|
5093
|
+
_get_cache_prim(P.Split)(axis, num_short_tensor)(tensor_slice(x, start2, size2))
|
|
5938
5094
|
return res
|
|
5939
5095
|
|
|
5940
5096
|
|
|
@@ -5948,11 +5104,11 @@ def tensor_split(input, indices_or_sections, axis=0):
|
|
|
5948
5104
|
|
|
5949
5105
|
- If `indices_or_sections` is an integer n, input tensor will be split into n sections.
|
|
5950
5106
|
|
|
5951
|
-
- If :math:`input.shape
|
|
5952
|
-
:math:`input.shape
|
|
5953
|
-
- If :math:`input.shape
|
|
5954
|
-
will have size :math:`input.shape
|
|
5955
|
-
size :math:`input.shape
|
|
5107
|
+
- If :math:`input.shape[axis]` can be divisible by n, sub-sections will have equal size
|
|
5108
|
+
:math:`input.shape[axis] / n` .
|
|
5109
|
+
- If :math:`input.shape[axis]` is not divisible by n, the first :math:`input.shape[axis] \bmod n` sections
|
|
5110
|
+
will have size :math:`input.shape[axis] // n + 1` , and the rest will have
|
|
5111
|
+
size :math:`input.shape[axis] // n` (a worked example follows after this hunk).
|
|
5956
5112
|
- If `indices_or_sections` is of type tuple(int) or list(int), the input tensor will be split at the
|
|
5957
5113
|
indices in the list or tuple. For example, given parameters :math:`indices\_or\_sections=[1, 4]`
|
|
5958
5114
|
and :math:`axis=0` , the input tensor will be split into sections :math:`input[:1]` ,
|
|
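Worked example of the divisibility rule above: splitting an axis of length 10 into :math:`n = 3` sections gives :math:`10 \bmod 3 = 1` section of size :math:`10 // 3 + 1 = 4` followed by two sections of size :math:`10 // 3 = 3`. This can be checked directly (illustrative doctest):

>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> t = Tensor(np.arange(10).astype(np.float32))
>>> [s.shape for s in ops.tensor_split(t, 3)]
[(4,), (3,), (3,)]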
@@ -6165,7 +5321,7 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylin
|
|
|
6165
5321
|
tensor.
|
|
6166
5322
|
|
|
6167
5323
|
- values (Tensor) - The maximum value of input tensor, with the same shape as index, and same dtype as x.
|
|
6168
|
-
- index (Tensor) - The index for the maximum value of the input tensor, with dtype
|
|
5324
|
+
- index (Tensor) - The index for the maximum value of the input tensor, with dtype int64. If `keepdims`
|
|
6169
5325
|
is true, the shape of output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1, input_{axis+1},
|
|
6170
5326
|
..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1},
|
|
6171
5327
|
..., input_N)` .
|
|
@@ -6194,16 +5350,15 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None): # pylin
|
|
|
6194
5350
|
[[3.2 0.4 0.4 2.9 4. ]] [[1 1 0 1 1]]
|
|
6195
5351
|
"""
|
|
6196
5352
|
if not input.shape:
|
|
6197
|
-
return (input, Tensor(0, dtype=mstype.
|
|
5353
|
+
return (input, Tensor(0, dtype=mstype.int64))
|
|
6198
5354
|
if axis is None:
|
|
6199
|
-
|
|
6200
|
-
return (reduce_max_op(input), Tensor(0, dtype=mstype.int32))
|
|
5355
|
+
return (max_(input), Tensor(0, dtype=mstype.int64))
|
|
6201
5356
|
if initial is not None and not isinstance(initial, numbers.Number):
|
|
6202
5357
|
raise TypeError(f"For 'max', 'initial' must be a scalar, but got {type(initial)}")
|
|
6203
5358
|
if axis is not None and not isinstance(axis, int):
|
|
6204
5359
|
raise TypeError(f"For 'max', 'axis' must be int, but got {type(axis)}")
|
|
6205
5360
|
input = _init_and_select_elem(input, initial, where, ops.maximum)
|
|
6206
|
-
argmax_with_value_op = ArgMaxWithValue(axis, keepdims)
|
|
5361
|
+
argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(axis, keepdims)
|
|
6207
5362
|
indices, values = argmax_with_value_op(input)
|
|
6208
5363
|
return values, indices
|
|
6209
5364
|
|
|
@@ -6249,10 +5404,11 @@ def argmax(input, dim=None, keepdim=False):
|
|
|
6249
5404
|
is_dim_none = True
|
|
6250
5405
|
out = _get_cache_prim(Argmax)(dim, mstype.int64)(input)
|
|
6251
5406
|
if keepdim and not is_dim_none:
|
|
6252
|
-
out =
|
|
5407
|
+
out = expand_dims(out, dim)
|
|
6253
5408
|
return out
|
|
6254
5409
|
|
|
6255
5410
|
|
|
5411
|
+
|
|
6256
5412
|
def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pylint: disable=redefined-outer-name
|
|
6257
5413
|
"""
|
|
6258
5414
|
Calculates the minimum value along with the given axis for the input tensor. It returns the minimum values and
|
|
@@ -6311,16 +5467,16 @@ def min(input, axis=None, keepdims=False, *, initial=None, where=None): # pylin
|
|
|
6311
5467
|
0.0 0
|
|
6312
5468
|
"""
|
|
6313
5469
|
if not input.shape:
|
|
6314
|
-
return (input, Tensor(0, dtype=mstype.
|
|
5470
|
+
return (input, Tensor(0, dtype=mstype.int64))
|
|
6315
5471
|
if axis is None:
|
|
6316
|
-
return (
|
|
5472
|
+
return (min_(input), Tensor(0, dtype=mstype.int64))
|
|
6317
5473
|
if initial is not None and not isinstance(initial, numbers.Number):
|
|
6318
5474
|
raise TypeError(f"For 'min', 'initial' must be a scalar, but got {type(initial)}")
|
|
6319
5475
|
if axis is not None and not isinstance(axis, int):
|
|
6320
5476
|
raise TypeError(f"For 'min', 'axis' must be int, but got {type(axis)}")
|
|
6321
5477
|
input = _init_and_select_elem(input, initial, where, ops.minimum)
|
|
6322
|
-
|
|
6323
|
-
indices, values =
|
|
5478
|
+
argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(axis, keepdims)
|
|
5479
|
+
indices, values = argmin_with_value_op(input)
|
|
6324
5480
|
return values, indices
|
|
6325
5481
|
|
|
6326
5482
|
|
|
@@ -6378,8 +5534,8 @@ def aminmax(input, *, axis=0, keepdims=False):
|
|
|
6378
5534
|
output0 = ops.reshape(output0, [1] * input.ndim)
|
|
6379
5535
|
output1 = ops.reshape(output1, [1] * input.ndim)
|
|
6380
5536
|
return output0, output1
|
|
6381
|
-
argmin_with_value_op =
|
|
6382
|
-
argmax_with_value_op =
|
|
5537
|
+
argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(axis, keepdims)
|
|
5538
|
+
argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(axis, keepdims)
|
|
6383
5539
|
_, output0 = argmin_with_value_op(input)
|
|
6384
5540
|
_, output1 = argmax_with_value_op(input)
|
|
6385
5541
|
if keepdims is True and input.ndim == 0:
|
|
@@ -6434,66 +5590,48 @@ def narrow(input, axis, start, length):
|
|
|
6434
5590
|
begins[axis] = start
|
|
6435
5591
|
sizes = list(input.shape)
|
|
6436
5592
|
sizes[axis] = length
|
|
6437
|
-
return
|
|
6438
|
-
|
|
6439
|
-
|
|
6440
|
-
def unsorted_segment_sum(input_x, segment_ids, num_segments):
|
|
6441
|
-
r"""
|
|
6442
|
-
Computes the sum of a tensor along segments.
|
|
5593
|
+
return tensor_slice(input, begins, sizes)
|
|
6443
5594
|
|
|
6444
|
-
Calculates a tensor such that :math:`\text{output}[i] = \sum_{segment\_ids[j] == i} \text{data}[j, \ldots]`, where
|
|
6445
|
-
:math:`j,...` is a tuple describing the index of element in data.
|
|
6446
|
-
`segment_ids` selects which elements in data to sum
|
|
6447
|
-
up. Segment_ids does not need to be sorted, and it does not need to cover all values in the entire valid value
|
|
6448
|
-
range.
|
|
6449
5595
|
|
|
6450
|
-
|
|
6451
|
-
|
|
6452
|
-
|
|
6453
|
-
|
|
6454
|
-
Note:
|
|
6455
|
-
- If the segment_id i is absent in the segment_ids, then output[i] will be filled with 0.
|
|
6456
|
-
- On Ascend, if the value of segment_id is less than 0 or greater than the length of the input data shape, an
|
|
6457
|
-
execution error will occur.
|
|
6458
|
-
|
|
6459
|
-
If the sum of the given segment_ids :math:`i` is empty, then :math:`\text{output}[i] = 0`. If the given segment_ids
|
|
6460
|
-
is negative, the value will be ignored. 'num_segments' must be equal to the number of different segment_ids.
|
|
5596
|
+
def narrow_ext(input, dim, start, length):
|
|
5597
|
+
"""
|
|
5598
|
+
Returns a narrowed tensor from the input tensor, where
|
|
5599
|
+
the dimension `dim` is narrowed from `start` to `start + length`.
|
|
6461
5600
|
|
|
6462
5601
|
Args:
|
|
6463
|
-
|
|
6464
|
-
|
|
6465
|
-
|
|
6466
|
-
|
|
6467
|
-
num_segments (Union[int, Tensor], optional): Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
|
|
5602
|
+
input (Tensor): the tensor to narrow.
|
|
5603
|
+
dim (int): dimension along which to narrow.
|
|
5604
|
+
start (int): the starting index along `dim`.
|
|
5605
|
+
length (int): the length of the narrowed section.
|
|
6468
5606
|
|
|
6469
5607
|
Returns:
|
|
6470
|
-
Tensor
|
|
5608
|
+
Tensor.
|
|
6471
5609
|
|
|
6472
5610
|
Raises:
|
|
6473
|
-
|
|
6474
|
-
ValueError: If
|
|
5611
|
+
ValueError: If dim is out of range [-input.ndim, input.ndim).
|
|
5612
|
+
ValueError: If start is out of range [-input.shape[dim], input.shape[dim]].
|
|
5613
|
+
ValueError: If `length` is out of range [0, input.shape[dim]-start].
|
|
6475
5614
|
|
|
6476
5615
|
Supported Platforms:
|
|
6477
|
-
``Ascend``
|
|
5616
|
+
``Ascend``
|
|
6478
5617
|
|
|
6479
5618
|
Examples:
|
|
6480
|
-
>>> from mindspore import Tensor
|
|
6481
|
-
>>> from mindspore import ops
|
|
6482
5619
|
>>> import mindspore
|
|
6483
|
-
>>>
|
|
6484
|
-
>>>
|
|
6485
|
-
>>>
|
|
6486
|
-
>>> output = ops.
|
|
5620
|
+
>>> from mindspore import ops
|
|
5621
|
+
>>> from mindspore import Tensor
|
|
5622
|
+
>>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mindspore.int32)
|
|
5623
|
+
>>> output = ops.narrow(x, 0, 0, 2)
|
|
6487
5624
|
>>> print(output)
|
|
6488
|
-
[
|
|
6489
|
-
|
|
6490
|
-
>>>
|
|
6491
|
-
>>> num_segments = 6
|
|
6492
|
-
>>> output = ops.unsorted_segment_sum(input_x, segment_ids, num_segments)
|
|
5625
|
+
[[ 1 2 3]
|
|
5626
|
+
[ 4 5 6]]
|
|
5627
|
+
>>> output = ops.narrow(x, 1, 1, 2)
|
|
6493
5628
|
>>> print(output)
|
|
6494
|
-
[
|
|
5629
|
+
[[ 2 3]
|
|
5630
|
+
[ 5 6]
|
|
5631
|
+
[ 8 9]]
|
|
6495
5632
|
"""
|
|
6496
|
-
|
|
5633
|
+
validator.check_value_type("input", input, Tensor, "narrow")
|
|
5634
|
+
return slice_ext_op(input, dim, start, start+length, 1)
|
|
6497
5635
|
|
|
6498
5636
|
|
|
6499
5637
|
def topk(input, k, dim=None, largest=True, sorted=True):
|
|
@@ -6520,7 +5658,7 @@ def topk(input, k, dim=None, largest=True, sorted=True):
|
|
|
6520
5658
|
|
|
6521
5659
|
Args:
|
|
6522
5660
|
input (Tensor): Input to be computed, data type must be float16, float32 or int32.
|
|
6523
|
-
k (int): The number of top or bottom elements to be computed along the last dimension
|
|
5661
|
+
k (int): The number of top or bottom elements to be computed along the last dimension.
|
|
6524
5662
|
dim (int, optional): The dimension to sort along. Default: ``None`` .
|
|
6525
5663
|
largest (bool, optional): If largest is ``False`` then the k smallest elements are returned.
|
|
6526
5664
|
Default: ``True`` .
|
|
@@ -6650,8 +5788,8 @@ def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
|
|
|
6650
5788
|
A Tensor, with same type as `input` . And its shape is as described above.
|
|
6651
5789
|
|
|
6652
5790
|
Raises:
|
|
6653
|
-
TypeError: If `kernel_size`, `
|
|
6654
|
-
ValueError: If `kernel_size`, `dilation`, `stride` value is not
|
|
5791
|
+
TypeError: If the data type of `output_size`, `kernel_size`, `stride`, `dilation` or `padding` is not int, tuple or list.
|
|
5792
|
+
ValueError: If the value of `output_size`, `kernel_size`, `dilation` or `stride` is not
|
|
6655
5793
|
greater than zero, or the number of elements is more than `2`.
|
|
6656
5794
|
ValueError: If `padding` value is less than zero or elements number more than `2`.
|
|
6657
5795
|
ValueError: If `input.shape[1] != kernel_size[0] * kernel_size[1]`
|
|
@@ -6727,9 +5865,7 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
|
|
|
6727
5865
|
.. warning::
|
|
6728
5866
|
- The output is a 3-dimensional Tensor whose shape is
|
|
6729
5867
|
:math:`(N, C \times \prod(\text{kernel_size}), L)` .
|
|
6730
|
-
|
|
6731
|
-
.. warning::
|
|
6732
|
-
This is an experimental API that is subject to change or deletion.
|
|
5868
|
+
- This is an experimental API that is subject to change or deletion.
|
|
6733
5869
|
|
|
6734
5870
|
Args:
|
|
6735
5871
|
input (Tensor): 4-D Tensor, supported dtypes: float16, float32, float64, complex64 and complex128.
|
|
@@ -6738,10 +5874,11 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
|
|
|
6738
5874
|
dilation (Union[int, tuple[int], list[int]], optional): The dilation of the window, should be two int
|
|
6739
5875
|
for height and width. If type is int, height equals width. Default: ``1`` .
|
|
6740
5876
|
padding (Union[int, tuple[int], list[int]], optional): The pad of the window, that must be
|
|
6741
|
-
a tuple/list of one or two `int` for height and width.
|
|
6742
|
-
|
|
6743
|
-
If
|
|
6744
|
-
|
|
5877
|
+
a tuple/list of one or two `int` for height and width. Default: ``0`` .
|
|
5878
|
+
|
|
5879
|
+
- If one int, pad_height = pad_width.
|
|
5880
|
+
- If two int, pad_height = padding[0], pad_width = padding[1].
|
|
5881
|
+
|
|
6745
5882
|
stride (Union[int, tuple[int], list[int]], optional): The stride of the window, should be two int
|
|
6746
5883
|
for height and width. If type is int, height equals width. Default: ``1`` .
|
|
6747
5884
|
|
|
@@ -6788,98 +5925,6 @@ def _check_diagonal_axes(dim1, dim2, x_ndim):
|
|
|
6788
5925
|
return axes
|
|
6789
5926
|
|
|
6790
5927
|
|
|
6791
|
-
def diagonal(input, offset=0, dim1=0, dim2=1):
|
|
6792
|
-
"""
|
|
6793
|
-
Returns specified diagonals of `input`.
|
|
6794
|
-
|
|
6795
|
-
If `input` is 2-D, returns the diagonal of `input` with the given offset.
|
|
6796
|
-
If `input` has more than two
|
|
6797
|
-
dimensions, then the axes specified by `dim1` and `dim2` are used to determine
|
|
6798
|
-
the 2-D sub-array whose diagonal is returned. In this case, remove the `dim1` and `dim2` dimensions of `input`
|
|
6799
|
-
and insert the last dimension of `input` by the diagonal elements determined by `dim1` and `dim2`.
|
|
6800
|
-
|
|
6801
|
-
Args:
|
|
6802
|
-
input (Tensor): Array from which the diagonals are taken.
|
|
6803
|
-
offset (int, optional): Offset of the diagonal from the main diagonal.
|
|
6804
|
-
Can be positive or negative. Default: ``0`` .
|
|
6805
|
-
dim1 (int, optional): Axis to be used as the first axis of the 2-D
|
|
6806
|
-
sub-arrays from which the diagonals should be taken. Defaults to
|
|
6807
|
-
first axis (0). Default: ``0`` .
|
|
6808
|
-
dim2 (int, optional): Axis to be used as the second axis of the 2-D
|
|
6809
|
-
sub-arrays from which the diagonals should be taken. Defaults to
|
|
6810
|
-
second axis (1). Default: ``1`` .
|
|
6811
|
-
|
|
6812
|
-
Returns:
|
|
6813
|
-
Tensor, if `input` is 2-D, then `input` 1-D array containing the diagonal. If
|
|
6814
|
-
``input.ndim > 2``, then the dimensions specified by `dim1` and `dim2` are removed,
|
|
6815
|
-
and a new axis inserted at the end corresponding to the diagonal.
|
|
6816
|
-
|
|
6817
|
-
Raises:
|
|
6818
|
-
TypeError: if `dim1` or `dim2` are not an int.
|
|
6819
|
-
ValueError: if the input tensor has less than two dimensions.
|
|
6820
|
-
|
|
6821
|
-
Supported Platforms:
|
|
6822
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
6823
|
-
|
|
6824
|
-
Examples:
|
|
6825
|
-
>>> from mindspore import Tensor, ops
|
|
6826
|
-
>>> from mindspore import dtype as mstype
|
|
6827
|
-
>>> x = Tensor([[0, 1], [2, 3]], mstype.float32)
|
|
6828
|
-
>>> output = ops.diagonal(x)
|
|
6829
|
-
>>> print(output)
|
|
6830
|
-
[0 3]
|
|
6831
|
-
"""
|
|
6832
|
-
x_ndim = input.ndim
|
|
6833
|
-
if x_ndim < 2:
|
|
6834
|
-
raise ValueError(f"For 'ops.diagonal', the original tensor requires at least two dimensions, but got {x_ndim}")
|
|
6835
|
-
_check_attr_dtype("dim1", dim1, [int], "diagonal")
|
|
6836
|
-
_check_attr_dtype("dim2", dim2, [int], "diagonal")
|
|
6837
|
-
dtype = input.dtype
|
|
6838
|
-
|
|
6839
|
-
axes = _check_diagonal_axes(dim1, dim2, x_ndim)
|
|
6840
|
-
perm = ()
|
|
6841
|
-
for i in ms_arrange(x_ndim):
|
|
6842
|
-
if i not in axes:
|
|
6843
|
-
perm += (i,)
|
|
6844
|
-
perm += axes
|
|
6845
|
-
input = input.transpose(perm)
|
|
6846
|
-
|
|
6847
|
-
x_shape = input.shape
|
|
6848
|
-
n, m = x_shape[-2:]
|
|
6849
|
-
|
|
6850
|
-
e = ops.eye(n, m, dtype)
|
|
6851
|
-
if offset >= m or offset <= -n:
|
|
6852
|
-
zero_shape = x_shape[:-2] + (0,)
|
|
6853
|
-
return ops.zeros(zero_shape, dtype)
|
|
6854
|
-
if offset != 0:
|
|
6855
|
-
e = e.astype(mstype.float32)
|
|
6856
|
-
if offset > 0:
|
|
6857
|
-
e_left = ops.fill(mstype.float32, (n, offset), 0)
|
|
6858
|
-
e_right = e[..., 0:m - offset:1]
|
|
6859
|
-
e = ops.cat((e_left, e_right), 1).astype(dtype)
|
|
6860
|
-
elif offset < 0:
|
|
6861
|
-
e_upper = ops.fill(mstype.float32, (-offset, m), 0)
|
|
6862
|
-
e_lower = e[0:n + offset:1, ...]
|
|
6863
|
-
e = ops.cat((e_upper, e_lower), 0).astype(dtype)
|
|
6864
|
-
e = ops.broadcast_to(e, x_shape)
|
|
6865
|
-
|
|
6866
|
-
prod_val = ops.mul(input, e)
|
|
6867
|
-
res = ops.ReduceSum()(prod_val.astype(mstype.float32), -1)
|
|
6868
|
-
|
|
6869
|
-
begin = ()
|
|
6870
|
-
for _ in ms_arrange(x_ndim - 2):
|
|
6871
|
-
begin += (0,)
|
|
6872
|
-
last_dim_begin = builtins.max(0, -offset)
|
|
6873
|
-
begin += (last_dim_begin,)
|
|
6874
|
-
res_size = res.shape[:-1]
|
|
6875
|
-
last_dim_end = builtins.min(x_shape[-2], builtins.max(0, x_shape[-1] - offset)) - last_dim_begin
|
|
6876
|
-
if last_dim_end <= 0:
|
|
6877
|
-
return Tensor([])
|
|
6878
|
-
res_size += (last_dim_end,)
|
|
6879
|
-
res = ops.slice(res, begin, res_size)
|
|
6880
|
-
return res.astype(dtype)
|
|
6881
|
-
|
|
6882
|
-
|
|
6883
5928
|
def _check_is_tensor(param_name, input, cls_name):
|
|
6884
5929
|
"""Returns True if input is Tensor."""
|
|
6885
5930
|
if not isinstance(input, Tensor):
|
|
@@ -6899,6 +5944,9 @@ def diagonal_scatter(input, src, offset=0, dim1=0, dim2=1):
|
|
|
6899
5944
|
the elements in these two dimensions will be treated as elements of a matrix,
|
|
6900
5945
|
and `src` is embedded on the diagonal of the matrix.
|
|
6901
5946
|
|
|
5947
|
+
Note:
|
|
5948
|
+
Currently, ``inf`` value of elements in `input` or `src` is not supported.
|
|
5949
|
+
|
|
6902
5950
|
Args:
|
|
6903
5951
|
input (Tensor): Input Tensor, whose dimension is larger than 1.
|
|
6904
5952
|
src (Tensor): The source Tensor to embed.
|
|
@@ -6935,16 +5983,39 @@ def diagonal_scatter(input, src, offset=0, dim1=0, dim2=1):
|
|
|
6935
5983
|
"""
|
|
6936
5984
|
_check_is_tensor("input", input, "diagonal_scatter")
|
|
6937
5985
|
_check_is_tensor("src", src, "diagonal_scatter")
|
|
6938
|
-
_check_is_int(offset, "offset", "diagonal_scatter")
|
|
6939
|
-
_check_is_int(dim1, "dim1", "diagonal_scatter")
|
|
6940
|
-
_check_is_int(dim2, "dim2", "diagonal_scatter")
|
|
6941
5986
|
input_diag = input.diagonal(offset, dim1, dim2)
|
|
6942
5987
|
_check_diagonal_scatter_shape(input_diag.shape, src.shape)
|
|
6943
|
-
|
|
6944
|
-
|
|
6945
|
-
|
|
5988
|
+
input_shape = input.shape
|
|
5989
|
+
zeros_shape = list(input_shape)
|
|
5990
|
+
m, n = input_shape[dim1], input_shape[dim2]
|
|
5991
|
+
if m == n:
|
|
5992
|
+
src = src - input_diag
|
|
5993
|
+
src = ops.diag_embed(src, offset, dim1, dim2)
|
|
5994
|
+
return input + src
|
|
5995
|
+
if m > n:
|
|
5996
|
+
axis = dim2
|
|
5997
|
+
zeros_shape[axis] = m - n
|
|
5998
|
+
else:
|
|
5999
|
+
axis = dim1
|
|
6000
|
+
zeros_shape[axis] = n - m
|
|
6001
|
+
zeros_tensor = zeros(zeros_shape, dtype=input.dtype)
|
|
6002
|
+
input = concat((input, zeros_tensor), axis)
|
|
6003
|
+
input_diag = input.diagonal(offset, dim1, dim2)
|
|
6004
|
+
if src.shape != input_diag.shape:
|
|
6005
|
+
zeros_shape = []
|
|
6006
|
+
for i, ax in enumerate(src.shape):
|
|
6007
|
+
if ax == input_diag.shape[i]:
|
|
6008
|
+
zeros_shape.append(ax)
|
|
6009
|
+
else:
|
|
6010
|
+
axis = i
|
|
6011
|
+
zeros_shape.append(input_diag.shape[i] - ax)
|
|
6012
|
+
zeros_tensor = zeros(zeros_shape, dtype=src.dtype)
|
|
6013
|
+
src = concat((src, zeros_tensor), axis)
|
|
6014
|
+
src = src - input_diag
|
|
6946
6015
|
src = ops.diag_embed(src, offset, dim1, dim2)
|
|
6947
|
-
|
|
6016
|
+
input = input + src
|
|
6017
|
+
begin = (0,) * input.ndim
|
|
6018
|
+
return slice(input, begin, input_shape)
|
|
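A quick sanity check of the behaviour implemented above, embedding a vector of ones on the main diagonal of a zero matrix (illustrative doctest; the expected output follows the documented semantics):

>>> import mindspore
>>> from mindspore import ops
>>> x = ops.zeros((3, 3), mindspore.float32)
>>> src = ops.ones(3, mindspore.float32)
>>> print(ops.diagonal_scatter(x, src, 0))
[[1. 0. 0.]
 [0. 1. 0.]
 [0. 0. 1.]]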
6948
6019
|
|
|
6949
6020
|
|
|
6950
6021
|
def lstsq(input, A):
|
|
@@ -7003,8 +6074,7 @@ def lstsq(input, A):
|
|
|
7003
6074
|
[-6.5000005 -4.500001 ]
|
|
7004
6075
|
[-3.500002 -2.5000017]]
|
|
7005
6076
|
"""
|
|
7006
|
-
|
|
7007
|
-
return lstsq_op(input, A)
|
|
6077
|
+
return lstsq_(input, A)
|
|
7008
6078
|
|
|
7009
6079
|
|
|
7010
6080
|
def mvlgamma(input, p):
|
|
@@ -7052,6 +6122,64 @@ def mvlgamma(input, p):
|
|
|
7052
6122
|
return mvlgamma_op(input)
|
|
7053
6123
|
|
|
7054
6124
|
|
|
6125
|
+
def nonzero(input, as_tuple=False):
|
|
6126
|
+
r"""
|
|
6127
|
+
Return the positions of all non-zero values.
|
|
6128
|
+
|
|
6129
|
+
Args:
|
|
6130
|
+
input (Tensor): The input Tensor; its rank should be greater than or equal to 1.
|
|
6131
|
+
as_tuple (bool, optional): Whether to return the output as a tuple.
|
|
6132
|
+
If ``False`` , return Tensor. Default: ``False`` .
|
|
6133
|
+
If ``True`` , return a Tuple of Tensors; only supported on ``Ascend`` .
|
|
6134
|
+
|
|
6135
|
+
|
|
6136
|
+
Returns:
|
|
6137
|
+
- If `as_tuple` is ``False``, return a 2-D Tensor whose data type is int64,
|
|
6138
|
+
containing the positions of all non-zero values of the input.
|
|
6139
|
+
- If `as_tuple` is ``True``, return a Tuple of Tensors whose data type is int64.
|
|
6140
|
+
The Tuple length is the dimension of the input tensor,
|
|
6141
|
+
and each element is the 1D tensor of the subscript of all non-zero elements of
|
|
6142
|
+
the input tensor in that dimension.
|
|
6143
|
+
|
|
6144
|
+
Raises:
|
|
6145
|
+
TypeError: If `input` is not Tensor.
|
|
6146
|
+
TypeError: If `as_tuple` is not bool.
|
|
6147
|
+
ValueError: If the dim of `input` equals 0.
|
|
6148
|
+
|
|
6149
|
+
Supported Platforms:
|
|
6150
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
6151
|
+
|
|
6152
|
+
Examples:
|
|
6153
|
+
>>> import mindspore
|
|
6154
|
+
>>> import numpy as np
|
|
6155
|
+
>>> from mindspore import Tensor, ops
|
|
6156
|
+
>>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
|
|
6157
|
+
>>> output = ops.nonzero(x)
|
|
6158
|
+
>>> print(output)
|
|
6159
|
+
[[0 0 0]
|
|
6160
|
+
[0 1 0]]
|
|
6161
|
+
>>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
|
|
6162
|
+
>>> output = ops.nonzero(x, False)
|
|
6163
|
+
>>> print(output)
|
|
6164
|
+
[[0]
|
|
6165
|
+
[2]
|
|
6166
|
+
[4]]
|
|
6167
|
+
>>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
|
|
6168
|
+
>>> output = ops.nonzero(x, True)
|
|
6169
|
+
>>> print(output)
|
|
6170
|
+
(Tensor(shape=[2], dtype=Int64, value=[0, 0]),
|
|
6171
|
+
Tensor(shape=[2], dtype=Int64, value=[0, 1]),
|
|
6172
|
+
Tensor(shape=[2], dtype=Int64, value=[0, 0]))
|
|
6173
|
+
>>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
|
|
6174
|
+
>>> output = ops.nonzero(x, True)
|
|
6175
|
+
>>> print(output)
|
|
6176
|
+
(Tensor(shape=[3], dtype=Int64, value=[0, 2, 4]), )
|
|
6177
|
+
"""
|
|
6178
|
+
if as_tuple:
|
|
6179
|
+
return non_zero_ext_(input)
|
|
6180
|
+
return non_zero_(input)
|
|
6181
|
+
|
|
6182
|
+
|
|
7055
6183
|
def argwhere(input):
|
|
7056
6184
|
"""
|
|
7057
6185
|
Return a Tensor of the positions of all non-zero values.
|
|
@@ -7079,7 +6207,7 @@ def argwhere(input):
|
|
|
7079
6207
|
[[0 0 0]
|
|
7080
6208
|
[0 1 0]]
|
|
7081
6209
|
"""
|
|
7082
|
-
return
|
|
6210
|
+
return nonzero(input)
|
|
7083
6211
|
|
|
7084
6212
|
|
|
7085
6213
|
def column_stack(tensors):
|
|
@@ -7116,14 +6244,13 @@ def column_stack(tensors):
|
|
|
7116
6244
|
raise TypeError(f"For column_stack, the input must be list or tuple of tensors, but got {type(tensors)}.")
|
|
7117
6245
|
|
|
7118
6246
|
trans_x = ()
|
|
7119
|
-
_expand_dims = _get_cache_prim(P.ExpandDims)()
|
|
7120
6247
|
for tensor in tensors:
|
|
7121
6248
|
if not isinstance(tensor, Tensor):
|
|
7122
6249
|
raise TypeError(f"For column_stack, the input element must be tensor, but got {type(tensor)}.")
|
|
7123
6250
|
if tensor.ndim < 1:
|
|
7124
|
-
tensor =
|
|
6251
|
+
tensor = expand_dims(tensor, 0)
|
|
7125
6252
|
if tensor.ndim == 1:
|
|
7126
|
-
tensor =
|
|
6253
|
+
tensor = expand_dims(tensor, 1)
|
|
7127
6254
|
trans_x += (tensor,)
|
|
7128
6255
|
if not trans_x:
|
|
7129
6256
|
raise ValueError(f"For column_stack, the input must have at least 1 tensor, but got 0.")
|
|
@@ -7169,7 +6296,7 @@ def hstack(tensors):
|
|
|
7169
6296
|
if not isinstance(tensor, Tensor):
|
|
7170
6297
|
raise TypeError(f"For hstack, the input element must be tensor, but got {type(tensor)}.")
|
|
7171
6298
|
if tensor.ndim < 1:
|
|
7172
|
-
tensor =
|
|
6299
|
+
tensor = expand_dims(tensor, 0)
|
|
7173
6300
|
tuple_of_tensor += (tensor,)
|
|
7174
6301
|
if not tuple_of_tensor:
|
|
7175
6302
|
raise ValueError("For hstack, the input must have at least 1 tensor, but got 0.")
|
|
@@ -7269,7 +6396,7 @@ def movedim(x, source, destination):
|
|
|
7269
6396
|
f"For `source` and `destination` arguments, the number of elements must be the same, but got 'source':"
|
|
7270
6397
|
f" {len(source)} and 'destination': {len(destination)}.")
|
|
7271
6398
|
perm = _get_moved_perm(ndim, source, destination)
|
|
7272
|
-
return
|
|
6399
|
+
return transpose_(x, perm)
|
|
7273
6400
|
|
|
7274
6401
|
|
|
7275
6402
|
def moveaxis(x, source, destination):
|
|
@@ -7320,7 +6447,7 @@ def swapaxes(input, axis0, axis1):
|
|
|
7320
6447
|
|
|
7321
6448
|
Examples:
|
|
7322
6449
|
>>> import numpy as np
|
|
7323
|
-
>>>
|
|
6450
|
+
>>> from mindspore import ops
|
|
7324
6451
|
>>> from mindspore import Tensor
|
|
7325
6452
|
>>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
|
|
7326
6453
|
>>> output = ops.swapaxes(input, 0, 2)
|
|
@@ -7344,7 +6471,7 @@ def swapaxes(input, axis0, axis1):
|
|
|
7344
6471
|
new_perm = perm[0:axis0] + perm[axis1:axis1 + 1] + \
|
|
7345
6472
|
perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1]
|
|
7346
6473
|
|
|
7347
|
-
return
|
|
6474
|
+
return transpose_(input, new_perm)
|
|
7348
6475
|
|
|
7349
6476
|
|
|
7350
6477
|
def swapdims(input, dim0, dim1):
|
|
@@ -7370,7 +6497,7 @@ def swapdims(input, dim0, dim1):
|
|
|
7370
6497
|
|
|
7371
6498
|
Examples:
|
|
7372
6499
|
>>> import numpy as np
|
|
7373
|
-
>>>
|
|
6500
|
+
>>> from mindspore import ops
|
|
7374
6501
|
>>> from mindspore import Tensor
|
|
7375
6502
|
>>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
|
|
7376
6503
|
>>> output = ops.swapdims(input, 0, 2)
|
|
@@ -7452,9 +6579,47 @@ def repeat_interleave(input, repeats, axis=None):
|
|
|
7452
6579
|
return output
|
|
7453
6580
|
|
|
7454
6581
|
|
|
6582
|
+
def repeat_interleave_ext(input, repeats, dim=None, output_size=None):
|
|
6583
|
+
r"""
|
|
6584
|
+
Repeat elements of a tensor along an axis, like `numpy.repeat`.
|
|
6585
|
+
|
|
6586
|
+
Args:
|
|
6587
|
+
input (Tensor): The tensor to repeat values for. Must be of type: float16,
|
|
6588
|
+
float32, int8, uint8, int16, int32, or int64.
|
|
6589
|
+
repeats (Union[int, tuple, list, Tensor]): The number of times to repeat, must be positive.
|
|
6590
|
+
dim (int, optional): The dim along which to repeat. Default: ``None``. If `dim` is None,
|
|
6591
|
+
the input Tensor will be flattened and the output will also be flattened.
|
|
6592
|
+
output_size (int, optional): Total output size for the given axis (e.g. the sum of repeats).
|
|
6593
|
+
Default: ``None``.
|
|
6594
|
+
|
|
6595
|
+
Returns:
|
|
6596
|
+
One tensor with values repeated along the specified dim. If input has shape
|
|
6597
|
+
:math:`(s1, s2, ..., sn)` and dim is i, the output will have shape :math:`(s1, s2, ...,
|
|
6598
|
+
si * repeats, ..., sn)`. The output type will be the same as the type of `input`.
|
|
6599
|
+
|
|
6600
|
+
Supported Platforms:
|
|
6601
|
+
``Ascend``
|
|
6602
|
+
|
|
6603
|
+
Examples:
|
|
6604
|
+
>>> import mindspore
|
|
6605
|
+
>>> import numpy as np
|
|
6606
|
+
>>> from mindspore import Tensor, ops
|
|
6607
|
+
>>> input = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
|
|
6608
|
+
>>> output = ops.function.array_func.repeat_interleave_ext(input, repeats=2, dim=0)
|
|
6609
|
+
>>> print(output)
|
|
6610
|
+
[[0 1 2]
|
|
6611
|
+
[0 1 2]
|
|
6612
|
+
[3 4 5]
|
|
6613
|
+
[3 4 5]]
|
|
6614
|
+
"""
|
|
6615
|
+
if isinstance(repeats, int):
|
|
6616
|
+
return repeat_interleave_int_(input, repeats, dim, output_size)
|
|
6617
|
+
return repeat_interleave_tensor_(input, repeats, dim, output_size)
|
|
6618
|
+
|
|
6619
|
+
|
|
7455
6620
|
def repeat_elements(x, rep, axis=0):
|
|
7456
6621
|
"""
|
|
7457
|
-
Repeat elements of a tensor along an axis, like `
|
|
6622
|
+
Repeat elements of a tensor along an axis, like `numpy.repeat` .
|
|
7458
6623
|
|
|
7459
6624
|
Args:
|
|
7460
6625
|
x (Tensor): The tensor to repeat values for. Must be of type: float16,
|
|
@@ -7492,34 +6657,19 @@ def repeat_elements(x, rep, axis=0):
|
|
|
7492
6657
|
const_utils.check_type_valid(ops.dtype(x), mstype.number_type, 'input x')
|
|
7493
6658
|
rep = _check_positive_int(rep, "rep", "repeat_elements")
|
|
7494
6659
|
axis = _check_is_int(axis, "axis", "repeat_elements")
|
|
7495
|
-
|
|
7496
|
-
rank_op = P.Rank()
|
|
7497
|
-
tile_op = P.Tile()
|
|
7498
|
-
expand_dims_op = P.ExpandDims()
|
|
7499
|
-
reshape_op = P.Reshape()
|
|
7500
|
-
x_rank = rank_op(x)
|
|
6660
|
+
x_rank = rank_(x)
|
|
7501
6661
|
axis = _check_axis_range(axis, x_rank, "axis", "repeat_elements")
|
|
6662
|
+
axis = axis + x.ndim if axis < 0 else axis
|
|
7502
6663
|
expand_axis = axis + 1
|
|
7503
|
-
x_expand =
|
|
6664
|
+
x_expand = expand_dims(x, expand_axis)
|
|
7504
6665
|
rep_dims = _cal_repeat_dims(x_rank, rep, expand_axis)
|
|
7505
|
-
x_expand =
|
|
7506
|
-
x_shape =
|
|
6666
|
+
x_expand = tile_(x_expand, rep_dims)
|
|
6667
|
+
x_shape = shape_(x)
|
|
7507
6668
|
x_reshape = _cal_reshape(x_shape, rep, axis)
|
|
7508
|
-
x_rep =
|
|
6669
|
+
x_rep = reshape_(x_expand, x_reshape)
|
|
7509
6670
|
return x_rep
|
|
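The expand-tile-reshape sequence above is the standard trick for element repetition; a NumPy rendering of the same steps (illustrative sketch):

>>> import numpy as np
>>> x = np.array([[0, 1, 2], [3, 4, 5]])   # repeat along axis=0 with rep=2
>>> expanded = np.expand_dims(x, 1)        # shape (2, 1, 3)
>>> tiled = np.tile(expanded, (1, 2, 1))   # shape (2, 2, 3)
>>> tiled.reshape(4, 3)                    # fold the repeat axis back in
array([[0, 1, 2],
       [0, 1, 2],
       [3, 4, 5],
       [3, 4, 5]])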
7510
6671
|
|
|
7511
6672
|
|
|
7512
|
-
@_primexpr
|
|
7513
|
-
def _check_sequence_mask_input_len(input_shape, prim_name=None):
|
|
7514
|
-
msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
|
|
7515
|
-
if not input_shape:
|
|
7516
|
-
raise ValueError(f"{msg_prefix} input_shape must be greater than 0, but got {input_shape}.")
|
|
7517
|
-
# broadcast only supports 7d shape
|
|
7518
|
-
shape_size = len(input_shape)
|
|
7519
|
-
if shape_size >= 7:
|
|
7520
|
-
raise ValueError(f"{msg_prefix} dimension of input_shape must be less than 7, but got {shape_size}d.")
|
|
7521
|
-
|
|
7522
|
-
|
|
7523
6673
|
def sequence_mask(lengths, maxlen=None):
|
|
7524
6674
|
"""
|
|
7525
6675
|
Returns a mask tensor representing the first N positions of each cell.
|
|
@@ -7572,29 +6722,19 @@ def sequence_mask(lengths, maxlen=None):
|
|
|
7572
6722
|
[[ True True False False ]
|
|
7573
6723
|
[ True True True True ]]]
|
|
7574
6724
|
"""
|
|
7575
|
-
|
|
7576
|
-
argmax_op = P.ArgMaxWithValue()
|
|
7577
|
-
reshape_op = P.Reshape()
|
|
7578
|
-
range_op = P.Range()
|
|
7579
|
-
expand_op = P.ExpandDims()
|
|
7580
|
-
cast_op = P.Cast()
|
|
7581
|
-
to_tensor_op = P.ScalarToTensor()
|
|
7582
|
-
shape_op = P.Shape()
|
|
7583
|
-
|
|
7584
6725
|
const_utils.check_type_valid(ops.dtype(lengths), [mstype.int64, mstype.int32], 'lengths')
|
|
7585
|
-
_check_sequence_mask_input_len(shape_op(lengths), "sequence_mask")
|
|
7586
6726
|
|
|
7587
6727
|
if maxlen is None:
|
|
7588
|
-
flatten_data =
|
|
7589
|
-
flatten_data =
|
|
7590
|
-
_, value =
|
|
7591
|
-
maxlen =
|
|
6728
|
+
flatten_data = reshape_(lengths, (-1,))
|
|
6729
|
+
flatten_data = cast_(flatten_data, mstype.float32)
|
|
6730
|
+
_, value = arg_max_with_value_(flatten_data)
|
|
6731
|
+
maxlen = cast_(value, mstype.int32)
|
|
7592
6732
|
else:
|
|
7593
6733
|
maxlen = _check_positive_int(maxlen, "maxlen", "sequence_mask")
|
|
7594
|
-
maxlen =
|
|
6734
|
+
maxlen = scalar_to_tensor_(maxlen, mstype.int32)
|
|
7595
6735
|
|
|
7596
|
-
range_vector =
|
|
7597
|
-
mask =
|
|
6736
|
+
range_vector = range_(scalar_to_tensor_(0, mstype.int32), maxlen, scalar_to_tensor_(1, mstype.int32))
|
|
6737
|
+
mask = expand_dims(lengths, -1)
|
|
7598
6738
|
result = range_vector < mask
|
|
7599
6739
|
return result
|
|
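The rewritten body boils down to a single broadcast comparison: an index row :math:`[0, \ldots, maxlen - 1]` is compared against `lengths` expanded with a trailing axis. A NumPy sketch of the same computation (illustrative only):

>>> import numpy as np
>>> lengths = np.array([1, 3, 2])
>>> maxlen = lengths.max()                  # the maxlen=None branch
>>> np.arange(maxlen) < lengths[..., None]  # broadcast comparison
array([[ True, False, False],
       [ True,  True,  True],
       [ True,  True, False]])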
7600
6740
|
|
|
@@ -7607,35 +6747,6 @@ def top_k(input_x, k, sorted=True):
|
|
|
7607
6747
|
return top_k_(input_x, k)
|
|
7608
6748
|
|
|
7609
6749
|
|
|
7610
|
-
def deepcopy(input_x):
|
|
7611
|
-
"""
|
|
7612
|
-
Returns a deepcopy of input tensor.
|
|
7613
|
-
|
|
7614
|
-
Args:
|
|
7615
|
-
input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
|
|
7616
|
-
|
|
7617
|
-
Returns:
|
|
7618
|
-
Tensor, a deepcopy of `input_x`.
|
|
7619
|
-
|
|
7620
|
-
Raises:
|
|
7621
|
-
TypeError: If `input_x` is not a Tensor.
|
|
7622
|
-
|
|
7623
|
-
Supported Platforms:
|
|
7624
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
7625
|
-
|
|
7626
|
-
Examples:
|
|
7627
|
-
>>> import mindspore
|
|
7628
|
-
>>> from mindspore import Tensor, ops
|
|
7629
|
-
>>> input = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
|
|
7630
|
-
>>> output = ops.deepcopy(input)
|
|
7631
|
-
>>> print(output)
|
|
7632
|
-
[[0 1]
|
|
7633
|
-
[2 1]]
|
|
7634
|
-
"""
|
|
7635
|
-
_deepcopy = _get_cache_prim(P.Identity)()
|
|
7636
|
-
return _deepcopy(input_x)
|
|
7637
|
-
|
|
7638
|
-
|
|
7639
6750
|
__all__ = [
|
|
7640
6751
|
'unique',
|
|
7641
6752
|
'unique_with_pad',
|
|
@@ -7662,8 +6773,8 @@ __all__ = [
|
|
|
7662
6773
|
'full_like',
|
|
7663
6774
|
'dyn_shape',
|
|
7664
6775
|
'rank',
|
|
7665
|
-
'range',
|
|
7666
6776
|
'arange',
|
|
6777
|
+
'range',
|
|
7667
6778
|
'reshape',
|
|
7668
6779
|
'reshape_',
|
|
7669
6780
|
'flatten',
|
|
@@ -7772,6 +6883,7 @@ __all__ = [
|
|
|
7772
6883
|
'aminmax',
|
|
7773
6884
|
'sort',
|
|
7774
6885
|
'top_k',
|
|
7775
|
-
'deepcopy'
|
|
6886
|
+
'deepcopy',
|
|
6887
|
+
'flip',
|
|
7776
6888
|
]
|
|
7777
6889
|
__all__.sort()
|