mindspore-2.2.14-cp39-cp39-win_amd64.whl → mindspore-2.4.0-cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +8 -5
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +124 -25
- mindspore/_extends/builtin_operations.py +2 -1
- mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
- mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
- mindspore/_extends/parse/__init__.py +18 -14
- mindspore/_extends/parse/compile_config.py +299 -0
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +182 -68
- mindspore/_extends/parse/resources.py +45 -14
- mindspore/_extends/parse/standard_method.py +192 -252
- mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _extends/pijit/__init__.py} +6 -16
- mindspore/_extends/pijit/pijit_func_white_list.py +669 -0
- mindspore/_extends/remote/kernel_build_server.py +2 -0
- mindspore/_profiler.py +30 -0
- mindspore/amp.py +67 -26
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/boost/less_batch_normalization.py +9 -6
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +20 -7
- mindspore/common/_jit_fallback_utils.py +2 -3
- mindspore/common/_pijit_context.py +190 -0
- mindspore/common/_register_for_adapter.py +7 -0
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_register_for_tensor.py +10 -10
- mindspore/common/_stub_tensor.py +7 -1
- mindspore/common/_tensor_overload.py +139 -0
- mindspore/common/_utils.py +5 -17
- mindspore/common/api.py +449 -129
- mindspore/common/auto_dynamic_shape.py +27 -14
- mindspore/common/dtype.py +17 -10
- mindspore/common/dump.py +8 -11
- mindspore/common/file_system.py +48 -0
- mindspore/common/generator.py +254 -0
- mindspore/common/hook_handle.py +65 -30
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +34 -14
- mindspore/common/lazy_inline.py +72 -19
- mindspore/common/mindir_util.py +12 -2
- mindspore/common/mutable.py +79 -14
- mindspore/common/no_inline.py +54 -0
- mindspore/common/np_dtype.py +25 -0
- mindspore/common/parameter.py +73 -21
- mindspore/common/recompute.py +292 -0
- mindspore/common/seed.py +9 -9
- mindspore/common/sparse_tensor.py +276 -24
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +668 -514
- mindspore/communication/__init__.py +6 -11
- mindspore/communication/_comm_helper.py +43 -3
- mindspore/communication/comm_func.py +1395 -0
- mindspore/communication/management.py +117 -104
- mindspore/config/op_info.config +22 -54
- mindspore/context.py +455 -71
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +6 -6
- mindspore/dataset/audio/transforms.py +711 -158
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/core/config.py +7 -0
- mindspore/dataset/core/validator_helpers.py +7 -0
- mindspore/dataset/engine/cache_client.py +2 -2
- mindspore/dataset/engine/datasets.py +201 -116
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +83 -3
- mindspore/dataset/engine/datasets_text.py +39 -39
- mindspore/dataset/engine/datasets_user_defined.py +230 -141
- mindspore/dataset/engine/datasets_vision.py +78 -74
- mindspore/dataset/engine/iterators.py +29 -0
- mindspore/dataset/engine/obs/util.py +7 -0
- mindspore/dataset/engine/offload.py +5 -7
- mindspore/dataset/engine/queue.py +138 -66
- mindspore/dataset/engine/serializer_deserializer.py +2 -2
- mindspore/dataset/engine/validators.py +41 -15
- mindspore/dataset/text/__init__.py +2 -5
- mindspore/dataset/text/transforms.py +408 -121
- mindspore/dataset/text/utils.py +9 -9
- mindspore/dataset/transforms/__init__.py +0 -3
- mindspore/dataset/transforms/transforms.py +261 -76
- mindspore/dataset/utils/browse_dataset.py +9 -9
- mindspore/dataset/utils/line_reader.py +2 -0
- mindspore/dataset/vision/__init__.py +7 -10
- mindspore/dataset/vision/c_transforms.py +10 -10
- mindspore/dataset/vision/py_transforms_util.py +1 -1
- mindspore/dataset/vision/transforms.py +2844 -549
- mindspore/dataset/vision/utils.py +161 -10
- mindspore/dataset/vision/validators.py +16 -3
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/{rewrite/ast_creator_register.py → experimental/es/__init__.py} +5 -20
- mindspore/experimental/es/embedding_service.py +883 -0
- mindspore/experimental/es/embedding_service_layer.py +581 -0
- mindspore/experimental/llm_boost/__init__.py +21 -0
- mindspore/experimental/llm_boost/atb/__init__.py +23 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
- mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
- mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
- mindspore/experimental/llm_boost/register.py +129 -0
- mindspore/experimental/llm_boost/utils.py +31 -0
- mindspore/experimental/optim/__init__.py +12 -2
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +35 -34
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +124 -15
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +66 -121
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +18 -8
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +28 -19
- mindspore/hal/__init__.py +40 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/contiguous_tensors_handle.py +175 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/memory.py +326 -0
- mindspore/hal/stream.py +357 -0
- mindspore/include/api/data_type.h +2 -2
- mindspore/include/api/dual_abi_helper.h +16 -3
- mindspore/include/api/model.h +4 -3
- mindspore/include/api/model_group.h +13 -1
- mindspore/include/api/status.h +14 -0
- mindspore/include/api/types.h +10 -10
- mindspore/include/c_api/model_c.h +173 -0
- mindspore/include/c_api/types_c.h +19 -0
- mindspore/include/dataset/config.h +2 -2
- mindspore/include/dataset/constants.h +2 -2
- mindspore/include/dataset/execute.h +3 -5
- mindspore/include/dataset/vision.h +58 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +3 -3
- mindspore/mindrecord/__init__.py +5 -1
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +25 -0
- mindspore/mindrecord/filewriter.py +138 -103
- mindspore/mindrecord/mindpage.py +40 -6
- mindspore/mindrecord/shardutils.py +3 -2
- mindspore/mindrecord/shardwriter.py +7 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
- mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
- mindspore/mindrecord/tools/csv_to_mr.py +4 -9
- mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mint/__init__.py +1586 -0
- mindspore/mint/distributed/__init__.py +31 -0
- mindspore/mint/distributed/distributed.py +254 -0
- mindspore/{rewrite/ast_transformers → mint/linalg}/__init__.py +9 -4
- mindspore/mint/nn/__init__.py +757 -0
- mindspore/mint/nn/functional.py +679 -0
- mindspore/mint/nn/layer/__init__.py +39 -0
- mindspore/mint/nn/layer/activation.py +133 -0
- mindspore/mint/nn/layer/normalization.py +477 -0
- mindspore/mint/nn/layer/pooling.py +110 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +206 -0
- mindspore/mint/special/__init__.py +63 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/multiprocessing/__init__.py +73 -0
- mindspore/nn/cell.py +461 -323
- mindspore/nn/dynamic_lr.py +2 -2
- mindspore/nn/layer/activation.py +292 -135
- mindspore/nn/layer/basic.py +288 -83
- mindspore/nn/layer/channel_shuffle.py +3 -16
- mindspore/nn/layer/container.py +3 -3
- mindspore/nn/layer/conv.py +75 -66
- mindspore/nn/layer/embedding.py +221 -45
- mindspore/nn/layer/image.py +4 -7
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +150 -68
- mindspore/nn/layer/padding.py +64 -87
- mindspore/nn/layer/pooling.py +175 -12
- mindspore/nn/layer/rnn_cells.py +6 -16
- mindspore/nn/layer/rnns.py +6 -5
- mindspore/nn/layer/thor_layer.py +1 -2
- mindspore/nn/layer/timedistributed.py +1 -1
- mindspore/nn/layer/transformer.py +55 -53
- mindspore/nn/learning_rate_schedule.py +6 -5
- mindspore/nn/loss/__init__.py +2 -2
- mindspore/nn/loss/loss.py +145 -88
- mindspore/nn/optim/__init__.py +2 -1
- mindspore/nn/optim/ada_grad.py +4 -2
- mindspore/nn/optim/adadelta.py +4 -2
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +102 -181
- mindspore/nn/optim/adamax.py +4 -2
- mindspore/nn/optim/adasum.py +3 -3
- mindspore/nn/optim/asgd.py +4 -2
- mindspore/nn/optim/ftrl.py +31 -61
- mindspore/nn/optim/lamb.py +5 -3
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +6 -4
- mindspore/nn/optim/momentum.py +13 -25
- mindspore/nn/optim/optimizer.py +6 -3
- mindspore/nn/optim/proximal_ada_grad.py +4 -2
- mindspore/nn/optim/rmsprop.py +9 -3
- mindspore/nn/optim/rprop.py +4 -2
- mindspore/nn/optim/sgd.py +5 -3
- mindspore/nn/optim/tft_wrapper.py +127 -0
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
- mindspore/nn/probability/distribution/beta.py +2 -2
- mindspore/nn/probability/distribution/categorical.py +4 -6
- mindspore/nn/probability/distribution/cauchy.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +2 -2
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +13 -1
- mindspore/nn/wrap/__init__.py +2 -1
- mindspore/nn/wrap/cell_wrapper.py +46 -12
- mindspore/nn/wrap/grad_reducer.py +148 -8
- mindspore/nn/wrap/loss_scale.py +44 -7
- mindspore/numpy/__init__.py +2 -0
- mindspore/numpy/array_creations.py +67 -68
- mindspore/numpy/array_ops.py +70 -66
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/fft.py +966 -0
- mindspore/numpy/logic_ops.py +11 -10
- mindspore/numpy/math_ops.py +147 -152
- mindspore/numpy/utils.py +3 -0
- mindspore/numpy/utils_const.py +4 -4
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +9 -6
- mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
- mindspore/ops/_grad_experimental/grad_comm_ops.py +135 -36
- mindspore/ops/_grad_experimental/grad_math_ops.py +61 -298
- mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
- mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
- mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/__init__.py +0 -1
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
- mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
- mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -3
- mindspore/ops/_op_impl/cpu/adam.py +2 -2
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
- mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
- mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
- mindspore/ops/_vmap/vmap_array_ops.py +162 -101
- mindspore/ops/_vmap/vmap_base.py +8 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
- mindspore/ops/_vmap/vmap_image_ops.py +70 -13
- mindspore/ops/_vmap/vmap_math_ops.py +147 -59
- mindspore/ops/_vmap/vmap_nn_ops.py +292 -117
- mindspore/ops/_vmap/vmap_other_ops.py +1 -1
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +309 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +252 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
- mindspore/ops/auto_generate/gen_extend_func.py +1701 -0
- mindspore/ops/auto_generate/gen_ops_def.py +8482 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +16704 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +549 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +201 -66
- mindspore/ops/composite/math_ops.py +10 -49
- mindspore/ops/composite/multitype_ops/_compile_utils.py +192 -618
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +25 -134
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +8 -3
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
- mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
- mindspore/ops/deprecated.py +14 -3
- mindspore/ops/function/__init__.py +53 -11
- mindspore/ops/function/array_func.py +1269 -1821
- mindspore/ops/function/clip_func.py +19 -31
- mindspore/ops/function/debug_func.py +114 -5
- mindspore/ops/function/fft_func.py +44 -0
- mindspore/ops/function/grad/grad_func.py +30 -22
- mindspore/ops/function/image_func.py +27 -21
- mindspore/ops/function/linalg_func.py +35 -68
- mindspore/ops/function/math_func.py +1170 -2697
- mindspore/ops/function/nn_func.py +2116 -1128
- mindspore/ops/function/other_func.py +8 -8
- mindspore/ops/function/parameter_func.py +5 -93
- mindspore/ops/function/random_func.py +435 -113
- mindspore/ops/function/reshard_func.py +104 -0
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/function/sparse_unary_func.py +9 -16
- mindspore/ops/function/spectral_func.py +1 -1
- mindspore/ops/function/vmap_func.py +16 -15
- mindspore/ops/functional.py +355 -346
- mindspore/ops/op_info_register.py +18 -45
- mindspore/ops/operations/__init__.py +38 -24
- mindspore/ops/operations/_grad_ops.py +21 -927
- mindspore/ops/operations/_infer_ops.py +19 -0
- mindspore/ops/operations/_inner_ops.py +173 -607
- mindspore/ops/operations/_rl_inner_ops.py +2 -2
- mindspore/ops/operations/_scalar_ops.py +5 -480
- mindspore/ops/operations/_sequence_ops.py +6 -36
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +106 -2837
- mindspore/ops/operations/comm_ops.py +799 -127
- mindspore/ops/operations/custom_ops.py +124 -119
- mindspore/ops/operations/debug_ops.py +142 -41
- mindspore/ops/operations/image_ops.py +1 -217
- mindspore/ops/operations/inner_ops.py +5 -40
- mindspore/ops/operations/linalg_ops.py +1 -49
- mindspore/ops/operations/manually_defined/__init__.py +24 -0
- mindspore/ops/operations/manually_defined/_inner.py +73 -0
- mindspore/ops/operations/manually_defined/ops_def.py +2271 -0
- mindspore/ops/operations/math_ops.py +666 -4972
- mindspore/ops/operations/nn_ops.py +205 -2213
- mindspore/ops/operations/other_ops.py +60 -49
- mindspore/ops/operations/random_ops.py +50 -54
- mindspore/ops/operations/reshard_ops.py +53 -0
- mindspore/ops/operations/sparse_ops.py +4 -4
- mindspore/ops/primitive.py +216 -103
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +252 -0
- mindspore/ops_generate/arg_handler.py +197 -0
- mindspore/ops_generate/gen_aclnn_implement.py +263 -0
- mindspore/ops_generate/gen_constants.py +36 -0
- mindspore/ops_generate/gen_ops.py +1099 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
- mindspore/ops_generate/gen_pyboost_func.py +1052 -0
- mindspore/ops_generate/gen_utils.py +209 -0
- mindspore/ops_generate/op_proto.py +145 -0
- mindspore/ops_generate/pyboost_utils.py +367 -0
- mindspore/ops_generate/template.py +261 -0
- mindspore/parallel/__init__.py +8 -4
- mindspore/parallel/_auto_parallel_context.py +100 -10
- mindspore/parallel/_cell_wrapper.py +99 -9
- mindspore/parallel/_cost_model_context.py +1 -1
- mindspore/parallel/_dp_allreduce_fusion.py +159 -159
- mindspore/parallel/_parallel_serialization.py +67 -23
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +99 -22
- mindspore/parallel/_transformer/__init__.py +1 -1
- mindspore/parallel/_transformer/layers.py +1 -1
- mindspore/parallel/_transformer/loss.py +1 -1
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/op_parallel_config.py +1 -1
- mindspore/parallel/_transformer/transformer.py +2 -2
- mindspore/parallel/_utils.py +173 -6
- mindspore/parallel/algo_parameter_config.py +8 -10
- mindspore/parallel/checkpoint_transform.py +204 -38
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +352 -0
- mindspore/parallel/cluster/process_entity/_utils.py +101 -0
- mindspore/parallel/cluster/run.py +136 -0
- mindspore/parallel/mpi/__init__.py +1 -1
- mindspore/parallel/mpi/_mpi_config.py +1 -1
- mindspore/parallel/parameter_broadcast.py +151 -0
- mindspore/parallel/shard.py +279 -37
- mindspore/parallel/transform_safetensors.py +993 -0
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +4 -2
- mindspore/profiler/common/constant.py +29 -0
- mindspore/profiler/common/process_pool.py +41 -0
- mindspore/profiler/common/registry.py +47 -0
- mindspore/profiler/common/singleton.py +28 -0
- mindspore/profiler/common/util.py +153 -0
- mindspore/profiler/dynamic_profiler.py +694 -0
- mindspore/profiler/envprofiling.py +18 -20
- mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +71 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +180 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +185 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +136 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +131 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +104 -0
- mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +123 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +75 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
- mindspore/profiler/parser/ascend_flops_generator.py +20 -4
- mindspore/profiler/parser/ascend_hccl_generator.py +29 -278
- mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
- mindspore/profiler/parser/ascend_memory_generator.py +185 -0
- mindspore/profiler/parser/ascend_msprof_exporter.py +148 -146
- mindspore/profiler/parser/ascend_msprof_generator.py +73 -283
- mindspore/profiler/parser/ascend_op_generator.py +92 -42
- mindspore/profiler/parser/ascend_timeline_generator.py +298 -133
- mindspore/profiler/parser/base_timeline_generator.py +25 -25
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +25 -12
- mindspore/profiler/parser/framework_parser.py +4 -393
- mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
- mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
- mindspore/profiler/parser/integrator.py +3 -1
- mindspore/profiler/parser/memory_usage_parser.py +0 -154
- mindspore/profiler/parser/minddata_parser.py +72 -3
- mindspore/profiler/parser/profiler_info.py +94 -7
- mindspore/profiler/profiler.py +153 -0
- mindspore/profiler/profiling.py +631 -508
- mindspore/rewrite/__init__.py +2 -14
- mindspore/rewrite/api/node.py +122 -36
- mindspore/rewrite/api/pattern_engine.py +2 -3
- mindspore/rewrite/api/scoped_value.py +16 -15
- mindspore/rewrite/api/symbol_tree.py +45 -29
- mindspore/rewrite/ast_helpers/__init__.py +3 -6
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
- mindspore/rewrite/common/__init__.py +1 -2
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
- mindspore/rewrite/{namer.py → common/namer.py} +63 -18
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/node/__init__.py +5 -5
- mindspore/rewrite/node/call_function.py +23 -7
- mindspore/rewrite/node/cell_container.py +7 -3
- mindspore/rewrite/node/control_flow.py +53 -28
- mindspore/rewrite/node/node.py +212 -196
- mindspore/rewrite/node/node_manager.py +51 -22
- mindspore/rewrite/node/node_topological_manager.py +3 -23
- mindspore/rewrite/parsers/__init__.py +12 -0
- mindspore/rewrite/parsers/arguments_parser.py +8 -9
- mindspore/rewrite/parsers/assign_parser.py +637 -413
- mindspore/rewrite/parsers/attribute_parser.py +3 -4
- mindspore/rewrite/parsers/class_def_parser.py +115 -148
- mindspore/rewrite/parsers/constant_parser.py +5 -5
- mindspore/rewrite/parsers/container_parser.py +4 -6
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +31 -98
- mindspore/rewrite/parsers/function_def_parser.py +13 -5
- mindspore/rewrite/parsers/if_parser.py +28 -10
- mindspore/rewrite/parsers/module_parser.py +8 -182
- mindspore/rewrite/parsers/parser.py +1 -5
- mindspore/rewrite/parsers/parser_register.py +1 -1
- mindspore/rewrite/parsers/return_parser.py +5 -10
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +705 -186
- mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
- mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
- mindspore/run_check/_check_version.py +40 -115
- mindspore/run_check/run_check.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +597 -263
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +7 -5
- mindspore/train/_utils.py +204 -4
- mindspore/train/amp.py +335 -295
- mindspore/train/anf_ir_pb2.py +14 -2
- mindspore/train/callback/__init__.py +5 -2
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +220 -43
- mindspore/train/callback/_cluster_monitor.py +201 -0
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_flops_collector.py +239 -0
- mindspore/train/callback/_landscape.py +15 -9
- mindspore/train/callback/_loss_monitor.py +5 -5
- mindspore/train/callback/_on_request_exit.py +136 -33
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +12 -12
- mindspore/train/callback/_tft_register.py +352 -0
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/data_sink.py +6 -5
- mindspore/train/dataset_helper.py +66 -23
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/accuracy.py +7 -7
- mindspore/train/metrics/confusion_matrix.py +8 -6
- mindspore/train/metrics/cosine_similarity.py +6 -4
- mindspore/train/metrics/error.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/perplexity.py +2 -1
- mindspore/train/metrics/roc.py +4 -4
- mindspore/train/metrics/topk.py +2 -2
- mindspore/train/mind_ir_pb2.py +116 -37
- mindspore/train/model.py +382 -76
- mindspore/train/serialization.py +787 -288
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +51 -28
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +21 -0
- mindspore/utils/utils.py +60 -0
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/METADATA +8 -4
- mindspore-2.4.0.dist-info/RECORD +1406 -0
- {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/entry_points.txt +1 -0
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
- mindspore/gen_ops.py +0 -273
- mindspore/include/c_api/ms/abstract.h +0 -67
- mindspore/include/c_api/ms/attribute.h +0 -197
- mindspore/include/c_api/ms/base/handle_types.h +0 -43
- mindspore/include/c_api/ms/base/macros.h +0 -32
- mindspore/include/c_api/ms/base/status.h +0 -33
- mindspore/include/c_api/ms/base/types.h +0 -282
- mindspore/include/c_api/ms/context.h +0 -102
- mindspore/include/c_api/ms/graph.h +0 -160
- mindspore/include/c_api/ms/node.h +0 -606
- mindspore/include/c_api/ms/tensor.h +0 -161
- mindspore/include/c_api/ms/value.h +0 -84
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/layer/flash_attention.py +0 -189
- mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +0 -93
- mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +0 -66
- mindspore/ops/_op_impl/cpu/concat.py +0 -39
- mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
- mindspore/ops/_op_impl/tbe/__init__.py +0 -47
- mindspore/ops/_op_impl/tbe/abs.py +0 -38
- mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/acos.py +0 -37
- mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/acosh.py +0 -37
- mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
- mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
- mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
- mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
- mindspore/ops/_op_impl/tbe/add.py +0 -42
- mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/add_n.py +0 -39
- mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
- mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
- mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
- mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
- mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
- mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
- mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
- mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/asin.py +0 -37
- mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/asinh.py +0 -37
- mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/assign.py +0 -79
- mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
- mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
- mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/atan.py +0 -37
- mindspore/ops/_op_impl/tbe/atan2.py +0 -38
- mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/atanh.py +0 -37
- mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
- mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
- mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
- mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
- mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
- mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
- mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
- mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
- mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
- mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
- mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cast.py +0 -55
- mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/cdist.py +0 -38
- mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/ceil.py +0 -37
- mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/celu.py +0 -39
- mindspore/ops/_op_impl/tbe/centralization.py +0 -39
- mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
- mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/concat.py +0 -40
- mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
- mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
- mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
- mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
- mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
- mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/cos.py +0 -37
- mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/cosh.py +0 -37
- mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
- mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cummin.py +0 -41
- mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
- mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
- mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
- mindspore/ops/_op_impl/tbe/diag.py +0 -38
- mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
- mindspore/ops/_op_impl/tbe/dilation.py +0 -40
- mindspore/ops/_op_impl/tbe/div.py +0 -41
- mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
- mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
- mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
- mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
- mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
- mindspore/ops/_op_impl/tbe/elu.py +0 -38
- mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/equal.py +0 -42
- mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/erf.py +0 -37
- mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfc.py +0 -37
- mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
- mindspore/ops/_op_impl/tbe/exp.py +0 -40
- mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
- mindspore/ops/_op_impl/tbe/expm1.py +0 -37
- mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
- mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/fill.py +0 -56
- mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/flatten.py +0 -48
- mindspore/ops/_op_impl/tbe/floor.py +0 -37
- mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
- mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
- mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
- mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
- mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
- mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
- mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/ger.py +0 -43
- mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/greater.py +0 -43
- mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
- mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
- mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
- mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
- mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
- mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
- mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
- mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/im2col.py +0 -42
- mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
- mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
- mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/inv.py +0 -38
- mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/invert.py +0 -37
- mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/iou.py +0 -38
- mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/is_close.py +0 -40
- mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
- mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
- mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
- mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
- mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
- mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
- mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/lerp.py +0 -38
- mindspore/ops/_op_impl/tbe/less.py +0 -41
- mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/log.py +0 -40
- mindspore/ops/_op_impl/tbe/log1p.py +0 -37
- mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
- mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
- mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
- mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
- mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/matmul.py +0 -53
- mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
- mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
- mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
- mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum.py +0 -39
- mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
- mindspore/ops/_op_impl/tbe/minimum.py +0 -40
- mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mish.py +0 -37
- mindspore/ops/_op_impl/tbe/mod.py +0 -41
- mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/mul.py +0 -37
- mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
- mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
- mindspore/ops/_op_impl/tbe/neg.py +0 -39
- mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
- mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
- mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
- mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
- mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
- mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/pack.py +0 -58
- mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
- mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
- mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/pdist.py +0 -36
- mindspore/ops/_op_impl/tbe/pooling.py +0 -46
- mindspore/ops/_op_impl/tbe/population_count.py +0 -38
- mindspore/ops/_op_impl/tbe/pow.py +0 -41
- mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/prelu.py +0 -37
- mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/range.py +0 -39
- mindspore/ops/_op_impl/tbe/real_div.py +0 -38
- mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
- mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
- mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
- mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
- mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6.py +0 -38
- mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/renorm.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
- mindspore/ops/_op_impl/tbe/rint.py +0 -37
- mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roll.py +0 -42
- mindspore/ops/_op_impl/tbe/round.py +0 -38
- mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
- mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
- mindspore/ops/_op_impl/tbe/select.py +0 -38
- mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/selu.py +0 -39
- mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sgd.py +0 -62
- mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sign.py +0 -38
- mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/sin.py +0 -37
- mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sinh.py +0 -37
- mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/slice.py +0 -58
- mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
- mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax.py +0 -37
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
- mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/softplus.py +0 -37
- mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softsign.py +0 -37
- mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sort.py +0 -38
- mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/split_d.py +0 -38
- mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/split_v.py +0 -39
- mindspore/ops/_op_impl/tbe/splitv.py +0 -39
- mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/square.py +0 -38
- mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
- mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
- mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
- mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
- mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
- mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
- mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
- mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
- mindspore/ops/_op_impl/tbe/sub.py +0 -39
- mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tan.py +0 -38
- mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh.py +0 -37
- mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
- mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
- mindspore/ops/_op_impl/tbe/tile.py +0 -37
- mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
- mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
- mindspore/ops/_op_impl/tbe/transpose.py +0 -60
- mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
- mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
- mindspore/ops/_op_impl/tbe/trunc.py +0 -39
- mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/unpack.py +0 -38
- mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
- mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
- mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
- mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
- mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
- mindspore/ops/_tracefunc.py +0 -241
- mindspore/ops/arg_dtype_cast.py +0 -54
- mindspore/ops/silent_check.py +0 -162
- mindspore/profiler/parser/msadvisor_analyzer.py +0 -82
- mindspore/profiler/parser/msadvisor_parser.py +0 -240
- mindspore/rewrite/api/tree_node_helper.py +0 -60
- mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
- mindspore/rewrite/namespace.py +0 -53
- mindspore-2.2.14.dist-info/RECORD +0 -1924
- {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/WHEEL +0 -0
- {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
-# Copyright 2020-
+# Copyright 2020-2023 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -29,15 +29,22 @@ from mindspore.common._utils import is_shape_unknown, is_dim_unknown
 from mindspore.ops.primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op
 from mindspore import _checkparam as validator
 from mindspore._checkparam import _check_3d_int_or_tuple
-from mindspore.ops._tracefunc import PackFunc
 from mindspore.common import dtype as mstype
 from mindspore.common._decorator import deprecated
-from mindspore.common.parameter import Parameter
 from mindspore.common import Tensor, CSRTensor, COOTensor
 from mindspore._c_expression import Tensor as Tensor_
 from mindspore._c_expression import CSRTensor as CSRTensor_
 from mindspore._c_expression import COOTensor as COOTensor_
-
+from ..auto_generate import (ExpandDims, Reshape, TensorShape, Transpose, Gather,
+                             OnesLike, ZerosLike, Argmax, ArgMaxExt,
+                             ReverseV2, Diag, Eye, ScatterNd, ResizeNearestNeighborV2,
+                             GatherNd, GatherD, Range, MaskedFill, RightShift, NonZero,
+                             ResizeNearestNeighbor, Identity, Split, CumSum, CumProd, MaskedSelect,
+                             Cummax, Cummin, Argmin, Concat, UnsortedSegmentSum, ScalarToTensor,
+                             Triu, BroadcastTo, StridedSlice, Select, TopkExt, SearchSorted)
+from .manually_defined import Rank, Shape, Tile, Cast, Ones, Zeros
+from ..auto_generate import ArgMaxWithValue, ArgMinWithValue
+from ..auto_generate import TensorScatterElements as TensorScatterElementsExt

 class _ScatterOp(PrimitiveWithInfer):
     """
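Note: read together with the class deletions in the hunks below, this import block indicates the hand-written primitive classes (ExpandDims, Reshape, Transpose, Gather, Split, Cast, and so on) were moved into the `..auto_generate` and `.manually_defined` modules rather than removed. A minimal sketch of the user-facing expectation, which is an assumption based on these re-exports and not a verified guarantee of 2.4.0 behavior:

```python
import mindspore
import numpy as np
from mindspore import Tensor, ops

# The primitives are now re-exported from generated modules, so existing
# call sites like these should be unaffected by the relocation.
x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
print(ops.ExpandDims()(x, 0).shape)   # (1, 2, 2)
print(ops.Reshape()(x, (4,)).shape)   # (4,)
```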
@@ -187,54 +194,6 @@ class Expand(Primitive):
         self.init_prim_io_names(inputs=['x', 'shape'], outputs=['y'])


-class ExpandDims(PrimitiveWithCheck):
-    """
-    Adds an additional dimension to `input_x` at the given axis, the dimension of
-    `input_x` should be greater than or equal to 1.
-
-    Refer to :func:`mindspore.ops.expand_dims` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-        - **axis** (int) - Specifies the dimension index at which to expand
-          the shape of `input_x`. The value of axis must be in the range
-          `[-input_x.ndim-1, input_x.ndim]`. Only constant value is allowed.
-
-    Outputs:
-        Tensor, the shape of tensor is :math:`(1, x_1, x_2, ..., x_R)` if the
-        value of `axis` is 0. It has the same data type as `input_x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
-        >>> expand_dims = ops.ExpandDims()
-        >>> output = expand_dims(input_tensor, 0)
-        >>> print(output)
-        [[[2. 2.]
-          [2. 2.]]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize ExpandDims"""
-        self.init_prim_io_names(inputs=['x', 'axis'], outputs=['output'])
-
-    def infer_value(self, input_x, axis):
-        value = None
-        if input_x is not None and axis is not None:
-            dtype = input_x.dtype
-            if input_x.dtype == mstype.bfloat16:
-                cpu_cast = Cast().set_device("CPU")
-                input_x = cpu_cast(input_x, mstype.float32)
-            value = Tensor(np.expand_dims(input_x.asnumpy(), axis), dtype)
-        return value
-
-
 class DType(Primitive):
     """
     Returns the data type of the input tensor as mindspore.dtype.
@@ -304,88 +263,6 @@ class CheckNumerics(Primitive):
         self.init_prim_io_names(inputs=['x'], outputs=['y'])


-class Cast(PrimitiveWithCheck):
-    """
-    Returns a tensor with the new specified data type.
-
-    Note:
-        When converting complex numbers to boolean type, the imaginary part of the complex number is not
-        taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.
-
-    Inputs:
-        - **input_x** (Union[Tensor, Number]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-          The tensor to be cast.
-        - **type** (dtype.Number) - The valid data type of the output tensor. Only constant value is allowed.
-
-    Outputs:
-        Tensor, the shape of tensor is the same as `input_x`, :math:`(x_1, x_2, ..., x_R)`.
-
-    Raises:
-        TypeError: If `input_x` is neither Tensor nor Number.
-        TypeError: If `type` is not a Number.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
-        >>> input_x = Tensor(input_np)
-        >>> type_dst = mindspore.int32
-        >>> cast = ops.Cast()
-        >>> output = cast(input_x, type_dst)
-        >>> print(output.dtype)
-        Int32
-        >>> print(output.shape)
-        (2, 3, 4, 5)
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Cast"""
-        self.init_prim_io_names(inputs=['x', 'dst_type'], outputs=['output'])
-
-    def check_elim(self, x, dtype):
-        if isinstance(x, (Tensor, numbers.Number, Parameter)):
-            if isinstance(x, Parameter):
-                data = x.data
-                if data.dtype == dtype:
-                    return (True, x)
-            if isinstance(x, Tensor) and x.dtype == dtype and not PackFunc.is_tracing():
-                x = Tensor(x)
-                x.set_cast_dtype()
-                return (True, x)
-            if isinstance(x, numbers.Number):
-                return (True, Tensor(x, dtype=dtype))
-        return (False, None)
-
-    def infer_value(self, x, dst_type):
-        if x is None:
-            return None
-        src_type = mstype.get_py_obj_dtype(x)
-        validator.check_subclass("input_x", src_type,
-                                 [mstype.tensor_type, mstype.number], self.name)
-        validator.check_subclass("type", dst_type, mstype.number, self.name)
-
-        if isinstance(src_type, type(mstype.tensor_type)):
-            src_type = src_type.element_type()
-        if isinstance(dst_type, type(mstype.tensor_type)):
-            dst_type = dst_type.element_type()
-
-        value = None
-        np_dst_type = mstype.dtype_to_nptype(dst_type)
-        if isinstance(x, (int, float)):
-            value = Tensor(np.array(x).astype(np_dst_type), dtype=dst_type)
-        else:
-            if x.dtype == mstype.bfloat16:
-                cpu_cast = Cast().set_device("CPU")
-                x = cpu_cast(x, mstype.float32)
-            value = Tensor(x.asnumpy().astype(np_dst_type), dtype=dst_type)
-        return value
-
-
 class Im2Col(Primitive):
     r"""
     Extracts sliding local blocks from a batched input tensor.
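The deleted `Cast.check_elim`/`infer_value` pair folded casts at compile time: a no-op cast (dtype already matching) returned its input directly, and a constant input was converted eagerly through numpy. A rough numpy analogue of the value folding, illustrative only (the real logic above also special-cases `Parameter` and `bfloat16`):

```python
import numpy as np

# Constant-folding a Cast(float32 -> int32): convert the value eagerly
# instead of leaving a runtime cast node in the graph.
x = np.array([[1.7, -0.2], [3.0, 4.9]], dtype=np.float32)
folded = x.astype(np.int32)   # mirrors Tensor(x.asnumpy().astype(np_dst_type))
print(folded.dtype, folded.shape)
```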
@@ -434,7 +311,6 @@ class Im2Col(Primitive):

           - If one int, :math:`pad\_height = pad\_width`.
           - If two int, :math:`pad\_height = pads[0]`, :math:`pad\_width = pads[1]`.
-          - If four int, :math:`pads = [pad\_height\_top, pad\_height\_bottom, pad\_width\_left, pad\_width\_right]`.

     Inputs:
         - **x** (Tensor) - input tensor, only 4-D input tensors (batched image-like tensors) are supported.
@@ -499,11 +375,10 @@ class Im2Col(Primitive):

 class Col2Im(Primitive):
     r"""
-
+    Rearranges a row vector to an image. It is
     usually used to reconstruct an image from a set of image patches(or sliding local blocks).

-    Consider
-    e.g., patches of images, of shape :math:`(N, C, \prod(\text{kernel_size}), L)`,
+    Consider an input Tensor of shape :math:`(N, C, \prod(\text{kernel_size}), L)`,
     where :math:`N` is batch dimension, :math:`C` is channel dimension,
     :math:`\prod(\text{kernel_size})` is the block size, and
     :math:`L` is the total number of blocks. This operation combines these
@@ -590,149 +465,6 @@ class Col2Im(Primitive):
         self.add_prim_attr('stride', self.stride)


-class Reshape(PrimitiveWithCheck):
-    """
-    Rearranges the input Tensor based on the given shape.
-
-    Refer to :func:`mindspore.ops.reshape` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-        - **input_shape** (tuple[int]) - The input tuple is constructed by multiple
-          integers, i.e., :math:`(y_1, y_2, ..., y_S)`.
-
-    Outputs:
-        Tensor, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
-        >>> reshape = ops.Reshape()
-        >>> output = reshape(input_x, (3, 2))
-        >>> print(output)
-        [[-0.1  0.3]
-         [ 3.6  0.4]
-         [ 0.5 -3.2]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Reshape"""
-        self.init_prim_io_names(inputs=['tensor', 'shape'], outputs=['output'])
-
-    def infer_value(self, x, shape):
-        """infer value"""
-        # for shape is not constant
-        if shape is None or self.none_in_tuple_or_list(shape) or x is None:
-            return None
-
-        if isinstance(shape, (Tensor, Tensor_)):
-            validator.check_tensor_dtype_valid("shape", mstype.TensorType(shape.dtype),
-                                               [mstype.int32, mstype.int64], self.name)
-            shape = shape.asnumpy().tolist()
-        else:
-            validator.check_value_type("shape", shape, [tuple], self.name)
-            shape = list(shape)
-
-        neg_index = -1
-        dim_prod = 1
-        for i, shp_i in enumerate(shape):
-            validator.check_value_type("shape[%d]" % i, shp_i, [int], self.name)
-            if shp_i == -1:
-                if neg_index != -1:
-                    raise ValueError(f"For '{self.name}', there can be at most one '-1' in 'input_shape', "
-                                     f"but got {shape}.")
-                neg_index = i
-            else:
-                dim_prod *= shp_i
-        out = None
-        if not is_shape_unknown(x.shape):
-            x_shp = x.shape
-            if dim_prod <= 0:
-                raise ValueError(f"For '{self.name}', the shape of 'input_x' is {x_shp}, "
-                                 f"the value of 'input_shape' is {shape}. "
-                                 f"The product of 'input_shape' should > 0, but got {dim_prod}.")
-            arr_prod = np.prod(x_shp)
-            if neg_index != -1:
-                shape[neg_index] = int(arr_prod // dim_prod)
-                dim_prod *= shape[neg_index]
-            if dim_prod != arr_prod:
-                raise ValueError(f"For '{self.name}', the product of the 'input_x' shape "
-                                 f"should be equal to product of 'input_shape', but got product of the"
-                                 f" shape of 'input_x': {arr_prod}, product of 'input_shape': {dim_prod}.")
-            out = Tensor(x.asnumpy().reshape(shape))
-        return out
-
-    def none_in_tuple_or_list(self, x):
-        return isinstance(x, (tuple, list)) and None in x
-
-
-class Shape(Primitive):
-    """
-    Returns the shape of the input tensor.
-
-    Refer to :func:`mindspore.ops.shape` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-
-    Outputs:
-        tuple[int], the output tuple is constructed by multiple integers,
-        :math:`(x_1, x_2, ..., x_R)`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
-        >>> shape = ops.Shape()
-        >>> output = shape(input_x)
-        >>> print(output)
-        (3, 2, 1)
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Shape"""
-
-    def __call__(self, x):
-        if isinstance(x, (Tensor, COOTensor, CSRTensor, Tensor_)):
-            return x.shape
-        raise TypeError(f"For primitive[{self.name}], the input argument must be Tensor, but got {type(x)}.")
-
-
-class TensorShape(Primitive):
-    """
-    Returns the shape of the input tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
-        >>> shape = ops.TensorShape()
-        >>> output = shape(input_x)
-        >>> print(output)
-        [3 2 1]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """init Shape"""
-        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
-
-
 class Unsqueeze(PrimitiveWithCheck):
     """Unsqueeze"""

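The deleted `Reshape.infer_value` carried the `-1` inference rule: at most one `-1` is allowed in the target shape, and it is resolved so that the product of the target shape equals the input's element count. A small numpy sketch of that rule, illustrative rather than the MindSpore implementation:

```python
import numpy as np

x = np.arange(12.0, dtype=np.float32)
shape = [3, -1]                      # the single -1 entry gets inferred
dim_prod = int(np.prod([s for s in shape if s != -1]))
shape[shape.index(-1)] = x.size // dim_prod
assert tuple(shape) == (3, 4)
out = x.reshape(shape)               # mirrors Tensor(x.asnumpy().reshape(shape))
```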
@@ -788,48 +520,6 @@ class Squeeze(Primitive):
         self.add_prim_attr("axis", (axis,))


-class Transpose(Primitive):
-    """
-    Permutes the dimensions of the input tensor according to input permutation.
-
-    Refer to :func:`mindspore.ops.transpose` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-        - **input_perm** (tuple[int]) - The permutation to be converted. The elements in `input_perm` are composed of
-          the indexes of each dimension of `input_x`. The length of `input_perm` and the shape of `input_x` must be
-          the same. Only constant value is allowed. Must be in the range [0, rank(input_x)).
-
-    Outputs:
-        Tensor, the type of output tensor is the same as `input_x` and the shape of output tensor is decided by the
-        shape of `input_x` and the value of `input_perm`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
-        >>> input_perm = (0, 2, 1)
-        >>> transpose = ops.Transpose()
-        >>> output = transpose(input_x, input_perm)
-        >>> print(output)
-        [[[ 1.  4.]
-          [ 2.  5.]
-          [ 3.  6.]]
-         [[ 7. 10.]
-          [ 8. 11.]
-          [ 9. 12.]]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Transpose"""
-        self.init_prim_io_names(inputs=['x', 'perm'], outputs=['output'])
-
-
 class ConjugateTranspose(Primitive):
     """
     Calculate the conjugate matrix of input x which has been transposed according to input perm.
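For reference, the behavior documented in the deleted `Transpose` example is exactly numpy's axis permutation, which makes it easy to sanity-check the relocated operator. This sketch reproduces the deleted docstring example:

```python
import numpy as np

x = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.float32)
out = np.transpose(x, (0, 2, 1))   # same values as ops.Transpose()(x, (0, 2, 1))
print(out.shape)                   # (2, 3, 2)
```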
@@ -999,99 +689,6 @@ class UniqueConsecutive(Primitive):
         self.add_prim_attr("axis", axis)


-class Gather(Primitive):
-    r"""
-    Returns the slice of the input tensor corresponding to the elements of `input_indices` on the specified `axis`.
-
-    Refer to :func:`mindspore.ops.gather` for more details.
-
-    Args:
-        batch_dims (int, optional): Specifies the number of batch dimensions.
-            It must be less than or equal to the rank of `input_indices`. Default: ``0`` .
-
-    Inputs:
-        - **input_params** (Tensor) - The original Tensor. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-        - **input_indices** (Tensor) - Index tensor to be sliced, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
-          Specifies the indices of elements of the original Tensor. The data type can be int32 or int64.
-        - **axis** (Union(int, Tensor[int])) - Specifies the dimension index to gather indices.
-          When axis is Tensor, the size must be 1.
-
-    Outputs:
-        Tensor, the shape of tensor is
-        :math:`input\_params.shape[:axis] + input\_indices.shape + input\_params.shape[axis + 1:]`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> # case1: input_indices is a Tensor with shape (5, ).
-        >>> input_params = Tensor(np.array([1, 2, 3, 4, 5, 6, 7]), mindspore.float32)
-        >>> input_indices = Tensor(np.array([0, 2, 4, 2, 6]), mindspore.int32)
-        >>> axis = 0
-        >>> output = ops.Gather()(input_params, input_indices, axis)
-        >>> print(output)
-        [1. 3. 5. 3. 7.]
-        >>> # case2: input_indices is a Tensor with shape (2, 2). When the input_params has one dimension,
-        the output shape is equal to the input_indices shape.
-        >>> input_indices = Tensor(np.array([[0, 2], [2, 6]]), mindspore.int32)
-        >>> axis = 0
-        >>> output = ops.Gather()(input_params, input_indices, axis)
-        >>> print(output)
-        [[ 1. 3.]
-         [ 3. 7.]]
-        >>> # case3: input_indices is a Tensor with shape (2, ). input_params is a Tensor with shape (3, 4) and axis is 0.
-        >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
-        >>> input_indices = Tensor(np.array([0, 2]), mindspore.int32)
-        >>> axis = 0
-        >>> output = ops.Gather()(input_params, input_indices, axis)
-        >>> print(output)
-        [[1. 2. 3. 4.]
-         [9. 10. 11. 12.]]
-        >>> # case4: input_indices is a Tensor with shape (2, ).
-        >>> # input_params is a Tensor with shape (3, 4) and axis is 1, batch_dims is 1.
-        >>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
-        >>> input_indices = Tensor(np.array([0, 2, 1]), mindspore.int32)
-        >>> axis = 1
-        >>> batch_dims = 1
-        >>> output = ops.Gather(batch_dims)(input_params, input_indices, axis)
-        >>> print(output)
-        [ 1. 7. 10.]
-    """
-
-    @prim_attr_register
-    def __init__(self, batch_dims=0):
-        """Initialize Gather"""
-        validator.check_value_type("batch_dims", batch_dims, [int], self.name)
-        self.add_prim_attr("batch_dims", batch_dims)
-        self.init_prim_io_names(inputs=['params', 'indices', 'axis'], outputs=['output'])
-
-
-class GatherV2(PrimitiveWithCheck):
-    """
-    Same as operator Gather. GatherV2 will be deprecated in the future.
-    Please use Gather instead.
-    """
-
-    @deprecated("1.1", "Gather", True)
-    @prim_attr_register
-    def __init__(self):
-        """Initialize GatherV2"""
-        self.add_prim_attr("batch_dims", 0)
-        self.init_prim_io_names(inputs=['params', 'indices', 'axis'], outputs=['output'])
-
-    def __check__(self, params, indices, axis):
-        validator.check_subclass("params", params['dtype'], mstype.tensor_type, self.name)
-        validator.check_tensor_dtype_valid("indices", indices['dtype'], mstype.int_type, self.name)
-        validator.check_subclass("axis", axis['dtype'], [mstype.number], self.name)
-        axis_v = axis['value']
-        validator.check_value_type('axis', axis_v, [int], self.name)
-        rank = len(params['shape'])
-        validator.check_int_range(axis_v, -rank, rank, validator.INC_LEFT, "axis", self.name)
-
-
 class SparseGatherV2(Primitive):
     """
     Returns a slice of input tensor based on the specified indices and axis.
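The deleted `Gather` docstring states the output-shape rule `input_params.shape[:axis] + input_indices.shape + input_params.shape[axis + 1:]` (for `batch_dims=0`). A quick numpy check of that rule using `np.take`, which gathers along an axis the same way:

```python
import numpy as np

params = np.zeros((3, 4, 5))
indices = np.array([[0, 2], [1, 1]])
axis = 1
out = np.take(params, indices, axis=axis)   # gather along `axis`
assert out.shape == params.shape[:axis] + indices.shape + params.shape[axis + 1:]
print(out.shape)                             # (3, 2, 2, 5)
```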
@@ -1173,141 +770,19 @@ class Padding(Primitive):

 class UniqueWithPad(Primitive):
     """
-
-
-    The basic function is the same as the Unique operator, but the UniqueWithPad operator adds a Pad function.
-    The returned tuple(`y`, `idx`) after the input Tensor `x` is processed by the unique operator,
-    in which the shapes of `y` and `idx` are mostly not equal. Therefore, in order to solve the above situation,
-    the UniqueWithPad operator will fill the `y` Tensor with the `pad_num` specified by the user
-    to make it have the same shape as the Tensor `idx`.
-
-    Refer to :func:`mindspore.ops.unique_with_pad` for more details.
-
-    Inputs:
-        - **x** (Tensor) - The tensor need to be unique. Must be 1-D vector with types: int32, int64.
-        - **pad_num** (int) - Pad num. The data type is an int.
-
-    Outputs:
-        tuple(Tensor), tuple of 2 tensors, `y` and `idx`.
-
-        - y (Tensor) - The unique elements filled with pad_num, the shape and data type same as `x`.
-        - idx (Tensor) - The index of each value of `x` in the unique output `y`, the shape and data type same as `x`.
+    'ops.UniqueWithPad' is deprecated from version 2.4 and will be removed in a future version.

     Supported Platforms:
-
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1, 1, 2, 2, 3, 3, 4, 5]), mindspore.int32)
-        >>> pad_num = 8
-        >>> output = ops.UniqueWithPad()(x, pad_num)
-        >>> print(output)
-        (Tensor(shape=[8], dtype=Int32, value= [1, 2, 3, 4, 5, 8, 8, 8]),
-        Tensor(shape=[8], dtype=Int32, value= [0, 0, 1, 1, 2, 2, 3, 4]))
+        Deprecated
     """

+    @deprecated("2.4", "ops.Unique and ops.PadV3", False)
     @prim_attr_register
     def __init__(self):
         """init UniqueWithPad"""
         self.init_prim_io_names(inputs=['x', 'pad_num'], outputs=['y', 'idx'])


-class Split(Primitive):
-    r"""
-    Splits the input tensor into output_num of tensors along the given axis and output numbers.
-
-    Refer to :func:`mindspore.ops.split` for more details.
-
-    Args:
-        axis (int): Index of the split position. Default: ``0`` .
-        output_num (int): The number of output tensors. Must be positive int. Default: ``1`` .
-
-    Inputs:
-        - **input_x** (Tensor) - The shape of tensor is :math:`(x_0, x_1, ..., x_{R-1})`, R >= 1.
-
-    Outputs:
-        tuple[Tensor], the shape of each output tensor is the same, which is
-        :math:`(x_0, x_1, ..., x_{axis}/{output\_num}, ..., x_{R-1})`.
-        And the data type is the same as `input_x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> split = ops.Split(1, 2)
-        >>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32)
-        >>> print(x)
-        [[1 1 1 1]
-         [2 2 2 2]]
-        >>> output = split(x)
-        >>> print(output)
-        (Tensor(shape=[2, 2], dtype=Int32, value=
-        [[1, 1],
-         [2, 2]]), Tensor(shape=[2, 2], dtype=Int32, value=
-        [[1, 1],
-         [2, 2]]))
-        >>> split = ops.Split(1, 4)
-        >>> output = split(x)
-        >>> print(output)
-        (Tensor(shape=[2, 1], dtype=Int32, value=
-        [[1],
-         [2]]), Tensor(shape=[2, 1], dtype=Int32, value=
-        [[1],
-         [2]]), Tensor(shape=[2, 1], dtype=Int32, value=
-        [[1],
-         [2]]), Tensor(shape=[2, 1], dtype=Int32, value=
-        [[1],
-         [2]]))
-    """
-
-    @prim_attr_register
-    def __init__(self, axis=0, output_num=1):
-        """Initialize Split"""
-        validator.check_value_type("axis", axis, [int], self.name)
-        validator.check_value_type("output_num", output_num, [int], self.name)
-        validator.check_positive_int(output_num, "output_num", self.name)
-        self.axis = axis
-        self.output_num = output_num
-        self.add_prim_attr('num_split', self.output_num)
-
-
-class Rank(Primitive):
-    """
-    Returns the rank of a tensor.
-
-    Refer to :func:`mindspore.ops.rank` for more details.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
-        >>> rank = ops.Rank()
-        >>> output = rank(input_tensor)
-        >>> print(output)
-        2
-        >>> print(type(output))
-        <class 'int'>
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Rank"""
-
-    def __call__(self, x):
-        if not isinstance(x, (Tensor, Tensor_)):
-            raise TypeError("the input x must be Tensor!")
-        return len(x.shape)
-
-
 class Size(Primitive):
     r"""
     Returns a Scalar of type int that represents the size of the input Tensor and the total number of elements in the
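Per the new `@deprecated("2.4", "ops.Unique and ops.PadV3", False)` decorator, the suggested replacement is to deduplicate with `ops.Unique` and pad the result back to a fixed length. A rough migration sketch; the `paddings` layout for `PadV3`'s constant mode is an assumption here, so check the 2.4 docs before relying on it:

```python
import mindspore
import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.array([1, 1, 2, 2, 3, 3, 4, 5]), mindspore.int32)
pad_num = 8

# Old (deprecated in 2.4): y, idx = ops.UniqueWithPad()(x, pad_num)
y, idx = ops.Unique()(x)                        # y: [1 2 3 4 5], idx as before
pad_len = x.shape[0] - y.shape[0]               # pad y back to x's length
paddings = Tensor(np.array([0, pad_len]), mindspore.int32)
y_padded = ops.PadV3()(y, paddings, Tensor(pad_num, mindspore.int32))
```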
@@ -1317,7 +792,7 @@ class Size(Primitive):

     Inputs:
         - **input_x** (Tensor) - Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
-          `number <https://www.mindspore.cn/docs/en/
+          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.

     Outputs:
         int. A scalar representing the elements' size of `input_x`, tensor is the number of elements
@@ -1504,7 +979,7 @@ class MatrixDiagPartV3(Primitive):
 class MatrixSetDiagV3(Primitive):
     r"""
     Updates the diagonal part of a batched tensor.
-    It takes
+    It takes a Tensor `x` and `diagonal` as input and returns a Tensor in which
     the specified diagonal values in the innermost matrices will be replaced
     by the values in the `diagonal`.

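The reworded docstring describes replacing the diagonal of each innermost matrix with the values in `diagonal`. A minimal numpy sketch of the unbatched, main-diagonal case, illustrative only (the real operator also handles batching and a `k` diagonal offset):

```python
import numpy as np

x = np.zeros((3, 3), dtype=np.float32)
diagonal = np.array([1.0, 2.0, 3.0], dtype=np.float32)
out = x.copy()
np.fill_diagonal(out, diagonal)   # replace the main diagonal with `diagonal`
print(out)
```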
@@ -1770,160 +1245,23 @@ class FillV2(PrimitiveWithCheck):
         self.init_prim_io_names(inputs=['shape', 'value'], outputs=['y'])

     def check_elim(self, dims, x):
-
-
-
-        if x_is_invalid or dims_is_invalid:
+        if x is None or (not isinstance(x, (Tensor, Tensor_))) or (x.shape != ()) or \
+                dims is None or (isinstance(dims, (tuple, list)) and dims) or \
+                isinstance(dims, (Tensor, Tensor_)):
             return (False, None)
         return (True, x)

     def infer_value(self, dims, x):
-
-
-
-        if x is None or dims_is_invalid:
+        if x is None or dims is None or isinstance(dims, (Tensor, Tensor_)):
+            return None
+        if isinstance(dims, (tuple, list)) and None in dims:
             return None
-
-
-
-
-
-
-
-    Refer to :func:`mindspore.ops.ones` for more details.
-
-    Inputs:
-        - **shape** (Union[tuple[int], int]) - The specified shape of output tensor.
-        - **type** (:class:`mindspore.dtype`) - The specified type of output tensor.
-
-    Outputs:
-        Tensor, has the same type and shape as input shape value.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import ops
-        >>> ones = ops.Ones()
-        >>> output = ones((2, 2), mindspore.float32)
-        >>> print(output)
-        [[1. 1.]
-         [1. 1.]]
-        >>> output = ones((3, 3), mindspore.float32)
-        >>> print(output)
-        [[1. 1. 1.]
-         [1. 1. 1.]
-         [1. 1. 1.]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Ones"""
-
-
-class Zeros(Primitive):
-    r"""
-    Zeros will be deprecated in the future. Please use class `mindspore.ops.zeros` instead.
-
-    Creates a tensor filled with value zeros.
-
-    Creates a tensor with shape described by the first argument and
-    fills it with value zeros in type of the second argument.
-
-    Inputs:
-        - **shape** (Union[tuple[int], int]) - The specified shape of output tensor.
-        - **type** (mindspore.dtype) - The specified type of output tensor.
-
-    Outputs:
-        Tensor, has the same type and shape as input shape value.
-
-    Raises:
-        TypeError: If `shape` is neither int nor tuple.
-        TypeError: If `shape` is a tuple whose elements are not all int.
-
-    Supported Platforms:
-        Deprecated
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import ops
-        >>> zeros = ops.Zeros()
-        >>> output = zeros((2, 2), mindspore.float32)
-        >>> print(output)
-        [[0. 0.]
-         [0. 0.]]
-
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Zeros"""
-
-
-class OnesLike(Primitive):
-    """
-    Returns a Tensor with a value of 1 and its shape and data type is the same as the input.
-
-    Refer to :func:`mindspore.ops.ones_like` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - Tensor of any dimension.
-
-    Outputs:
-        Tensor, has the same shape and type as `input_x` but filled with ones.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> oneslike = ops.OnesLike()
-        >>> input_x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
-        >>> output = oneslike(input_x)
-        >>> print(output)
-        [[1 1]
-         [1 1]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize OnesLike"""
-        self.init_prim_io_names(inputs=['x'], outputs=['y'])
-
-
-class ZerosLike(Primitive):
-    """
-    Returns a Tensor with a value of 0 and its shape and data type is the same as the input.
-
-    Inputs:
-        - **input_x** (Tensor) - Input Tensor of any dimension.
-
-    Outputs:
-        Tensor, has the same shape and data type as `input_x` but filled with zeros.
-
-    Raises:
-        TypeError: If `input_x` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> zeroslike = ops.ZerosLike()
-        >>> input_x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
-        >>> output = zeroslike(input_x)
-        >>> print(output)
-        [[0. 0.]
-         [0. 0.]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize ZerosLike"""
-        self.init_prim_io_names(inputs=['x'], outputs=['y'])
+        if 0 in dims:
+            init_func = Zero()
+            init_func.__enable_zero_dim__ = True
+            out = Tensor(shape=dims, dtype=x.dtype, init=init_func)
+            return out
+        return Tensor(np.full(dims, x.asnumpy()))


 class TupleToArray(PrimitiveWithInfer):
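The rewritten `FillV2.check_elim` now eliminates the op only when the value is a 0-D Tensor and the shape is an empty tuple or list, and the new `infer_value` folds constant inputs either through a `Zero` initializer (when some dim is 0) or through `np.full`. A tiny numpy illustration of what that constant fold produces:

```python
import numpy as np

# FillV2(dims, x) with constant inputs folds to a `dims`-shaped tensor whose
# elements all equal the 0-D value `x`, i.e. np.full semantics.
dims = (2, 3)
x = np.array(7.0, dtype=np.float32)   # 0-D "value" input
out = np.full(dims, x)                # mirrors Tensor(np.full(dims, x.asnumpy()))
print(out)
```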
@@ -1982,42 +1320,6 @@ class TupleToArray(PrimitiveWithInfer):
         return _run_op(self, self.name, args)


-class ScalarToTensor(PrimitiveWithInfer):
-    """
-    Converts a scalar to a `Tensor`, and converts the data type to the specified type.
-
-    Refer to :func:`mindspore.ops.scalar_to_tensor` for more details.
-
-    Inputs:
-        - **input_x** (Union[int, float]) - The input is a scalar. Only constant value is allowed.
-        - **dtype** (mindspore.dtype) - The target data type. Default: ``mindspore.float32`` . Only
-          constant value is allowed.
-
-    Outputs:
-        Tensor. 0-D Tensor and the content is the input.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import ops
-        >>> op = ops.ScalarToTensor()
-        >>> data = 1
-        >>> output = op(data, mindspore.float32)
-        >>> print(output)
-        1.0
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(inputs=['input_scalar', 'dtype'], outputs=['output_data'])
-
-    def __call__(self, x, dtype=mstype.float32):
-        validator.check_value_type("x", x, [bool, int, float], self.name)
-        validator.check_subclass("dtype", dtype, mstype.number, self.name)
-        data_type = mstype.dtype_to_nptype(dtype)
-        return Tensor(np.array(x, data_type), dtype=dtype)


 class InvertPermutation(PrimitiveWithInfer):
@@ -2099,94 +1401,6 @@ class InvertPermutation(PrimitiveWithInfer):
                 'value': tuple(y)}


-class Argmax(Primitive):
-    """
-    Returns the indices of the maximum value along a specified `axis` of a Tensor.
-
-    Refer to :func:`mindspore.ops.argmax` for more details.
-
-    Args:
-        axis (int): Axis where the Argmax operation applies to. Default: ``-1`` .
-        output_type (:class:`mindspore.dtype`): Output data type.
-            Supported types: ``mstype.int32`` , ``mstype.int64`` . Default: ``mstype.int32`` .
-
-    Inputs:
-        - **input_x** (Tensor) - The input tensor. :math:`(N, *)` where :math:`*` means, any number of additional
-          dimensions.
-
-    Outputs:
-        Tensor, indices of the max value of input tensor across the axis.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
-        >>> output = ops.Argmax(output_type=mindspore.int32)(input_x)
-        >>> print(output)
-        [1 0 0]
-    """
-
-    @prim_attr_register
-    def __init__(self, axis=-1, output_type=mstype.int32):
-        """Initialize Argmax"""
-        self.init_prim_io_names(inputs=['x'], outputs=['output'])
-        validator.check_value_type("axis", axis, [int], self.name)
-        validator.check_types_same_and_valid({'output': output_type}, [mstype.int32, mstype.int64], self.name)
-        self.axis = axis
-        self.add_prim_attr('output_type', output_type)
-
-
-class Argmin(Primitive):
-    """
-    Returns the indices of the minimum value along a specified `axis` of a Tensor.
-
-    If the shape of input tensor is :math:`(x_1, ..., x_N)`, the shape of the output tensor is
-    :math:`(x_1, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
-
-    Args:
-        axis (int): Axis where the Argmin operation applies to. Default: ``-1`` .
-        output_type (:class:`mindspore.dtype`): Output data type.
-            Supported types: ``mstype.int32`` , ``mstype.int64`` . Default: ``mstype.int32`` .
-
-    Inputs:
-        - **input_x** (Tensor) - Input tensor.
-          The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
-
-    Outputs:
-        Tensor, whose dtype is determined by `output_type`.
-
-    Raises:
-        TypeError: If `axis` is not an int.
-        TypeError: If `output_type` is neither int32 nor int64.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)
-        >>> index = ops.Argmin()(input_x)
-        >>> print(index)
-        2
-    """
-
-    @prim_attr_register
-    def __init__(self, axis=-1, output_type=mstype.int32):
-        """Initialize Argmin"""
-        self.init_prim_io_names(inputs=['x'], outputs=['output'])
-        validator.check_value_type("axis", axis, [int], self.name)
-        validator.check_type_name("output_type", output_type, [mstype.int32, mstype.int64], self.name)
-        self.axis = axis
-        self.add_prim_attr('output_type', output_type)
-        self.add_prim_attr('axis', axis)
-
-
 class ArgminV2(Primitive):
     """
     Returns the indices of the minimum value of a tensor across the axis.
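The deleted `Argmax` docstring example maps directly onto numpy's argmax, which gives a quick way to check the documented result against the relocated operator:

```python
import numpy as np

x = np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]], dtype=np.float32)
print(np.argmax(x, axis=-1))   # [1 0 0], matching the deleted docstring example
```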
@@ -2245,328 +1459,6 @@ class ArgminV2(Primitive):
         return output


-class ArgMaxWithValue(Primitive):
-    """
-    Calculates the maximum value along with the given axis for the input tensor, and returns the maximum values and
-    indices.
-
-    Note:
-        In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
-
-    .. warning::
-        - If there are multiple maximum values, the index of the first maximum value is used.
-        - The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "x".
-
-    Also see :func:`mindspore.ops.max`.
-
-    Args:
-        axis (int): The dimension to reduce. Default: ``0`` .
-        keep_dims (bool): Whether to reduce dimension, if ``True`` , the output will keep same dimension with the
-            input, the output will reduce dimension if ``false`` . Default: ``False`` .
-
-    Inputs:
-        - **x** (Tensor) - The input tensor, can be any dimension. Set the shape of input tensor as
-          :math:`(x_1, x_2, ..., x_N)`.
-
-    Outputs:
-        tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the maximum value of the input
-        tensor.
-
-        - **index** (Tensor) - The index for the maximum value of the input tensor, with dtype int32. If `keep_dims`
-          is ``True`` , the shape of output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`.
-          Otherwise, the shape is :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
-        - **values** (Tensor) - The maximum value of input tensor, with the same shape as index, and same dtype as x.
-
-    Raises:
-        TypeError: If `x` is not Tensor.
-        TypeError: If `keep_dims` is not a bool.
-        TypeError: If `axis` is not an int.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
-        >>> index, output = ops.ArgMaxWithValue()(input_x)
-        >>> print(index, output)
-        3 0.7
-        >>> index, output = ops.ArgMaxWithValue(keep_dims=True)(input_x)
-        >>> print(index, output)
-        [3] [0.7]
-    """
-
-    @prim_attr_register
-    def __init__(self, axis=0, keep_dims=False):
-        """Initialize ArgMaxWithValue"""
-        self.init_prim_io_names(inputs=['x'], outputs=['index', 'values'])
-        validator.check_value_type("axis", axis, [int], self.name)
-        validator.check_value_type('keep_dims', keep_dims, [bool], self.name)
-        self.axis = axis
-        self.keep_dims = keep_dims
-        self.add_prim_attr('dimension', self.axis)
-
-
-class ArgMinWithValue(Primitive):
-    """
-    Calculates the minimum value along with the given axis for the input tensor, and returns the minimum values and
-    indices.
-
-    Note:
-        In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
-
-    .. warning::
-        - If there are multiple minimum values, the index of the first minimum value is used.
-        - The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "x".
-
-    Also see :func:`mindspore.ops.min`.
-
-    Args:
-        axis (int): The dimension to reduce. Default: ``0`` .
-        keep_dims (bool): Whether to reduce dimension, if ``True`` the output will keep the same dimension as the
-            input, the output will reduce dimension if ``false`` . Default: ``False`` .
-
-    Inputs:
-        - **x** (Tensor) - The input tensor, can be any dimension. Set the shape of input tensor as
-          :math:`(x_1, x_2, ..., x_N)` .Complex tensor is not supported.
-
-    Outputs:
-        tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the minimum value of the input
-        tensor.
-
-        - **index** (Tensor) - The index for the minimum value of the input tensor, with dtype int32. If `keep_dims`
-          is ``True`` , the shape of output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`.
-          Otherwise, the shape is :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
-        - **values** (Tensor) - The minimum value of input tensor, with the same
-          shape as `index`, and same dtype as `x`.
-
-    Raises:
-        TypeError: If `x` is not Tensor.
-        TypeError: If `keep_dims` is not a bool.
-        TypeError: If `axis` is not an int.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
-        >>> index, output = ops.ArgMinWithValue()(x)
-        >>> print(index, output)
-        0 0.0
-        >>> index, output = ops.ArgMinWithValue(keep_dims=True)(x)
-        >>> print(index, output)
-        [0] [0.0]
-    """
-
-    @prim_attr_register
-    def __init__(self, axis=0, keep_dims=False):
-        """Initialize ArgMinWithValue"""
-        self.init_prim_io_names(inputs=['x'], outputs=['index', 'values'])
-        validator.check_value_type("axis", axis, [int], self.name)
-        validator.check_value_type('keep_dims', keep_dims, [bool], self.name)
-        self.axis = axis
-        self.keep_dims = keep_dims
-        self.add_prim_attr('dimension', self.axis)
-
-
-class Tile(PrimitiveWithInfer):
-    r"""
-    Replicates an input tensor with given multiples times.
-
-    Refer to :func:`mindspore.ops.tile` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - 1-D or higher dimensional Tensor. Set the shape of input tensor as
-          :math:`(x_1, x_2, ..., x_S)` .
-        - **multiples** (tuple[int]) - The parameter that specifies the number of replications,
-          the parameter type is tuple, and the data type is int, i.e., :math:`(y_1, y_2, ..., y_S)`.
-          The length of `multiples` cannot be smaller than the length of the shape of `input_x`.
-          Only constant value is allowed.
-
-    Outputs:
-        Tensor, has the same data type as the `input_x`. Suppose the length of `multiples` is `d`,
-        the dimension of `input_x` is `input_x.dim`, and the shape of `input_x` is :math:`(x_1, x_2, ..., x_S)`.
-
-        - If `input_x.dim = d`, then the shape of their corresponding positions can be multiplied, and
-          the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_S)`.
-        - If `input_x.dim < d`, fill in multiple 1 in the length of the shape of `input_x` until their
-          lengths are consistent. Such as set the shape of `input_x` as :math:`(1, ..., x_1, x_2, ..., x_S)`,
-          then the shape of their corresponding positions can be multiplied, and the shape of Outputs is
-          :math:`(1*y_1, ..., x_R*y_R, x_S*y_S)`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> tile = ops.Tile()
-        >>> input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
-        >>> multiples = (2, 3)
-        >>> output = tile(input_x, multiples)
-        >>> print(output)
-        [[1. 2. 1. 2. 1. 2.]
-         [3. 4. 3. 4. 3. 4.]
-         [1. 2. 1. 2. 1. 2.]
-         [3. 4. 3. 4. 3. 4.]]
-        >>> multiples = (2, 3, 2)
-        >>> output = tile(input_x, multiples)
-        >>> print(output)
-        [[[1. 2. 1. 2.]
-          [3. 4. 3. 4.]
-          [1. 2. 1. 2.]
-          [3. 4. 3. 4.]
-          [1. 2. 1. 2.]
-          [3. 4. 3. 4.]]
-         [[1. 2. 1. 2.]
-          [3. 4. 3. 4.]
-          [1. 2. 1. 2.]
-          [3. 4. 3. 4.]
-          [1. 2. 1. 2.]
-          [3. 4. 3. 4.]]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Tile"""
-        self.init_prim_io_names(inputs=['x', 'multiples'], outputs=['output'])
-
-    def check_elim(self, *args):
-        base_tensor, multiplier = args
-        if PackFunc.is_tracing() and not PackFunc.current.is_pynative_mode:
-            return (False, None)
-        if not isinstance(base_tensor, Tensor):
-            raise TypeError(f"For '{self.name}', the type of 'input_x' must be Tensor, "
-                            f"but got {type(base_tensor).__name__}.")
-        if not isinstance(multiplier, tuple):
-            raise TypeError(f"For '{self.name}', the type of 'multiplier' must be tuple, "
-                            f"but got {type(multiplier).__name__}.")
-
-        if all(v == 1 for v in multiplier) and len(base_tensor.shape) >= len(multiplier):
-            ret = Identity()(base_tensor)
-            return (True, ret)
-        return (False, None)
-
-    def _get_shape_and_range(self, x, multiples):
-        """calculate tile shape and value"""
-        x_shp = x['shape']
-        if is_dim_unknown(x_shp):
-            return {'shape': x_shp}, None
-        multiples_v = multiples['value']
-        value = None
-        len_sub = len(multiples_v) - len(x_shp)
-        multiples_w = None
-        if len_sub == 0:
-            multiples_w = multiples_v
-        if len_sub > 0:
-            for _ in range(0, len_sub):
-                x_shp.insert(0, 1)
-            multiples_w = multiples_v
-        elif len_sub < 0:
-            raise ValueError(f"For '{self.name}', the length of 'multiples' can not be smaller than "
-                             f"the dimension of 'input_x', but got length of 'multiples': {len(multiples_v)} "
-                             f"and dimension of 'input_x': {len(x_shp)}.")
-
-        for i, a in enumerate(multiples_w):
-            if x_shp[i] >= 0:
-                x_shp[i] *= a
-        if x['value'] is not None:
-            value = Tensor(np.tile(x['value'].asnumpy(), multiples_w))
-        out_shape = {
-            'shape': x_shp
-        }
-        return out_shape, value
-
-    def __infer__(self, x, multiples):
-        multiples_v = multiples['value']
-        if multiples_v is None or None in multiples_v:
-            if 'max_value' not in multiples or 'min_value' not in multiples:
-                if multiples_v is not None:
-                    shape = [len(multiples['shape'])]
-                else:
-                    shape = multiples['shape']
-                if len(shape) != 1:
-                    raise ValueError(f'For \'{self.name}\', the dim of multiples must be 1.')
-                rank = max(len(x['shape']), shape[0])
-                out_shape = [-1] * rank
-                if -2 in x['shape']:
-                    out_shape = [-2]
-                return {
-                    'shape': out_shape,
-                    'dtype': x['dtype'],
-                    'value': None
-                }
-            out_shape, value = self._get_shape_and_range(x, multiples)
-            shape = out_shape.get('shape', None)
-            out = {'shape': shape,
-                   'dtype': x['dtype'],
-                   'value': value}
-            return out
-
-        validator.check_value_type(
-            "multiples", multiples_v, [tuple], self.name)
-        for i, multiple in enumerate(multiples_v):
-            validator.check_positive_int(
-                multiple, "multiples[%d]" % i, self.name)
-        validator.check_value_type(
-            "x[\'dtype\']", x["dtype"], mstype.TensorType, self.name)
-        out_shp, value = self._get_shape_and_range(x, multiples)
-        shp = out_shp.get('shape', None)
-        out = {'shape': shp,
-               'dtype': x['dtype'],
-               'value': value}
-        return out
-
-
-class UnsortedSegmentSum(Primitive):
-    r"""
-    Computes the sum of a tensor along segments.
|
|
2530
|
-
|
|
2531
|
-
Refer to :func:`mindspore.ops.unsorted_segment_sum` for more details.
|
|
2532
|
-
|
|
2533
|
-
Inputs:
|
|
2534
|
-
- **input_x** (Tensor) - Input Tensor contains the data to be summed.
|
|
2535
|
-
The shape is :math:`(x_1, x_2, ..., x_R)`.
|
|
2536
|
-
- **segment_ids** (Tensor) - The label indicates the segment to which each element belongs.
|
|
2537
|
-
Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
|
|
2538
|
-
- **num_segments** (int) - Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
|
|
2539
|
-
|
|
2540
|
-
Outputs:
|
|
2541
|
-
Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
|
|
2542
|
-
|
|
2543
|
-
Supported Platforms:
|
|
2544
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
2545
|
-
|
|
2546
|
-
Examples:
|
|
2547
|
-
>>> from mindspore import Tensor
|
|
2548
|
-
>>> from mindspore import ops
|
|
2549
|
-
>>> import mindspore
|
|
2550
|
-
>>> input_x = Tensor([1, 2, 3, 4], mindspore.float32)
|
|
2551
|
-
>>> segment_ids = Tensor([0, 0, 1, 2], mindspore.int32)
|
|
2552
|
-
>>> num_segments = 4
|
|
2553
|
-
>>> output = ops.UnsortedSegmentSum()(input_x, segment_ids, num_segments)
|
|
2554
|
-
>>> print(output)
|
|
2555
|
-
[3. 3. 4. 0.]
|
|
2556
|
-
>>> input_x = Tensor([1, 2, 3, 4, 2, 5], mindspore.float32)
|
|
2557
|
-
>>> segment_ids = Tensor([0, 0, 1, 2, 3, 4], mindspore.int32)
|
|
2558
|
-
>>> num_segments = 6
|
|
2559
|
-
>>> output = ops.UnsortedSegmentSum()(input_x, segment_ids, num_segments)
|
|
2560
|
-
>>> print(output)
|
|
2561
|
-
[3. 3. 4. 2. 5. 0.]
|
|
2562
|
-
"""
|
|
2563
|
-
|
|
2564
|
-
@prim_attr_register
|
|
2565
|
-
def __init__(self):
|
|
2566
|
-
"""Initialize UnsortedSegmentSum"""
|
|
2567
|
-
self.init_prim_io_names(inputs=['x', 'segment_ids', 'num_segments'], outputs=['y'])
|
|
2568
|
-
|
|
2569
|
-
|
|
2570
1462
|
class UnsortedSegmentMin(PrimitiveWithCheck):
|
|
2571
1463
|
r"""
|
|
2572
1464
|
Computes the minimum of a tensor along segments.
|
|
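Note on the removed `Tile` block above: its docstring carries the output-shape rule (left-pad the input shape with 1s to the length of `multiples`, then multiply position-wise). A minimal sketch of that rule, using NumPy as a stand-in and a hypothetical helper name, for readers checking the diff:

import numpy as np

# Hypothetical helper illustrating the Tile shape rule from the removed
# docstring; NumPy stand-in, not the MindSpore primitive itself.
def tile_output_shape(x_shape, multiples):
    pad = len(multiples) - len(x_shape)
    assert pad >= 0, "len(multiples) cannot be smaller than the input rank"
    padded = (1,) * pad + tuple(x_shape)
    return tuple(m * s for m, s in zip(multiples, padded))

x = np.array([[1, 2], [3, 4]], dtype=np.float32)
assert tile_output_shape(x.shape, (2, 3)) == np.tile(x, (2, 3)).shape        # (4, 6)
assert tile_output_shape(x.shape, (2, 3, 2)) == np.tile(x, (2, 3, 2)).shape  # (2, 6, 4)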
@@ -2578,10 +1470,10 @@ class UnsortedSegmentMin(PrimitiveWithCheck):
           The data type must be float16, float32 or int32.
         - **segment_ids** (Tensor) - The label indicates the segment to which each element belongs.
           Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
-        - **num_segments** (int) -
+        - **num_segments** (Union[int, Tensor]) - Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
 
     Outputs:
-        Tensor,
+        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -2640,10 +1532,10 @@ class UnsortedSegmentMax(PrimitiveWithCheck):
           The data type must be float16, float32 or int32.
         - **segment_ids** (Tensor) - The label indicates the segment to which each element belongs.
           Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
-        - **num_segments** (int) -
+        - **num_segments** (Union[int, Tensor]) - Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
 
     Outputs:
-        Tensor,
+        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -2759,13 +1651,12 @@ class UnsortedSegmentProd(Primitive):
     Inputs:
         - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
           With float16, float32 or int32 data type.
-        - **segment_ids** (Tensor) -
-          Data type must be int32.
-        - **num_segments** (int) -
-          must be greater than 0.
+        - **segment_ids** (Tensor) - The label indicates the segment to which each element belongs.
+          Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R. Data type must be int32.
+        - **num_segments** (Union[int, Tensor]) - Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
 
     Outputs:
-        Tensor,
+        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
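The three UnsortedSegment* hunks above tighten the same documented contract: `num_segments` may be an int or 0-D Tensor, and with `num_segments = z` the output shape is :math:`(z, x_{N+1}, ..., x_R)`. A NumPy stand-in for the sum variant (hypothetical helper, not the primitive):

import numpy as np

# Output shape is (num_segments,) + input_x.shape[segment_ids.ndim:], as the
# updated docstrings state; duplicate ids accumulate, missing ids stay 0.
def unsorted_segment_sum(input_x, segment_ids, num_segments):
    out = np.zeros((num_segments,) + input_x.shape[segment_ids.ndim:], input_x.dtype)
    for idx, seg in np.ndenumerate(segment_ids):
        out[seg] += input_x[idx]
    return out

x = np.array([1., 2., 3., 4.], dtype=np.float32)
ids = np.array([0, 0, 1, 2], dtype=np.int32)
print(unsorted_segment_sum(x, ids, 4))  # [3. 3. 4. 0.], matching the UnsortedSegmentSum example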
@@ -2790,62 +1681,6 @@ class UnsortedSegmentProd(Primitive):
         self.init_prim_io_names(inputs=['x', 'segment_ids', 'num_segments'], outputs=['y'])
 
 
-class Concat(PrimitiveWithCheck):
-    r"""
-    Connect tensor in the specified axis.
-
-    Refer to :func:`mindspore.ops.concat` for more details.
-
-    Args:
-        axis (int, optional): The specified axis. Default: ``0`` .
-
-    Inputs:
-        - **input_x** (Union[tuple, list]) - A tuple or a list of input tensors.
-          Suppose there are two tensors in this tuple or list, namely x1 and x2.
-          To perform `Concat` in the axis 0 direction, except for the 0th axis, all other axes should be equal,
-          that is, :math:`x1.shape[1] == x2.shape[1], x1.shape[2] == x2.shape[2], ..., x1.shape[R] == x2.shape[R]`,
-          where the :math:`R` indicates the last axis.
-
-    Outputs:
-        - Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
-          The data type is the same with `input_x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
-        >>> input_x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
-        >>> op = ops.Concat()
-        >>> output = op((input_x1, input_x2))
-        >>> print(output)
-        [[0. 1.]
-         [2. 1.]
-         [0. 1.]
-         [2. 1.]]
-        >>> op = ops.Concat(1)
-        >>> output = op((input_x1, input_x2))
-        >>> print(output)
-        [[0. 1. 0. 1.]
-         [2. 1. 2. 1.]]
-    """
-
-    @prim_attr_register
-    def __init__(self, axis=0):
-        """Initialize Concat"""
-        self.axis = axis
-        validator.check_value_type("axis", axis, [int], self.name)
-
-    def infer_value(self, input_x):
-        """Implement Concat infer value"""
-        value = None
-        if input_x is not None and None not in input_x:
-            value = Tensor(np.concatenate([x.asnumpy() for x in input_x], axis=self.axis))
-        return value
-
-
 class ConcatOffsetV1(Primitive):
     r"""
     primitive for computing Concat’s gradient.
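The removed `Concat` docstring states the shape contract: along the chosen axis the sizes add up, and every other axis must match. A two-line NumPy check of that contract (stand-in, not the primitive):

import numpy as np

# Non-axis dims (here the second) must be equal; the axis dim sums: 2 + 5 = 7.
a, b = np.zeros((2, 3), np.float32), np.zeros((5, 3), np.float32)
assert np.concatenate((a, b), axis=0).shape == (7, 3)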
@@ -2959,7 +1794,7 @@ def _get_stack_shape(value, x_shape, x_type, axis, prim_name):
 
     out_n = len(x_shape)
     for i in range(1, out_n):
-        if x_type[i] != x_type[i-1]:
+        if x_type[i] != x_type[i - 1]:
             raise TypeError(f"For {prim_name}, all types should be same, but got {x_type}")
 
     new_x_shape = []
@@ -3047,6 +1882,7 @@ class Stack(PrimitiveWithInfer):
         tuple_value = value['value']
         input_array = []
         infered_value = None
+        dtype = x_type[0]
         if tuple_value is not None and None not in tuple_value:
             for item in tuple_value:
                 npy_item = item.asnumpy()
@@ -3055,23 +1891,9 @@ class Stack(PrimitiveWithInfer):
 
         shape = all_shape.get('shape') if isinstance(all_shape, dict) else all_shape
         out = {'shape': shape,
-               'dtype':
+               'dtype': dtype,
                'value': infered_value}
 
-        def unpack(x):
-            if isinstance(x, (tuple, list)) and len(x) == 1:
-                return unpack(x[0])
-            return x
-
-        if 'shape_value' in value and value['shape_value'] is not None:
-            input_shape_value = []
-            for item in value['shape_value']:
-                item = unpack(item)
-                item = np.array(item)
-                input_shape_value.append(item)
-            infered_shape_value = np.stack(input_shape_value, axis=self.axis)
-            infered_shape_value = tuple(infered_shape_value.tolist())
-            out['shape_value'] = infered_shape_value
         return out
 
 
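The two Stack hunks above fix `__infer__`'s output dict: the previously dangling 'dtype' entry is now bound via `dtype = x_type[0]` (safe because `_get_stack_shape`, patched in the hunk at -2959, rejects mixed element types), and the speculative `shape_value` stacking path is dropped. The stacking contract itself, as a NumPy sketch:

import numpy as np

# Stack inserts a new axis of length N at `axis`; dtype follows the elements,
# which are required to share one dtype.
xs = [np.ones((2, 3), dtype=np.float32) for _ in range(4)]
assert np.stack(xs, axis=0).shape == (4, 2, 3)
assert np.stack(xs, axis=1).shape == (2, 4, 3)
assert np.stack(xs, axis=0).dtype == xs[0].dtype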
@@ -3224,61 +2046,6 @@ class Coalesce(Primitive):
                                 outputs=['y_indices', 'y_values', 'y_shape'])
 
 
-class ReverseV2(Primitive):
-    """
-    Reverses specific dimensions of a tensor.
-
-    .. warning::
-        The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "input_x".
-
-    Args:
-        axis (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.
-
-    Inputs:
-        - **input_x** (Tensor) - The target tensor.
-          The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
-
-    Outputs:
-        Tensor, has the same shape and type as `input_x`.
-
-    Raises:
-        TypeError: If `axis` is neither list nor tuple.
-        TypeError: If element of `axis` is not an int.
-        ValueError: There are multiple identical axes in `axis`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
-        >>> op = ops.ReverseV2(axis=[1])
-        >>> output = op(input_x)
-        >>> print(output)
-        [[4 3 2 1]
-         [8 7 6 5]]
-        >>> op = ops.ReverseV2(axis=[1, 0])
-        >>> output = op(input_x)
-        >>> print(output)
-        [[8 7 6 5]
-         [4 3 2 1]]
-    """
-
-    @prim_attr_register
-    def __init__(self, axis):
-        """Initialize ReverseV2."""
-        validator.check_value_type('axis', axis, [list, tuple], self.name)
-        for i, each in enumerate(axis):
-            validator.check_value_type(f'axis[{i}]', each, [int], self.name)
-        self.axis = axis
-        if isinstance(axis, list):
-            self.axis = tuple(axis)
-        self.add_prim_attr('axis', self.axis)
-        self.init_prim_io_names(inputs=['x'], outputs=['output'])
-
-
 class Rint(Primitive):
     """
     Returns an integer that is closest to `input_x` element-wise.
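For readers tracking the removed `ReverseV2`: its behavior corresponds to np.flip over the given axes (each axis in [-dims, dims - 1], no repeats). A NumPy sketch reproducing the removed docstring's examples:

import numpy as np

x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.int32)
print(np.flip(x, axis=1))       # like ReverseV2(axis=[1]):   [[4 3 2 1] [8 7 6 5]]
print(np.flip(x, axis=(1, 0)))  # like ReverseV2(axis=[1, 0]): [[8 7 6 5] [4 3 2 1]]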
@@ -3296,645 +2063,26 @@ class Rint(Primitive):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([-1.6, -0.1, 1.5, 2.0]), mindspore.float32)
-        >>> op = ops.Rint()
-        >>> output = op(input_x)
-        >>> print(output)
-        [-2. 0. 2. 2.]
-        >>> input_x = Tensor(np.array([[-2.0, -1.9, -1.8, -1.7, -1.6],
-        ...                            [-2.0, -1.9, -1.8, -1.7, -1.6]]), mindspore.float32)
-        >>> output = op(input_x)
-        >>> print(output)
-        [[-2. -2. -2. -2. -2.]
-         [-2. -2. -2. -2. -2.]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Rint."""
-        self.init_prim_io_names(inputs=['x'], outputs=['output'])
-
-
-class Select(Primitive):
-    r"""
-    The conditional tensor determines whether the corresponding element in the output must be
-    selected from `x` (if True) or `y` (if False) based on the value of each
-    element.
-
-    It can be defined as:
-
-    .. math::
-        out_i = \begin{cases}
-        x_i, & \text{if } condition_i \\
-        y_i, & \text{otherwise}
-        \end{cases}
-
-    Inputs:
-        - **condition** (Tensor[bool]) - The condition tensor, decides which element is chosen.
-          The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
-        - **x** (Tensor) - The first tensor to be selected and the shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
-        - **y** (Tensor) - The second tensor to be selected and the shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
-
-    Outputs:
-        Tensor, has the same shape as `condition`.
-
-    Raises:
-        TypeError: If `x` or `y` is not a Tensor.
-        ValueError: If shape of the three inputs are different.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> select = ops.Select()
-        >>> input_cond = Tensor([True, False])
-        >>> input_x = Tensor([2,3], mindspore.float32)
-        >>> input_y = Tensor([1,2], mindspore.float32)
-        >>> output = select(input_cond, input_x, input_y)
-        >>> print(output)
-        [2. 2.]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Select."""
-        self.init_prim_io_names(inputs=['condition', 'x', 'y'], outputs=['output'])
-
-
-class StridedSliceV2(Primitive):
-    r"""
-    StridedSliceV2 will be deprecated by StridedSlice in the future.
-    Extracts a strided slice of a tensor.
-    Refer to class StridedSlice for more details.
-
-    Args:
-        begin_mask (int): Starting index of the slice. Default: ``0`` .
-        end_mask (int): Ending index of the slice. Default: ``0`` .
-        ellipsis_mask (int): An int mask. Default: ``0`` .
-        new_axis_mask (int): An int mask. Default: ``0`` .
-        shrink_axis_mask (int): An int mask. Default: ``0`` .
-
-    Inputs:
-        - **input_x** (Tensor) - The input Tensor.
-        - **begin** (tuple[int]) - A tuple which represents the location where to start. Only
-          constant value is allowed.
-        - **end** (tuple[int]) - A tuple or which represents the maximum location where to end.
-          Only constant value is allowed.
-        - **strides** (tuple[int]) - A tuple which represents the stride is continuously added
-          before reaching the maximum location. Only constant value is allowed.
-
-    Outputs:
-        Tensor, The output is explained by following example.
-
-    Raises:
-        TypeError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or `shrink_axis_mask` is not an int.
-        TypeError: If `begin`, `end` or `strides` is not a tuple.
-        ValueError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or `shrink_axis_mask` is less than 0.
-
-    Supported Platforms:
-        ``Ascend`` ``CPU``
-
-    Examples:
-        >>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
-        ...                   [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
-        >>> strided_slice_v2 = ops.StridedSliceV2()
-        >>> output = strided_slice_v2(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1))
-        >>> print(output)
-        [[[3.]]
-         [[5.]]]
-    """
-
-    @prim_attr_register
-    def __init__(self,
-                 begin_mask=0,
-                 end_mask=0,
-                 ellipsis_mask=0,
-                 new_axis_mask=0,
-                 shrink_axis_mask=0):
-        """Initialize StridedSliceV2"""
-        self.init_prim_io_names(inputs=['x', 'begin', 'end', 'strides'], outputs=['output'])
-
-
-class StridedSlice(PrimitiveWithInfer):
-    r"""
-
-    Extracts a strided slice of a tensor.
-
-    Refer to :func:`mindspore.ops.strided_slice` for more details.
-
-    Args:
-        begin_mask (int, optional): Starting index of the slice. Default: ``0`` .
-        end_mask (int, optional): Ending index of the slice. Default: ``0`` .
-        ellipsis_mask (int, optional): An int mask, ignore slicing operation when set to 1. Default: ``0`` .
-        new_axis_mask (int, optional): An int mask for adding new dims. Default: ``0`` .
-        shrink_axis_mask (int, optional): An int mask for shrinking dims. Default: ``0`` .
-
-    Inputs:
-        - **input_x** (Tensor) - The input Tensor to be extracted from.
-        - **begin** (tuple[int]) - A tuple which represents the location where to start.
-        - **end** (tuple[int]) - A tuple or which represents the maximum location where to end.
-        - **strides** (tuple[int]) - A tuple which represents the strides is continuously added
-          before reaching the maximum location. Only int is allowed, it can be negative
-          which results in reversed slicing.
-
-    Outputs:
-        Tensor, return the extracts a strided slice of a Tensor based on `begin/end` index and `strides`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
-        ...                   [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
-        >>> # [[[1. 1. 1.]
-        >>> #   [2. 2. 2.]]
-        >>> #
-        >>> #  [[3. 3. 3.]
-        >>> #   [4. 4. 4.]]
-        >>> #
-        >>> #  [[5. 5. 5.]
-        >>> #   [6. 6. 6.]]]
-        >>> # In order to visually view the multi-dimensional array, write the above as follows
-        >>> # [
-        >>> #     [
-        >>> #         [1,1,1]
-        >>> #         [2,2,2]
-        >>> #     ]
-        >>> #     [
-        >>> #         [3,3,3]
-        >>> #         [4,4,4]
-        >>> #     ]
-        >>> #     [
-        >>> #         [5,5,5]
-        >>> #         [6,6,6]
-        >>> #     ]
-        >>> # ]
-        >>> strided_slice = ops.StridedSlice()
-        >>> output = strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1))
-        >>> # Take this " output = strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1)) " as an example,
-        >>> # start = [1, 0, 2] , end = [3, 1, 3], stride = [1, 1, 1], Find a segment of (start, end),
-        >>> # note that end is an open interval
-        >>> # To facilitate understanding, this operator can be divided into three steps:
-        >>> # Step 1: Calculation of the first dimension:
-        >>> # start = 1, end = 3, stride = 1, So can take 1st, 2nd rows, and then gets the final output at this time.
-        >>> # output_1th =
-        >>> # [
-        >>> #     [
-        >>> #         [3,3,3]
-        >>> #         [4,4,4]
-        >>> #     ]
-        >>> #     [
-        >>> #         [5,5,5]
-        >>> #         [6,6,6]
-        >>> #     ]
-        >>> # ]
-        >>> # Step 2: Calculation of the second dimension
-        >>> # 2nd dimension, start = 0, end = 1, stride = 1. So only 0th rows can be taken, and the output at this time.
-        >>> # output_2nd =
-        >>> # [
-        >>> #     [
-        >>> #         [3,3,3]
-        >>> #     ]
-        >>> #     [
-        >>> #         [5,5,5]
-        >>> #     ]
-        >>> # ]
-        >>> # Step 3: Calculation of the third dimension
-        >>> # 3nd dimension,start = 2, end = 3, stride = 1, So can take 2th cols,
-        >>> # and you get the final output at this time.
-        >>> # output_3ed =
-        >>> # [
-        >>> #     [
-        >>> #         [3]
-        >>> #     ]
-        >>> #     [
-        >>> #         [5]
-        >>> #     ]
-        >>> # ]
-        >>> # The final output after finishing is:
-        >>> print(output)
-        [[[3.]]
-         [[5.]]]
-        >>> # another example like :
-        >>> output = strided_slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
-        >>> print(output)
-        [[[3. 3. 3.]]]
-    """
-
-    @prim_attr_register
-    def __init__(self,
-                 begin_mask=0,
-                 end_mask=0,
-                 ellipsis_mask=0,
-                 new_axis_mask=0,
-                 shrink_axis_mask=0):
-        """Initialize StridedSlice"""
-        self.init_prim_io_names(inputs=['x', 'begin', 'end', 'strides'], outputs=['output'])
-
-        validator.check_non_negative_int(begin_mask, 'begin_mask', self.name)
-        validator.check_non_negative_int(end_mask, 'end_mask', self.name)
-        validator.check_non_negative_int(ellipsis_mask, 'ellipsis_mask', self.name)
-        if len(tuple(filter(lambda x: x == '1', bin(ellipsis_mask)[-1:1:-1]))) > 1:
-            raise ValueError(f"For '{self.name}', only support one ellipsis in the index, but got {ellipsis_mask}.")
-        validator.check_non_negative_int(new_axis_mask, 'new_axis_mask', self.name)
-        validator.check_non_negative_int(shrink_axis_mask, 'shrink_axis_mask',
-                                         self.name)
-
-    def __infer__(self, x, begin, end, strides):
-        begin_v, begin_len = self._check_and_get_value(begin, 'begin')
-        end_v, end_len = self._check_and_get_value(end, 'end')
-        strides_v, strides_len = self._check_and_get_value(strides, 'strides')
-
-        is_dynamic_tuple = (self._is_none_in_tuple(begin_v.get('value'))
-                            or self._is_none_in_tuple(end_v.get('value'))
-                            or self._is_none_in_tuple(strides_v.get('value')))
-        is_dynamic = None in (begin_v.get('value'), end_v.get('value'), strides_v.get('value'))
-
-        if not is_dynamic and (begin_len != strides_len or end_len != strides_len):
-            raise ValueError(
-                f"For '{self.name}', 'begin', 'end' and 'strides' must be the same length, but got "
-                f"'begin' length: {begin_len}, 'end' length: {end_len}, 'strides' length: {strides_len}."
-            )
-
-        if is_dynamic or is_dynamic_tuple or is_shape_unknown(x['shape']):
-            ret_shape = self._compute_dynamic_slicing_shape(x, begin_v, end_v, strides_v, begin_len)
-            rets = {'shape': ret_shape,
-                    'dtype': x['dtype'],
-                    'value': None}
-            return rets
-
-        ret_shape = self._compute_slicing_shape(x['shape'], begin_v['value'], end_v['value'], strides_v['value'])
-        if all(ret_shape):
-            value = None
-        else:
-            init_func = Zero()
-            init_func.__enable_zero_dim__ = True
-            value = Tensor(dtype=x['dtype'].element_type(), shape=ret_shape, init=init_func)
-
-        if "max_value" in x and "min_value" in x:
-            validator.check_value_type("min_value", x["min_value"], [tuple, list], self.name)
-            validator.check_value_type("max_value", x["max_value"], [tuple, list], self.name)
-            max_value_slice = self._compute_dynamic_slicing_value(x["max_value"], begin_v, end_v, strides_v)
-            min_value_slice = self._compute_dynamic_slicing_value(x["min_value"], begin_v, end_v, strides_v)
-            return {'shape': ret_shape,
-                    'dtype': x['dtype'],
-                    'value': value,
-                    'max_value': max_value_slice,
-                    'min_value': min_value_slice}
-
-        if "shape_value" in x:
-            validator.check_value_type("shape_value", x["shape_value"], [tuple], self.name)
-            shape_value_slice = self._compute_dynamic_slicing_value(x["shape_value"], begin_v, end_v, strides_v)
-            return {'shape': ret_shape,
-                    'dtype': x['dtype'],
-                    'shape_value': shape_value_slice,
-                    'value': value}
-        return {'shape': ret_shape,
-                'dtype': x['dtype'],
-                'value': value}
-
-    @staticmethod
-    def _compute_slicing_len_for_positive_stride(begin, end, stride, x_dim):
-        """Compute slice length for positive stride."""
-        if x_dim == -1:
-            if begin >= end:
-                # When slicing forward, if begin >= end, the length of the slicing is 0.
-                slicing_length = 0
-            else:
-                slicing_length = -1
-            return slicing_length
-        # When slicing forward, convert begin and end to positive numbers.
-        if begin >= x_dim or end < -x_dim:
-            # When slicing forward, if begin >= x_dim or end < -x_dim, the length of the slicing is 0.
-            slicing_length = 0
-        else:
-            if -x_dim <= begin < 0:
-                begin += x_dim
-            if begin < -x_dim:
-                # When slicing forward, if begin < -x_dim, set begin = 0, which means start from the 0th element.
-                begin = 0
-            if -x_dim <= end < 0:
-                end += x_dim
-            if end > x_dim:
-                # When slicing forward, if end > x_dim, set end = x_dims, which means slice to the last element.
-                end = x_dim
-            if begin >= end:
-                # When slicing forward, if begin >= end, the length of the slicing is 0.
-                slicing_length = 0
-            else:
-                slicing_length = 1 + (end - 1 - begin) // stride
-        return slicing_length
-
-    @staticmethod
-    def _compute_slicing_len_for_negative_stride(begin, end, stride, x_dim):
-        """Compute slice length for negative stride."""
-        if x_dim == -1:
-            if begin <= end:
-                slicing_length = 0
-            else:
-                slicing_length = -1
-            return slicing_length
-        # When slicing backward, convert begin and end to negative numbers.
-        if begin < -x_dim or end >= x_dim:
-            # When slicing backward, if begin < -x_dim or end >= x_dim, the length of the slicing is 0.
-            slicing_length = 0
-        else:
-            if 0 <= begin < x_dim:
-                begin += -x_dim
-            if begin >= x_dim:
-                begin = -1
-            if 0 <= end < x_dim:
-                end += -x_dim
-            if end < -x_dim - 1:
-                # Slicing to the 0th element.
-                end = -x_dim - 1
-            if begin <= end:
-                slicing_length = 0
-            else:
-                slicing_length = 1 + (end + 1 - begin) // stride
-        return slicing_length
-
-    @staticmethod
-    def _get_slice_value(begin_v, end_v, strides_v):
-        """Get the slice value from value or shape_value."""
-        begin_value = begin_v['value']
-        end_value = end_v['value']
-        strides_value = strides_v['value']
-        if begin_value is None:
-            begin_value = begin_v['shape_value']
-        if end_value is None:
-            end_value = end_v['shape_value']
-        if strides_value is None:
-            strides_value = strides_v['shape_value']
-        return begin_value, end_value, strides_value
-
-    def _is_none_in_tuple(self, x):
-        return isinstance(x, tuple) and None in x
-
-    def _compute_slicing_length(self, begin, end, stride, x_dim):
-        """Computes the length of the slicing."""
-        if stride > 0:
-            slicing_length = self._compute_slicing_len_for_positive_stride(begin, end, stride, x_dim)
-        else:
-            slicing_length = self._compute_slicing_len_for_negative_stride(begin, end, stride, x_dim)
-        return slicing_length
-
-    def _compute_slicing_shape(self, x_shape, begin_v, end_v, strides_v):
-        """Computes the shape of the slicing."""
-        x_rank = len(x_shape)
-        slice_len = len(begin_v)
-
-        # After the integer is converted to binary, it is a str and the first two chars are the flag char '0b'.
-        begin_pos = bin(self.begin_mask)[-1:1:-1]
-        end_pos = bin(self.end_mask)[-1:1:-1]
-        ellipsis_pos = bin(self.ellipsis_mask)[-1:1:-1]
-        new_axis_pos = bin(self.new_axis_mask)[-1:1:-1]
-        shrink_axis_pos = bin(self.shrink_axis_mask)[-1:1:-1]
-
-        ret_shape = []
-        i, j = 0, 0
-        has_ellipsis = False
-        while i < x_rank or j < slice_len:
-            if j < slice_len:
-                begin, end, stride = begin_v[j], end_v[j], strides_v[j]
-
-                if j < len(ellipsis_pos) and ellipsis_pos[j] == '1':
-                    # When there is ellipsis, the latter part of the ellipsis will be processed separately.
-                    has_ellipsis = True
-                    break
-                if j < len(begin_pos) and begin_pos[j] == '1':
-                    begin = -1 if strides_v[j] < 0 else 0
-                if j < len(end_pos) and end_pos[j] == '1':
-                    end = -(x_shape[i] + 1) if strides_v[j] < 0 else x_shape[i]
-                if j < len(new_axis_pos) and new_axis_pos[j] == '1':
-                    ret_shape.append(1)
-                    j += 1
-                    continue
-                if j < len(shrink_axis_pos) and shrink_axis_pos[j] == '1':
-                    if (not -x_shape[i] <= begin < x_shape[i]) or stride < 0:
-                        raise IndexError(f"For '{self.name}', the 'strides[{i}]' cannot be negative number and "
-                                         f"'begin[{i}]' must be in [-{x_shape[i]}, {x_shape[i]}) "
-                                         f"when 'shrink_axis_mask' is greater than 0, "
-                                         f"but got 'shrink_axis_mask': {self.shrink_axis_mask}, "
-                                         f"'strides[{i}]': {stride}, 'begin[{i}]': {begin}.")
-                    j += 1
-                    i += 1
-                    continue
-            else:
-                begin, end, stride = 0, x_shape[i], 1
-
-            slicing_length = self._compute_slicing_length(begin, end, stride, x_shape[i])
-            ret_shape.append(slicing_length)
-            i += 1
-            j += 1
-        if has_ellipsis:
-            # When there is ellipsis, handle the second half of the ellipsis split.
-            ellipsis_occupied_dims = x_rank - i - (slice_len - (j + 1)) + \
-                len(tuple(filter(lambda x: x == '1', new_axis_pos[j + 1:slice_len])))
-            ret_shape.extend(x_shape[i:i + ellipsis_occupied_dims])
-            j += 1
-            i += ellipsis_occupied_dims
-
-            while i < x_rank or j < slice_len:
-                begin, end, stride = begin_v[j], end_v[j], strides_v[j]
-
-                if j < len(begin_pos) and begin_pos[j] == '1':
-                    begin = -1 if strides_v[j] < 0 else 0
-                if j < len(end_pos) and end_pos[j] == '1':
-                    end = -(x_shape[i] + 1) if strides_v[j] < 0 else x_shape[i]
-                if j < len(new_axis_pos) and new_axis_pos[j] == '1':
-                    ret_shape.append(1)
-                    j += 1
-                    continue
-                if j < len(shrink_axis_pos) and shrink_axis_pos[j] == '1':
-                    if (not -x_shape[i] <= begin < x_shape[i]) or stride < 0:
-                        raise IndexError(f"For '{self.name}', the 'strides[{i}]' can not be negative number and "
-                                         f"'begin[{i}]' must be in [-{x_shape[i]}, {x_shape[i]}) "
-                                         f"when 'shrink_axis_mask' is greater than 0, "
-                                         f"but got 'shrink_axis_mask': {self.shrink_axis_mask}, "
-                                         f"'strides[{i}]': {stride}, 'begin[{i}]': {begin}.")
-                    j += 1
-                    i += 1
-                    continue
-
-                slicing_length = self._compute_slicing_length(begin, end, stride, x_shape[i])
-                ret_shape.append(slicing_length)
-                i += 1
-                j += 1
-        return ret_shape
-
-    def _compute_dynamic_slicing_value(self, shape_value, begin_v, end_v, strides_v):
-        """Computes the length of the slicing for dynamic shape."""
-        shape_value_np = np.array(shape_value)
-        slice_index = []
-        for begin_i, end_i, strides_i in zip(begin_v['value'], end_v['value'], strides_v['value']):
-            s = slice(begin_i, end_i, strides_i)
-            slice_index.append(s)
-        slice_index = tuple(slice_index)
-        shape_value_slice = shape_value_np[slice_index]
-        shape_value_slice = tuple(shape_value_slice.tolist())
-        return shape_value_slice
-
-    def _compute_dynamic_slicing_length(self, begin, end, stride, x_dim):
-        """Computes the length of the slicing for dynamic shape."""
-        slicing_length = -1
-        if None in (begin, end, stride) or -1 in (begin, end, stride):
-            return slicing_length
-        slicing_length = self._compute_slicing_length(begin, end, stride, x_dim)
-        return slicing_length
-
-    def _compute_dynamic_slicing_shape(self, x, begin_v, end_v, strides_v, slice_len):
-        """Computes the shape of the slicing for dynamic shape, mask is currently not supported."""
-        x_shape = x['shape']
-        if is_dim_unknown(x_shape):
-            return [-2]
-        x_rank = len(x_shape)
-        new_axis_pos = bin(self.new_axis_mask)[-1:1:-1]
-        shrink_axis_pos = bin(self.shrink_axis_mask)[-1:1:-1]
-        if self.ellipsis_mask:
-            raise ValueError("Ellipsis Mask is currently not supported in dynamic shape.")
-        ret_shape = []
-        i, j = 0, 0
-        slice_has_special_value = False
-        begin_value, end_value, strides_value = self._get_slice_value(
-            begin_v, end_v, strides_v)
-        is_dynamic_tuple = (self._is_none_in_tuple(begin_value)
-                            or self._is_none_in_tuple(end_value)
-                            or self._is_none_in_tuple(strides_value))
-        if None in (begin_v['value'], end_v['value'], strides_v['value']) or is_dynamic_tuple:
-            slice_has_special_value = True
-        while i < x_rank or j < slice_len:
-            slicing_length = -1
-            if j < slice_len:
-                if j < len(new_axis_pos) and new_axis_pos[j] == '1':
-                    ret_shape.append(1)
-                    j += 1
-                    continue
-                if j < len(shrink_axis_pos) and shrink_axis_pos[j] == '1':
-                    j += 1
-                    i += 1
-                    continue
-                if None in (begin_value, end_value, strides_value):
-                    slicing_length = -1
-                elif slice_has_special_value:
-                    slicing_length = self._compute_dynamic_slicing_length(
-                        begin_value[j], end_value[j], strides_value[j], x_shape[i])
-                else:
-                    slicing_length = \
-                        self._compute_slicing_length(begin_value[j], end_value[j], strides_value[j], x_shape[i])
-            else:
-                if i >= len(x_shape):
-                    raise ValueError(f"For 'StridedSlice', the index must be less than or equal to "
-                                     f"the dimension of 'input_x', but got the dimension of 'input_x': {len(x_shape)} "
-                                     f"and the index: {i}.")
-                begin, end, stride = 0, x_shape[i], 1
-                if end > 0:
-                    slicing_length = self._compute_slicing_length(begin, end, stride, x_shape[i])
-            ret_shape.append(slicing_length)
-            i += 1
-            j += 1
-        return ret_shape
-
-    def _check_and_get_value(self, slice_input, name):
-        """Check begin, end, strides. Get its length and value."""
-        slice_value = slice_input['value']
-        slice_min = None
-        slice_max = None
-        slice_special_value = None
-        if "min_value" in slice_input and "max_value" in slice_input:
-            slice_min = slice_input["min_value"]
-            slice_max = slice_input["max_value"]
-        elif "shape_value" in slice_input:
-            slice_special_value = slice_input["shape_value"]
-        if slice_value is None:
-            validator.check_tensor_dtype_valid(name, slice_input['dtype'], [mstype.int32, mstype.int64], self.name)
-            slice_shape = slice_input['shape']
-            if len(slice_shape) != 1:
-                raise ValueError(f"For '{self.name}', both the 'begins', 'ends', and 'strides' must be 1-D, "
-                                 f"but got '{name}' shape: {slice_shape}.")
-            # not support scalar
-            slices = {
-                'value': slice_value,
-                'shape_value': slice_special_value,
-                'min_value': slice_min,
-                'max_value': slice_max
-            }
-            return slices, slice_shape[0]
-
-        if isinstance(slice_value, (Tensor, Tensor_)):
-            validator.check_tensor_dtype_valid(name, slice_input['dtype'], [mstype.int64], self.name)
-            slice_value = slice_value.asnumpy().tolist()
-        elif not isinstance(slice_value, tuple):
-            raise TypeError(f"For '{self.name}', both the 'begin', 'end', and 'strides' must be a tuple or Tensor, "
-                            f"but got '{name}': {slice_value}.")
-
-        if tuple(filter(lambda x: x is not None and not isinstance(x, int), slice_value)):
-            raise TypeError(f"For '{self.name}', the elements of 'begin', 'end', and 'strides' must be int, "
-                            f"but got {name}: {slice_value}.")
-
-        if name == 'strides':
-            if slice_value is not None and tuple(filter(lambda x: x == 0, slice_value)):
-                raise ValueError(f"For '{self.name}', 'strides' cannot contain 0, but got 'strides': {slice_value}.")
-
-        slices = {
-            'value': slice_value,
-            'shape_value': slice_special_value,
-            'min_value': slice_min,
-            'max_value': slice_max
-        }
-        return slices, len(slice_value)
-
-
-class Diag(PrimitiveWithCheck):
-    r"""
-
-    Constructs a diagonal tensor with a given diagonal values.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Refer to :func:`mindspore.ops.diag` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - The input tensor.
-
-    Outputs:
-        Tensor, has the same dtype as the `input_x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
+        >>> import mindspore
+        >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor([1, 2, 3, 4]).astype('int32')
-        >>> diag = ops.Diag()
-        >>> output = diag(input_x)
+        >>> input_x = Tensor(np.array([-1.6, -0.1, 1.5, 2.0]), mindspore.float32)
+        >>> op = ops.Rint()
+        >>> output = op(input_x)
+        >>> print(output)
+        [-2. 0. 2. 2.]
+        >>> input_x = Tensor(np.array([[-2.0, -1.9, -1.8, -1.7, -1.6],
+        ...                            [-2.0, -1.9, -1.8, -1.7, -1.6]]), mindspore.float32)
+        >>> output = op(input_x)
         >>> print(output)
-        [[1 0 0 0]
-         [0 2 0 0]
-         [0 0 3 0]
-         [0 0 0 4]]
+        [[-2. -2. -2. -2. -2.]
+         [-2. -2. -2. -2. -2.]]
     """
 
     @prim_attr_register
     def __init__(self):
-        """Initialize Diag"""
-
-    def infer_value(self, x):
-        if x is None:
-            return None
-        # do constant-folding only when x rank is 1
-        if len(x.shape) != 1:
-            return None
-        ret = np.diag(x.asnumpy())
-        return Tensor(ret)
+        """Initialize Rint."""
+        self.init_prim_io_names(inputs=['x'], outputs=['output'])
 
 
 class DiagPart(PrimitiveWithCheck):
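The removed `_compute_slicing_len_for_positive_stride` is the arithmetic heart of the old `__infer__`: normalize `begin`/`end` into [0, x_dim], then the slice length is 1 + (end - 1 - begin) // stride. A condensed sketch of that formula (clamping simplified relative to the original branch-by-branch version, no mask handling), checked against Python's own slicing:

# Sketch of the positive-stride length formula from the removed code.
def slice_len(begin, end, stride, x_dim):
    begin = begin + x_dim if begin < 0 else begin
    end = end + x_dim if end < 0 else end
    begin, end = max(begin, 0), min(end, x_dim)
    return 0 if begin >= end else 1 + (end - 1 - begin) // stride

for b, e, s in [(0, 5, 2), (1, 5, 2), (-4, -1, 1), (3, 10, 3)]:
    assert slice_len(b, e, s, 5) == len(range(5)[b:e:s])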
@@ -4029,280 +2177,6 @@ class Mvlgamma(Primitive):
|
|
|
4029
2177
|
validator.check_positive_int(p, 'p', self.name)
|
|
4030
2178
|
|
|
4031
2179
|
|
|
4032
|
-
class Eye(Primitive):
|
|
4033
|
-
"""
|
|
4034
|
-
Creates a tensor with ones on the diagonal and zeros in the rest.
|
|
4035
|
-
|
|
4036
|
-
Refer to :func:`mindspore.ops.eye` for more details.
|
|
4037
|
-
|
|
4038
|
-
Inputs:
|
|
4039
|
-
- **n** (int) - The number of rows of returned tensor. Constant value only.
|
|
4040
|
-
- **m** (int) - The number of columns of returned tensor. Constant value only.
|
|
4041
|
-
- **t** (mindspore.dtype) - MindSpore's dtype, the data type of the returned tensor.
|
|
4042
|
-
Default: ``None`` , the data type of the returned tensor is mindspore.float32.
|
|
4043
|
-
|
|
4044
|
-
Outputs:
|
|
4045
|
-
Tensor, a tensor with ones on the diagonal and the rest of elements are zero. The shape of `output` depends on
|
|
4046
|
-
the user's Inputs `n` and `m`. And the data type depends on Inputs `t`.
|
|
4047
|
-
|
|
4048
|
-
Supported Platforms:
|
|
4049
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
4050
|
-
|
|
4051
|
-
Examples:
|
|
4052
|
-
>>> import mindspore
|
|
4053
|
-
>>> from mindspore import ops
|
|
4054
|
-
>>> eye = ops.Eye()
|
|
4055
|
-
>>> output = eye(2, 2, mindspore.int32)
|
|
4056
|
-
>>> print(output)
|
|
4057
|
-
[[1 0]
|
|
4058
|
-
[0 1]]
|
|
4059
|
-
>>> print(output.dtype)
|
|
4060
|
-
Int32
|
|
4061
|
-
>>> output = eye(1, 2, mindspore.float64)
|
|
4062
|
-
>>> print(output)
|
|
4063
|
-
[[1. 0.]]
|
|
4064
|
-
>>> print(output.dtype)
|
|
4065
|
-
Float64
|
|
4066
|
-
"""
|
|
4067
|
-
|
|
4068
|
-
@prim_attr_register
|
|
4069
|
-
def __init__(self):
|
|
4070
|
-
"""Initialize Eye"""
|
|
4071
|
-
self.init_prim_io_names(inputs=['n', 'm', 't'], outputs=['output'])
|
|
4072
|
-
|
|
4073
|
-
|
|
4074
|
-
class ScatterNd(Primitive):
|
|
4075
|
-
r"""
|
|
4076
|
-
Scatters a tensor into a new tensor depending on the specified indices.
|
|
4077
|
-
|
|
4078
|
-
Refer to :func:`mindspore.ops.scatter_nd` for more details.
|
|
4079
|
-
|
|
4080
|
-
Inputs:
|
|
4081
|
-
- **indices** (Tensor) - The index of scattering in the new tensor with int32 or int64 data type.
|
|
4082
|
-
The rank of indices must be at least 2 and `indices_shape[-1] <= len(shape)`.
|
|
4083
|
-
- **updates** (Tensor) - The source Tensor to be scattered.
|
|
4084
|
-
It has shape `indices_shape[:-1] + shape[indices_shape[-1]:]`.
|
|
4085
|
-
- **shape** (tuple[int]) - Define the shape of the output tensor, has the same data type as indices.
|
|
4086
|
-
The shape of `shape` is :math:`(x_1, x_2, ..., x_R)`, and the length of 'shape' is greater than or equal to 2.
|
|
4087
|
-
In other words, the shape of `shape` is at least :math:`(x_1, x_2)`.
|
|
4088
|
-
And the value of any element in `shape` must be greater than or equal to 1.
|
|
4089
|
-
In other words, :math:`x_1` >= 1, :math:`x_2` >= 1.
|
|
4090
|
-
|
|
4091
|
-
Outputs:
|
|
4092
|
-
Tensor, the new tensor, has the same type as `update` and the same shape as `shape`.
|
|
4093
|
-
|
|
4094
|
-
Supported Platforms:
|
|
4095
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
4096
|
-
|
|
4097
|
-
Examples:
|
|
4098
|
-
>>> import mindspore
|
|
4099
|
-
>>> import numpy as np
|
|
4100
|
-
>>> from mindspore import Tensor, ops
|
|
4101
|
-
>>> op = ops.ScatterNd()
|
|
4102
|
-
>>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
|
|
4103
|
-
>>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2],
|
|
4104
|
-
... [3, 3, 3, 3], [4, 4, 4, 4]],
|
|
4105
|
-
... [[1, 1, 1, 1], [2, 2, 2, 2],
|
|
4106
|
-
... [3, 3, 3, 3], [4, 4, 4, 4]]]), mindspore.float32)
|
|
4107
|
-
>>> shape = (4, 4, 4)
|
|
4108
|
-
>>> output = op(indices, updates, shape)
|
|
4109
|
-
>>> print(output)
|
|
4110
|
-
[[[1. 1. 1. 1.]
|
|
4111
|
-
[2. 2. 2. 2.]
|
|
4112
|
-
[3. 3. 3. 3.]
|
|
4113
|
-
[4. 4. 4. 4.]]
|
|
4114
|
-
[[0. 0. 0. 0.]
|
|
4115
|
-
[0. 0. 0. 0.]
|
|
4116
|
-
[0. 0. 0. 0.]
|
|
4117
|
-
[0. 0. 0. 0.]]
|
|
4118
|
-
[[1. 1. 1. 1.]
|
|
4119
|
-
[2. 2. 2. 2.]
|
|
4120
|
-
[3. 3. 3. 3.]
|
|
4121
|
-
[4. 4. 4. 4.]]
|
|
4122
|
-
[[0. 0. 0. 0.]
|
|
4123
|
-
[0. 0. 0. 0.]
|
|
4124
|
-
[0. 0. 0. 0.]
|
|
4125
|
-
[0. 0. 0. 0.]]]
|
|
4126
|
-
>>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
|
|
4127
|
-
>>> updates = Tensor(np.array([3.2, 1.1]), mindspore.float32)
|
|
4128
|
-
>>> shape = (3, 3)
|
|
4129
|
-
>>> output = op(indices, updates, shape)
|
|
4130
|
-
>>> # In order to facilitate understanding, explain the operator pseudo-operation process step by step:
|
|
4131
|
-
>>> # Step 1: Generate an empty Tensor of the specified shape according to the shape
|
|
4132
|
-
>>> # [
|
|
4133
|
-
>>> # [0. 0. 0.]
|
|
4134
|
-
>>> # [0. 0. 0.]
|
|
4135
|
-
>>> # [0. 0. 0.]
|
|
4136
|
-
>>> # ]
|
|
4137
|
-
>>> # Step 2: Modify the data at the specified location according to the indicators
|
|
4138
|
-
>>> # 0th row of indices is [0, 1], 0th row of updates is 3.2.
|
|
4139
|
-
>>> # means that the empty tensor in the 0th row and 1st col set to 3.2
|
|
4140
|
-
>>> # [
|
|
4141
|
-
>>> # [0. 3.2. 0.]
|
|
4142
|
-
>>> # [0. 0. 0.]
|
|
4143
|
-
>>> # [0. 0. 0.]
|
|
4144
|
-
>>> # ]
|
|
4145
|
-
>>> # 1th row of indices is [1, 1], 1th row of updates is 1.1.
|
|
4146
|
-
>>> # means that the empty tensor in the 1th row and 1st col set to 1.1
|
|
4147
|
-
>>> # [
|
|
4148
|
-
>>> # [0. 3.2. 0.]
|
|
4149
|
-
>>> # [0. 1.1 0.]
|
|
4150
|
-
>>> # [0. 0. 0.]
|
|
4151
|
-
>>> # ]
|
|
4152
|
-
>>> # The final result is as follows:
|
|
4153
|
-
>>> print(output)
|
|
4154
|
-
[[0. 3.2 0.]
|
|
4155
|
-
[0. 1.1 0.]
|
|
4156
|
-
[0. 0. 0.]]
|
|
4157
|
-
"""
|
|
4158
|
-
|
|
4159
|
-
@prim_attr_register
|
|
4160
|
-
def __init__(self):
|
|
4161
|
-
"""Initialize ScatterNd"""
|
|
4162
|
-
self.init_prim_io_names(inputs=['indices', 'update', 'shape'], outputs=['output'])
|
|
4163
|
-
|
|
4164
|
-
|
|
4165
|
-
class ResizeNearestNeighbor(Primitive):
|
|
4166
|
-
r"""
|
|
4167
|
-
Resizes the input tensor to a given size by using the nearest neighbor algorithm. The nearest
|
|
4168
|
-
neighbor algorithm selects the value of the nearest point and does not consider the
|
|
4169
|
-
values of neighboring points at all, yielding a piecewise-constant interpolant.
|
|
4170
|
-
|
|
4171
|
-
Args:
|
|
4172
|
-
size (Union[tuple, list]): The target size. The dimension of size must be 2.
|
|
4173
|
-
align_corners (bool): Whether the centers of the 4 corner pixels of the input
|
|
4174
|
-
and output tensors are aligned. Default: ``False`` .
|
|
4175
|
-
|
|
4176
|
-
Inputs:
|
|
4177
|
-
- **input_x** (Tensor) - The input tensor. The shape of the tensor is :math:`(N, C, H, W)`.
|
|
4178
|
-
|
|
4179
|
-
Outputs:
|
|
4180
|
-
Tensor, the shape of the output tensor is :math:`(N, C, NEW\_H, NEW\_W)`.
|
|
4181
|
-
The data type is the same as the `input_x`.
|
|
4182
|
-
|
|
4183
|
-
Raises:
|
|
4184
|
-
TypeError: If `size` is neither tuple nor list.
|
|
4185
|
-
TypeError: If `align_corners` is not a bool.
|
|
4186
|
-
ValueError: If length of `size` is not equal to 2.
|
|
4187
|
-
|
|
4188
|
-
Supported Platforms:
|
|
4189
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
4190
|
-
|
|
4191
|
-
Examples:
|
|
4192
|
-
>>> import numpy as np
|
|
4193
|
-
>>> import mindspore
|
|
4194
|
-
>>> from mindspore import Tensor, ops
|
|
4195
|
-
>>> input_tensor = Tensor(np.array([[[[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]]]), mindspore.float32)
|
|
4196
|
-
>>> size = (2, 2)
|
|
4197
|
-
>>> output = ops.ResizeNearestNeighbor(size=size)(input_tensor)
|
|
4198
|
-
>>> print(output)
|
|
4199
|
-
[[[[-0.1 0.3]
|
|
4200
|
-
[ 0.4 0.5]]]]
|
|
4201
|
-
"""
|
|
4202
|
-
|
|
4203
|
-
@prim_attr_register
|
|
4204
|
-
def __init__(self, size, align_corners=False):
|
|
4205
|
-
"""Initialize ResizeNearestNeighbor"""
|
|
4206
|
-
validator.check_value_type("size", size, [tuple, list], self.name)
|
|
4207
|
-
validator.check_value_type("align_corners", align_corners, [bool], self.name)
|
|
4208
|
-
validator.check_equal_int(len(size), 2, "length of size", self.name)
|
|
4209
|
-
for i, value in enumerate(size):
|
|
4210
|
-
validator.check_non_negative_int(value, f'{i}th value of size', self.name)
|
|
4211
|
-
self.init_prim_io_names(inputs=['image_in'], outputs=['image_out'])
|
|
4212
|
-
|
|
4213
|
-
|
|
4214
|
-
class ResizeNearestNeighborV2(Primitive):
|
|
4215
|
-
r"""
|
|
4216
|
-
Resizes the input tensor to specific size by using the nearest neighbor algorithm.
|
|
4217
|
-
|
|
4218
|
-
The nearest neighbor algorithm selects the value of the nearest point and does not consider the
|
|
4219
|
-
-    values of neighboring points at all, yielding a piecewise-constant interpolant.
-
-    Args:
-        align_corners (bool, optional): If ``True`` , the centers of the 4 corner pixels of the input and output
-            tensors are aligned, preserving the values at the corner pixels. Default: ``False`` .
-        half_pixel_centers (bool, optional): Whether half pixel center. If set to ``True`` ,
-            `align_corners` should be False. Default: ``False`` .
-
-    Inputs:
-        - **x** (Tensor) - 4-D with shape :math:`(batch, channels, height, width)` .
-        - **size** (Tensor) - The new size for the images. A 1-D int32 Tensor
-          of 2 elements: [`new_height, new_width`].
-
-    Outputs:
-        - **y** (Tensor) - The resized images. A 4-D with shape
-          :math:`(batch, channels, new\_height, new\_width)`. It has the same dtype as `x`.
-
-    Raises:
-        TypeError: If `x` or `size` is not a Tensor.
-        TypeError: If the data type of `size` is not int32.
-        TypeError: If `align_corners` or `half_pixel_centers` is not bool.
-        ValueError: If any value of `size` is non positive.
-        ValueError: If the dimension of `x` is not 4.
-        ValueError: If the dimension of `size` is not 1.
-        ValueError: If the elements number of `size` is not 2.
-        ValueError: If attr `half_pixel_centers` and `align_corners` are True at the same time.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> from mindspore import dtype as mstype
-        >>> input_tensor = Tensor(np.ones((1, 1, 4, 4)), mstype.float32)
-        >>> size = Tensor([2, 2], mstype.int32)
-        >>> resize = ops.ResizeNearestNeighborV2()
-        >>> output = resize(input_tensor, size)
-        >>> print(output)
-        [[[[1. 1.]
-           [1. 1.]]]]
-        >>> print(output.shape)
-        (1, 1, 2, 2)
-    """
-
-    @prim_attr_register
-    def __init__(self, align_corners=False, half_pixel_centers=False):
-        """Initialize ResizeNearestNeighborV2"""
-        self.init_prim_io_names(inputs=['x', 'size'], outputs=['y'])
-        validator.check_bool(align_corners, 'align_corners', self.name)
-        validator.check_bool(half_pixel_centers, 'half_pixel_centers', self.name)
-
-
-class GatherNd(Primitive):
-    r"""
-    Gathers slices from a tensor by indices.
-
-    Refer to :func:`mindspore.ops.gather_nd` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - The target tensor to gather values.
-        - **indices** (Tensor) - The index tensor, with int32 or int64 data type.
-
-    Outputs:
-        Tensor, has the same type as `input_x` and the shape is indices_shape[:-1] + x_shape[indices_shape[-1]:].
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> op = ops.GatherNd()
-        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
-        >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
-        >>> output = op(input_x, indices)
-        >>> print(output)
-        [-0.1  0.5]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize GatherNd"""
-        self.init_prim_io_names(inputs=['input_x', 'indices'], outputs=['y'])
-
-
 class ScatterUpdate(Primitive):
     r"""
     Updates tensor values by using input indices and value.
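The GatherNd docstring removed above states the output-shape rule `indices_shape[:-1] + x_shape[indices_shape[-1]:]`; below is a minimal NumPy sketch of that rule (the NumPy phrasing is ours, not part of the diff):

```python
import numpy as np

x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]], dtype=np.float32)
indices = np.array([[0, 0], [1, 1]])

# Each row of `indices` is a coordinate prefix into `x`; the result shape is
# indices.shape[:-1] + x.shape[indices.shape[-1]:].
out = np.stack([x[tuple(i)] for i in indices])
print(out)        # [-0.1  0.5]
print(out.shape)  # (2,) == indices.shape[:-1] + x.shape[2:]
```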
@@ -4805,80 +2679,6 @@ class ScatterSub(Primitive):
         self.add_prim_attr('side_effect_mem', True)


-class Triu(Primitive):
-    """
-    Returns the upper triangular portion of the 2-D matrix or the set of matrices
-    in a batch. The remaining elements of the resulting Tensor are assigned a value of 0.
-    The upper triangular section of the matrix comprises of the
-    elements present on and above the main diagonal.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Args:
-        diagonal (int, optional): The index of diagonal. Default: ``0`` , indicating the main diagonal.
-
-    Inputs:
-        - **x** (Tensor) - The input tensor with shape :math:`(M, N, *)`
-          where :math:`*` means any number of additional dimensions.
-
-    Outputs:
-        - **y** (Tensor) - A tensor has the same shape and data type as input.
-
-    Raises:
-        TypeError: If `x` is not an Tensor.
-        TypeError: If `diagonal` is not an int.
-        ValueError: If the dimension of `input` is less than 2.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> triu = ops.Triu()
-        >>> result = triu(x)
-        >>> print(result)
-        [[ 1  2  3  4]
-         [ 0  6  7  8]
-         [ 0  0 12 13]
-         [ 0  0  0 17]]
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> triu = ops.Triu(diagonal=1)
-        >>> result = triu(x)
-        >>> print(result)
-        [[ 0  2  3  4]
-         [ 0  0  7  8]
-         [ 0  0  0 13]
-         [ 0  0  0  0]]
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> triu = ops.Triu(diagonal=-1)
-        >>> result = triu(x)
-        >>> print(result)
-        [[ 1  2  3  4]
-         [ 5  6  7  8]
-         [ 0 11 12 13]
-         [ 0  0 16 17]]
-    """
-
-    @prim_attr_register
-    def __init__(self, diagonal=0):
-        """Initialize Triu"""
-        validator.check_value_type("diagonal", diagonal, [int], self.name)
-        self.diagonal = diagonal
-        self.init_prim_io_names(inputs=['x'], outputs=['y'])
-
-
 class ScatterMul(_ScatterOpDynamic):
     r"""
     Updates the value of the input tensor through the multiply operation.
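The Triu docstring deleted above illustrates the `diagonal` offset with three examples; `np.triu`'s `k` argument follows the same convention, so the behaviour can be sketched outside the framework (treating the two as equivalent is our assumption):

```python
import numpy as np

x = np.array([[ 1,  2,  3,  4],
              [ 5,  6,  7,  8],
              [10, 11, 12, 13],
              [14, 15, 16, 17]])
print(np.triu(x, k=0))   # keep the main diagonal and everything above it
print(np.triu(x, k=1))   # strictly above the main diagonal
print(np.triu(x, k=-1))  # also keep the first sub-diagonal
```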
@@ -5933,59 +3733,6 @@ class BatchToSpaceNDV2(Primitive):
         self.add_prim_attr('origin_format', 'NHWC')


-class BroadcastTo(PrimitiveWithCheck):
-    """
-    Broadcasts input tensor to a given shape.
-
-    Refer to :func:`mindspore.ops.broadcast_to` for more details.
-
-    Args:
-        shape (tuple): The target shape to broadcast. Can be fully specified, or have -1 in one position
-            where it will be substituted by the input tensor's shape in that position, see example.
-
-    Inputs:
-        - **input_x** (Tensor) - The input tensor of any dimension.
-
-    Outputs:
-        Tensor, with the given `shape` and the same data type as `input_x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> shape = (2, 3)
-        >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
-        >>> output = ops.BroadcastTo(shape=shape)(x)
-        >>> print(output)
-        [[1. 2. 3.]
-         [1. 2. 3.]]
-        >>>
-        >>> shape = (-1, 2)
-        >>> x = Tensor(np.array([[1], [2]]).astype(np.float32))
-        >>> output = ops.BroadcastTo(shape=shape)(x)
-        >>> print(output)
-        [[1. 1.]
-         [2. 2.]]
-    """
-
-    @prim_attr_register
-    def __init__(self, shape):
-        """Initialize BroadcastTo"""
-        validator.check_value_type("shape", shape, (tuple), self.name)
-        validator.check("dimension of x", len(shape), "", 0, validator.GT, self.name)
-        for ix, i in enumerate(shape):
-            validator.check_value_type('target shape index -> ' + str(ix), i, [int], self.name)
-            validator.check("shape element", i, "shape element min limit", -1, validator.GE, self.name)
-        self.shape = shape
-
-    def infer_value(self, x):
-        if x is None:
-            return None
-        return Tensor(np.broadcast_to(x.asnumpy(), self.shape))
-
-
 class Meshgrid(PrimitiveWithInfer):
     """
     Generates coordinate matrices from given coordinate tensors.
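The deleted BroadcastTo class documents a `-1` placeholder that is substituted by the input's size at that (trailing-aligned) position; a minimal sketch of that substitution on top of `np.broadcast_to` (the helper name is ours, and plain NumPy has no `-1` form):

```python
import numpy as np

def broadcast_to_sketch(x, shape):
    # Replace a -1 entry with the input's dimension at that position,
    # then fall through to the ordinary broadcast.
    offset = len(shape) - x.ndim
    resolved = tuple(x.shape[i - offset] if s == -1 else s
                     for i, s in enumerate(shape))
    return np.broadcast_to(x, resolved)

x = np.array([[1.0], [2.0]], dtype=np.float32)  # shape (2, 1)
print(broadcast_to_sketch(x, (-1, 2)))          # [[1. 1.] [2. 2.]]
```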
@@ -5993,13 +3740,13 @@ class Meshgrid(PrimitiveWithInfer):
     Refer to :func:`mindspore.ops.meshgrid` for more details.

     Args:
-        indexing (str, optional): Cartesian
-            matrix
+        indexing (str, optional): Cartesian ``'xy'`` or
+            matrix ``'ij'`` indexing of output. In the 2-D case with
             inputs of length `M` and `N`, the outputs are of shape :math:`(N, M)`
-            for 'xy' indexing and :math:`(M, N)` for 'ij' indexing. In the 3-D
+            for ``'xy'`` indexing and :math:`(M, N)` for ``'ij'`` indexing. In the 3-D
             case with inputs of length `M`, `N` and `P`, outputs are of shape
-            :math:`(N, M, P)` for 'xy' indexing and :math:`(M, N, P)` for 'ij' indexing.
-            Default: 'xy'
+            :math:`(N, M, P)` for ``'xy'`` indexing and :math:`(M, N, P)` for ``'ij'`` indexing.
+            Default: ``'xy'``.

     Inputs:
         - **input** (Union[tuple]) - A Tuple of N 1-D Tensor objects.
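A quick NumPy check of the `'xy'`/`'ij'` shapes promised in the corrected Args text above (`np.meshgrid` uses the same indexing convention, which here is an observation rather than a claim about the MindSpore kernel):

```python
import numpy as np

a = np.arange(3)  # M = 3
b = np.arange(4)  # N = 4

X, Y = np.meshgrid(a, b, indexing='xy')
print(X.shape)    # (4, 3) -> (N, M) under 'xy'

I_, J_ = np.meshgrid(a, b, indexing='ij')
print(I_.shape)   # (3, 4) -> (M, N) under 'ij'
```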
@@ -6226,7 +3973,7 @@ class EditDistance(Primitive):
         >>> import numpy as np
         >>> from mindspore import Tensor
         >>> import mindspore.nn as nn
-        >>>
+        >>> from mindspore import ops
         >>> class EditDistance(nn.Cell):
         ...     def __init__(self, hypothesis_shape, truth_shape, normalize=True):
         ...         super(EditDistance, self).__init__()
@@ -6353,57 +4100,15 @@ class EmbeddingLookup(Primitive):
           Specifies the indices of elements of the original Tensor. Values can be out of range of `input_params`,
           and the exceeding part will be filled with 0 in the output. Values do not support negative and the result
           is undefined if values are negative. The data type should be int32 or int64.
-        - **offset** (int) - Specifies the offset value of this `input_params` slice. Thus the real indices
-          are equal to `input_indices` minus `offset`.
-
-    Outputs:
-        Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`. The data type is the same with `input_params`.
-
-    Raises:
-        TypeError: If dtype of `input_indices` is not int.
-        ValueError: If length of shape of `input_params` is greater than 2.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_params = Tensor(np.array([[8, 9], [10, 11], [12, 13], [14, 15]]), mindspore.float32)
-        >>> input_indices = Tensor(np.array([[5, 2], [8, 5]]), mindspore.int32)
-        >>> offset = 4
-        >>> output = ops.EmbeddingLookup()(input_params, input_indices, offset)
-        >>> print(output)
-        [[[10. 11.]
-          [ 0.  0.]]
-         [[ 0.  0.]
-          [10. 11.]]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize EmbeddingLookup."""
-        self.__setattr_flag__ = True
-        self.init_prim_io_names(inputs=['params', 'indices', 'offset'],
-                                outputs=['output'])
-        self.add_prim_attr('bprop_return_sparse', True)
-
-
-class GatherD(Primitive):
-    """
-    Gathers elements along an axis specified by dim.
-
-    Refer to :func:`mindspore.ops.gather_elements` for more details.
-
-    Inputs:
-        - **x** (Tensor) - The input tensor.
-        - **dim** (int) - The axis along which to index. It must be int32 or int64.
-        - **index** (Tensor) - The indices of elements to gather. It can be one of the following data types:
-          int32, int64. The value range of each index element is [-x_rank[dim], x_rank[dim]).
+        - **offset** (int) - Specifies the offset value of this `input_params` slice. Thus the real indices
+          are equal to `input_indices` minus `offset`.

     Outputs:
-        Tensor,
+        Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`. The data type is the same with `input_params`.
+
+    Raises:
+        TypeError: If dtype of `input_indices` is not int.
+        ValueError: If length of shape of `input_params` is greater than 2.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
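The restored `offset` semantics (real index = `input_indices` - `offset`, out-of-range rows filled with 0) can be sketched in NumPy; the masking details below are our reading of the docstring, not MindSpore code:

```python
import numpy as np

params = np.array([[8, 9], [10, 11], [12, 13], [14, 15]], dtype=np.float32)
indices = np.array([[5, 2], [8, 5]])
offset = 4

real = indices - offset                     # the "real indices" of the docstring
valid = (real >= 0) & (real < len(params))  # out-of-range rows become zeros
safe = np.clip(real, 0, len(params) - 1)
out = np.where(valid[..., None], params[safe], 0.0)
print(out)  # matches the docstring output restored above
```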
@@ -6412,32 +4117,24 @@ class GatherD(Primitive):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>>
-        >>>
-        >>>
-        >>> output = ops.
+        >>> input_params = Tensor(np.array([[8, 9], [10, 11], [12, 13], [14, 15]]), mindspore.float32)
+        >>> input_indices = Tensor(np.array([[5, 2], [8, 5]]), mindspore.int32)
+        >>> offset = 4
+        >>> output = ops.EmbeddingLookup()(input_params, input_indices, offset)
         >>> print(output)
-        [[
-
-
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize GatherD"""
-        self.init_prim_io_names(inputs=['x', 'dim', 'index'], outputs=['output'])
-
-
-class Identity(Primitive):
-    """
-    The `mindspore.ops.Identity` interface is deprecated, please use the :func:`mindspore.ops.deepcopy` instead.
-
-    Supported Platforms:
-        Deprecated
+        [[[10. 11.]
+          [ 0.  0.]]
+         [[ 0.  0.]
+          [10. 11.]]]
     """

     @prim_attr_register
     def __init__(self):
-
+        """Initialize EmbeddingLookup."""
+        self.__setattr_flag__ = True
+        self.init_prim_io_names(inputs=['params', 'indices', 'offset'],
+                                outputs=['output'])
+        self.add_prim_attr('bprop_return_sparse', True)


 class IdentityN(Primitive):
@@ -6478,72 +4175,6 @@ class IdentityN(Primitive):
         self.init_prim_io_names(inputs=['x'], outputs=['y'])


-class Range(PrimitiveWithCheck):
-    r"""
-    Creates a sequence of numbers that begins at `start` and extends by increments of
-    `delta` up to but not including `limit`.
-
-    Refer to :func:`mindspore.ops.range` for more details.
-
-    Args:
-        maxlen (int, optional): Memory that can fit `maxlen` many elements
-            will be allocated for the output. Optional, must be positive. Default: 1000000.
-            If the output has more than `maxlen` elements, a runtime error
-            will occur.
-
-    Inputs:
-        - **start** (Tensor) - A scalar Tensor. The first number in the sequence.
-        - **limit** (Tensor) - A scalar Tensor. Upper limit of the sequence, exclusive.
-        - **delta** (Tensor) - A scalar Tensor. Number that increments `start`.
-
-    Outputs:
-        A 1-D Tensor, with the same type as the inputs.
-
-    Supported Platforms:
-        ``GPU`` ``CPU``
-
-    Examples:
-        >>> from mindspore import Tensor, ops
-        >>> from mindspore import dtype as mstype
-        >>> start = Tensor(0, mstype.int32)
-        >>> limit = Tensor(10, mstype.int32)
-        >>> delta = Tensor(4, mstype.int32)
-        >>> output = ops.Range()(start, limit, delta)
-        >>> print(output)
-        [0 4 8]
-    """
-
-    @prim_attr_register
-    def __init__(self, maxlen=1000000):
-        self.init_prim_io_names(inputs=['start', 'limit', 'delta'], outputs=['output'])
-        validator.check_value_type("maxlen", maxlen, [int], self.name)
-        validator.check_positive_int(maxlen, "maxlen", self.name)
-        self.maxlen = maxlen
-        self.add_prim_attr('maxlen', maxlen)
-
-    def check_shape(self, start_shape, limit_shape, delta_shape):
-        if not is_shape_unknown(start_shape):
-            validator.check("start_shape", len(start_shape), "", 0, validator.EQ, self.name)
-        if not is_shape_unknown(limit_shape):
-            validator.check("limit_shape", len(limit_shape), "", 0, validator.EQ, self.name)
-        if not is_shape_unknown(delta_shape):
-            validator.check("delta_shape", len(delta_shape), "", 0, validator.EQ, self.name)
-
-    def check_dtype(self, start_dtype, limit_dtype, delta_dtype):
-        valid_dtypes = [mstype.int32, mstype.float32, mstype.int64, mstype.float64]
-        inputs = {"start": start_dtype, "limit": limit_dtype, "delta": delta_dtype}
-        validator.check_tensors_dtypes_same_and_valid(inputs, valid_dtypes, self.name)
-
-    def infer_value(self, start_value, limit_value, delat_value):
-        """Infer the value of input for Range."""
-        if start_value is not None and limit_value is not None and delat_value is not None:
-            start = start_value.asnumpy()
-            limit = limit_value.asnumpy()
-            delat = delat_value.asnumpy()
-            return Tensor(np.arange(start, limit, delat), dtype=start_value.dtype)
-        return None
-
-
 class RangeV2(Primitive):
     """
     Creates a sequence of numbers that begins at `start`, ends at `limit` but not including `limit`
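The Range primitive deleted above deferred to `np.arange` in its own `infer_value`, so the produced sequence can be sketched directly (equivalence to the deleted on-device kernel is an assumption):

```python
import numpy as np

start, limit, delta = 0, 10, 4
print(np.arange(start, limit, delta))  # [0 4 8], as in the removed example
```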
@@ -6598,46 +4229,6 @@ class RangeV2(Primitive):
         validator.check_positive_int(maxlen, "maxlen", self.name)


-class MaskedFill(Primitive):
-    """
-    Fills elements with value where mask is True.
-
-    Note:
-        If `value` is a floating-point number of Python, it will be converted to float32 later by default.
-        In this case, if `input_x` is a float16 Tensor, it will be converted to float32 for calculation,
-        and the result type will be converted back to float16 on the CPU and Ascend platforms, which may
-        cause the performance penalty. A TypeError may be raised on the GPU platform. Therefore,
-        it is recommended that 'value' should use a Tensor with the same dtype as `input_x`.
-
-    Refer to :func:`mindspore.ops.masked_fill` for more details.
-
-    Inputs:
-        - **input** (Tensor) - The input Tensor.
-        - **mask** (Tensor[bool]) - The boolean mask.
-        - **value** (Union[float, Tensor]) - The value to fill in with, which dtype is the same as `input`.
-
-    Outputs:
-        Tensor, has the same type and shape as `input`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
-        >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
-        >>> output = ops.MaskedFill()(input, mask, 0.5)
-        >>> print(output)
-        [0.5 0.5 3.  0.5]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(inputs=['input', 'mask', 'value'], outputs=['output'])
-
6641
4232
|
class MaskedScatter(Primitive):
|
|
6642
4233
|
"""
|
|
6643
4234
|
Updates the value in the input with value in `updates` according to the `mask`.
|
|
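The deleted MaskedFill documents a simple masked select-and-replace; a one-line NumPy sketch reproducing its example output (treating `np.where` as the equivalent is our assumption; the docstring itself points at `mindspore.ops.masked_fill` as the functional form):

```python
import numpy as np

x = np.array([1., 2., 3., 4.], dtype=np.float32)
mask = np.array([True, True, False, True])
print(np.where(mask, np.float32(0.5), x))  # [0.5 0.5 3.  0.5]
```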
@@ -6684,107 +4275,6 @@ class MaskedScatter(Primitive):
         self.init_prim_io_names(inputs=['x', 'mask', 'updates'], outputs=['y'])


-class MaskedSelect(PrimitiveWithCheck):
-    """
-    Returns a new 1-D Tensor which indexes the `x` tensor according to the boolean `mask`.
-    The shapes of the `mask` tensor and the `x` tensor don't need to match, but they must be broadcastable.
-
-    Inputs:
-        - **x** (Tensor) - Input Tensor of any dimension.
-        - **mask** (Tensor[bool]) - Boolean mask Tensor, has the same shape as `x`.
-
-    Outputs:
-        A 1-D Tensor, with the same type as x.
-
-    Raises:
-        TypeError: If `x` or `mask` is not a Tensor.
-        TypeError: If dtype of `mask` is not bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int32)
-        >>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
-        >>> output = ops.MaskedSelect()(x, mask)
-        >>> print(output)
-        [1 3]
-        >>> x = Tensor(2.1, mindspore.float32)
-        >>> mask = Tensor(True, mindspore.bool_)
-        >>> output = ops.MaskedSelect()(x, mask)
-        >>> print(output)
-        [2.1]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(inputs=['x', 'mask'], outputs=['output'])
-
-    def check_shape(self, x_shape, mask_shape):
-        get_broadcast_shape(x_shape, mask_shape, self.name, arg_name1="x", arg_name2="mask")
-
-    def check_dtype(self, x_dtype, mask_dtype):
-        validator.check_tensor_dtype_valid('mask', mask_dtype, [mstype.bool_], self.name)
-        validator.check_tensor_dtype_valid('x', x_dtype, (mstype.bool_,) + mstype.number_type, self.name)
-
-
-class SearchSorted(Primitive):
-    """
-    Returns the indices correspond to the positions where the given numbers in `values` should be inserted
-    into `sorted_sequence` so that the order of the sequence is maintained.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Refer to :func:`mindspore.ops.searchsorted` for more details.
-
-    Args:
-        dtype (:class:`mindspore.dtype`, optional): Output data type. An optional data type of
-            ``mstype.int32`` and ``mstype.int64``. Default: ``mstype.int64``.
-        right (bool, optional): Search Strategy. If ``True`` , return the last suitable index found;
-            if ``False`` , return the first such index. Default: ``False`` .
-
-    Inputs:
-        - **sorted_sequence** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R-1, x_R)` or `(x_1)`.
-          It must contain a monotonically increasing sequence on the innermost dimension.
-        - **values** (Tensor) - The value that should be inserted.
-          The shape of tensor is :math:`(x_1, x_2, ..., x_R-1, x_S)`.
-
-    Outputs:
-        Tensor containing the indices from the innermost dimension of `sorted_sequence` such that,
-        if insert the corresponding value in the `values` tensor, the order of `sorted_sequence` would be preserved,
-        whose datatype is int32 if out_int32 is True, otherwise int64, and shape is the same as the shape of `values`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> sorted_sequence = Tensor(np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]]), mindspore.float32)
-        >>> values = Tensor(np.array([[3, 6, 9], [3, 6, 9]]), mindspore.float32)
-        >>> output = ops.SearchSorted()(sorted_sequence, values)
-        >>> print(output)
-        [[2 4 5]
-         [1 2 4]]
-    """
-
-    @prim_attr_register
-    def __init__(self, dtype=mstype.int64, right=False):
-        """Initialize SearchSorted"""
-        validator.check_value_type("dtype", dtype, [mstype.Type], self.name)
-        valid_values = (mstype.int64, mstype.int32)
-        self.dtype = validator.check_type_name(
-            "dtype", dtype, valid_values, self.name)
-        validator.check_value_type('right', right, [bool], self.name)
-        self.init_prim_io_names(
-            inputs=['sorted_sequence', 'values'], outputs=['output'])
-
-
 class _TensorScatterOp(PrimitiveWithInfer):
     """
     Defines TensorScatter Base Operators
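The SearchSorted primitive deleted above maps, row by row, onto `np.searchsorted`; a sketch reproducing its docstring example (reading `right=False` as `side='left'` is our interpretation):

```python
import numpy as np

sorted_sequence = np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]])
values = np.array([[3, 6, 9], [3, 6, 9]])
out = np.stack([np.searchsorted(row, v, side='left')  # side='right' <-> right=True
                for row, v in zip(sorted_sequence, values)])
print(out)  # [[2 4 5]
            #  [1 2 4]]
```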
@@ -6889,43 +4379,15 @@ class TensorScatterUpdate(_TensorScatterOp):
     def __init__(self):
         self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])

-    def _infer_specified_value(self, input_x_value, indices_value, updates_value):
-        """Calculate min/max value for output of TensorScatterUpdate op"""
-        if isinstance(input_x_value, tuple):
-            input_x_value = list(input_x_value)
-        if isinstance(input_x_value, (Tensor, Tensor_)):
-            input_x_value = input_x_value.asnumpy()
-        if indices_value is None or updates_value is None:
-            return None
-        if isinstance(indices_value, (Tensor, Tensor_)):
-            indices_value = indices_value.asnumpy()
-        if isinstance(updates_value, (Tensor, Tensor_)):
-            updates_value = updates_value.asnumpy()
-        input_x = np.array(input_x_value)
-        updates = np.array(updates_value)
-        for i, indice in enumerate(indices_value):
-            input_x[indice] = updates[i]
-        output = tuple(input_x.tolist())
-        return output
-
-    def _infer_min_value(self, input_x_value, indices_value, updates_value):
-        return self._infer_specified_value(input_x_value, indices_value, updates_value)
-
-    def _infer_max_value(self, input_x_value, indices_value, updates_value):
-        return self._infer_specified_value(input_x_value, indices_value, updates_value)
-
     def infer_dtype(self, input_x_dtype, indices_dtype, updates_dtype):
         validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32, mstype.int64], self.name)
         args = {"input_x": input_x_dtype, "updates": updates_dtype}
         validator.check_tensors_dtypes_same_and_valid(args, (mstype.bool_,) + mstype.number_type, self.name)
         return input_x_dtype

-    def _infer_shape_value(self, input_x_value, indices_value, updates_value):
-        return self._infer_specified_value(input_x_value, indices_value, updates_value)
-

 class TensorScatterMax(Primitive):
-    """
+    r"""
     By comparing the value at the position indicated by `indices` in `x` with the value in the `updates`,
     the value at the index will eventually be equal to the largest one to create a new tensor.

@@ -6936,7 +4398,7 @@ class TensorScatterMax(Primitive):
         - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
           The rank must be at least 2.
         - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
-          and updates.shape should be equal to indices.shape[:-1] +
+          and updates.shape should be equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.

     Outputs:
         Tensor, has the same shape and type as `input_x`.
@@ -6973,7 +4435,7 @@ class TensorScatterMax(Primitive):


 class TensorScatterMin(Primitive):
-    """
+    r"""
     By comparing the value at the position indicated by `indices` in `input_x` with the value in the `updates`,
     the value at the index will eventually be equal to the smallest one to create a new tensor.

@@ -6984,7 +4446,7 @@ class TensorScatterMin(Primitive):
         - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
           The rank must be at least 2.
         - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
-          and updates.shape should be equal to indices.shape[:-1] +
+          and updates.shape should be equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.

     Outputs:
         Tensor, has the same shape and type as `input_x`.
@@ -7029,7 +4491,7 @@ class TensorScatterSub(Primitive):
       instead of input `Parameter`.

     .. math::
-        output[indices] = input\_x
+        output\left [indices \right ] = input\_x - update

     Refer to :func:`mindspore.ops.tensor_scatter_sub` for more details.

@@ -7133,7 +4595,7 @@ class TensorScatterMul(_TensorScatterOp):
     The updates are applied on output `Tensor` instead of input `Parameter`.

     .. math::
-        output[indices] = input\_x
+        output\left [indices \right ] = input\_x \times update

     Refer to :func:`mindspore.ops.tensor_scatter_mul` for more details.

@@ -7142,7 +4604,7 @@ class TensorScatterMul(_TensorScatterOp):
         - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
           The rank must be at least 2.
         - **updates** (Tensor) - The tensor to update the input tensor, has the same type as `input_x`,
-          and the shape of `updates` should be equal to indices.shape[:-1] +
+          and the shape of `updates` should be equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.

     Outputs:
         Tensor, has the same shape and type as `input_x`.
@@ -7179,7 +4641,7 @@ class TensorScatterMul(_TensorScatterOp):


 class TensorScatterDiv(_TensorScatterOp):
-    """
+    r"""
     Creates a new tensor by dividing the values from the positions in `input_x` indicated by
     `indices`, with values from `updates`. When divided values are provided for the same
     index, the result of the update will be to divided these values respectively. Except that
@@ -7192,7 +4654,7 @@ class TensorScatterDiv(_TensorScatterOp):
         - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
           The rank must be at least 2.
         - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
-          and updates.shape should be equal to indices.shape[:-1] +
+          and updates.shape should be equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.

     Outputs:
         Tensor, has the same shape and type as `input_x`.
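The corrected formulas above subtract (or multiply) `updates` into a copy of `input_x`, and duplicate indices accumulate; a minimal NumPy sketch of the subtractive case, with example values of our choosing:

```python
import numpy as np

input_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]], dtype=np.float32)
indices = np.array([[0, 0], [0, 0]])   # duplicate index on purpose
updates = np.array([1.0, 2.2], dtype=np.float32)

out = input_x.copy()                   # the input itself stays untouched
np.subtract.at(out, tuple(indices.T), updates)
print(out[0, 0])                       # -0.1 - 1.0 - 2.2 = -3.3
```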
@@ -7372,7 +4834,7 @@ class SplitV(Primitive):
         self.init_prim_io_names(inputs=['input_x'], outputs=['output'])


-class TensorScatterElements(Primitive):
+class TensorScatterElements(TensorScatterElementsExt):
     """
     Write all elements in `updates` to the index specified by `indices` in `input_x` according to the reduction
     operation specified by `reduction`.
@@ -7387,6 +4849,9 @@ class TensorScatterElements(Primitive):
     .. warning::
         This is an experimental API that is subject to change or deletion.

+    Note:
+        The backward is supported only for the case `updates.shape == indices.shape`.
+
     Args:
         axis (int, optional): Specify which axis to do scatter operation. Default: ``0`` .
         reduction (str, optional): Which reduction operation to scatter, default is ``"none"`` . Other option: "add".
@@ -7396,9 +4861,7 @@ class TensorScatterElements(Primitive):
         - **indices** (Tensor) - The index of `input_x` to do scatter operation whose data type must be int32 or
           int64. It has the same rank as `data`. And accepted range is [-s, s) where s is the size along axis.
         - **updates** (Tensor) - The tensor doing the scatter operation with `data`,
-          it has the same
-        - **update** (Tensor) - The tensor doing the scatter operation with `data`,
-          it has the same type as `data` and the same shape as `indices`.
+          it has the same type as `data`.

     Outputs:
         Tensor, has the same shape and type as `data`.
@@ -7408,7 +4871,7 @@ class TensorScatterElements(Primitive):

     Examples:
         >>> import mindspore
-        >>>
+        >>> from mindspore import ops
         >>> from mindspore import Tensor
         >>> op = ops.TensorScatterElements(0, "none")
         >>> data = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
@@ -7420,7 +4883,7 @@ class TensorScatterElements(Primitive):
         [ 0.0 5.0 0.0]
         [ 7.0 0.0 0.0]]
         >>> import mindspore as ms
-        >>>
+        >>> from mindspore import ops
        >>> from mindspore import Tensor
         >>> op = ops.TensorScatterElements(1, "add")
         >>> data = Tensor(np.array([[1, 2, 3, 4, 5]]), mindspore.float32)
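The re-parented TensorScatterElements keeps the scatter-along-axis semantics described above (`axis=0` with `reduction="none"` overwrites); a NumPy sketch with hypothetical index values chosen to reproduce the docstring's output fragment:

```python
import numpy as np

data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
indices = np.array([[1, 0, 2], [0, 2, 1]])  # hypothetical example values
updates = np.zeros((2, 3), dtype=np.float32)

out = data.copy()
cols = np.arange(indices.shape[1])
for r in range(indices.shape[0]):
    out[indices[r], cols] = updates[r]      # "none": plain overwrite along axis 0
print(out)  # the last two rows match the docstring fragment quoted above
```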
@@ -7433,77 +4896,17 @@ class TensorScatterElements(Primitive):

     @prim_attr_register
     def __init__(self, axis=0, reduction="none"):
-
-        validator.check_value_type("axis", axis, [int], self.name)
-        validator.check_value_type("reduction", reduction, [str], self.name)
-        validator.check_string(reduction, ["none", "add"], "reduction", self.name)
-        self.init_prim_io_names(inputs=['data', 'indices', 'updates'], outputs=['y'])
-        target = context.get_context("device_target")
-        if reduction != 'none' and target.lower() == "ascend":
-            raise ValueError(f"For '{self.name}', "
-                             f"Currently Ascend device_target only support `reduction`='none', "
-                             f"but got {reduction}")
+        super().__init__(axis, reduce=reduction)


 class ExtractVolumePatches(Primitive):
-
-
-    "depth" dimension is the second dim of output.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Args:
-        kernel_size (Union[int, tuple[int], list[int]]): A list of ints which's length is 3 or 5.
-            The size of the sliding window for each dimension of input. Must be: :math:`[1, 1, k_d, k_h, k_w]` or
-            :math:`[k_d, k_h, k_w]`. If :math:`k_d = k_h = k_w`, you can enter an integer.
-        strides (Union[int, tuple[int], list[int]]): A list of ints which's length is 3 or 5.
-            How far the centers of two consecutive patches are in input. Must be: :math:`[1, 1, s_d, s_h, s_w]` or
-            :math:`[s_d, s_h, s_w]`. If :math:`s_d = s_h = s_w`, you can enter an integer.
-        padding (str): A string from: ``"SAME"`` , ``"VALID"`` . The type of padding algorithm to use.
-
-    Inputs:
-        - **input_x** (Tensor) - A Tensor. 5-D Tensor with shape :math:`(x_n, x_c, x_d, x_h, x_w)`.
-
-    Outputs:
-        Tensor, has the same type as input.
-        If padding is "VALID", the shape is :math:`(x_n, k_d * k_h * k_w * x_c, 1 + (x_d - k_d) / s_d,
-        1 + (x_h - k_h) / s_h, 1 + (x_w - k_w) / s_w)`; if padding is "SAME", the shape is :math:`(
-        x_n, k_d * k_h * k_w * x_c, (x_d + s_d - 1) / s_d, (x_h + s_h - 1) / s_h, (x_w + s_w - 1) / s_w)`.
-
-    Raises:
-        TypeError: If kernel_size or strides is not a list, a tuple or an int.
-        TypeError: If input_x is not a tensor.
-        TypeError: If padding is not str.
-        ValueError: If the length of kernel_size is neither 3 nor 5 and kernel_size is not an integer.
-        ValueError: If the length of strides is neither 3 nor 5 and strides is not an integer.
-        ValueError: If padding is neither ``"VALID"`` nor ``"SAME"`` .
-        ValueError: If elements of kernel_size or strides are not positive integer.
-        ValueError: If input_x is not a tensor in dimension 5.
-        ValueError: If input_x's shape has zero.
-        ValueError: If one of kernel_size or strides' first two numbers is not 1.
-        ValueError: If padding = "VALID" and :math:`input\_x - kernel\_size` is less than 0 in d, h or w dimension.
-        ValueError: If padding = "SAME" and :math:`padding\_needed = ((input\_x + strides - 1) / strides - 1) *
-        strides + kernel\_size - input\_x` is less than 0 in d, h or w dimension.
-        ValueError: If x_h is not 1 or x_w is not 1 and :math:`x_w + padding\_needed - k_w - s_w` is less than 0.
-        ValueError: If :math:`x_d * x_h * x_w` is greater than 2048.
+    """
+    `ops.ExtractVolumePatches` is deprecated from version 2.3 and will be removed in a future version.

     Supported Platforms:
-
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> from mindspore import dtype as mstype
-        >>> kernel_size = (1, 1, 2, 2, 2)
-        >>> strides = (1, 1, 1, 1, 1)
-        >>> padding = "VALID"
-        >>> input_x = ops.Reshape()(Tensor(np.arange(1, 28), mstype.float16), (1, 1, 3, 3, 3))
-        >>> output_y = ops.ExtractVolumePatches(kernel_size, strides, padding)(input_x)
-        >>> print(output_y.shape)
-        (1, 8, 2, 2, 2)
+        Deprecated
     """
-
+    @deprecated("2.3", "ops.ExtractVolumePatches", False)
     @prim_attr_register
     def __init__(self, kernel_size, strides, padding):
         validator.check_value_type("kernel_size", kernel_size, (int, list, tuple), self.name)
@@ -7678,7 +5081,7 @@ class LowerBound(Primitive):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor
-        >>>
+        >>> from mindspore import ops
         >>> lowerbound = ops.LowerBound(out_type = mindspore.int32)
         >>> sorted_x = Tensor(np.arange(12).reshape(3, 4).astype(np.int8))
         >>> values = Tensor(np.array([[3], [4], [8]]).astype(np.int8))
@@ -7731,7 +5134,7 @@ class UpperBound(Primitive):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor
-        >>>
+        >>> from mindspore import ops
         >>> upperbound = ops.UpperBound(out_type = mindspore.int32)
         >>> sorted_x = Tensor(np.arange(12).reshape(3, 4).astype(np.int8))
         >>> values = Tensor(np.array([[3], [6], [9]]).astype(np.int8))
@@ -7750,100 +5153,6 @@ class UpperBound(Primitive):
         self.init_prim_io_names(inputs=['sorted_x', 'values'], outputs=['y'])


-class Cummax(Primitive):
-    """
-    Returns the cumulative maximum of elements and the index.
-
-    Refer to :func:`mindspore.ops.cummax` for more details.
-
-    Args:
-        axis (int): The axis to accumulate the tensor's value. Must be in the range [-rank(input), rank(input)).
-
-    Inputs:
-        - **input** (Tensor) - The input tensor.
-
-    Outputs:
-        A tuple of 2 Tensors(values, indices), containing the cumulative maximum of elements and the index,
-        The shape of each output tensor is the same as input `input`.
-
-    Supported Platforms:
-        ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor
-        >>> import mindspore.ops as ops
-        >>> cummax = ops.Cummax(axis=0)
-        >>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
-        >>> output = cummax(x)
-        >>> print(output[0])
-        [[ 3.  4.  6. 10.]
-         [ 3.  6.  7. 10.]
-         [ 4.  6.  8. 10.]
-         [ 4.  6.  8. 10.]]
-        >>> print(output[1])
-        [[0 0 0 0]
-         [0 1 1 0]
-         [2 1 2 0]
-         [2 1 2 0]]
-    """
-
-    @prim_attr_register
-    def __init__(self, axis):
-        """Initialize Cummax"""
-        validator.check_value_type("axis", axis, [int], self.name)
-        self.init_prim_io_names(inputs=['x'], outputs=['y', 'indices'])
-        self.add_prim_attr("dim", axis)
-
-
-class RightShift(Primitive):
-    r"""
-    Shift the value of each position of Tensor `input_x` to the right by corresponding bits in Tensor `input_y`.
-    The inputs are two tensors, dtypes of them must be consistent, and the
-    shapes of them could be broadcast.
-
-    .. math::
-
-        \begin{aligned}
-        &out_{i} =x_{i} >> y_{i}
-        \end{aligned}
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Inputs:
-        - **input_x** (Tensor) - The target tensor, will be shifted to the right
-          by `input_y` bits element-wise. Support all int and uint types.
-        - **input_y** (Tensor) - Number of bits shifted, the tensor must have the same type as `input_x`.
-
-    Outputs:
-        - **output** (Tensor) - The output tensor, has the same type as `input_x`.
-
-    Raises:
-        TypeError: If `input_x` or `input_y` is not tensor.
-        TypeError: If `input_x` and `input_y` could not be broadcast.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> rightshift = ops.RightShift()
-        >>> input_x = Tensor(np.array([1, 2, 3]).astype(np.uint8))
-        >>> input_y = Tensor(np.array([1, 1, 1]).astype(np.uint8))
-        >>> output = rightshift(input_x, input_y)
-        >>> print(output)
-        [0 1 1]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize RightShift."""
-        self.init_prim_io_names(inputs=['input_x', 'input_y'], outputs=['output'])
-
-
 class LogSpace(Primitive):
     r"""
     Generates a 1-D Tensor with a length of steps. The tensor's
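Two of the primitives deleted in this hunk have direct NumPy analogues: the Cummax values are a running maximum along the axis, and RightShift is an element-wise `>>` with broadcasting (both equivalences are our assumption, checked against the removed examples):

```python
import numpy as np

# Cummax values: running maximum along axis 0.
x = np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]],
             dtype=np.float32)
print(np.maximum.accumulate(x, axis=0))  # matches output[0] in the removed example

# RightShift: element-wise x >> y.
print(np.right_shift(np.array([1, 2, 3], dtype=np.uint8),
                     np.array([1, 1, 1], dtype=np.uint8)))  # [0 1 1]
```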
@@ -7911,46 +5220,6 @@ class LogSpace(Primitive):
         self.init_prim_io_names(inputs=['start', 'end'], outputs=['y'])


-class NonZero(Primitive):
-    """
-    Return a tensor of the positions of all non-zero values.
-
-    Refer to :func:`mindspore.ops.nonzero` for more details.
-
-    Inputs:
-        - **x** (Tensor) - The input Tensor, its rank should be greater than or eaqual to 1.
-
-    Outputs:
-        - **y** (Tensor), 2-D Tensor of data type int64.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor
-        >>> from mindspore.ops import NonZero
-        >>> x = Tensor(np.array([[[1, 0], [-5, 0]]]), mindspore.int32)
-        >>> nonzero = NonZero()
-        >>> output = nonzero(x)
-        >>> print(output)
-        [[0 0 0]
-         [0 1 0]]
-        >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
-        >>> nonzero = NonZero()
-        >>> output = nonzero(x)
-        >>> print(output)
-        [[0]
-         [2]
-         [4]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(inputs=['x'], outputs=['y'])
-
-
 class Tril(Primitive):
     """
     Returns the lower triangular portion of the 2-D matrix or the set of matrices
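The deleted NonZero returns one row of coordinates per non-zero element; `np.argwhere` produces the same 2-D layout (our observation, matching the removed example):

```python
import numpy as np

x = np.array([1, 0, 2, 0, 3], dtype=np.int32)
print(np.argwhere(x != 0))  # [[0]
                            #  [2]
                            #  [4]]
```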
@@ -7963,7 +5232,7 @@ class Tril(Primitive):

     Args:
         diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: ``0`` ,
-            indicating the main
+            indicating the main diagonal.

     Inputs:
         - **x** (Tensor) - The input tensor with shape :math:`(M, N, *)`
@@ -8713,7 +5982,7 @@ class TopK(Primitive):

     .. math::

-        values.shape = indices.shape = input.shape[:-1] + [k]
+        values.shape = indices.shape = input.shape[:-1] + [k]

     If the two compared elements are the same, the one with the smaller index value is returned first.
