mindspore 1.10.0__cp37-none-any.whl → 2.0.0rc1__cp37-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +9064 -0
- mindspore/__init__.py +9 -4
- mindspore/_akg/akg/composite/build_module.py +11 -0
- mindspore/_akg/akg/config/repository_cuda.json +11 -0
- mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -3
- mindspore/_c_dataengine.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/builtin_operations.py +32 -4
- mindspore/_extends/graph_kernel/model/graph_split.py +66 -222
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +12 -9
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +119 -26
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -50
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -6
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -25
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -27
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +17 -2
- mindspore/_extends/parse/parser.py +193 -34
- mindspore/_extends/parse/resources.py +7 -8
- mindspore/_extends/parse/standard_method.py +1780 -435
- mindspore/_extends/parse/trope.py +3 -1
- mindspore/_mindspore_offline_debug.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +53 -58
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/adasum.py +3 -2
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +46 -26
- mindspore/boost/dim_reduce.py +6 -5
- mindspore/boost/grad_accumulation.py +2 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +11 -10
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +57 -0
- mindspore/common/api.py +582 -297
- mindspore/common/dtype.py +66 -18
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +38 -1
- mindspore/common/jit_config.py +25 -13
- mindspore/common/mutable.py +53 -24
- mindspore/common/parameter.py +60 -37
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +927 -0
- mindspore/common/tensor.py +1627 -3900
- mindspore/communication/__init__.py +10 -5
- mindspore/communication/_comm_helper.py +78 -214
- mindspore/communication/_hccl_management.py +2 -1
- mindspore/communication/management.py +136 -47
- mindspore/config/op_info.config +501 -1008
- mindspore/config/super_bar_config.json +512 -0
- mindspore/context.py +291 -56
- mindspore/dataset/__init__.py +12 -8
- mindspore/dataset/audio/__init__.py +9 -9
- mindspore/dataset/audio/transforms.py +1090 -228
- mindspore/dataset/audio/utils.py +87 -39
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +17 -15
- mindspore/dataset/core/config.py +246 -17
- mindspore/dataset/core/py_util_helpers.py +4 -3
- mindspore/dataset/core/validator_helpers.py +10 -10
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +9 -9
- mindspore/dataset/engine/datasets.py +648 -477
- mindspore/dataset/engine/datasets_audio.py +165 -167
- mindspore/dataset/engine/datasets_standard_format.py +93 -67
- mindspore/dataset/engine/datasets_text.py +492 -342
- mindspore/dataset/engine/datasets_user_defined.py +85 -50
- mindspore/dataset/engine/datasets_vision.py +1224 -699
- mindspore/dataset/engine/graphdata.py +134 -69
- mindspore/dataset/engine/iterators.py +50 -9
- mindspore/dataset/engine/offload.py +52 -31
- mindspore/dataset/engine/samplers.py +27 -24
- mindspore/dataset/engine/serializer_deserializer.py +14 -15
- mindspore/dataset/engine/validators.py +213 -52
- mindspore/dataset/text/__init__.py +10 -8
- mindspore/dataset/text/transforms.py +152 -57
- mindspore/dataset/text/utils.py +98 -49
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +4 -2
- mindspore/dataset/transforms/c_transforms.py +11 -13
- mindspore/dataset/transforms/py_transforms.py +2 -2
- mindspore/dataset/transforms/py_transforms_util.py +10 -0
- mindspore/dataset/transforms/transforms.py +13 -15
- mindspore/dataset/transforms/validators.py +7 -7
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/browse_dataset.py +13 -13
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +8 -7
- mindspore/dataset/vision/c_transforms.py +125 -126
- mindspore/dataset/vision/py_transforms.py +37 -37
- mindspore/dataset/vision/py_transforms_util.py +23 -20
- mindspore/dataset/vision/transforms.py +316 -315
- mindspore/dataset/vision/utils.py +313 -17
- mindspore/dataset/vision/validators.py +6 -6
- mindspore/default_config.py +0 -1
- mindspore/{compression → experimental}/__init__.py +6 -5
- mindspore/experimental/map_parameter.py +275 -0
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +70 -9
- mindspore/include/api/delegate.h +8 -1
- mindspore/include/api/dual_abi_helper.h +8 -24
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_group.h +68 -0
- mindspore/include/api/model_parallel_runner.h +17 -17
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +20 -4
- mindspore/include/api/status.h +7 -1
- mindspore/include/api/types.h +25 -21
- mindspore/include/api/visible.h +4 -0
- mindspore/include/c_api/model_c.h +5 -0
- mindspore/include/c_api/status_c.h +1 -1
- mindspore/include/dataset/config.h +1 -1
- mindspore/include/dataset/constants.h +14 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/include/dataset/vision.h +56 -117
- mindspore/include/dataset/vision_lite.h +102 -0
- mindspore/include/mindapi/base/type_id.h +42 -3
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libicudata.so.69 +0 -0
- mindspore/lib/libicui18n.so.69 +0 -0
- mindspore/lib/libicuuc.so.69 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/{libakg.so → plugin/cpu/libakg.so} +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/log.py +28 -28
- mindspore/mindrecord/common/exceptions.py +2 -4
- mindspore/mindrecord/filereader.py +19 -1
- mindspore/mindrecord/filewriter.py +250 -88
- mindspore/mindrecord/mindpage.py +13 -13
- mindspore/mindrecord/shardheader.py +15 -15
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +29 -29
- mindspore/mindrecord/tools/cifar100_to_mr.py +9 -9
- mindspore/mindrecord/tools/cifar10_to_mr.py +9 -9
- mindspore/mindrecord/tools/csv_to_mr.py +4 -4
- mindspore/mindrecord/tools/imagenet_to_mr.py +70 -65
- mindspore/mindrecord/tools/mnist_to_mr.py +41 -41
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/nn/__init__.py +1 -5
- mindspore/nn/cell.py +297 -234
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +17 -42
- mindspore/nn/layer/__init__.py +7 -4
- mindspore/nn/layer/activation.py +131 -88
- mindspore/nn/layer/basic.py +313 -613
- mindspore/nn/layer/channel_shuffle.py +103 -0
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +52 -6
- mindspore/nn/layer/conv.py +112 -43
- mindspore/nn/layer/dense.py +10 -9
- mindspore/nn/layer/embedding.py +36 -34
- mindspore/nn/layer/image.py +123 -27
- mindspore/nn/layer/math.py +108 -107
- mindspore/nn/layer/normalization.py +212 -366
- mindspore/nn/layer/padding.py +370 -42
- mindspore/nn/layer/pooling.py +1443 -219
- mindspore/nn/layer/rnn_cells.py +11 -16
- mindspore/nn/layer/rnns.py +38 -39
- mindspore/nn/layer/thor_layer.py +24 -25
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +9 -6
- mindspore/nn/loss/loss.py +678 -142
- mindspore/nn/metrics.py +53 -0
- mindspore/nn/optim/_dist_optimizer_registry.py +2 -2
- mindspore/nn/optim/ada_grad.py +8 -8
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +18 -14
- mindspore/nn/optim/adam.py +429 -87
- mindspore/nn/optim/adamax.py +5 -6
- mindspore/nn/optim/adasum.py +10 -8
- mindspore/nn/optim/asgd.py +7 -7
- mindspore/nn/optim/ftrl.py +81 -11
- mindspore/nn/optim/lamb.py +7 -8
- mindspore/nn/optim/lars.py +4 -4
- mindspore/nn/optim/lazyadam.py +82 -7
- mindspore/nn/optim/momentum.py +8 -7
- mindspore/nn/optim/optimizer.py +19 -10
- mindspore/nn/optim/proximal_ada_grad.py +6 -5
- mindspore/nn/optim/rmsprop.py +3 -3
- mindspore/nn/optim/rprop.py +20 -16
- mindspore/nn/optim/sgd.py +21 -15
- mindspore/nn/optim/thor.py +23 -21
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +7 -6
- mindspore/nn/probability/bijector/invert.py +4 -2
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/__init__.py +6 -0
- mindspore/nn/probability/distribution/_utils/custom_ops.py +3 -2
- mindspore/nn/probability/distribution/_utils/utils.py +11 -17
- mindspore/nn/probability/distribution/bernoulli.py +6 -6
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +9 -9
- mindspore/nn/probability/distribution/cauchy.py +8 -8
- mindspore/nn/probability/distribution/distribution.py +12 -6
- mindspore/nn/probability/distribution/exponential.py +5 -5
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +6 -5
- mindspore/nn/probability/distribution/gumbel.py +5 -5
- mindspore/nn/probability/distribution/half_normal.py +133 -0
- mindspore/nn/probability/distribution/laplace.py +128 -0
- mindspore/nn/probability/distribution/log_normal.py +0 -1
- mindspore/nn/probability/distribution/logistic.py +4 -5
- mindspore/nn/probability/distribution/normal.py +11 -15
- mindspore/nn/probability/distribution/poisson.py +6 -2
- mindspore/nn/probability/distribution/student_t.py +150 -0
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +5 -5
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +8 -1
- mindspore/nn/wrap/cell_wrapper.py +55 -27
- mindspore/nn/wrap/grad_reducer.py +20 -11
- mindspore/nn/wrap/loss_scale.py +47 -30
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +46 -42
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +26 -19
- mindspore/numpy/utils.py +1 -8
- mindspore/numpy/utils_const.py +112 -62
- mindspore/ops/__init__.py +6 -3
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +209 -152
- mindspore/ops/_grad/grad_base.py +55 -17
- mindspore/ops/_grad/grad_clip_ops.py +11 -3
- mindspore/ops/_grad/grad_comm_ops.py +58 -47
- mindspore/ops/_grad/grad_implementations.py +21 -61
- mindspore/ops/_grad/grad_inner_ops.py +48 -6
- mindspore/ops/_grad/grad_math_ops.py +306 -161
- mindspore/ops/_grad/grad_nn_ops.py +192 -181
- mindspore/ops/_grad/grad_other_ops.py +1 -1
- mindspore/ops/_grad/grad_quant_ops.py +5 -5
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +15 -9
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +441 -55
- mindspore/ops/_grad_experimental/grad_image_ops.py +25 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +3 -44
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +16 -21
- mindspore/ops/_grad_experimental/grad_math_ops.py +979 -49
- mindspore/ops/_grad_experimental/grad_nn_ops.py +78 -8
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +197 -13
- mindspore/ops/_op_impl/__init__.py +3 -3
- mindspore/ops/_op_impl/_custom_op/__init__.py +0 -1
- mindspore/ops/_op_impl/_custom_op/_basic.py +0 -1
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +4 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +5 -5
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +3 -3
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +4 -8
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +0 -1
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -1
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +238 -3
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
- mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
- mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
- mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
- mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/cauchy.py} +17 -10
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/cholesky.py +1 -1
- mindspore/ops/_op_impl/{cpu/bias_add.py → aicpu/choleskygrad.py} +9 -7
- mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
- mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
- mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +2 -2
- mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
- mindspore/ops/_op_impl/aicpu/diag.py +36 -0
- mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
- mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
- mindspore/ops/_op_impl/{cpu/bias_add_grad.py → aicpu/digamma.py} +9 -7
- mindspore/ops/_op_impl/aicpu/eig.py +35 -0
- mindspore/ops/_op_impl/aicpu/fft_with_size.py +41 -0
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/glu.py +33 -0
- mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/{tbe/scatter_add_ds.py → aicpu/inplace_index_add.py} +17 -21
- mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
- mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/lgamma.py +32 -0
- mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
- mindspore/ops/_op_impl/aicpu/logit.py +33 -0
- mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
- mindspore/ops/_op_impl/aicpu/masked_scatter.py +39 -0
- mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
- mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
- mindspore/ops/_op_impl/aicpu/matrix_power.py +32 -0
- mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
- mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +2 -0
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
- mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/qr.py +36 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +3 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/range.py +36 -0
- mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
- mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/search_sorted.py +12 -6
- mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sort.py +39 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
- mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
- mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
- mindspore/ops/_op_impl/{tbe/slice_ds.py → aicpu/sparse_segment_sum.py} +16 -24
- mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
- mindspore/ops/_op_impl/aicpu/squared_difference.py +2 -0
- mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +93 -0
- mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +66 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/{tbe/gather_v2.py → aicpu/tile.py} +24 -24
- mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/__init__.py +1 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/maximum_grad.py +2 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/cpu/pyexecute.py} +13 -8
- mindspore/ops/_op_impl/cpu/reduce_sum.py +8 -0
- mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -608
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +42 -0
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -1
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +41 -0
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +1 -0
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +2 -0
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +40 -0
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -2
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -2
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +1 -1
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/greater.py +2 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -1
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -6
- mindspore/ops/_op_impl/tbe/{greater_ds.py → reduce_all_ds.py} +13 -16
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +39 -0
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/scatter_add.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +2 -2
- mindspore/ops/_op_impl/tbe/slice.py +26 -15
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +1 -0
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +1 -1
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +2 -0
- mindspore/ops/_primitive_cache.py +3 -2
- mindspore/ops/_register_for_op.py +11 -0
- mindspore/ops/_utils/__init__.py +1 -1
- mindspore/ops/_utils/utils.py +20 -41
- mindspore/ops/_vmap/__init__.py +2 -2
- mindspore/ops/_vmap/vmap_array_ops.py +170 -78
- mindspore/ops/_vmap/vmap_base.py +24 -10
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -9
- mindspore/ops/_vmap/vmap_image_ops.py +52 -0
- mindspore/ops/_vmap/vmap_math_ops.py +77 -6
- mindspore/ops/_vmap/vmap_nn_ops.py +78 -29
- mindspore/ops/_vmap/vmap_other_ops.py +3 -1
- mindspore/ops/_vmap/vmap_random_ops.py +55 -3
- mindspore/ops/_vmap/vmap_sparse_ops.py +1 -0
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Argmax_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/Argmin_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/Assign_bprop.mindir +16 -16
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +28 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +306 -0
- mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +12 -8
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DType_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/Depend_bprop.mindir +12 -13
- mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -24
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -14
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Equal_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +54 -0
- mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +18 -15
- mindspore/ops/bprop_mindir/GatherD_bprop.mindir +26 -0
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +57 -0
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/Greater_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/IOU_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/IsInf_bprop.mindir +13 -10
- mindspore/ops/bprop_mindir/IsNan_bprop.mindir +14 -11
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Less_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +22 -19
- mindspore/ops/bprop_mindir/Load_bprop.mindir +12 -13
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NonZero_bprop.mindir +14 -0
- mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +25 -23
- mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +13 -13
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/Range_bprop.mindir +21 -19
- mindspore/ops/bprop_mindir/Rank_bprop.mindir +11 -11
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +18 -17
- mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +18 -17
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +19 -23
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +60 -0
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +89 -0
- mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +52 -0
- mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/Round_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +24 -0
- mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/Select_bprop.mindir +30 -34
- mindspore/ops/bprop_mindir/Shape_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Sign_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/Slice_bprop.mindir +26 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +28 -0
- mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Split_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +54 -0
- mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +95 -0
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +98 -0
- mindspore/ops/bprop_mindir/Switch_bprop.mindir +28 -32
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +14 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TransShape_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +18 -15
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +11 -13
- mindspore/ops/bprop_mindir/Unique_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Unstack_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/__init__.py +1 -4
- mindspore/ops/bprop_mindir/generate_mindir.py +32 -20
- mindspore/ops/composite/__init__.py +12 -13
- mindspore/ops/composite/base.py +261 -254
- mindspore/ops/composite/env_ops.py +41 -0
- mindspore/ops/composite/math_ops.py +197 -156
- mindspore/ops/composite/multitype_ops/_compile_utils.py +428 -176
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +188 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +23 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +3 -3
- mindspore/ops/composite/multitype_ops/equal_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +52 -5
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +15 -3
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +33 -2
- mindspore/ops/composite/multitype_ops/less_impl.py +33 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -7
- mindspore/ops/composite/multitype_ops/not_in_impl.py +15 -3
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +62 -70
- mindspore/ops/composite/multitype_ops/sub_impl.py +3 -3
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +41 -4
- mindspore/ops/function/__init__.py +323 -8
- mindspore/ops/function/array_func.py +3511 -780
- mindspore/ops/function/clip_func.py +329 -0
- mindspore/ops/function/debug_func.py +6 -6
- mindspore/ops/function/grad/__init__.py +5 -1
- mindspore/ops/function/grad/grad_func.py +736 -65
- mindspore/ops/function/image_func.py +270 -0
- mindspore/ops/function/linalg_func.py +268 -8
- mindspore/ops/function/math_func.py +8032 -3164
- mindspore/ops/function/nn_func.py +5619 -1855
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +11 -10
- mindspore/ops/function/random_func.py +939 -77
- mindspore/ops/function/sparse_func.py +249 -84
- mindspore/ops/function/sparse_unary_func.py +2303 -0
- mindspore/ops/function/spectral_func.py +146 -0
- mindspore/ops/function/vmap_func.py +114 -0
- mindspore/ops/functional.py +182 -254
- mindspore/ops/op_info_register.py +79 -34
- mindspore/ops/operations/__init__.py +210 -118
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +25 -15
- mindspore/ops/operations/_grad_ops.py +447 -322
- mindspore/ops/operations/_inner_ops.py +547 -176
- mindspore/ops/operations/_map_tensor_ops.py +112 -0
- mindspore/ops/operations/_ms_kernel.py +29 -27
- mindspore/ops/operations/_ocr_ops.py +11 -11
- mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
- mindspore/ops/operations/_quant_ops.py +186 -101
- mindspore/ops/operations/_rl_inner_ops.py +122 -61
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1047 -0
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +4 -4
- mindspore/ops/operations/array_ops.py +1428 -1226
- mindspore/ops/operations/comm_ops.py +180 -117
- mindspore/ops/operations/control_ops.py +4 -2
- mindspore/ops/operations/custom_ops.py +185 -98
- mindspore/ops/operations/debug_ops.py +92 -54
- mindspore/ops/operations/image_ops.py +406 -211
- mindspore/ops/operations/inner_ops.py +42 -53
- mindspore/ops/operations/linalg_ops.py +32 -29
- mindspore/ops/operations/math_ops.py +2076 -897
- mindspore/ops/operations/nn_ops.py +1282 -1252
- mindspore/ops/operations/other_ops.py +124 -278
- mindspore/ops/operations/random_ops.py +345 -178
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +502 -157
- mindspore/ops/operations/spectral_ops.py +107 -0
- mindspore/ops/primitive.py +192 -15
- mindspore/ops/vm_impl_registry.py +23 -2
- mindspore/parallel/__init__.py +6 -1
- mindspore/parallel/_auto_parallel_context.py +199 -92
- mindspore/parallel/_cell_wrapper.py +4 -2
- mindspore/parallel/_cost_model_context.py +3 -0
- mindspore/parallel/_dp_allreduce_fusion.py +2 -1
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +167 -28
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +9 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +59 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +160 -35
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +235 -196
- mindspore/parallel/_utils.py +47 -7
- mindspore/parallel/algo_parameter_config.py +5 -1
- mindspore/parallel/checkpoint_transform.py +329 -0
- mindspore/parallel/shard.py +229 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/common/util.py +4 -3
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +249 -0
- mindspore/profiler/parser/aicpu_data_parser.py +38 -39
- mindspore/profiler/parser/ascend_timeline_generator.py +497 -0
- mindspore/profiler/parser/base_timeline_generator.py +471 -0
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +684 -0
- mindspore/profiler/parser/framework_parser.py +42 -16
- mindspore/profiler/parser/hccl_parser.py +158 -158
- mindspore/profiler/parser/hwts_log_parser.py +7 -6
- mindspore/profiler/parser/integrator.py +18 -1579
- mindspore/profiler/parser/minddata_analyzer.py +8 -8
- mindspore/profiler/parser/msadvisor_analyzer.py +14 -27
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +108 -0
- mindspore/profiler/parser/step_trace_parser.py +1 -1
- mindspore/profiler/profiling.py +396 -194
- mindspore/rewrite/__init__.py +6 -2
- mindspore/rewrite/api/node.py +51 -110
- mindspore/rewrite/api/node_type.py +10 -6
- mindspore/rewrite/api/pattern_engine.py +51 -7
- mindspore/rewrite/api/scoped_value.py +64 -53
- mindspore/rewrite/api/symbol_tree.py +108 -61
- mindspore/rewrite/api/tree_node_helper.py +2 -3
- mindspore/{compression/quant/__init__.py → rewrite/ast_creator_register.py} +20 -11
- mindspore/rewrite/ast_helpers/__init__.py +6 -3
- mindspore/rewrite/ast_helpers/ast_creator.py +115 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +99 -1
- mindspore/rewrite/ast_helpers/ast_modifier.py +17 -4
- mindspore/rewrite/ast_helpers/ast_replacer.py +1 -1
- mindspore/rewrite/ast_transformers/__init__.py +0 -1
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +46 -5
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +6 -3
- mindspore/rewrite/common/__init__.py +2 -0
- mindspore/rewrite/common/event.py +1 -1
- mindspore/rewrite/common/observable.py +1 -1
- mindspore/rewrite/common/observer.py +1 -1
- mindspore/rewrite/common/rewrite_elog.py +35 -0
- mindspore/rewrite/namer.py +2 -2
- mindspore/rewrite/namespace.py +14 -4
- mindspore/rewrite/node.py +161 -13
- mindspore/rewrite/parser.py +0 -1
- mindspore/rewrite/parser_register.py +0 -1
- mindspore/rewrite/parsers/arguments_parser.py +3 -2
- mindspore/rewrite/parsers/assign_parser.py +267 -67
- mindspore/rewrite/parsers/attribute_parser.py +56 -0
- mindspore/rewrite/parsers/class_def_parser.py +191 -108
- mindspore/rewrite/parsers/constant_parser.py +101 -0
- mindspore/rewrite/parsers/container_parser.py +88 -0
- mindspore/rewrite/parsers/for_parser.py +28 -15
- mindspore/rewrite/parsers/function_def_parser.py +21 -5
- mindspore/rewrite/parsers/if_parser.py +11 -28
- mindspore/rewrite/parsers/module_parser.py +9 -6
- mindspore/rewrite/parsers/return_parser.py +3 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +322 -109
- mindspore/rewrite/symbol_tree_builder.py +45 -8
- mindspore/rewrite/symbol_tree_dumper.py +0 -1
- mindspore/rewrite/topological_manager.py +1 -2
- mindspore/run_check/_check_version.py +209 -112
- mindspore/run_check/run_check.py +2 -1
- mindspore/scipy/linalg.py +13 -117
- mindspore/scipy/ops.py +5 -71
- mindspore/scipy/ops_grad.py +1 -25
- mindspore/scipy/ops_wrapper.py +1 -1
- mindspore/scipy/optimize/_bfgs.py +1 -1
- mindspore/scipy/optimize/_lagrange.py +200 -0
- mindspore/scipy/optimize/line_search.py +3 -2
- mindspore/scipy/optimize/minimize.py +43 -6
- mindspore/scipy/sparse/__init__.py +2 -2
- mindspore/scipy/sparse/linalg.py +5 -465
- mindspore/scipy/utils.py +2 -1
- mindspore/scipy/utils_const.py +7 -1
- mindspore/train/__init__.py +6 -4
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +321 -50
- mindspore/train/callback/__init__.py +3 -1
- mindspore/train/callback/_backup_and_restore.py +120 -0
- mindspore/train/callback/_callback.py +8 -8
- mindspore/train/callback/_checkpoint.py +12 -9
- mindspore/train/callback/_early_stop.py +13 -7
- mindspore/train/callback/_history.py +8 -8
- mindspore/train/callback/_lambda_callback.py +6 -6
- mindspore/train/callback/_landscape.py +36 -38
- mindspore/train/callback/_loss_monitor.py +12 -6
- mindspore/train/callback/_lr_scheduler_callback.py +2 -4
- mindspore/train/callback/_on_request_exit.py +212 -0
- mindspore/train/callback/_reduce_lr_on_plateau.py +13 -7
- mindspore/train/callback/_summary_collector.py +27 -19
- mindspore/train/callback/_time_monitor.py +13 -7
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +122 -33
- mindspore/train/dataset_helper.py +28 -87
- mindspore/train/loss_scale_manager.py +4 -7
- mindspore/{nn → train}/metrics/__init__.py +20 -20
- mindspore/{nn → train}/metrics/accuracy.py +12 -10
- mindspore/{nn → train}/metrics/auc.py +4 -4
- mindspore/{nn → train}/metrics/bleu_score.py +4 -4
- mindspore/{nn → train}/metrics/confusion_matrix.py +10 -8
- mindspore/{nn → train}/metrics/cosine_similarity.py +4 -4
- mindspore/{nn → train}/metrics/dice.py +6 -5
- mindspore/{nn → train}/metrics/error.py +7 -5
- mindspore/{nn → train}/metrics/fbeta.py +9 -7
- mindspore/{nn → train}/metrics/hausdorff_distance.py +8 -6
- mindspore/{nn → train}/metrics/loss.py +4 -3
- mindspore/{nn → train}/metrics/mean_surface_distance.py +6 -5
- mindspore/{nn → train}/metrics/metric.py +6 -5
- mindspore/{nn → train}/metrics/occlusion_sensitivity.py +4 -3
- mindspore/{nn → train}/metrics/perplexity.py +5 -4
- mindspore/{nn → train}/metrics/precision.py +5 -4
- mindspore/{nn → train}/metrics/recall.py +5 -4
- mindspore/{nn → train}/metrics/roc.py +7 -6
- mindspore/{nn → train}/metrics/root_mean_square_surface_distance.py +6 -5
- mindspore/{nn → train}/metrics/topk.py +7 -5
- mindspore/train/mind_ir_pb2.py +339 -32
- mindspore/train/model.py +113 -84
- mindspore/train/serialization.py +547 -167
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -12
- mindspore/train/train_thor/convert_utils.py +7 -1
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/train/train_thor/model_thor.py +0 -4
- mindspore/version.py +1 -1
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +4 -3
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +899 -675
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -514
- mindspore/compression/quant/qat.py +0 -636
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -138
- mindspore/nn/probability/dpn/vae/vae.py +0 -122
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -363
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/tbe/bias_add_grad_ds.py +0 -52
- mindspore/ops/_op_impl/tbe/scatter_nd_add_ds.py +0 -43
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Identity_bprop.mindir +0 -9
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/stop_gradient_bprop.mindir +0 -12
- mindspore/ops/composite/array_ops.py +0 -210
- mindspore/ops/composite/clip_ops.py +0 -238
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/ops/operations/sponge_ops.py +0 -3531
- mindspore/ops/operations/sponge_update_ops.py +0 -2546
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- mindspore/run_check/_check_deps_version.py +0 -84
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
mindspore/ops/operations/image_ops.py

@@ -14,9 +14,10 @@
 # ============================================================================
 
 """image_ops"""
+
+from __future__ import absolute_import
 from mindspore import context
-from mindspore
-from mindspore._checkparam import Rel
+from mindspore import _checkparam as validator
 from mindspore.ops.primitive import prim_attr_register, Primitive
 from mindspore.common import dtype as mstype
 
@@ -30,19 +31,23 @@ class AdjustSaturation(Primitive):
     adds an offset to the saturation channel, converts back to RGB and then back to the original data type.
     If several adjustments are chained it is advisable to minimize the number of redundant conversions.
 
-
-        - **image** (Tensor)
-
-        - **scale** (Tensor)
+    Inputs:
+        - **image** (Tensor) - Images to adjust. Must be one of the following types: float16, float32.
+          At least 3-D. The last dimension is interpreted as channels, and must be three.
+        - **scale** (Tensor) - A scale factor determines the amount of saturation adjustment to
+          apply to the image. A value greater than 1.0 increases the saturation, while a value less than
+          1.0 decreases the saturation. A value of 1.0 leaves the saturation unchanged.
+          Must be 0-D Tensor of type float32.
 
-
+    Outputs:
         Adjusted image(s), same shape and dtype as `image`.
 
     Raises:
         TypeError: If any iput is not Tensor.
         TypeError: If the type of `image` is not one of the following dtype: float16, float32.
         TypeError: If the type of `scale` is not float32.
-        ValueError: If the dimension of the 'image' is less than 3
+        ValueError: If the dimension of the 'image' is less than 3.
+        ValueError: If the last dimension of the 'image' is not 3.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
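
For reference, a minimal usage sketch consistent with the updated AdjustSaturation docstring above. The values are illustrative (not taken from the package), and it assumes the primitive is imported directly from the module this hunk touches:

    >>> import numpy as np
    >>> import mindspore as ms
    >>> from mindspore import Tensor
    >>> from mindspore.ops.operations.image_ops import AdjustSaturation
    >>> # a 1 x 2 x 2 x 3 RGB batch in float32; the last dimension must hold the 3 channels
    >>> image = Tensor(np.ones((1, 2, 2, 3)), ms.float32)
    >>> # 0-D float32 scale: > 1.0 increases saturation, < 1.0 decreases it
    >>> scale = Tensor(0.5, ms.float32)
    >>> output = AdjustSaturation()(image, scale)
    >>> print(output.shape)
    (1, 2, 2, 3)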
@@ -78,12 +83,12 @@ class AdjustContrastv2(Primitive):
     The other dimensions only represent a collection of images, such as [batch, height, width, channels].
     Contrast is adjusted independently for each channel of each image.
 
-
+    Inputs:
         -**images**(tensor): Images to adjust. Must be one of the following types: float16, float32.
         At least 3-D.The last dimension is interpreted as channels, and must be three.
         -**contrast_factor**(tensor): A float multiplier for adjusting contrast. A Tensor of type float32. Must be 0-D.
 
-
+    Outputs:
         Adjusted image(s), same shape and dtype as `images`.
 
     Raises:

@@ -93,7 +98,7 @@ class AdjustContrastv2(Primitive):
         ValueError: If the dimension of the 'images' is less than 3, or the last dimension of the 'images' is not 3.
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> images = Tensor([[[1.0, 2.0, 3.0],
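
The docstring's own example is cut off at the hunk boundary above, so here is a self-contained, illustrative sketch of AdjustContrastv2 based on the constraints in its Inputs section (values are made up; the import path is the module being diffed):

    >>> import numpy as np
    >>> import mindspore as ms
    >>> from mindspore import Tensor
    >>> from mindspore.ops.operations.image_ops import AdjustContrastv2
    >>> # at least 3-D, last dimension = 3 channels, float32
    >>> images = Tensor(np.random.rand(2, 2, 3).astype(np.float32))
    >>> # 0-D float32 contrast multiplier
    >>> contrast_factor = Tensor(2.0, ms.float32)
    >>> output = AdjustContrastv2()(images, contrast_factor)
    >>> print(output.shape)
    (2, 2, 3)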
@@ -128,18 +133,19 @@ class AdjustHue(Primitive):
     It is recommended to minimize the number of redundant transformations when several adjustments are chained.
 
     Inputs:
-        - **image** (Tensor): RGB image or images
-
+        - **image** (Tensor): RGB image or images, a Tensor has at least 3-D.
+          The last dimension is interpreted as channels whose size must be three.
+          the dtype is float16 or float32.
         - **delta** (Tensor): How much to add to the hue channel, the dtype is float32. Must be 0-D.
 
-
+    Outputs:
         Adjusted image(s), same shape and dtype as `image`.
 
     Raises:
         TypeError: If neither `image` nor `delta` is a tensor.
-        TypeError: If the dtype of image
-        TypeError: If the dtype of delta not float32.
-        ValueError: If image
+        TypeError: If the dtype of `image` is neither float16 nor float32.
+        TypeError: If the dtype of `delta` not float32.
+        ValueError: If the dimension of `image` is less than 3.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
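
As with the hunks above, a minimal illustrative sketch of AdjustHue that matches the updated docstring (values chosen for illustration; import path is the module being diffed):

    >>> import numpy as np
    >>> import mindspore as ms
    >>> from mindspore import Tensor
    >>> from mindspore.ops.operations.image_ops import AdjustHue
    >>> # RGB image: at least 3-D, last dimension of size 3, float32
    >>> image = Tensor(np.random.rand(4, 4, 3).astype(np.float32))
    >>> # 0-D float32 delta added to the hue channel
    >>> delta = Tensor(0.2, ms.float32)
    >>> output = AdjustHue()(image, delta)
    >>> print(output.shape)
    (4, 4, 3)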
@@ -175,40 +181,43 @@ class AdjustHue(Primitive):

  class ExtractGlimpse(Primitive):
      """
-     Extracts
+     Extracts glimpses(usually subarea of rectangle) from the input image Tensor and return as windows.

      Note:
-         If
+         If extracted windows and the input image only partially overlap,
+         random noise is filled in those non overlapping areas.

      Args:
-         centered (bool): An optional `bool`.
+         centered (bool, optional): An optional `bool`. Indicates if the offset coordinates
          are centered relative to the image, in which case the (0, 0) offset is relative to the center of
          the center of the input images. If false, the (0, 0) offset corresponds to the upper left corner
-         of the input images.
-         normalized (bool): An optional `bool`.
-         coordinates are normalized.
-         uniform_noise (bool): An optional `bool`.
-         generated using a uniform distribution
-         noise (str): An optional string
-         When the window and input image tensor not overlap, random noise is filled.
-         The
-         When
+         of the input images. Defaults to `True`.
+         normalized (bool, optional): An optional `bool`. indicates if the offset
+         coordinates are normalized. Defaults to `True`.
+         uniform_noise (bool, optional): An optional `bool`. indicates if the noise should be
+         generated using a uniform distribution(aka. Gaussian distribution). Defaults to `True`.
+         noise (str, optional): An optional string specifies the type of noise to fill.
+         The window is determined by size and offsets.
+         When the window and input image tensor don't not overlap, random noise is filled.
+         The value can be 'uniform', 'gaussian' and 'zero'. Default: `uniform`.
+
+         - When `noise` is 'uniform' and 'gaussian', the result is variable.
+         - When `noise` is 'zero', the value of `uniform_noise` must be 'False' and the
+           filling noise will be zero so that the result is fixed.
+         - When `uniform_noise` is 'True', the value of `noise` only can be 'uniform'.
+           When `uniform_noise` is 'False', the value of `noise` can be 'uniform', 'gaussian' and 'zero'.

  Inputs:
-     - **x** (Tensor) - A 4-D float tensor of shape
+     - **x** (Tensor) - A 4-D float tensor of shape :math:`(batch_size, height, width, channels)`.
       Types allowed: float32.
      - **size** (Tensor) - A 1-D tensor of 2 elements containing the size of the glimpses to extract.
       The glimpse height must be specified first, following by the glimpse width. Types allowed: int32.
       The value of size must be greater than zero.
-     - **offsets** (Tensor) - A 2-D integer tensor of shape
+     - **offsets** (Tensor) - A 2-D integer tensor of shape :math:`(batch_size, 2)` containing the y, x locations
       of the center of each window. Types allowed: float32.

  Outputs:
-     A 4-D tensor of shape
+     A 4-D tensor of shape :math:`(batch_size, glimpse_height, glimpse_width, channels)` with type: float32.

  Raises:
      TypeError: If `centered` is not a bool.
@@ -221,32 +230,23 @@ class ExtractGlimpse(Primitive):
      ValueError: If the input is not Tensor.

  Supported Platforms:
-     ``Ascend`` ``CPU``
+     ``Ascend`` ``GPU`` ``CPU``

  Examples:
-     >>>
-     ... def construct(self, x, offsets):
-     ... return self.attribute(x, self.size, offsets);
-     >>> x = Tensor(np.random.randn(1, 4, 2, 3).astype(np.float32))
-     >>> size = Tensor(np.array([2, 2]).astype("int32"))
-     >>> offsets = Tensor(np.array([[0, 0]]).astype("float32"))
-     >>> attribute = ExtractGlimpse(size, True, True, True, "uniform")
-     >>> output = attribute(x, offsets)
+     >>> x = Tensor([[[[0.0], [1.0], [2.0]], [[3.0], [4.0], [5.0]], [[6.0], [7.0], [8.0]]]], dtype=mindspore.float32)
+     >>> size = Tensor((2, 2), dtype=mindspore.int32)
+     >>> offsets = Tensor([[1, 1]], dtype=mindspore.float32)
+     >>> ops = P.image_ops.ExtractGlimpse(centered = False, normalized = False,
+     >>> uniform_noise = False, noise = "uniform")
+     >>> output = ops(x, size, offsets)
      >>> print(output)
-     [[[[
-     [
-     [ 0.33367434  1.4940791  -0.20515826]]]]
+     [[[[0.]
+        [1.]]
+       [[3.]
+        [4.]]]]
      """
      @prim_attr_register
      def __init__(self, centered=True, normalized=True, uniform_noise=True, noise="uniform"):
-         self.add_prim_attr("max_length", 1000000)
          self.init_prim_io_names(inputs=['x', 'size', 'offsets'], outputs=['output'])
          self.centered = centered
          self.normalized = normalized
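The constraint between `noise` and `uniform_noise` described in the Args above is easy to miss, so here is a small sketch of the deterministic variant. The constructor signature matches the `__init__` shown in this hunk; the import path and example values are assumptions.

    # Illustrative sketch: deterministic padding requires noise="zero" with uniform_noise=False.
    import numpy as np
    import mindspore as ms
    from mindspore import Tensor
    from mindspore.ops.operations import image_ops

    x = Tensor(np.arange(9, dtype=np.float32).reshape(1, 3, 3, 1))  # NHWC, float32
    size = Tensor((2, 2), ms.int32)                                 # (glimpse_height, glimpse_width)
    offsets = Tensor([[0, 0]], ms.float32)                          # one window centre per batch item
    glimpse = image_ops.ExtractGlimpse(centered=True, normalized=False,
                                       uniform_noise=False, noise="zero")
    print(glimpse(x, size, offsets).shape)                          # (1, 2, 2, 1)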
@@ -267,7 +267,7 @@ class ExtractGlimpse(Primitive):


  class CropAndResize(Primitive):
-     """
+     r"""
      Extracts crops from the input image tensor and resizes them.

      Note:
@@ -281,27 +281,39 @@ class CropAndResize(Primitive):
      extrapolation_value (float, optional): An optional float value used extrapolation, if applicable. Default: 0.0.

  Inputs:
-     - **x** (Tensor) - The input image must be a 4-D tensor of shape
+     - **x** (Tensor) - The input image must be a 4-D tensor of shape
+       :math:`(batch, image\_height, image\_width, depth)`.
       Types allowed: int8, int16, int32, int64, float16, float32, float64, uint8, uint16.
-     - **boxes** (Tensor) - A 2-D tensor of shape
+     - **boxes** (Tensor) - A 2-D tensor of shape :math:`(num\_boxes, 4)`.
       The i-th row of the tensor specifies the coordinates of a box in the box_ind[i] image
       and is specified in normalized coordinates [y1, x1, y2, x2]. A normalized coordinate value of y is mapped to
       the image coordinate at y * (image_height - 1), so as the [0, 1] interval of normalized image height is
       mapped to [0, image_height - 1] in image height coordinates. We do allow y1 > y2, in which case the sampled
       crop is an up-down flipped version of the original image. The width dimension is treated similarly.
-       Normalized coordinates outside the [0, 1] range are allowed, in which case we use extrapolation_value to
+       Normalized coordinates outside the [0, 1] range are allowed, in which case we use `extrapolation_value` to
       extrapolate the input image values. Types allowed: float32.
-     - **box_index** (Tensor) - A 1-D tensor of shape
-       The value of
+     - **box_index** (Tensor) - A 1-D tensor of shape :math:`(num\_boxes)` with int32 values in [0, batch).
+       The value of `box_index[i]` specifies the image that the i-th box refers to. Types allowed: int32.
      - **crop_size** (Tuple[int]) - A tuple of two int32 elements: (crop_height, crop_width).
       Only constant value is allowed. All cropped image patches are resized to this size.
       The aspect ratio of the image content is not preserved. Both crop_height and crop_width need to be positive.

  Outputs:
-     A 4-D tensor of shape
+     A 4-D tensor of shape :math:`(num\_boxes, crop\_height, crop\_width, depth)` with type: float32.

  Raises:
+     TypeError: If `x` or `boxes` or `box_index` is not a Tensor.
+     TypeError: If `crop_size` is not a Tuple with two int32 elements.
+     TypeError: If dtype of `boxes` is not float or that of `box_index` is not int.
      TypeError: If `method` is not a str.
      TypeError: If `extrapolation_value` is not a float.
+     ValueError: If the shape rank of `x` is not 4.
+     ValueError: If the shape rank of `boxes` is not 2.
+     ValueError: If the second dim of `boxes` is not 4.
+     ValueError: If the shape rank of `box_index` is not 1.
+     ValueError: If the first dim of `box_index` is not equal to that of `boxes`.
+     ValueError: If existing element in `box_index` is out of range `[0, batch)`.
+     ValueError: If the data of `crop_size` is not positive.
      ValueError: If `method` is not one of 'bilinear', 'nearest', 'bilinear_v2'.

  Supported Platforms:
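The normalized-coordinate convention above is the part that most often trips users up, so here is a minimal sketch of a single crop. It is illustrative only; the import path and the concrete box values are assumptions, not content of the diff.

    # Illustrative sketch: crop the top-left quarter of image 0 and resize it to 4 x 4.
    import numpy as np
    import mindspore as ms
    from mindspore import Tensor
    from mindspore.ops.operations import image_ops

    x = Tensor(np.random.rand(1, 8, 8, 3).astype(np.float32))  # (batch, H, W, depth)
    boxes = Tensor([[0.0, 0.0, 0.5, 0.5]], ms.float32)         # [y1, x1, y2, x2] in normalized [0, 1]
    box_index = Tensor([0], ms.int32)                          # each box refers to image 0
    crop = image_ops.CropAndResize(method="bilinear", extrapolation_value=0.0)
    out = crop(x, boxes, box_index, (4, 4))                    # crop_size = (crop_height, crop_width)
    print(out.shape)                                           # (1, 4, 4, 3)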
@@ -346,48 +358,52 @@ class CropAndResize(Primitive):

  class NonMaxSuppressionV3(Primitive):
      r"""
-
+     Selects a subset of bounding boxes in a greedy manner, based on their descending score.
+     It removes boxes that have high intersection-over-union (IOU) overlap with previously
+     selected boxes, and eliminates boxes with scores lower than a given threshold.

      .. warning::
-         When input
+         When input `max_output_size` is negative, it will be treated as 0.

      Note:
-         This algorithm
-         This algorithm
+         - This algorithm does not depend on the location of the origin in the coordinate system.
+         - This algorithm remains unaffected by orthogonal transformations and translations of
+           the coordinate system, which means that translating or reflecting the coordinate system
+           will result in the same boxes being chosen by the algorithm.

  Inputs:
-     - **boxes** (Tensor) - A 2-D Tensor of shape
-     - **scores** (Tensor) - A 1-D Tensor of shape
-       the
+     - **boxes** (Tensor) - A 2-D Tensor of shape :math:`(num\_boxes, 4)`.
+     - **scores** (Tensor) - A 1-D Tensor of shape :math:`(num\_boxes)` where each element represents a
+       single score associated with each box (i.e., each row of the `boxes` Tensor).
+       It is required that the number of scores in `scores` must be equal to the number of boxes in `boxes`.
+       The supported data type is float32.
      - **max_output_size** (Union[Tensor, Number.Int]) - A scalar integer Tensor representing the maximum
-       number of boxes to be selected by non max suppression.
-     - **iou_threshold** (Union[Tensor, Number.Float]) - A
-     - **score_threshold** (Union[Tensor, Number.Float]) - A
+       number of boxes to be selected by non max suppression. The supported data type is int32.
+     - **iou_threshold** (Union[Tensor, Number.Float]) - A scalar float Tensor represents the threshold
+       used for determining if the intersection over union (IOU) between boxes is too high.
+       Data type of `iou_threshold` is float32 and must be in range [0, 1].
+     - **score_threshold** (Union[Tensor, Number.Float]) - A scalar float Tensor represents the threshold for
+       determining when to remove boxes based on score. The supported data type is float32.

  Outputs:
-     A 1-D integer Tensor of shape
-     where M <= max_output_size
+     A 1-D integer Tensor of shape :math:`(M)` representing the selected indices from the boxes tensor,
+     where M <= `max_output_size`.

  Raises:
-     TypeError: If the dtype of `boxes` and `scores`
-     TypeError: If the dtype of `iou_threshold` and `score_threshold`
+     TypeError: If the dtype of `boxes` and `scores` are different.
+     TypeError: If the dtype of `iou_threshold` and `score_threshold` are different.
      TypeError: If `boxes` is not tensor or its dtype is not float16 or float32.
-     TypeError: If `max_output_size` is not tensor or scalar
-     TypeError: If `iou_threshold` is not tensor or scalar
-     TypeError: If `score_threshold` is not tensor or scalar
+     TypeError: If `scores` is not tensor or its dtype is not float16 or float32.
+     TypeError: If `max_output_size` is not tensor or scalar or its date type is not int32 or int64.
+     TypeError: If `iou_threshold` is not tensor or scalar or its type is neither float16 or float32.
+     TypeError: If `score_threshold` is not tensor or scalar or its type is neither float16 or float32.
      ValueError: If the size of shape of `boxes` is not 2 or the second value of its shape is not 4.
      ValueError: If the size of shape of `scores` is not 1.
-     ValueError: If
+     ValueError: If any of the size of shape of `max_output_size`,
+     `iou_threshold`, `score_threshold` is not 0.

  Supported Platforms:
-     ``Ascend``
+     ``Ascend`` ``GPU``

  Examples:
      >>> boxes = Tensor(np.array([[1, 2, 3, 4], [1, 3, 3, 4], [1, 3, 4, 4],
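A compact sketch of the five inputs listed above follows. The input order matches the `init_prim_io_names` call added in the next hunk; the import path and the example boxes, scores, and thresholds are assumptions.

    # Illustrative sketch: keep at most two non-overlapping high-score boxes.
    import numpy as np
    import mindspore as ms
    from mindspore import Tensor
    from mindspore.ops.operations import image_ops

    boxes = Tensor(np.array([[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, 2, 1, 3]], np.float32))
    scores = Tensor(np.array([0.9, 0.8, 0.7], np.float32))  # one score per box
    max_output_size = Tensor(2, ms.int32)
    iou_threshold = Tensor(0.5, ms.float32)                 # must lie in [0, 1]
    score_threshold = Tensor(0.0, ms.float32)
    nms = image_ops.NonMaxSuppressionV3()
    idx = nms(boxes, scores, max_output_size, iou_threshold, score_threshold)
    print(idx)  # 1-D integer tensor with at most `max_output_size` selected indices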
@@ -405,64 +421,70 @@ class NonMaxSuppressionV3(Primitive):
      @prim_attr_register
      def __init__(self):
          """Initialize NonMaxSuppressionV3"""
+         self.init_prim_io_names(inputs=['boxes', 'scores', 'max_output_size', 'iou_threshold', 'score_threshold'],
+                                 outputs=['selected indices'])


  class NonMaxSuppressionWithOverlaps(Primitive):
-     """
+     r"""
+     Selects a subset of bounding boxes in a greedy manner by prioritizing those with higher
+     scores and removing those with high overlaps with previously selected boxes.
+     Boxes with scores lower than the score threshold are also removed.
+     The overlap values between boxes are represented as an N-by-N square matrix,
+     which can be customized to define different overlap criteria such as intersection
+     over union or intersection over area.

      Note:
-         This algorithm
-         This algorithm
+         - This algorithm does not depend on the location of the origin in the coordinate system.
+         - This algorithm remains unaffected by orthogonal transformations and translations of
+           the coordinate system, which means that translating or reflecting the coordinate system
+           will result in the same boxes being chosen by the algorithm.

  Inputs:
-     - **overlaps** (Tensor) - A 2-D Tensor of shape
-       Types allowed:float32.
-     - **scores** (Tensor) - A 1-D Tensor of shape
-       the
+     - **overlaps** (Tensor) - A 2-D Tensor of shape :math:`(num\_boxes, num\_boxes)`,
+       representing the n-by-n box overlap values. Types allowed:float16, float32 and float64.
+     - **scores** (Tensor) - A 1-D Tensor of shape :math:`(num\_boxes)` where each element represents a
+       single score associated with each box (i.e., each row of the `boxes` Tensor).
+       It is required that the number of scores in `scores` must be equal to the number of boxes in `boxes`.
+       The supported data type is float32.
      - **max_output_size** (Union[Tensor, Number.Int]) - A scalar integer Tensor representing the maximum
       number of boxes to be selected by non max suppression, and max_output_size must be equal to or greater
       than 0.
       Types allowed:int32.
-     - **overlap_threshold** (Union[Tensor, Number.Float]) - A 0-D float
-       Types allowed:float32.
-     - **score_threshold** (Union[Tensor, Number.Float]) - A 0-D float
-       deciding when to remove boxes based on score.
-       Types allowed:float32.
+     - **overlap_threshold** (Union[Tensor, Number.Float]) - A scalar value, represented by a 0-D float Tensor,
+       which is used as a threshold to determine if two boxes overlap too much.
+       Types allowed:float16, float32 and float64.
+     - **score_threshold** (Union[Tensor, Number.Float]) - A 0-D float Tensor representing the threshold for
+       deciding when to remove boxes based on score. It has the same dtype as `overlap_threshold`.

  Outputs:
+     A 1-D integer Tensor of shape :math:`(M)` representing the selected indices from the `boxes` Tensor,
+     where M <= `max_output_size`. Its data type is int32.

  Raises:
-     TypeError: If the dtype of `overlaps`
-     TypeError: If `overlaps`
-     TypeError: If `
-     TypeError: If `
-     TypeError: If `
-     TypeError: If `score_threshold` is not tensor or scalar. If its type is not float32.
+     TypeError: If the dtype of `overlaps` , `scores` `overlap_threshold` and `score_threshold`
+     is not float16, float32 or float64.
+     TypeError: If `overlaps` or `scores` is not Tensor.
+     TypeError: If `max_output_size` is not Tensor or Scalar.If `max_output_size` is not int32.
+     TypeError: If `overlap_threshold` is not Tensor or scalar. If its type is not float16, float32 or float64.
+     TypeError: If `score_threshold` is not Tensor or scalar. If its type is not float16, float32 or float64.
      ValueError: If the size of shape of `overlaps` is not 2 or the second value of its shape
      is not equal to the first value of its shape.
      ValueError: If the size of shape of `scores` is not 1.
-     ValueError: If
+     ValueError: If any of the size of shape of `max_output_size`, `overlap_threshold`, `score_threshold` is not 0.
      ValueError: If `max_output_size` is negative.
      ValueError: If the shape of `scores` is not equal to the shape of the dim0 or dim1 of `overlaps`.

  Supported Platforms:
-     ``Ascend`` ``
+     ``Ascend`` ``GPU`` ``CPU``

  Examples:
      >>> overlaps = Tensor(np.array([[0.6964692, 0.28613934, 0.22685145, 0.5513148],
+     ...                             [0.71946895, 0.42310646, 0.9807642, 0.6848297],
+     ...                             [0.4809319, 0.39211753, 0.343178, 0.7290497],
+     ...                             [0.43857226, 0.059677895, 0.39804426, 0.7379954]
+     ...                             ]), mstype.float32)
      >>> scores = Tensor(np.array([0.18249173, 0.17545176, 0.53155136, 0.53182757]), mstype.float32)
      >>> max_output_size = Tensor(4, mstype.int32)
      >>> overlap_threshold = Tensor(0.1, mstype.float32)
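Since this variant takes a precomputed N-by-N overlap matrix instead of raw boxes, a short sketch may help; it is illustrative, with the import path, the overlap values, and the expected selection in the comment all assumed rather than drawn from the diff.

    # Illustrative sketch: suppress box 1, which overlaps box 0 above the threshold.
    import numpy as np
    import mindspore as ms
    from mindspore import Tensor
    from mindspore.ops.operations import image_ops

    overlaps = Tensor(np.array([[1.0, 0.8, 0.1],
                                [0.8, 1.0, 0.2],
                                [0.1, 0.2, 1.0]], np.float32))  # any overlap criterion, e.g. IOU
    scores = Tensor(np.array([0.9, 0.6, 0.5], np.float32))
    op = image_ops.NonMaxSuppressionWithOverlaps()
    selected = op(overlaps, scores,
                  Tensor(3, ms.int32),       # max_output_size
                  Tensor(0.6, ms.float32),   # overlap_threshold
                  Tensor(0.0, ms.float32))   # score_threshold
    print(selected)                          # expected to keep boxes 0 and 2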
@@ -481,15 +503,20 @@ class NonMaxSuppressionWithOverlaps(Primitive):


  class HSVToRGB(Primitive):
-     """
+     r"""
+     Transform one single or a batch of images from HSV to RGB color space.
+     Each pixel's HSV value is converted to its corresponding RGB value.
+     Note that the function is only well-defined for input pixel values in the range [0, 1].
+     Image format should be "NHWC".

  Inputs:
-     - **x** (Tensor) - The input image must be a 4-D tensor of shape
-       Types allowed: float16, float32, float64.
+     - **x** (Tensor) - The input image must be a 4-D tensor of shape
+       :math:`(batch, image\_height, image\_width, channel)`.
+       Number of channel must be 3. Types allowed: float16, float32, float64.

  Outputs:
-     A 4-D tensor of shape
+     A 4-D tensor of shape :math:`(batch, image\_height, image\_width, channel)`
+     with same type of input.

  Raises:
      TypeError: If `x` is not a Tensor.
@@ -498,7 +525,7 @@ class HSVToRGB(Primitive):
      ValueError: If the last dimension of `x` is not equal to 3.

  Supported Platforms:
-     ``CPU``
+     ``GPU`` ``CPU``

  Examples:
      >>> image = np.array([0.5, 0.5, 0.5]).astype(np.float32).reshape([1, 1, 1, 3])
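The conversion above can be cross-checked against the standard library, which is a convenient way to see the [0, 1] value convention in practice. The sketch below is illustrative; the import path is an assumption and a backend kernel for HSVToRGB must be available.

    # Illustrative sketch: convert one HSV pixel and compare with colorsys.
    import colorsys
    import numpy as np
    from mindspore import Tensor
    from mindspore.ops.operations import image_ops

    hsv = np.array([0.5, 0.5, 0.5], np.float32).reshape(1, 1, 1, 3)  # NHWC, values in [0, 1]
    rgb = image_ops.HSVToRGB()(Tensor(hsv))
    print(rgb.asnumpy().ravel())
    print(colorsys.hsv_to_rgb(0.5, 0.5, 0.5))  # reference result: (0.25, 0.5, 0.5)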
@@ -586,27 +613,27 @@ class CropAndResizeGradBoxes(Primitive):
|
|
|
586
613
|
|
|
587
614
|
class RGBToHSV(Primitive):
|
|
588
615
|
"""
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
616
|
+
Transform one single or a batch of images from RGB to HSV color space.
|
|
617
|
+
Each pixel's RGB value is converted to its corresponding HSV value.
|
|
618
|
+
Note that the function is only well-defined for input pixel values in the range [0, 1].
|
|
592
619
|
|
|
593
620
|
Note:
|
|
594
621
|
Last dimension of input images must be size 3.
|
|
595
622
|
|
|
596
623
|
Inputs:
|
|
597
|
-
**images** (Tensor) -
|
|
598
|
-
|
|
624
|
+
- **images** (Tensor) - 1-D or higher rank RGB data Tensor to convert, last dimension must be size 3.
|
|
625
|
+
Must be one of the following types: float16, float32, float64.
|
|
599
626
|
|
|
600
627
|
Outputs:
|
|
601
628
|
A Tensor, has the same type and shape as input `images`.
|
|
602
629
|
|
|
603
630
|
Raises:
|
|
604
|
-
TypeError: If `images` is not tensor or its dtype is not float
|
|
605
|
-
ValueError: If the
|
|
631
|
+
TypeError: If `images` is not tensor or its dtype is not float.
|
|
632
|
+
ValueError: If the rank of `images` is less than 1.
|
|
606
633
|
ValueError: If the last value of shape of `images` is not 3.
|
|
607
634
|
|
|
608
635
|
Supported Platforms:
|
|
609
|
-
``Ascend`` ``CPU``
|
|
636
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
610
637
|
|
|
611
638
|
Examples:
|
|
612
639
|
>>> images = np.array([0.25, 0.5, 0.5]).astype(np.float32).reshape([1, 1, 1, 3])
|
|
@@ -626,37 +653,39 @@ class ResizeLinear1D(Primitive):
|
|
|
626
653
|
r"""
|
|
627
654
|
Using the linear interpolate method resize the input tensor 'x'.
|
|
628
655
|
|
|
629
|
-
For general resize, refer to :func:`mindspore.ops.interpolate` for more
|
|
656
|
+
For general resize, refer to :func:`mindspore.ops.interpolate` for more details.
|
|
630
657
|
|
|
631
658
|
.. warning::
|
|
632
|
-
This is an experimental
|
|
659
|
+
- This is an experimental API that is subject to change.
|
|
660
|
+
- Currently, the Ascend platform only supports scenarios where the input `size` is Tuple or List.
|
|
633
661
|
|
|
634
662
|
Args:
|
|
635
|
-
coordinate_transformation_mode (
|
|
636
|
-
in the resized tensor to the coordinate in the original tensor. Other optional: 'half_pixel'
|
|
663
|
+
coordinate_transformation_mode (str): Default is 'align_corners'. Describes how to transform the coordinate
|
|
664
|
+
in the resized tensor to the coordinate in the original tensor. Other optional: 'half_pixel'.
|
|
637
665
|
|
|
638
666
|
Inputs:
|
|
639
667
|
- **x** (Tensor) - A 3-D tensor which to resize, with shape [batch, channel, width]. Must be one of the
|
|
640
668
|
following types: uint8, int8, int16, int32, int64, float16, float32, double.
|
|
641
|
-
- **size** (
|
|
669
|
+
- **size** (Union[Tuple[int], List[int], Tensor[int]]): describes the new width of `x` .
|
|
670
|
+
A tuple or list or 1-D tensor with only one int element :math:`(new\_width)`.
|
|
642
671
|
|
|
643
672
|
Outputs:
|
|
644
673
|
A 3-D tensor which shape is [batch, channel, new_width] with the same type as `x`.
|
|
645
674
|
|
|
646
675
|
Raises:
|
|
647
676
|
TypeError: If dtype of `x` is not in the support list.
|
|
648
|
-
TypeError: If `size` is not
|
|
677
|
+
TypeError: If `size` is not in Union[Tuple[int], List[int], Tensor[int]].
|
|
649
678
|
TypeError: If `coordinate_transformation_mode` is not a string.
|
|
650
679
|
TypeError: If `coordinate_transformation_mode` is not in the support list.
|
|
651
680
|
|
|
652
681
|
Supported Platforms:
|
|
653
|
-
``
|
|
682
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
654
683
|
|
|
655
684
|
Examples:
|
|
656
|
-
>>>
|
|
657
|
-
>>> size =
|
|
685
|
+
>>> x = Tensor([[[1, 2, 3], [4, 5, 6]]], mindspore.float32)
|
|
686
|
+
>>> size = (6,)
|
|
658
687
|
>>> resize_linear_1d = ops.ResizeLinear1D(coordinate_transformation_mode="align_corners")
|
|
659
|
-
>>> output = resize_linear_1d(x
|
|
688
|
+
>>> output = resize_linear_1d(x, size)
|
|
660
689
|
>>> print(output)
|
|
661
690
|
[[[1. 1.4 1.8 2.2 2.6 3.]
|
|
662
691
|
[4. 4.4 4.8 5.2 5.6 6.]]]
|
|
@@ -668,7 +697,7 @@ class ResizeLinear1D(Primitive):
|
|
|
668
697
|
self.init_prim_io_names(inputs=["x", "sizes"], outputs=["output"])
|
|
669
698
|
validator.check_value_type(
|
|
670
699
|
"coordinate_transformation_mode", coordinate_transformation_mode, [str], self.name)
|
|
671
|
-
validator.check_string(coordinate_transformation_mode, ["align_corners", "half_pixel"
|
|
700
|
+
validator.check_string(coordinate_transformation_mode, ["align_corners", "half_pixel"],
|
|
672
701
|
"coordinate_transformation_mode", self.name)
|
|
673
702
|
|
|
674
703
|
|
|
@@ -678,11 +707,14 @@ class ResizeBilinearV2(Primitive):
|
|
|
678
707
|
|
|
679
708
|
The resizing only affects the lower two dimensions which represent the height and width.
|
|
680
709
|
|
|
710
|
+
.. warning::
|
|
711
|
+
This is an experimental API that is subject to change or deletion.
|
|
712
|
+
|
|
681
713
|
Args:
|
|
682
|
-
align_corners (bool): If true, rescale input by :math:`(new\_height - 1) / (height - 1)`,
|
|
714
|
+
align_corners (bool, optional): If true, rescale input by :math:`(new\_height - 1) / (height - 1)`,
|
|
683
715
|
which exactly aligns the 4 corners of images and resized images. If false,
|
|
684
716
|
rescale by :math:`new\_height / height`. Default: False.
|
|
685
|
-
half_pixel_centers (bool): Whether half pixel center. If set to True, `align_corners` should be False.
|
|
717
|
+
half_pixel_centers (bool, optional): Whether half pixel center. If set to True, `align_corners` should be False.
|
|
686
718
|
Default: False.
|
|
687
719
|
|
|
688
720
|
Inputs:
|
|
@@ -700,13 +732,16 @@ class ResizeBilinearV2(Primitive):
|
|
|
700
732
|
TypeError: If `half_pixel_centers` is not a bool.
|
|
701
733
|
TypeError: If `align_corners` and `half_pixel_centers` are all True.
|
|
702
734
|
ValueError: If `half_pixel_centers` is True and device_target is CPU.
|
|
735
|
+
ValueError: If dim of `x` is not 4.
|
|
736
|
+
ValueError: If `size` is Tensor and its dim is not 1.
|
|
737
|
+
ValueError: If `size` contains other than 2 elements.
|
|
703
738
|
|
|
704
739
|
Supported Platforms:
|
|
705
|
-
``Ascend`` ``
|
|
740
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
706
741
|
|
|
707
742
|
Examples:
|
|
708
743
|
>>> x = Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mindspore.float32)
|
|
709
|
-
>>> output = ops.ResizeBilinearV2(x, (5, 5))
|
|
744
|
+
>>> output = ops.ResizeBilinearV2()(x, (5, 5))
|
|
710
745
|
>>> print(output)
|
|
711
746
|
[[[[1. 2. 3. 4. 5.]
|
|
712
747
|
[1. 2. 3. 4. 5.]
|
|
@@ -725,46 +760,42 @@ class ResizeBilinearV2(Primitive):
|
|
|
725
760
|
half_pixel_centers, [bool], self.name)
|
|
726
761
|
if half_pixel_centers and align_corners:
|
|
727
762
|
raise ValueError(f"If half_pixel_centers is True, align_corners must be False, but got {align_corners}")
|
|
728
|
-
target = context.get_context("device_target")
|
|
729
|
-
if half_pixel_centers and target == "CPU":
|
|
730
|
-
raise ValueError(f"Currently `half_pixel_centers`=True is not supported in CPU device_target")
|
|
731
763
|
|
|
732
764
|
|
|
733
765
|
class ResizeBicubic(Primitive):
|
|
734
|
-
"""
|
|
766
|
+
r"""
|
|
735
767
|
Resize images to size using bicubic interpolation.
|
|
736
768
|
|
|
737
|
-
.. warning::
|
|
738
|
-
The max output length is 1000000.
|
|
739
|
-
|
|
740
769
|
Args:
|
|
741
|
-
align_corners (bool):If true, the centers of the 4 corner pixels of the input
|
|
770
|
+
align_corners (bool, optional):If true, the centers of the 4 corner pixels of the input
|
|
742
771
|
and output tensors are aligned, preserving the values at the corner pixels.Default: False.
|
|
743
|
-
half_pixel_centers (bool):
|
|
772
|
+
half_pixel_centers (bool, optional): Whether to use half-pixel center alignment. If set to True,
|
|
773
|
+
`align_corners` should be False. Default: False.
|
|
744
774
|
|
|
745
775
|
Inputs:
|
|
746
|
-
- **images** (Tensor) - The input image must be a 4-D tensor of shape
|
|
747
|
-
The format must be
|
|
776
|
+
- **images** (Tensor) - The input image must be a 4-D tensor of shape :math:`(batch, channels, height, width)`.
|
|
777
|
+
The format must be NCHW.
|
|
748
778
|
Types allowed: int8, int16, int32, int64, float16, float32, float64, uint8, uint16.
|
|
749
779
|
- **size** (Tensor) - A 1-D tensor of shape [2], with 2 elements: new_height, new_width.
|
|
750
780
|
Types allowed: int32.
|
|
781
|
+
|
|
751
782
|
Outputs:
|
|
752
|
-
A 4-D tensor of shape
|
|
783
|
+
A 4-D tensor of shape :math:`(batch, channels, new\_height, new\_width)` with type float32.
|
|
753
784
|
|
|
754
785
|
Raises:
|
|
755
786
|
TypeError: If `images` type is not allowed.
|
|
756
|
-
TypeError: If `size` type is not
|
|
757
|
-
TypeError: If `align_corners` type is not
|
|
758
|
-
TypeError: If `half_pixel_centers` type is not
|
|
787
|
+
TypeError: If `size` type is not int32.
|
|
788
|
+
TypeError: If `align_corners` type is not bool.
|
|
789
|
+
TypeError: If `half_pixel_centers` type is not bool.
|
|
759
790
|
ValueError: If `images` dim is not 4.
|
|
760
791
|
ValueError: If `size` dim is not 1.
|
|
761
792
|
ValueError: If `size` size is not 2.
|
|
762
|
-
ValueError: If `size` value is not positive.
|
|
763
|
-
ValueError: If `align_corners` and `half_pixel_centers` value are both
|
|
793
|
+
ValueError: If any `size` value is not positive.
|
|
794
|
+
ValueError: If `align_corners` and `half_pixel_centers` value are both True.
|
|
764
795
|
|
|
765
796
|
|
|
766
797
|
Supported Platforms:
|
|
767
|
-
``Ascend`` ``
|
|
798
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
768
799
|
|
|
769
800
|
Examples:
|
|
770
801
|
>>> class NetResizeBicubic(nn.Cell):
|
|
@@ -791,7 +822,6 @@ class ResizeBicubic(Primitive):
|
|
|
791
822
|
@prim_attr_register
|
|
792
823
|
def __init__(self, align_corners=False, half_pixel_centers=False):
|
|
793
824
|
"""Initialize"""
|
|
794
|
-
self.add_prim_attr("max_length", 1000000)
|
|
795
825
|
validator.check_value_type('align_corners', align_corners, bool, self.name)
|
|
796
826
|
validator.check_value_type('half_pixel_centers', half_pixel_centers, bool, self.name)
|
|
797
827
|
self.init_prim_io_names(inputs=['images', 'size'], outputs=['y'])
|
|
@@ -815,59 +845,61 @@ class ResizeBicubic(Primitive):
|
|
|
815
845
|
mstype.float32, mstype.uint8, mstype.uint16, mstype.double], self.name)
|
|
816
846
|
validator.check_tensor_dtype_valid("size", size_dtype, [mstype.int32], self.name)
|
|
817
847
|
# check input shape rank
|
|
818
|
-
validator.check("images rank", len(images_shape), "expected", 4,
|
|
819
|
-
validator.check("size rank", len(size_shape), "expected", 1,
|
|
820
|
-
validator.check("size dim_0", size_shape[0], "expected", 2,
|
|
848
|
+
validator.check("images rank", len(images_shape), "expected", 4, validator.EQ, self.name)
|
|
849
|
+
validator.check("size rank", len(size_shape), "expected", 1, validator.EQ, self.name)
|
|
850
|
+
validator.check("size dim_0", size_shape[0], "expected", 2, validator.EQ, self.name)
|
|
821
851
|
# check size_value
|
|
822
|
-
validator.check("size[0]", size_value[0], "minimum", 0,
|
|
823
|
-
validator.check("size[1]", size_value[1], "minimum", 0,
|
|
852
|
+
validator.check("size[0]", size_value[0], "minimum", 0, validator.GT, self.name)
|
|
853
|
+
validator.check("size[1]", size_value[1], "minimum", 0, validator.GT, self.name)
|
|
824
854
|
|
|
825
855
|
batch_size = images_shape[0]
|
|
856
|
+
channel = images_shape[1]
|
|
826
857
|
height = size_value[0]
|
|
827
858
|
width = size_value[1]
|
|
828
|
-
|
|
829
|
-
out_shape = (batch_size, height, width
|
|
830
|
-
return {'shape': out_shape,
|
|
831
|
-
'dtype': mstype.float32,
|
|
832
|
-
'value': None}
|
|
859
|
+
|
|
860
|
+
out_shape = (batch_size, channel, height, width)
|
|
861
|
+
return {'shape': out_shape, 'dtype': mstype.float32, 'value': None}
|
|
833
862
|
|
|
834
863
|
|
|
835
864
|
class ResizeArea(Primitive):
|
|
836
|
-
"""
|
|
865
|
+
r"""
|
|
837
866
|
Resize images to a certain size using area interpolation.
|
|
838
867
|
|
|
839
868
|
The resizing process only changes the two dimensions of images, which represent the width and height of images.
|
|
840
869
|
|
|
841
870
|
.. warning::
|
|
842
|
-
The values of
|
|
871
|
+
The values of `size` must be greater than zero.
|
|
843
872
|
|
|
844
873
|
Args:
|
|
845
|
-
align_corners (bool):
|
|
846
|
-
|
|
874
|
+
align_corners (bool, optional): A boolean flag that specifies whether
|
|
875
|
+
to align the centers of the four corner pixels of the input and output tensors.
|
|
876
|
+
When this flag is set to True, the corner pixels of the output tensor are aligned
|
|
877
|
+
with the corner pixels of the input tensor, which preserves the values at the corner pixels.
|
|
878
|
+
Defaults: False.
|
|
847
879
|
|
|
848
880
|
Inputs:
|
|
849
|
-
- **images** (Tensor) - Input images must be a 4-D tensor with shape
|
|
850
|
-
The format must be NHWC.
|
|
881
|
+
- **images** (Tensor) - Input images must be a 4-D tensor with shape
|
|
882
|
+
which is :math:`(batch, channels, height, width)`. The format must be "NHWC".
|
|
851
883
|
Types allowed: int8, int16, int32, int64, float16, float32, float64, uint8, uint16.
|
|
852
884
|
- **size** (Tensor) - Input size must be a 1-D tensor of 2 elements: new_height, new_width.
|
|
853
885
|
The new size of output image.
|
|
854
886
|
Types allowed: int32.
|
|
855
887
|
|
|
856
888
|
Outputs:
|
|
857
|
-
A 4-D tensor of shape
|
|
889
|
+
A 4-D tensor of shape :math:`(batch, new\_height, new\_width, channels)` with type float32.
|
|
858
890
|
|
|
859
891
|
Raises:
|
|
860
892
|
TypeError: If dtype of `images` is not supported.
|
|
861
893
|
TypeError: If dtype of `size` is not int32.
|
|
862
894
|
TypeError: If dtype of `align_corners` is not bool.
|
|
863
895
|
ValueError: If the num of inputs is not 2.
|
|
864
|
-
ValueError: If the dimension of `images`
|
|
865
|
-
ValueError: If the dimension of `size`
|
|
866
|
-
ValueError: If the element num of `size` is not
|
|
867
|
-
ValueError:
|
|
896
|
+
ValueError: If the dimension of `images` is not 4.
|
|
897
|
+
ValueError: If the dimension of `size` is not 1.
|
|
898
|
+
ValueError: If the element num of `size` is not 2.
|
|
899
|
+
ValueError: If any value of `size` is not positive.
|
|
868
900
|
|
|
869
901
|
Supported Platforms:
|
|
870
|
-
``Ascend`` ``
|
|
902
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
871
903
|
|
|
872
904
|
Examples:
|
|
873
905
|
>>> images = Tensor([[[[2], [4], [6], [8]], [[10], [12], [14], [16]]]], mindspore.float16)
|
|
@@ -896,7 +928,7 @@ class CropAndResizeGradImage(Primitive):
|
|
|
896
928
|
|
|
897
929
|
Args:
|
|
898
930
|
method (str): A string specifying the interpolation method. "bilinear", "nearest" and "bilinear_v2" are
|
|
899
|
-
supported for now. Default: "bilinear".
|
|
931
|
+
supported for now. "bilinear_v2" only supports GPU. Default: "bilinear".
|
|
900
932
|
T (mindspore.dtype): T is a required attribute. The value range of T is {mindspore.float16, mindspore.float32,
|
|
901
933
|
mindspore.float64}.
|
|
902
934
|
|
|
@@ -928,7 +960,7 @@ class CropAndResizeGradImage(Primitive):
|
|
|
928
960
|
TypeError: If `box_index` is not tensor or its dtype is not int32.
|
|
929
961
|
TypeError: If `image_size` is not tensor or its dtype is not int32.
|
|
930
962
|
TypeError: If the value of `T` is not a number dtype in mindspore.
|
|
931
|
-
ValueError: If `method` is not "bilinear".
|
|
963
|
+
ValueError: If `method` is not in {"bilinear", "nearest", "bilinear_v2"}.
|
|
932
964
|
ValueError: If `T` is not in {mindspore.float16, mindspore.float32, mindspore.float64}.
|
|
933
965
|
ValueError: If the size of `grads` tensor shape is not equal to 4.
|
|
934
966
|
ValueError: If the size of `boxes` tensor shape is not equal to 2.
|
|
@@ -939,7 +971,7 @@ class CropAndResizeGradImage(Primitive):
|
|
|
939
971
|
ValueError: If the value of image_height or image_width of `image_size` is not positive.
|
|
940
972
|
|
|
941
973
|
Supported Platforms:
|
|
942
|
-
``GPU``
|
|
974
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
943
975
|
|
|
944
976
|
Examples:
|
|
945
977
|
>>> crop_and_resize_grad_image = ops.CropAndResizeGradImage(T = mindspore.float32, method = "bilinear")
|
|
@@ -972,39 +1004,46 @@ class CropAndResizeGradImage(Primitive):
|
|
|
972
1004
|
"""Initialize CropAndResizeGradImage"""
|
|
973
1005
|
self.init_prim_io_names(inputs=['grads', 'boxes', 'box_index', 'image_size'], outputs=['y'])
|
|
974
1006
|
validator.check_value_type("method", method, [str], self.name)
|
|
975
|
-
|
|
1007
|
+
is_ascend_cpu = context.get_context('device_target') in ("Ascend", "CPU")
|
|
1008
|
+
if is_ascend_cpu:
|
|
1009
|
+
validator.check("method", method, "expected", ("bilinear", "nearest"), validator.IN, self.name)
|
|
1010
|
+
else:
|
|
1011
|
+
validator.check("method", method, "expected", ("bilinear", "nearest", "bilinear_v2"),
|
|
1012
|
+
validator.IN, self.name)
|
|
976
1013
|
self.method = method
|
|
977
1014
|
valid_values = (mstype.float16, mstype.float32, mstype.float64)
|
|
978
1015
|
if T in mstype.number_type:
|
|
979
|
-
validator.check("T", T, "expected", valid_values,
|
|
1016
|
+
validator.check("T", T, "expected", valid_values, validator.IN, self.name)
|
|
980
1017
|
else:
|
|
981
1018
|
validator.check_type_name("T", T, valid_values, self.name)
|
|
982
1019
|
self.add_prim_attr("max_Byte", int(2e9)) # Maximum bytes of image gradient
|
|
983
1020
|
|
|
984
1021
|
|
|
985
1022
|
class ScaleAndTranslate(Primitive):
|
|
986
|
-
"""
|
|
1023
|
+
r"""
|
|
987
1024
|
Scale And Translate the input image tensor.
|
|
988
1025
|
|
|
989
1026
|
Note:
|
|
990
|
-
Input images must be a 4-D tensor.
|
|
991
|
-
Input size, scale and translation must be a 1-D tensor with two elements.
|
|
1027
|
+
- Input images must be a 4-D tensor.
|
|
1028
|
+
- Input size, scale and translation must be a 1-D tensor with two elements.
|
|
992
1029
|
|
|
993
1030
|
Args:
|
|
994
|
-
kernel_type (str):
|
|
995
|
-
|
|
1031
|
+
kernel_type (str, optional): Deciding which image filtering algorithm to choose. Valid options:
|
|
1032
|
+
["lanczos1", "lanczos3", "lanczos5", "gaussian", "box", "triangle", "keyscubic", "mitchellcubic"]
|
|
1033
|
+
Default: "lanczos3".
|
|
1034
|
+
antialias (bool, optional): Deciding whether to use the antialias. Default: True.
|
|
996
1035
|
|
|
997
1036
|
Inputs:
|
|
998
|
-
- **images** (Tensor) - A 4-D tensor of shape (batch,
|
|
1037
|
+
- **images** (Tensor) - A 4-D tensor of shape :math:`(batch, image\_height, image\_width, channel)`.
|
|
999
1038
|
- **size** (Tensor) - The size of the output image after scale and translate operations. A 1-D tensor with two
|
|
1000
|
-
positive elements whose dtype is int32 and shape must be (2,)
|
|
1039
|
+
positive elements whose dtype is int32 and shape must be :math:`(2,)`.
|
|
1001
1040
|
- **scale** (Tensor) - Indicates the zoom factor. A 1-D tensor with two positive elements whose dtype is float32
|
|
1002
|
-
and shape must be (2,)
|
|
1041
|
+
and shape must be :math:`(2,)`.
|
|
1003
1042
|
- **translation** (Tensor) - Translate the pixel value. A 1-D tensor with two elements whose dtype is
|
|
1004
|
-
float32 and shape must be (2,)
|
|
1043
|
+
float32 and shape must be :math:`(2,)`.
|
|
1005
1044
|
|
|
1006
1045
|
Outputs:
|
|
1007
|
-
A 4-D tensor with type: float32 and shape:
|
|
1046
|
+
A 4-D tensor with type: float32 and shape :math:`(batch, size[0], size[1], channel)`.
|
|
1008
1047
|
|
|
1009
1048
|
Raises:
|
|
1010
1049
|
TypeError: If `kernel_type` is not str.
|
|
@@ -1016,9 +1055,9 @@ class ScaleAndTranslate(Primitive):
|
|
|
1016
1055
|
ValueError: If `kernel_type` is not in ["lanczos1", "lanczos3", "lanczos5", "gaussian", "box", "triangle",
|
|
1017
1056
|
"keyscubic", "mitchellcubic"].
|
|
1018
1057
|
ValueError: If the rank of `images` is not 4.
|
|
1019
|
-
ValueError: If
|
|
1020
|
-
ValueError: If
|
|
1021
|
-
ValueError: If the shape of `translation` is not (2,)
|
|
1058
|
+
ValueError: If the shape of `size` is not :math:`(2,)`.
|
|
1059
|
+
ValueError: If the shape of `scale` is not :math:`(2,)`.
|
|
1060
|
+
ValueError: If the shape of `translation` is not :math:`(2,)`.
|
|
1022
1061
|
|
|
1023
1062
|
Supported Platforms:
|
|
1024
1063
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -1045,3 +1084,159 @@ class ScaleAndTranslate(Primitive):
|
|
|
1045
1084
|
validator.check_string(kernel_type, ["lanczos1", "lanczos3", "lanczos5", "gaussian", "box", "triangle",
|
|
1046
1085
|
"keyscubic", "mitchellcubic"], "kernel_type", self.name)
|
|
1047
1086
|
validator.check_value_type("antialias", antialias, [bool], self.name)
|
|
1087
|
+
|
|
1088
|
+
|
|
1089
|
+
class CombinedNonMaxSuppression(Primitive):
|
|
1090
|
+
r"""
|
|
1091
|
+
Applies a greedy approach to select a subset of bounding boxes from a list of
|
|
1092
|
+
candidates using NonMaxSuppression, where the boxes are sorted in descending order of their confidence score.
|
|
1093
|
+
|
|
1094
|
+
Args:
|
|
1095
|
+
clip_boxes (bool, optional): Determines whether to apply bounding box normalization to ensure the
|
|
1096
|
+
coordinates are within [0, 1] range. Default: True.
|
|
1097
|
+
|
|
1098
|
+
- If True, clip the boxes that fall outside this range.
|
|
1099
|
+
- If False, return the box coordinates as they are without any modifications.
|
|
1100
|
+
|
|
1101
|
+
pad_per_class (bool, optional): Determines whether the output of the non-maximum suppression (NMS)
|
|
1102
|
+
algorithm should be padded or clipped to meet the maximum size constraints. Default: False.
|
|
1103
|
+
|
|
1104
|
+
- If False, the output is clipped to the maximum size of `max_total_size`.
|
|
1105
|
+
- If True, the output is padded up to `max_size_per_class` * `num_classes` and clipped if
|
|
1106
|
+
it exceeds `max_total_size`.
|
|
1107
|
+
|
|
1108
|
+
Inputs:
|
|
1109
|
+
- **boxes** (Tensor) - A float32 Tensor with shape :math:`(batch_size, num_boxes, q, 4)`
|
|
1110
|
+
representing the bounding box coordinates.
|
|
1111
|
+
`q` indicates mapping relationship between boxes and classes.
|
|
1112
|
+
If `q` is 1, all classes use the same bounding box. If `q` is equal to the number of classes,
|
|
1113
|
+
class-specific boxes are applied.
|
|
1114
|
+
- **scores** (Tensor) - A 3-D Tensor of float32 type with the shape
|
|
1115
|
+
:math:`(batch_size, num_boxes, num_classes)`. It contains a score value for each box,
|
|
1116
|
+
with each row of `boxes` represented by a single score.
|
|
1117
|
+
- **max_output_size_per_class** (Tensor) - The maximum number of boxes that can be selected for each class
|
|
1118
|
+
by the non-maximum suppression algorithm, represented by a scalar Tensor of type int32.
|
|
1119
|
+
- **max_total_size** (Tensor) - A scalar Tensor of type int32 that represents the
|
|
1120
|
+
maximum number of boxes that are kept for all classes.
|
|
1121
|
+
- **iou_threshold** (Tensor) - A scalar Tensor of float32 type that represents the threshold for
|
|
1122
|
+
determining if the IOU overlap between boxes is too high. `iou_threshold` must be equal or greater
|
|
1123
|
+
than 0 and be equal or smaller than 1.
|
|
1124
|
+
- **score_threshold** (Tensor) - A scalar Tensor of type float32 that represents the threshold
|
|
1125
|
+
for determining when to remove boxes based on their scores.
|
|
1126
|
+
|
|
1127
|
+
Outputs:
|
|
1128
|
+
- **nmsed_boxes** - A Tensor of float32 with shape of (batch_size, num_detection, 4), which contains
|
|
1129
|
+
the non-max suppressed boxes.
|
|
1130
|
+
- **nmsed_scores** - A Tensor of float32 with shape of (batch_size, num_detection), which contains score
|
|
1131
|
+
of boxes.
|
|
1132
|
+
- **nmsed_classes** - A Tensor of float32 with shape of (batch_size, num_detection), which contains classes
|
|
1133
|
+
of boxes.
|
|
1134
|
+
- **valid_detections** A Tensor of int32 with shape of (batch_size,), which indicates the number of valid
|
|
1135
|
+
detections of each batch.
|
|
1136
|
+
|
|
1137
|
+
Raises:
|
|
1138
|
+
TypeError: If the dtype of `boxes`, `scores` , `iou_threshold` , `score threshold` are not float32.
|
|
1139
|
+
TypeError: If the dtype of `max_output_size_per_class` and `max_total_size` are not int32.
|
|
1140
|
+
ValueError: If `boxes` is not 4D.
|
|
1141
|
+
ValueError: If `max_output_size_per_class`, `max_total_size`, `iou_threshold` and `score threshold` are not 0D.
|
|
1142
|
+
ValueError: If `scores` is not 3D.
|
|
1143
|
+
ValueError: If shape[0] or shape[1] of `boxes` is not same with that of the `scores`.
|
|
1144
|
+
ValueError: If shape[2] of `boxes` is not same with shape[2] of `scores` or 1
|
|
1145
|
+
ValueError: If `max_total_size` < 0.
|
|
1146
|
+
ValueError: If `max_output_size_per_class` < 0.
|
|
1147
|
+
ValueError: If `iou_threshold` not in [0,1].
|
|
1148
|
+
|
|
1149
|
+
Supported Platforms:
|
|
1150
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1151
|
+
|
|
1152
|
+
Examples:
|
|
1153
|
+
>>> boxes = Tensor(np.array([[[[200, 100, 150, 100]],
|
|
1154
|
+
... [[220, 120, 150, 100]],
|
|
1155
|
+
... [[190, 110, 150, 100]],
|
|
1156
|
+
... [[210, 112, 150, 100]]]])).astype('float32')
|
|
1157
|
+
>>> scores = Tensor(np.array([[[0.2000, 0.7000, 0.1000], [0.1000, 0.8000, 0.1000], [0.3000, 0.6000, 0.1000],
|
|
1158
|
+
... [0.0500, 0.9000, 0.0500]]])).astype('float32')
|
|
1159
|
+
>>> max_output_size_per_class = Tensor(4, mstype.int32)
|
|
1160
|
+
>>> max_total_size = Tensor(1, mstype.int32)
|
|
1161
|
+
>>> iou_threshold = Tensor(0, mstype.float32)
|
|
1162
|
+
>>> score_threshold = Tensor(0, mstype.float32)
|
|
1163
|
+
>>> net = ops.CombinedNonMaxSuppression()
|
|
1164
|
+
>>> out = net(boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold)
|
|
1165
|
+
>>> print(out)
|
|
1166
|
+
(Tensor(shape=[1, 1, 4], dtype=Float32, value= [[[1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
|
|
1167
|
+
1.00000000e+00]]]),
|
|
1168
|
+
Tensor(shape=[1, 1], dtype=Float32, value= [[ 8.99999976e-01]]),
|
|
1169
|
+
Tensor(shape=[1, 1], dtype=Float32, value= [[ 1.00000000e+00]]),
|
|
1170
|
+
Tensor(shape=[1], dtype=Int32, value= [1]))
|
|
1171
|
+
"""
|
|
1172
|
+
|
|
1173
|
+
@prim_attr_register
|
|
1174
|
+
def __init__(self, pad_per_class=False, clip_boxes=True):
|
|
1175
|
+
"""Initialize CombinedNonMaxSuppression"""
|
|
1176
|
+
self.pad_per_class = validator.check_value_type("pad_per_class", pad_per_class, [bool], self.name)
|
|
1177
|
+
self.add_prim_attr('pad_per_class', self.pad_per_class)
|
|
1178
|
+
self.clip_boxes = validator.check_value_type("clip_boxes", clip_boxes, [bool], self.name)
|
|
1179
|
+
self.add_prim_attr('clip_boxes', self.clip_boxes)
|
|
1180
|
+
|
|
1181
|
+
|
|
1182
|
+
class ResizeV2(Primitive):
|
|
1183
|
+
r"""
|
|
1184
|
+
Using the nearest, linear or cubic interpolate method resize the input tensor 'x'.
|
|
1185
|
+
|
|
1186
|
+
Note:
|
|
1187
|
+
Input x must be a 4-D tensor.
|
|
1188
|
+
|
|
1189
|
+
Args:
|
|
1190
|
+
coordinate_transformation_mode (str): Default is 'half_pixel'. Describes how to transform the
|
|
1191
|
+
coordinate in the resized tensor to the coordinate in the original tensor. Other optional: 'align_corners'.
|
|
1192
|
+
In 'nearest' mode, coordinate_transformation_mode must be 'half_pixel'.
|
|
1193
|
+
mode (str): Defaults to 'nearest'. Other optional: 'linear' and 'cubic'.
|
|
1194
|
+
|
|
1195
|
+
Inputs:
|
|
1196
|
+
- **x** (Tensor) - A 4-D tensor which to resize, with shape [batch, channel, width, height]. Must be one of the
|
|
1197
|
+
following types: uint8, int8, int16, int32, int64, float16, float32, float64, when mode = 'nearest'.
|
|
1198
|
+
Must be one of the following types: float16, float32, float64, when mode = 'linear' or 'cubic'.
|
|
1199
|
+
- **roi** (Tensor) - A 1-D float32 Tensor. Unused parameters currently.
|
|
1200
|
+
- **scales** (Tensor) - A 1-D float32 Tensor. Unused parameters currently.
|
|
1201
|
+
- **sizes** (Tensor) - A 1-D int64 or int32 Tensor, the length must be 4 and greater than 0.
|
|
1202
|
+
And sizes[0], sizes[1] must match with the shape[0] and shape[1] of x.
|
|
1203
|
+
When mode equals 'nearest' or 'linear', sizes[2] must be 1.
|
|
1204
|
+
|
|
1205
|
+
Outputs:
|
|
1206
|
+
A 4-D tensor which shape is [batch, channel, new_height, new_width] with type as same as x.
|
|
1207
|
+
|
|
1208
|
+
Raises:
|
|
1209
|
+
TypeError: If dtype of `x`, `roi`, `scales` or `sizes` is not supported.
|
|
1210
|
+
ValueError: If shape of `x`, `roi`, `scales` or `sizes` is not supported.
|
|
1211
|
+
ValueError: If the length of `sizes` is not 4.
|
|
1212
|
+
ValueError: If `sizes` is not greater than 0.
|
|
1213
|
+
ValueError: If sizes[2] is not 1, when `mode` = 'nearest' or 'linear'.
|
|
1214
|
+
ValueError: If sizes[0] and sizes[1] don't match the shape[0] and shape[1] of x.
|
|
1215
|
+
ValueError: If `coordinate_transformation_mode` or `mode` is not supported.
|
|
1216
|
+
ValueError: If `coordinate_transformation_mode` is not 'half_pixel', when `mode` = 'nearest'.
|
|
1217
|
+
|
|
1218
|
+
Supported Platforms:
|
|
1219
|
+
``CPU``
|
|
1220
|
+
|
|
1221
|
+
Examples:
|
|
1222
|
+
>>> x = Tensor(np.array([[[[1., 2., 3., 4.]]]]).astype(np.float32))
|
|
1223
|
+
>>> roi = Tensor(np.array([0]).astype(np.float32))
|
|
1224
|
+
>>> scales = Tensor(np.array([0]).astype(np.float32))
|
|
1225
|
+
>>> sizes = Tensor(np.array([1, 1, 1, 9]).astype(np.int64))
|
|
1226
|
+
>>> resize_v2 = ops.ResizeV2(coordinate_transformation_mode="half_pixel", mode="nearest")
|
|
1227
|
+
>>> output = resize_v2(x, roi, scales, sizes)
|
|
1228
|
+
>>> print(output)
|
|
1229
|
+
[[[[1. 1. 1. 2. 2. 3. 3. 4. 4.]]]]
|
|
1230
|
+
"""
|
|
1231
|
+
@prim_attr_register
|
|
1232
|
+
def __init__(self, coordinate_transformation_mode="half_pixel", mode="nearest"):
|
|
1233
|
+
"""Initialize ResizeV2."""
|
|
1234
|
+
self.init_prim_io_names(inputs=['x', 'roi', 'scales', 'sizes'], outputs=['y'])
|
|
1235
|
+
self.add_prim_attr("nearest_mode", "floor")
|
|
1236
|
+
self.add_prim_attr("cubic_coeff_a", -0.75)
|
|
1237
|
+
validator.check_value_type(
|
|
1238
|
+
"coordinate_transformation_mode", coordinate_transformation_mode, [str], self.name)
|
|
1239
|
+
validator.check_string(coordinate_transformation_mode, ["align_corners", "half_pixel"],
|
|
1240
|
+
"coordinate_transformation_mode", self.name)
|
|
1241
|
+
validator.check_value_type("mode", mode, [str], self.name)
|
|
1242
|
+
validator.check_string(mode, ["nearest", "linear", "cubic"], "mode", self.name)
|