mindspore 2.0.0a0__cp38-cp38-win_amd64.whl → 2.0.0rc1__cp38-cp38-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -2
- mindspore/_c_dataengine.cp38-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp38-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp38-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +16 -1
- mindspore/_extends/parse/parser.py +107 -22
- mindspore/_extends/parse/resources.py +0 -7
- mindspore/_extends/parse/standard_method.py +885 -413
- mindspore/amp.py +52 -57
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +38 -20
- mindspore/boost/dim_reduce.py +3 -3
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +4 -6
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +41 -7
- mindspore/common/api.py +215 -141
- mindspore/common/dtype.py +8 -1
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +4 -2
- mindspore/common/jit_config.py +17 -13
- mindspore/common/mutable.py +33 -13
- mindspore/common/parameter.py +23 -21
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +62 -41
- mindspore/common/tensor.py +852 -1154
- mindspore/communication/__init__.py +2 -2
- mindspore/communication/_comm_helper.py +11 -4
- mindspore/communication/management.py +22 -21
- mindspore/config/op_info.config +501 -1008
- mindspore/context.py +201 -23
- mindspore/dataset/__init__.py +6 -6
- mindspore/dataset/audio/__init__.py +7 -7
- mindspore/dataset/audio/transforms.py +670 -30
- mindspore/dataset/audio/utils.py +47 -4
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/core/config.py +210 -14
- mindspore/dataset/core/validator_helpers.py +2 -2
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +322 -66
- mindspore/dataset/engine/datasets_audio.py +80 -76
- mindspore/dataset/engine/datasets_standard_format.py +51 -38
- mindspore/dataset/engine/datasets_text.py +232 -118
- mindspore/dataset/engine/datasets_user_defined.py +41 -17
- mindspore/dataset/engine/datasets_vision.py +746 -225
- mindspore/dataset/engine/graphdata.py +75 -10
- mindspore/dataset/engine/iterators.py +45 -5
- mindspore/dataset/engine/offload.py +48 -28
- mindspore/dataset/engine/validators.py +117 -8
- mindspore/dataset/text/__init__.py +6 -5
- mindspore/dataset/text/transforms.py +86 -3
- mindspore/dataset/text/utils.py +6 -4
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +3 -2
- mindspore/dataset/transforms/c_transforms.py +1 -1
- mindspore/dataset/transforms/transforms.py +2 -2
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +2 -3
- mindspore/dataset/vision/c_transforms.py +9 -9
- mindspore/dataset/vision/py_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +2 -0
- mindspore/dataset/vision/transforms.py +160 -161
- mindspore/dataset/vision/utils.py +3 -3
- mindspore/experimental/map_parameter.py +38 -26
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +44 -9
- mindspore/include/api/delegate.h +1 -1
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_parallel_runner.h +2 -2
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +19 -3
- mindspore/include/api/types.h +3 -3
- mindspore/include/dataset/constants.h +7 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +1 -1
- mindspore/mindrecord/filereader.py +18 -0
- mindspore/mindrecord/filewriter.py +197 -34
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
- mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
- mindspore/mindrecord/tools/csv_to_mr.py +3 -3
- mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
- mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/__init__.py +0 -4
- mindspore/nn/cell.py +204 -132
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +7 -6
- mindspore/nn/layer/__init__.py +5 -4
- mindspore/nn/layer/activation.py +40 -89
- mindspore/nn/layer/basic.py +255 -624
- mindspore/nn/layer/channel_shuffle.py +7 -6
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +41 -4
- mindspore/nn/layer/conv.py +64 -28
- mindspore/nn/layer/dense.py +9 -8
- mindspore/nn/layer/embedding.py +27 -25
- mindspore/nn/layer/image.py +53 -46
- mindspore/nn/layer/math.py +97 -105
- mindspore/nn/layer/normalization.py +117 -86
- mindspore/nn/layer/padding.py +185 -95
- mindspore/nn/layer/pooling.py +817 -414
- mindspore/nn/layer/rnn_cells.py +10 -15
- mindspore/nn/layer/rnns.py +37 -38
- mindspore/nn/layer/thor_layer.py +11 -12
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +5 -4
- mindspore/nn/loss/loss.py +334 -199
- mindspore/nn/optim/ada_grad.py +6 -6
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +4 -5
- mindspore/nn/optim/adam.py +126 -62
- mindspore/nn/optim/adamax.py +3 -4
- mindspore/nn/optim/adasum.py +6 -6
- mindspore/nn/optim/asgd.py +2 -2
- mindspore/nn/optim/ftrl.py +67 -38
- mindspore/nn/optim/lamb.py +4 -5
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +43 -4
- mindspore/nn/optim/momentum.py +6 -5
- mindspore/nn/optim/optimizer.py +3 -1
- mindspore/nn/optim/proximal_ada_grad.py +2 -2
- mindspore/nn/optim/rmsprop.py +1 -1
- mindspore/nn/optim/rprop.py +8 -9
- mindspore/nn/optim/sgd.py +19 -13
- mindspore/nn/optim/thor.py +10 -15
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +4 -4
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +9 -15
- mindspore/nn/probability/distribution/bernoulli.py +3 -3
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +5 -7
- mindspore/nn/probability/distribution/cauchy.py +3 -3
- mindspore/nn/probability/distribution/distribution.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +3 -3
- mindspore/nn/probability/distribution/half_normal.py +15 -11
- mindspore/nn/probability/distribution/laplace.py +16 -13
- mindspore/nn/probability/distribution/logistic.py +2 -2
- mindspore/nn/probability/distribution/normal.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/student_t.py +20 -15
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +27 -10
- mindspore/nn/wrap/grad_reducer.py +2 -2
- mindspore/nn/wrap/loss_scale.py +40 -24
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +35 -30
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +22 -19
- mindspore/numpy/utils.py +1 -1
- mindspore/numpy/utils_const.py +108 -58
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +86 -117
- mindspore/ops/_grad/grad_base.py +23 -1
- mindspore/ops/_grad/grad_clip_ops.py +2 -3
- mindspore/ops/_grad/grad_comm_ops.py +34 -24
- mindspore/ops/_grad/grad_implementations.py +9 -45
- mindspore/ops/_grad/grad_inner_ops.py +47 -4
- mindspore/ops/_grad/grad_math_ops.py +142 -117
- mindspore/ops/_grad/grad_nn_ops.py +71 -165
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +7 -6
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
- mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
- mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
- mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -611
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_register_for_op.py +1 -0
- mindspore/ops/_utils/__init__.py +1 -2
- mindspore/ops/_utils/utils.py +19 -40
- mindspore/ops/_vmap/vmap_array_ops.py +116 -38
- mindspore/ops/_vmap/vmap_base.py +16 -9
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
- mindspore/ops/_vmap/vmap_image_ops.py +12 -5
- mindspore/ops/_vmap/vmap_math_ops.py +46 -5
- mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
- mindspore/ops/_vmap/vmap_random_ops.py +1 -1
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
- mindspore/ops/composite/__init__.py +7 -8
- mindspore/ops/composite/base.py +101 -47
- mindspore/ops/composite/math_ops.py +188 -158
- mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
- mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
- mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
- mindspore/ops/function/__init__.py +152 -8
- mindspore/ops/function/array_func.py +2555 -674
- mindspore/ops/function/clip_func.py +209 -13
- mindspore/ops/function/debug_func.py +2 -2
- mindspore/ops/function/grad/__init__.py +2 -1
- mindspore/ops/function/grad/grad_func.py +147 -62
- mindspore/ops/function/image_func.py +54 -38
- mindspore/ops/function/linalg_func.py +167 -16
- mindspore/ops/function/math_func.py +4849 -1492
- mindspore/ops/function/nn_func.py +2573 -988
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +3 -3
- mindspore/ops/function/random_func.py +790 -73
- mindspore/ops/function/sparse_func.py +98 -78
- mindspore/ops/function/sparse_unary_func.py +54 -53
- mindspore/ops/function/spectral_func.py +27 -24
- mindspore/ops/function/vmap_func.py +22 -2
- mindspore/ops/functional.py +97 -37
- mindspore/ops/op_info_register.py +70 -28
- mindspore/ops/operations/__init__.py +47 -14
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +5 -5
- mindspore/ops/operations/_grad_ops.py +276 -187
- mindspore/ops/operations/_inner_ops.py +319 -113
- mindspore/ops/operations/_ms_kernel.py +10 -8
- mindspore/ops/operations/_ocr_ops.py +9 -9
- mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
- mindspore/ops/operations/_quant_ops.py +137 -102
- mindspore/ops/operations/_rl_inner_ops.py +121 -60
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1004 -2
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +801 -466
- mindspore/ops/operations/comm_ops.py +51 -49
- mindspore/ops/operations/control_ops.py +2 -2
- mindspore/ops/operations/custom_ops.py +123 -44
- mindspore/ops/operations/debug_ops.py +24 -24
- mindspore/ops/operations/image_ops.py +240 -153
- mindspore/ops/operations/inner_ops.py +34 -50
- mindspore/ops/operations/linalg_ops.py +31 -9
- mindspore/ops/operations/math_ops.py +988 -757
- mindspore/ops/operations/nn_ops.py +965 -819
- mindspore/ops/operations/other_ops.py +51 -40
- mindspore/ops/operations/random_ops.py +204 -122
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +254 -93
- mindspore/ops/operations/spectral_ops.py +35 -3
- mindspore/ops/primitive.py +111 -9
- mindspore/parallel/_auto_parallel_context.py +189 -83
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +99 -7
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +7 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
- mindspore/parallel/_utils.py +1 -2
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +37 -34
- mindspore/parallel/shard.py +17 -18
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +69 -47
- mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
- mindspore/profiler/parser/base_timeline_generator.py +49 -56
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
- mindspore/profiler/parser/hwts_log_parser.py +1 -1
- mindspore/profiler/parser/integrator.py +15 -14
- mindspore/profiler/parser/minddata_analyzer.py +2 -2
- mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +2 -1
- mindspore/profiler/profiling.py +218 -186
- mindspore/rewrite/__init__.py +3 -1
- mindspore/rewrite/api/node.py +1 -114
- mindspore/rewrite/api/node_type.py +3 -0
- mindspore/rewrite/api/pattern_engine.py +31 -1
- mindspore/rewrite/api/scoped_value.py +4 -4
- mindspore/rewrite/api/symbol_tree.py +3 -78
- mindspore/rewrite/api/tree_node_helper.py +1 -1
- mindspore/rewrite/ast_creator_register.py +1 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -2
- mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
- mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
- mindspore/rewrite/namespace.py +0 -2
- mindspore/rewrite/node.py +157 -11
- mindspore/rewrite/parsers/assign_parser.py +231 -53
- mindspore/rewrite/parsers/class_def_parser.py +187 -109
- mindspore/rewrite/parsers/for_parser.py +24 -14
- mindspore/rewrite/parsers/function_def_parser.py +21 -4
- mindspore/rewrite/parsers/if_parser.py +6 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +256 -133
- mindspore/rewrite/symbol_tree_builder.py +38 -1
- mindspore/run_check/_check_version.py +69 -63
- mindspore/run_check/run_check.py +2 -1
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +1 -1
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +273 -102
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +2 -2
- mindspore/train/callback/_checkpoint.py +3 -3
- mindspore/train/callback/_early_stop.py +3 -3
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +29 -31
- mindspore/train/callback/_loss_monitor.py +3 -3
- mindspore/train/callback/_on_request_exit.py +3 -3
- mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
- mindspore/train/callback/_summary_collector.py +23 -16
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +15 -3
- mindspore/train/dataset_helper.py +10 -15
- mindspore/train/loss_scale_manager.py +8 -11
- mindspore/train/metrics/__init__.py +1 -1
- mindspore/train/metrics/bleu_score.py +1 -1
- mindspore/train/metrics/confusion_matrix.py +1 -1
- mindspore/train/metrics/cosine_similarity.py +1 -1
- mindspore/train/metrics/dice.py +2 -2
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +4 -3
- mindspore/train/metrics/mean_surface_distance.py +2 -2
- mindspore/train/metrics/occlusion_sensitivity.py +1 -1
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +1 -1
- mindspore/train/metrics/recall.py +1 -1
- mindspore/train/metrics/roc.py +2 -2
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +116 -37
- mindspore/train/model.py +45 -28
- mindspore/train/serialization.py +295 -188
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -13
- mindspore/train/train_thor/convert_utils.py +2 -2
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +610 -541
- mindspore/compression/__init__.py +0 -19
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -515
- mindspore/compression/quant/__init__.py +0 -28
- mindspore/compression/quant/qat.py +0 -634
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -140
- mindspore/nn/probability/dpn/vae/vae.py +0 -124
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
- mindspore/ops/composite/array_ops.py +0 -241
- mindspore/ops/composite/clip_ops.py +0 -134
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
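The entries above also show several public modules disappearing or moving between 2.0.0a0 and 2.0.0rc1: the whole mindspore/compression package, mindspore/parallel/nn, the probabilistic-programming subpackages under mindspore/nn/probability (dpn, infer, toolbox, transforms, zhusuan), and mindspore/nn/layer/quant.py are deleted, while mindspore/nn/transformer is relocated to the private mindspore/parallel/_transformer and a new public mindspore/nn/layer/transformer.py appears. The sketch below is only an illustration of how downstream code might detect which of these removed import paths are still present in its installed wheel; the list of names is taken from the file list, but the check itself is not an official migration tool.

import importlib.util

# Import paths whose files are deleted between the 2.0.0a0 and 2.0.0rc1 wheels
# (per the file list above).
REMOVED_IN_2_0_0RC1 = [
    "mindspore.compression",         # whole package removed
    "mindspore.parallel.nn",         # removed; transformer code now lives elsewhere
    "mindspore.nn.probability.dpn",  # removed probabilistic subpackage
]


def still_importable():
    """Return the removed import paths that the installed mindspore still provides."""
    return [name for name in REMOVED_IN_2_0_0RC1
            if importlib.util.find_spec(name) is not None]


if __name__ == "__main__":
    # On 2.0.0a0 all three names are found; on 2.0.0rc1 none should be.
    print("still available:", still_importable())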
|
@@ -17,8 +17,7 @@
|
|
|
17
17
|
|
|
18
18
|
from __future__ import absolute_import
|
|
19
19
|
from mindspore import context
|
|
20
|
-
from mindspore
|
|
21
|
-
from mindspore._checkparam import Rel
|
|
20
|
+
from mindspore import _checkparam as validator
|
|
22
21
|
from mindspore.ops.primitive import prim_attr_register, Primitive
|
|
23
22
|
from mindspore.common import dtype as mstype
|
|
24
23
|
|
|
@@ -34,8 +33,11 @@ class AdjustSaturation(Primitive):
|
|
|
34
33
|
|
|
35
34
|
Inputs:
|
|
36
35
|
- **image** (Tensor) - Images to adjust. Must be one of the following types: float16, float32.
|
|
37
|
-
At least 3-D.The last dimension is interpreted as channels, and must be three.
|
|
38
|
-
- **scale** (Tensor) - A
|
|
36
|
+
At least 3-D. The last dimension is interpreted as channels, and must be three.
|
|
37
|
+
- **scale** (Tensor) - A scale factor determines the amount of saturation adjustment to
|
|
38
|
+
apply to the image. A value greater than 1.0 increases the saturation, while a value less than
|
|
39
|
+
1.0 decreases the saturation. A value of 1.0 leaves the saturation unchanged.
|
|
40
|
+
Must be 0-D Tensor of type float32.
|
|
39
41
|
|
|
40
42
|
Outputs:
|
|
41
43
|
Adjusted image(s), same shape and dtype as `image`.
|
|
@@ -48,7 +50,7 @@ class AdjustSaturation(Primitive):
|
|
|
48
50
|
ValueError: If the last dimension of the 'image' is not 3.
|
|
49
51
|
|
|
50
52
|
Supported Platforms:
|
|
51
|
-
``GPU`` ``CPU``
|
|
53
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
52
54
|
|
|
53
55
|
Examples:
|
|
54
56
|
>>> x = Tensor([[[1.0, 2.0, 3.0],
|
|
@@ -96,7 +98,7 @@ class AdjustContrastv2(Primitive):
|
|
|
96
98
|
ValueError: If the dimension of the 'images' is less than 3, or the last dimension of the 'images' is not 3.
|
|
97
99
|
|
|
98
100
|
Supported Platforms:
|
|
99
|
-
``GPU`` ``CPU``
|
|
101
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
100
102
|
|
|
101
103
|
Examples:
|
|
102
104
|
>>> images = Tensor([[[1.0, 2.0, 3.0],
|
|
@@ -131,8 +133,9 @@ class AdjustHue(Primitive):
|
|
|
131
133
|
It is recommended to minimize the number of redundant transformations when several adjustments are chained.
|
|
132
134
|
|
|
133
135
|
Inputs:
|
|
134
|
-
- **image** (Tensor): RGB image or images
|
|
135
|
-
|
|
136
|
+
- **image** (Tensor): RGB image or images, a Tensor has at least 3-D.
|
|
137
|
+
The last dimension is interpreted as channels whose size must be three.
|
|
138
|
+
the dtype is float16 or float32.
|
|
136
139
|
- **delta** (Tensor): How much to add to the hue channel, the dtype is float32. Must be 0-D.
|
|
137
140
|
|
|
138
141
|
Outputs:
|
|
@@ -145,7 +148,7 @@ class AdjustHue(Primitive):
|
|
|
145
148
|
ValueError: If the dimension of `image` is less than 3.
|
|
146
149
|
|
|
147
150
|
Supported Platforms:
|
|
148
|
-
``GPU`` ``CPU``
|
|
151
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
149
152
|
|
|
150
153
|
Examples:
|
|
151
154
|
>>> class AdjustHue(nn.Cell):
|
|
@@ -178,10 +181,11 @@ class AdjustHue(Primitive):
|
|
|
178
181
|
|
|
179
182
|
class ExtractGlimpse(Primitive):
|
|
180
183
|
"""
|
|
181
|
-
Extracts
|
|
184
|
+
Extracts glimpses(usually subarea of rectangle) from the input image Tensor and return as windows.
|
|
182
185
|
|
|
183
186
|
Note:
|
|
184
|
-
If
|
|
187
|
+
If extracted windows and the input image only partially overlap,
|
|
188
|
+
random noise is filled in those non overlapping areas.
|
|
185
189
|
|
|
186
190
|
Args:
|
|
187
191
|
centered (bool, optional): An optional `bool`. Indicates if the offset coordinates
|
|
@@ -191,28 +195,29 @@ class ExtractGlimpse(Primitive):
|
|
|
191
195
|
normalized (bool, optional): An optional `bool`. indicates if the offset
|
|
192
196
|
coordinates are normalized. Defaults to `True`.
|
|
193
197
|
uniform_noise (bool, optional): An optional `bool`. indicates if the noise should be
|
|
194
|
-
generated using a uniform distribution
|
|
195
|
-
noise (str, optional): An optional string
|
|
196
|
-
|
|
197
|
-
When the window and input image tensor not overlap, random noise is filled.
|
|
198
|
-
The
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
When
|
|
202
|
-
|
|
203
|
-
|
|
198
|
+
generated using a uniform distribution(aka. Gaussian distribution). Defaults to `True`.
|
|
199
|
+
noise (str, optional): An optional string specifies the type of noise to fill.
|
|
200
|
+
The window is determined by size and offsets.
|
|
201
|
+
When the window and input image tensor don't not overlap, random noise is filled.
|
|
202
|
+
The value can be 'uniform', 'gaussian' and 'zero'. Default: `uniform`.
|
|
203
|
+
|
|
204
|
+
- When `noise` is 'uniform' and 'gaussian', the result is variable.
|
|
205
|
+
- When `noise` is 'zero', the value of `uniform_noise` must be 'False' and the
|
|
206
|
+
filling noise will be zero so that the result is fixed.
|
|
207
|
+
- When `uniform_noise` is 'True', the value of `noise` only can be 'uniform'.
|
|
208
|
+
When `uniform_noise` is 'False', the value of `noise` can be 'uniform', 'gaussian' and 'zero'.
|
|
204
209
|
|
|
205
210
|
Inputs:
|
|
206
|
-
- **x** (Tensor) - A 4-D float tensor of shape
|
|
211
|
+
- **x** (Tensor) - A 4-D float tensor of shape :math:`(batch_size, height, width, channels)`.
|
|
207
212
|
Types allowed: float32.
|
|
208
213
|
- **size** (Tensor) - A 1-D tensor of 2 elements containing the size of the glimpses to extract.
|
|
209
214
|
The glimpse height must be specified first, following by the glimpse width. Types allowed: int32.
|
|
210
215
|
The value of size must be greater than zero.
|
|
211
|
-
- **offsets** (Tensor) - A 2-D integer tensor of shape
|
|
216
|
+
- **offsets** (Tensor) - A 2-D integer tensor of shape :math:`(batch_size, 2)` containing the y, x locations
|
|
212
217
|
of the center of each window. Types allowed: float32.
|
|
213
218
|
|
|
214
219
|
Outputs:
|
|
215
|
-
A 4-D tensor of shape
|
|
220
|
+
A 4-D tensor of shape :math:`(batch_size, glimpse_height, glimpse_width, channels)` with type: float32.
|
|
216
221
|
|
|
217
222
|
Raises:
|
|
218
223
|
TypeError: If `centered` is not a bool.
|
|
@@ -225,7 +230,7 @@ class ExtractGlimpse(Primitive):
|
|
|
225
230
|
ValueError: If the input is not Tensor.
|
|
226
231
|
|
|
227
232
|
Supported Platforms:
|
|
228
|
-
``GPU`` ``CPU``
|
|
233
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
229
234
|
|
|
230
235
|
Examples:
|
|
231
236
|
>>> x = Tensor([[[[0.0], [1.0], [2.0]], [[3.0], [4.0], [5.0]], [[6.0], [7.0], [8.0]]]], dtype=mindspore.float32)
|
|
@@ -262,7 +267,7 @@ class ExtractGlimpse(Primitive):
|
|
|
262
267
|
|
|
263
268
|
|
|
264
269
|
class CropAndResize(Primitive):
|
|
265
|
-
"""
|
|
270
|
+
r"""
|
|
266
271
|
Extracts crops from the input image tensor and resizes them.
|
|
267
272
|
|
|
268
273
|
Note:
|
|
@@ -276,9 +281,10 @@ class CropAndResize(Primitive):
|
|
|
276
281
|
extrapolation_value (float, optional): An optional float value used extrapolation, if applicable. Default: 0.0.
|
|
277
282
|
|
|
278
283
|
Inputs:
|
|
279
|
-
- **x** (Tensor) - The input image must be a 4-D tensor of shape
|
|
284
|
+
- **x** (Tensor) - The input image must be a 4-D tensor of shape
|
|
285
|
+
:math:`(batch, image\_height, image\_width, depth)`.
|
|
280
286
|
Types allowed: int8, int16, int32, int64, float16, float32, float64, uint8, uint16.
|
|
281
|
-
- **boxes** (Tensor) - A 2-D tensor of shape
|
|
287
|
+
- **boxes** (Tensor) - A 2-D tensor of shape :math:`(num\_boxes, 4)`.
|
|
282
288
|
The i-th row of the tensor specifies the coordinates of a box in the box_ind[i] image
|
|
283
289
|
and is specified in normalized coordinates [y1, x1, y2, x2]. A normalized coordinate value of y is mapped to
|
|
284
290
|
the image coordinate at y * (image_height - 1), so as the [0, 1] interval of normalized image height is
|
|
@@ -286,14 +292,14 @@ class CropAndResize(Primitive):
|
|
|
286
292
|
crop is an up-down flipped version of the original image. The width dimension is treated similarly.
|
|
287
293
|
Normalized coordinates outside the [0, 1] range are allowed, in which case we use `extrapolation_value` to
|
|
288
294
|
extrapolate the input image values. Types allowed: float32.
|
|
289
|
-
- **box_index** (Tensor) - A 1-D tensor of shape
|
|
295
|
+
- **box_index** (Tensor) - A 1-D tensor of shape :math:`(num\_boxes)` with int32 values in [0, batch).
|
|
290
296
|
The value of `box_index[i]` specifies the image that the i-th box refers to. Types allowed: int32.
|
|
291
297
|
- **crop_size** (Tuple[int]) - A tuple of two int32 elements: (crop_height, crop_width).
|
|
292
298
|
Only constant value is allowed. All cropped image patches are resized to this size.
|
|
293
299
|
The aspect ratio of the image content is not preserved. Both crop_height and crop_width need to be positive.
|
|
294
300
|
|
|
295
301
|
Outputs:
|
|
296
|
-
A 4-D tensor of shape
|
|
302
|
+
A 4-D tensor of shape :math:`(num\_boxes, crop\_height, crop\_width, depth)` with type: float32.
|
|
297
303
|
|
|
298
304
|
Raises:
|
|
299
305
|
TypeError: If `x` or `boxes` or `box_index` is not a Tensor.
|
|
@@ -352,39 +358,42 @@ class CropAndResize(Primitive):
|
|
|
352
358
|
|
|
353
359
|
class NonMaxSuppressionV3(Primitive):
|
|
354
360
|
r"""
|
|
355
|
-
|
|
361
|
+
Selects a subset of bounding boxes in a greedy manner, based on their descending score.
|
|
362
|
+
It removes boxes that have high intersection-over-union (IOU) overlap with previously
|
|
363
|
+
selected boxes, and eliminates boxes with scores lower than a given threshold.
|
|
356
364
|
|
|
357
365
|
.. warning::
|
|
358
366
|
When input `max_output_size` is negative, it will be treated as 0.
|
|
359
367
|
|
|
360
368
|
Note:
|
|
361
|
-
- This algorithm
|
|
362
|
-
- This algorithm
|
|
363
|
-
|
|
364
|
-
|
|
369
|
+
- This algorithm does not depend on the location of the origin in the coordinate system.
|
|
370
|
+
- This algorithm remains unaffected by orthogonal transformations and translations of
|
|
371
|
+
the coordinate system, which means that translating or reflecting the coordinate system
|
|
372
|
+
will result in the same boxes being chosen by the algorithm.
|
|
365
373
|
|
|
366
374
|
Inputs:
|
|
367
375
|
- **boxes** (Tensor) - A 2-D Tensor of shape :math:`(num\_boxes, 4)`.
|
|
368
|
-
- **scores** (Tensor) - A 1-D Tensor of shape :math:`(num\_boxes)`
|
|
369
|
-
|
|
370
|
-
the
|
|
376
|
+
- **scores** (Tensor) - A 1-D Tensor of shape :math:`(num\_boxes)` where each element represents a
|
|
377
|
+
single score associated with each box (i.e., each row of the `boxes` Tensor).
|
|
378
|
+
It is required that the number of scores in `scores` must be equal to the number of boxes in `boxes`.
|
|
379
|
+
The supported data type is float32.
|
|
371
380
|
- **max_output_size** (Union[Tensor, Number.Int]) - A scalar integer Tensor representing the maximum
|
|
372
|
-
number of boxes to be selected by non max suppression.
|
|
373
|
-
- **iou_threshold** (Union[Tensor, Number.Float]) - A
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
- **score_threshold** (Union[Tensor, Number.Float]) - A
|
|
377
|
-
|
|
381
|
+
number of boxes to be selected by non max suppression. The supported data type is int32.
|
|
382
|
+
- **iou_threshold** (Union[Tensor, Number.Float]) - A scalar float Tensor represents the threshold
|
|
383
|
+
used for determining if the intersection over union (IOU) between boxes is too high.
|
|
384
|
+
Data type of `iou_threshold` is float32 and must be in range [0, 1].
|
|
385
|
+
- **score_threshold** (Union[Tensor, Number.Float]) - A scalar float Tensor represents the threshold for
|
|
386
|
+
determining when to remove boxes based on score. The supported data type is float32.
|
|
378
387
|
|
|
379
388
|
Outputs:
|
|
380
|
-
A 1-D integer Tensor of shape
|
|
381
|
-
where M <= max_output_size
|
|
389
|
+
A 1-D integer Tensor of shape :math:`(M)` representing the selected indices from the boxes tensor,
|
|
390
|
+
where M <= `max_output_size`.
|
|
382
391
|
|
|
383
392
|
Raises:
|
|
384
393
|
TypeError: If the dtype of `boxes` and `scores` are different.
|
|
385
394
|
TypeError: If the dtype of `iou_threshold` and `score_threshold` are different.
|
|
386
395
|
TypeError: If `boxes` is not tensor or its dtype is not float16 or float32.
|
|
387
|
-
|
|
396
|
+
TypeError: If `scores` is not tensor or its dtype is not float16 or float32.
|
|
388
397
|
TypeError: If `max_output_size` is not tensor or scalar or its date type is not int32 or int64.
|
|
389
398
|
TypeError: If `iou_threshold` is not tensor or scalar or its type is neither float16 or float32.
|
|
390
399
|
TypeError: If `score_threshold` is not tensor or scalar or its type is neither float16 or float32.
|
|
@@ -394,7 +403,7 @@ class NonMaxSuppressionV3(Primitive):
|
|
|
394
403
|
`iou_threshold`, `score_threshold` is not 0.
|
|
395
404
|
|
|
396
405
|
Supported Platforms:
|
|
397
|
-
``Ascend``
|
|
406
|
+
``Ascend`` ``GPU``
|
|
398
407
|
|
|
399
408
|
Examples:
|
|
400
409
|
>>> boxes = Tensor(np.array([[1, 2, 3, 4], [1, 3, 3, 4], [1, 3, 4, 4],
|
|
@@ -412,46 +421,54 @@ class NonMaxSuppressionV3(Primitive):
|
|
|
412
421
|
@prim_attr_register
|
|
413
422
|
def __init__(self):
|
|
414
423
|
"""Initialize NonMaxSuppressionV3"""
|
|
424
|
+
self.init_prim_io_names(inputs=['boxes', 'scores', 'max_output_size', 'iou_threshold', 'score_threshold'],
|
|
425
|
+
outputs=['selected indices'])
|
|
415
426
|
|
|
416
427
|
|
|
417
428
|
class NonMaxSuppressionWithOverlaps(Primitive):
|
|
418
429
|
r"""
|
|
419
|
-
|
|
430
|
+
Selects a subset of bounding boxes in a greedy manner by prioritizing those with higher
|
|
431
|
+
scores and removing those with high overlaps with previously selected boxes.
|
|
432
|
+
Boxes with scores lower than the score threshold are also removed.
|
|
433
|
+
The overlap values between boxes are represented as an N-by-N square matrix,
|
|
434
|
+
which can be customized to define different overlap criteria such as intersection
|
|
435
|
+
over union or intersection over area.
|
|
436
|
+
|
|
420
437
|
|
|
421
438
|
Note:
|
|
422
|
-
- This algorithm
|
|
423
|
-
- This algorithm
|
|
424
|
-
|
|
425
|
-
|
|
439
|
+
- This algorithm does not depend on the location of the origin in the coordinate system.
|
|
440
|
+
- This algorithm remains unaffected by orthogonal transformations and translations of
|
|
441
|
+
the coordinate system, which means that translating or reflecting the coordinate system
|
|
442
|
+
will result in the same boxes being chosen by the algorithm.
|
|
426
443
|
|
|
427
444
|
Inputs:
|
|
428
445
|
- **overlaps** (Tensor) - A 2-D Tensor of shape :math:`(num\_boxes, num\_boxes)`,
|
|
429
|
-
representing the n-by-n box overlap values. Types allowed:float32.
|
|
430
|
-
- **scores** (Tensor) - A 1-D Tensor of shape :math:`(num\_boxes)`
|
|
431
|
-
|
|
432
|
-
the
|
|
433
|
-
|
|
446
|
+
representing the n-by-n box overlap values. Types allowed:float16, float32 and float64.
|
|
447
|
+
- **scores** (Tensor) - A 1-D Tensor of shape :math:`(num\_boxes)` where each element represents a
|
|
448
|
+
single score associated with each box (i.e., each row of the `boxes` Tensor).
|
|
449
|
+
It is required that the number of scores in `scores` must be equal to the number of boxes in `boxes`.
|
|
450
|
+
The supported data type is float32.
|
|
434
451
|
- **max_output_size** (Union[Tensor, Number.Int]) - A scalar integer Tensor representing the maximum
|
|
435
452
|
number of boxes to be selected by non max suppression, and max_output_size must be equal to or greater
|
|
436
453
|
than 0.
|
|
437
454
|
Types allowed:int32.
|
|
438
|
-
- **overlap_threshold** (Union[Tensor, Number.Float]) - A 0-D float Tensor
|
|
439
|
-
|
|
440
|
-
Types allowed:float32.
|
|
455
|
+
- **overlap_threshold** (Union[Tensor, Number.Float]) - A scalar value, represented by a 0-D float Tensor,
|
|
456
|
+
which is used as a threshold to determine if two boxes overlap too much.
|
|
457
|
+
Types allowed:float16, float32 and float64.
|
|
441
458
|
- **score_threshold** (Union[Tensor, Number.Float]) - A 0-D float Tensor representing the threshold for
|
|
442
|
-
deciding when to remove boxes based on score.
|
|
443
|
-
Types allowed:float32.
|
|
459
|
+
deciding when to remove boxes based on score. It has the same dtype as `overlap_threshold`.
|
|
444
460
|
|
|
445
461
|
Outputs:
|
|
446
|
-
A 1-D integer Tensor of shape :math:`(M)` representing the selected indices from the boxes Tensor,
|
|
447
|
-
where M <= max_output_size
|
|
462
|
+
A 1-D integer Tensor of shape :math:`(M)` representing the selected indices from the `boxes` Tensor,
|
|
463
|
+
where M <= `max_output_size`. Its data type is int32.
|
|
448
464
|
|
|
449
465
|
Raises:
|
|
450
|
-
TypeError: If the dtype of `overlaps` , `scores` `overlap_threshold` and `score_threshold`
|
|
466
|
+
TypeError: If the dtype of `overlaps` , `scores` `overlap_threshold` and `score_threshold`
|
|
467
|
+
is not float16, float32 or float64.
|
|
451
468
|
TypeError: If `overlaps` or `scores` is not Tensor。
|
|
452
469
|
TypeError: If `max_output_size` is not Tensor or Scalar.If `max_output_size` is not int32.
|
|
453
|
-
TypeError: If `overlap_threshold` is not Tensor or scalar. If its type is not float32.
|
|
454
|
-
TypeError: If `score_threshold` is not Tensor or scalar. If its type is not float32.
|
|
470
|
+
TypeError: If `overlap_threshold` is not Tensor or scalar. If its type is not float16, float32 or float64.
|
|
471
|
+
TypeError: If `score_threshold` is not Tensor or scalar. If its type is not float16, float32 or float64.
|
|
455
472
|
ValueError: If the size of shape of `overlaps` is not 2 or the second value of its shape
|
|
456
473
|
is not equal to the first value of its shape.
|
|
457
474
|
ValueError: If the size of shape of `scores` is not 1.
|
|
@@ -460,14 +477,14 @@ class NonMaxSuppressionWithOverlaps(Primitive):
|
|
|
460
477
|
ValueError: If the shape of `scores` is not equal to the shape of the dim0 or dim1 of `overlaps`.
|
|
461
478
|
|
|
462
479
|
Supported Platforms:
|
|
463
|
-
``GPU`` ``CPU``
|
|
480
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
464
481
|
|
|
465
482
|
Examples:
|
|
466
483
|
>>> overlaps = Tensor(np.array([[0.6964692, 0.28613934, 0.22685145, 0.5513148],
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
|
|
484
|
+
... [0.71946895, 0.42310646, 0.9807642, 0.6848297],
|
|
485
|
+
... [0.4809319, 0.39211753, 0.343178, 0.7290497],
|
|
486
|
+
... [0.43857226, 0.059677895, 0.39804426, 0.7379954]
|
|
487
|
+
... ]), mstype.float32)
|
|
471
488
|
>>> scores = Tensor(np.array([0.18249173, 0.17545176, 0.53155136, 0.53182757]), mstype.float32)
|
|
472
489
|
>>> max_output_size = Tensor(4, mstype.int32)
|
|
473
490
|
>>> overlap_threshold = Tensor(0.1, mstype.float32)
|
|
@@ -487,18 +504,18 @@ class NonMaxSuppressionWithOverlaps(Primitive):
|
|
|
487
504
|
|
|
488
505
|
class HSVToRGB(Primitive):
|
|
489
506
|
r"""
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
507
|
+
Transform one single or a batch of images from HSV to RGB color space.
|
|
508
|
+
Each pixel's HSV value is converted to its corresponding RGB value.
|
|
509
|
+
Note that the function is only well-defined for input pixel values in the range [0, 1].
|
|
510
|
+
Image format should be "NHWC".
|
|
494
511
|
|
|
495
512
|
Inputs:
|
|
496
513
|
- **x** (Tensor) - The input image must be a 4-D tensor of shape
|
|
497
|
-
:math:`
|
|
514
|
+
:math:`(batch, image\_height, image\_width, channel)`.
|
|
498
515
|
Number of channel must be 3. Types allowed: float16, float32, float64.
|
|
499
516
|
|
|
500
517
|
Outputs:
|
|
501
|
-
A 4-D tensor of shape :math:`
|
|
518
|
+
A 4-D tensor of shape :math:`(batch, image\_height, image\_width, channel)`
|
|
502
519
|
with same type of input.
|
|
503
520
|
|
|
504
521
|
Raises:
|
|
@@ -569,7 +586,7 @@ class CropAndResizeGradBoxes(Primitive):
|
|
|
569
586
|
ValueError: If the length of `box_index` is not equal to num_boxes.
|
|
570
587
|
|
|
571
588
|
Supported Platforms:
|
|
572
|
-
``GPU`` ``CPU``
|
|
589
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
573
590
|
|
|
574
591
|
Examples:
|
|
575
592
|
>>> crop_and_resize_grad_boxes = ops.CropAndResizeGradBoxes(method = "bilinear")
|
|
@@ -596,9 +613,9 @@ class CropAndResizeGradBoxes(Primitive):
|
|
|
596
613
|
|
|
597
614
|
class RGBToHSV(Primitive):
|
|
598
615
|
"""
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
|
|
616
|
+
Transform one single or a batch of images from RGB to HSV color space.
|
|
617
|
+
Each pixel's RGB value is converted to its corresponding HSV value.
|
|
618
|
+
Note that the function is only well-defined for input pixel values in the range [0, 1].
|
|
602
619
|
|
|
603
620
|
Note:
|
|
604
621
|
Last dimension of input images must be size 3.
|
|
@@ -611,12 +628,12 @@ class RGBToHSV(Primitive):
|
|
|
611
628
|
A Tensor, has the same type and shape as input `images`.
|
|
612
629
|
|
|
613
630
|
Raises:
|
|
614
|
-
TypeError: If `images` is not tensor or its dtype is not float
|
|
631
|
+
TypeError: If `images` is not tensor or its dtype is not float.
|
|
615
632
|
ValueError: If the rank of `images` is less than 1.
|
|
616
633
|
ValueError: If the last value of shape of `images` is not 3.
|
|
617
634
|
|
|
618
635
|
Supported Platforms:
|
|
619
|
-
``GPU`` ``CPU``
|
|
636
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
620
637
|
|
|
621
638
|
Examples:
|
|
622
639
|
>>> images = np.array([0.25, 0.5, 0.5]).astype(np.float32).reshape([1, 1, 1, 3])
|
|
@@ -639,34 +656,36 @@ class ResizeLinear1D(Primitive):
     For general resize, refer to :func:`mindspore.ops.interpolate` for more details.
 
     .. warning::
-        This is an experimental
+        - This is an experimental API that is subject to change.
+        - Currently, the Ascend platform only supports scenarios where the input `size` is Tuple or List.
 
     Args:
-        coordinate_transformation_mode (
-            in the resized tensor to the coordinate in the original tensor. Other optional: 'half_pixel'
+        coordinate_transformation_mode (str): Default is 'align_corners'. Describes how to transform the coordinate
+            in the resized tensor to the coordinate in the original tensor. Other optional: 'half_pixel'.
 
     Inputs:
         - **x** (Tensor) - A 3-D tensor which to resize, with shape [batch, channel, width]. Must be one of the
          following types: uint8, int8, int16, int32, int64, float16, float32, double.
-        - **size** (
+        - **size** (Union[Tuple[int], List[int], Tensor[int]]): describes the new width of `x` .
+          A tuple or list or 1-D tensor with only one int element :math:`(new\_width)`.
 
     Outputs:
         A 3-D tensor which shape is [batch, channel, new_width] with the same type as `x`.
 
     Raises:
         TypeError: If dtype of `x` is not in the support list.
-        TypeError: If `size` is not
+        TypeError: If `size` is not in Union[Tuple[int], List[int], Tensor[int]].
        TypeError: If `coordinate_transformation_mode` is not a string.
        TypeError: If `coordinate_transformation_mode` is not in the support list.
 
     Supported Platforms:
-        ``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>>
-        >>> size =
+        >>> x = Tensor([[[1, 2, 3], [4, 5, 6]]], mindspore.float32)
+        >>> size = (6,)
        >>> resize_linear_1d = ops.ResizeLinear1D(coordinate_transformation_mode="align_corners")
-        >>> output = resize_linear_1d(x
+        >>> output = resize_linear_1d(x, size)
        >>> print(output)
        [[[1. 1.4 1.8 2.2 2.6 3.]
          [4. 4.4 4.8 5.2 5.6 6.]]]
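The printed values in the new doctest follow from the 'align_corners' mapping, where each output index maps to index * (in_width - 1) / (new_width - 1) in the source; a NumPy-only sketch that reproduces them:

    import numpy as np

    x = np.array([[1., 2., 3.], [4., 5., 6.]])
    new_width = 6
    # align_corners: source coordinate = output index * (in_width - 1) / (new_width - 1)
    src = np.arange(new_width) * (x.shape[1] - 1) / (new_width - 1)   # [0. 0.4 0.8 1.2 1.6 2.]
    left = np.floor(src).astype(int)
    right = np.minimum(left + 1, x.shape[1] - 1)
    frac = src - left
    out = x[:, left] * (1 - frac) + x[:, right] * frac
    # out -> [[1.  1.4 1.8 2.2 2.6 3. ]
    #         [4.  4.4 4.8 5.2 5.6 6. ]]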
@@ -678,7 +697,7 @@ class ResizeLinear1D(Primitive):
         self.init_prim_io_names(inputs=["x", "sizes"], outputs=["output"])
         validator.check_value_type(
             "coordinate_transformation_mode", coordinate_transformation_mode, [str], self.name)
-        validator.check_string(coordinate_transformation_mode, ["align_corners", "half_pixel"
+        validator.check_string(coordinate_transformation_mode, ["align_corners", "half_pixel"],
                                "coordinate_transformation_mode", self.name)
 
 
@@ -689,7 +708,7 @@ class ResizeBilinearV2(Primitive):
     The resizing only affects the lower two dimensions which represent the height and width.
 
     .. warning::
-
+        This is an experimental API that is subject to change or deletion.
 
     Args:
         align_corners (bool, optional): If true, rescale input by :math:`(new\_height - 1) / (height - 1)`,
@@ -718,7 +737,7 @@ class ResizeBilinearV2(Primitive):
         ValueError: If `size` contains other than 2 elements.
 
     Supported Platforms:
-        ``Ascend`` ``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mindspore.float32)
@@ -741,18 +760,12 @@ class ResizeBilinearV2(Primitive):
                                    half_pixel_centers, [bool], self.name)
         if half_pixel_centers and align_corners:
             raise ValueError(f"If half_pixel_centers is True, align_corners must be False, but got {align_corners}")
-        target = context.get_context("device_target")
-        if half_pixel_centers and target == "CPU":
-            raise ValueError(f"Currently `half_pixel_centers`=True is not supported in CPU device_target")
 
 
 class ResizeBicubic(Primitive):
     r"""
     Resize images to size using bicubic interpolation.
 
-    .. warning::
-        The max output length is 1000000.
-
     Args:
         align_corners (bool, optional):If true, the centers of the 4 corner pixels of the input
             and output tensors are aligned, preserving the values at the corner pixels.Default: False.
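With the CPU-only guard on `half_pixel_centers` removed above, constructing the primitive with half-pixel centers no longer fails at init time regardless of `device_target`; only the mutual exclusion with `align_corners` is still checked. A construction sketch (whether a given backend kernel accepts the flag at run time is a separate question):

    from mindspore import ops

    # Passes construction on any device_target after the change; align_corners
    # and half_pixel_centers still cannot both be True.
    resize = ops.ResizeBilinearV2(align_corners=False, half_pixel_centers=True)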
@@ -760,14 +773,14 @@ class ResizeBicubic(Primitive):
             `align_corners` should be False. Default: False.
 
     Inputs:
-        - **images** (Tensor) - The input image must be a 4-D tensor of shape :math:`(batch, height, width
-          The format must be
+        - **images** (Tensor) - The input image must be a 4-D tensor of shape :math:`(batch, channels, height, width)`.
+          The format must be NCHW.
          Types allowed: int8, int16, int32, int64, float16, float32, float64, uint8, uint16.
        - **size** (Tensor) - A 1-D tensor of shape [2], with 2 elements: new_height, new_width.
          Types allowed: int32.
 
     Outputs:
-        A 4-D tensor of shape :math:`(batch, new\_height, new\_width
+        A 4-D tensor of shape :math:`(batch, channels, new\_height, new\_width)` with type float32.
 
     Raises:
         TypeError: If `images` type is not allowed.
@@ -780,8 +793,9 @@ class ResizeBicubic(Primitive):
         ValueError: If any `size` value is not positive.
        ValueError: If `align_corners` and `half_pixel_centers` value are both True.
 
+
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> class NetResizeBicubic(nn.Cell):
@@ -808,7 +822,6 @@ class ResizeBicubic(Primitive):
     @prim_attr_register
     def __init__(self, align_corners=False, half_pixel_centers=False):
         """Initialize"""
-        self.add_prim_attr("max_length", 1000000)
         validator.check_value_type('align_corners', align_corners, bool, self.name)
         validator.check_value_type('half_pixel_centers', half_pixel_centers, bool, self.name)
         self.init_prim_io_names(inputs=['images', 'size'], outputs=['y'])
@@ -832,21 +845,20 @@ class ResizeBicubic(Primitive):
                                             mstype.float32, mstype.uint8, mstype.uint16, mstype.double], self.name)
         validator.check_tensor_dtype_valid("size", size_dtype, [mstype.int32], self.name)
         # check input shape rank
-        validator.check("images rank", len(images_shape), "expected", 4,
-        validator.check("size rank", len(size_shape), "expected", 1,
-        validator.check("size dim_0", size_shape[0], "expected", 2,
+        validator.check("images rank", len(images_shape), "expected", 4, validator.EQ, self.name)
+        validator.check("size rank", len(size_shape), "expected", 1, validator.EQ, self.name)
+        validator.check("size dim_0", size_shape[0], "expected", 2, validator.EQ, self.name)
         # check size_value
-        validator.check("size[0]", size_value[0], "minimum", 0,
-        validator.check("size[1]", size_value[1], "minimum", 0,
+        validator.check("size[0]", size_value[0], "minimum", 0, validator.GT, self.name)
+        validator.check("size[1]", size_value[1], "minimum", 0, validator.GT, self.name)
 
         batch_size = images_shape[0]
+        channel = images_shape[1]
         height = size_value[0]
         width = size_value[1]
-
-        out_shape = (batch_size, height, width
-        return {'shape': out_shape,
-                'dtype': mstype.float32,
-                'value': None}
+
+        out_shape = (batch_size, channel, height, width)
+        return {'shape': out_shape, 'dtype': mstype.float32, 'value': None}
 
 
 class ResizeArea(Primitive):
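The shape-inference change above completes ResizeBicubic's move to an NCHW contract, carrying the channel dimension into the output shape; a minimal sketch consistent with the updated docstring (shapes are illustrative):

    import numpy as np
    from mindspore import Tensor, ops

    images = Tensor(np.arange(48, dtype=np.float32).reshape(1, 3, 4, 4))  # NCHW input
    size = Tensor(np.array([2, 2], dtype=np.int32))                       # (new_height, new_width)
    out = ops.ResizeBicubic()(images, size)   # inferred shape (1, 3, 2, 2), dtype float32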
@@ -859,12 +871,15 @@ class ResizeArea(Primitive):
     The values of `size` must be greater than zero.
 
     Args:
-        align_corners (bool, optional):
-
+        align_corners (bool, optional): A boolean flag that specifies whether
+            to align the centers of the four corner pixels of the input and output tensors.
+            When this flag is set to True, the corner pixels of the output tensor are aligned
+            with the corner pixels of the input tensor, which preserves the values at the corner pixels.
+            Defaults: False.
 
     Inputs:
         - **images** (Tensor) - Input images must be a 4-D tensor with shape
-          which is :math:`(batch, channels, height, width)`. The format must be NHWC.
+          which is :math:`(batch, channels, height, width)`. The format must be "NHWC".
          Types allowed: int8, int16, int32, int64, float16, float32, float64, uint8, uint16.
        - **size** (Tensor) - Input size must be a 1-D tensor of 2 elements: new_height, new_width.
          The new size of output image.
@@ -884,7 +899,7 @@ class ResizeArea(Primitive):
         ValueError: If any value of `size` is not positive.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> images = Tensor([[[[2], [4], [6], [8]], [[10], [12], [14], [16]]]], mindspore.float16)
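A short sketch that reuses the images tensor from the doctest above and adds an illustrative `size` input (the size values are assumptions, not taken from the diff):

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    images = Tensor([[[[2], [4], [6], [8]], [[10], [12], [14], [16]]]], mindspore.float16)  # NHWC, one channel
    size = Tensor(np.array([1, 2], dtype=np.int32))   # illustrative (new_height, new_width)
    resized = ops.ResizeArea()(images, size)          # spatial size becomes 1 x 2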
@@ -956,7 +971,7 @@ class CropAndResizeGradImage(Primitive):
         ValueError: If the value of image_height or image_width of `image_size` is not positive.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> crop_and_resize_grad_image = ops.CropAndResizeGradImage(T = mindspore.float32, method = "bilinear")
@@ -991,13 +1006,14 @@ class CropAndResizeGradImage(Primitive):
         validator.check_value_type("method", method, [str], self.name)
         is_ascend_cpu = context.get_context('device_target') in ("Ascend", "CPU")
         if is_ascend_cpu:
-            validator.check("method", method, "expected", ("bilinear", "nearest"),
+            validator.check("method", method, "expected", ("bilinear", "nearest"), validator.IN, self.name)
         else:
-            validator.check("method", method, "expected", ("bilinear", "nearest", "bilinear_v2"),
+            validator.check("method", method, "expected", ("bilinear", "nearest", "bilinear_v2"),
+                            validator.IN, self.name)
         self.method = method
         valid_values = (mstype.float16, mstype.float32, mstype.float64)
         if T in mstype.number_type:
-            validator.check("T", T, "expected", valid_values,
+            validator.check("T", T, "expected", valid_values, validator.IN, self.name)
         else:
             validator.check_type_name("T", T, valid_values, self.name)
         self.add_prim_attr("max_Byte", int(2e9)) # Maximum bytes of image gradient
@@ -1020,11 +1036,11 @@ class ScaleAndTranslate(Primitive):
     Inputs:
         - **images** (Tensor) - A 4-D tensor of shape :math:`(batch, image\_height, image\_width, channel)`.
        - **size** (Tensor) - The size of the output image after scale and translate operations. A 1-D tensor with two
-          positive elements whose dtype is int32 and shape must be (2,)
+          positive elements whose dtype is int32 and shape must be :math:`(2,)`.
        - **scale** (Tensor) - Indicates the zoom factor. A 1-D tensor with two positive elements whose dtype is float32
-          and shape must be (2,)
+          and shape must be :math:`(2,)`.
        - **translation** (Tensor) - Translate the pixel value. A 1-D tensor with two elements whose dtype is
-          float32 and shape must be (2,)
+          float32 and shape must be :math:`(2,)`.
 
     Outputs:
         A 4-D tensor with type: float32 and shape :math:`(batch, size[0], size[1], channel)`.
@@ -1044,7 +1060,7 @@ class ScaleAndTranslate(Primitive):
         ValueError: If the shape of `translation` is not :math:`(2,)`.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> op = ops.ScaleAndTranslate()
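A minimal sketch following the input contract documented in the hunk above; all values are illustrative:

    import numpy as np
    from mindspore import Tensor, ops

    images = Tensor(np.ones((1, 4, 4, 3), dtype=np.float32))         # NHWC
    size = Tensor(np.array([8, 8], dtype=np.int32))                  # output height and width
    scale = Tensor(np.array([2.0, 2.0], dtype=np.float32))           # zoom factor per axis
    translation = Tensor(np.array([0.0, 0.0], dtype=np.float32))     # pixel shift
    out = ops.ScaleAndTranslate()(images, size, scale, translation)  # float32, shape (1, 8, 8, 3)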
@@ -1072,33 +1088,41 @@ class ScaleAndTranslate(Primitive):
 
 class CombinedNonMaxSuppression(Primitive):
     r"""
-
+    Applies a greedy approach to select a subset of bounding boxes from a list of
+    candidates using NonMaxSuppression, where the boxes are sorted in descending order of their confidence score.
 
     Args:
-        clip_boxes (bool, optional):
-
-
-
-
-
-
-
+        clip_boxes (bool, optional): Determines whether to apply bounding box normalization to ensure the
+            coordinates are within [0, 1] range. Default: True.
+
+            - If True, clip the boxes that fall outside this range.
+            - If False, return the box coordinates as they are without any modifications.
+
+        pad_per_class (bool, optional): Determines whether the output of the non-maximum suppression (NMS)
+            algorithm should be padded or clipped to meet the maximum size constraints. Default: False.
+
+            - If False, the output is clipped to the maximum size of `max_total_size`.
+            - If True, the output is padded up to `max_size_per_class` * `num_classes` and clipped if
+              it exceeds `max_total_size`.
 
     Inputs:
-        - **boxes** (Tensor) - A Tensor
-
-
-
-
-        - **
-
-
-
-
-
+        - **boxes** (Tensor) - A float32 Tensor with shape :math:`(batch_size, num_boxes, q, 4)`
+          representing the bounding box coordinates.
+          `q` indicates mapping relationship between boxes and classes.
+          If `q` is 1, all classes use the same bounding box. If `q` is equal to the number of classes,
+          class-specific boxes are applied.
+        - **scores** (Tensor) - A 3-D Tensor of float32 type with the shape
+          :math:`(batch_size, num_boxes, num_classes)`. It contains a score value for each box,
+          with each row of `boxes` represented by a single score.
+        - **max_output_size_per_class** (Tensor) - The maximum number of boxes that can be selected for each class
+          by the non-maximum suppression algorithm, represented by a scalar Tensor of type int32.
+        - **max_total_size** (Tensor) - A scalar Tensor of type int32 that represents the
+          maximum number of boxes that are kept for all classes.
+        - **iou_threshold** (Tensor) - A scalar Tensor of float32 type that represents the threshold for
+          determining if the IOU overlap between boxes is too high. `iou_threshold` must be equal or greater
          than 0 and be equal or smaller than 1.
-        - **score_threshold** (Tensor) - A
-          boxes based on
+        - **score_threshold** (Tensor) - A scalar Tensor of type float32 that represents the threshold
+          for determining when to remove boxes based on their scores.
 
     Outputs:
         - **nmsed_boxes** - A Tensor of float32 with shape of (batch_size, num_detection, 4), which contains
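A compact sketch exercising the input contract spelled out above, with one batch, four candidate boxes and a single class (q = 1); all values are illustrative, and the four-tensor unpacking follows the Outputs section that begins with `nmsed_boxes`:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    boxes = Tensor(np.array([[[[0.0, 0.0, 1.0, 1.0]],
                              [[0.0, 0.1, 1.0, 1.1]],
                              [[0.0, 0.2, 1.0, 1.2]],
                              [[0.0, 0.9, 1.0, 1.9]]]], dtype=np.float32))       # (1, 4, 1, 4), q == 1
    scores = Tensor(np.array([[[0.9], [0.8], [0.7], [0.1]]], dtype=np.float32))  # (1, 4, 1)
    max_output_size_per_class = Tensor(4, ms.int32)
    max_total_size = Tensor(4, ms.int32)
    iou_threshold = Tensor(0.5, ms.float32)
    score_threshold = Tensor(0.2, ms.float32)
    nms = ops.CombinedNonMaxSuppression(clip_boxes=True, pad_per_class=False)
    nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = nms(
        boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold)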
@@ -1123,7 +1147,7 @@ class CombinedNonMaxSuppression(Primitive):
         ValueError: If `iou_threshold` not in [0,1].
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> boxes = Tensor(np.array([[[[200, 100, 150, 100]],
@@ -1153,3 +1177,66 @@ class CombinedNonMaxSuppression(Primitive):
         self.add_prim_attr('pad_per_class', self.pad_per_class)
         self.clip_boxes = validator.check_value_type("clip_boxes", clip_boxes, [bool], self.name)
         self.add_prim_attr('clip_boxes', self.clip_boxes)
+
+
+class ResizeV2(Primitive):
+    r"""
+    Using the nearest, linear or cubic interpolate method resize the input tensor 'x'.
+
+    Note:
+        Input x must be a 4-D tensor.
+
+    Args:
+        coordinate_transformation_mode (str): Default is 'half_pixel'. Describes how to transform the
+            coordinate in the resized tensor to the coordinate in the original tensor. Other optional: 'align_corners'.
+            In 'nearest' mode, coordinate_transformation_mode must be 'half_pixel'.
+        mode (str): Defaults to 'nearest'. Other optional: 'linear' and 'cubic'.
+
+    Inputs:
+        - **x** (Tensor) - A 4-D tensor which to resize, with shape [batch, channel, width, height]. Must be one of the
+          following types: uint8, int8, int16, int32, int64, float16, float32, float64, when mode = 'nearest'.
+          Must be one of the following types: float16, float32, float64, when mode = 'linear' or 'cubic'.
+        - **roi** (Tensor) - A 1-D float32 Tensor. Unused parameters currently.
+        - **scales** (Tensor) - A 1-D float32 Tensor. Unused parameters currently.
+        - **sizes** (Tensor) - A 1-D int64 or int32 Tensor, the length must be 4 and greater than 0.
+          And sizes[0], sizes[1] must match with the shape[0] and shape[1] of x.
+          When mode equals 'nearest' or 'linear', sizes[2] must be 1.
+
+    Outputs:
+        A 4-D tensor which shape is [batch, channel, new_height, new_width] with type as same as x.
+
+    Raises:
+        TypeError: If dtype of `x`, `roi`, `scales` or `sizes` is not supported.
+        ValueError: If shape of `x`, `roi`, `scales` or `sizes` is not supported.
+        ValueError: If the length of `sizes` is not 4.
+        ValueError: If `sizes` is not greater than 0.
+        ValueError: If sizes[2] is not 1, when `mode` = 'nearest' or 'linear'.
+        ValueError: If sizes[0] and sizes[1] don't match the shape[0] and shape[1] of x.
+        ValueError: If `coordinate_transformation_mode` or `mode` is not supported.
+        ValueError: If `coordinate_transformation_mode` is not 'half_pixel', when `mode` = 'nearest'.
+
+    Supported Platforms:
+        ``CPU``
+
+    Examples:
+        >>> x = Tensor(np.array([[[[1., 2., 3., 4.]]]]).astype(np.float32))
+        >>> roi = Tensor(np.array([0]).astype(np.float32))
+        >>> scales = Tensor(np.array([0]).astype(np.float32))
+        >>> sizes = Tensor(np.array([1, 1, 1, 9]).astype(np.int64))
+        >>> resize_v2 = ops.ResizeV2(coordinate_transformation_mode="half_pixel", mode="nearest")
+        >>> output = resize_v2(x, roi, scales, sizes)
+        >>> print(output)
+        [[[[1. 1. 1. 2. 2. 3. 3. 4. 4.]]]]
+    """
+    @prim_attr_register
+    def __init__(self, coordinate_transformation_mode="half_pixel", mode="nearest"):
+        """Initialize ResizeV2."""
+        self.init_prim_io_names(inputs=['x', 'roi', 'scales', 'sizes'], outputs=['y'])
+        self.add_prim_attr("nearest_mode", "floor")
+        self.add_prim_attr("cubic_coeff_a", -0.75)
+        validator.check_value_type(
+            "coordinate_transformation_mode", coordinate_transformation_mode, [str], self.name)
+        validator.check_string(coordinate_transformation_mode, ["align_corners", "half_pixel"],
+                               "coordinate_transformation_mode", self.name)
+        validator.check_value_type("mode", mode, [str], self.name)
+        validator.check_string(mode, ["nearest", "linear", "cubic"], "mode", self.name)