mindspore 1.10.0__cp39-cp39-win_amd64.whl → 2.0.0rc1__cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/ConcurrencyCheck.dll +0 -0
- mindspore/CppBuildInsights.dll +0 -0
- mindspore/CppCoreCheck.dll +0 -0
- mindspore/EnumIndex.dll +0 -0
- mindspore/EspXEngine.dll +0 -0
- mindspore/HResultCheck.dll +0 -0
- mindspore/KernelTraceControl.dll +0 -0
- mindspore/LocalESPC.dll +0 -0
- mindspore/Microsoft.Diagnostics.Tracing.EventSource.dll +0 -0
- mindspore/Microsoft.VisualStudio.RemoteControl.dll +0 -0
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Microsoft.VisualStudio.Utilities.Internal.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/System.Runtime.CompilerServices.Unsafe.dll +0 -0
- mindspore/VariantClear.dll +0 -0
- mindspore/__init__.py +9 -4
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/builtin_operations.py +32 -4
- mindspore/_extends/graph_kernel/model/graph_split.py +66 -222
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +12 -9
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +119 -26
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -50
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -6
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -25
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -27
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +17 -2
- mindspore/_extends/parse/parser.py +193 -34
- mindspore/_extends/parse/resources.py +7 -8
- mindspore/_extends/parse/standard_method.py +1780 -435
- mindspore/_extends/parse/trope.py +3 -1
- mindspore/amp.py +53 -58
- mindspore/atlprov.dll +0 -0
- mindspore/boost/adasum.py +3 -2
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +46 -26
- mindspore/boost/dim_reduce.py +6 -5
- mindspore/boost/grad_accumulation.py +2 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/cfgpersist.dll +0 -0
- mindspore/clang_rt.asan_dbg_dynamic-x86_64.dll +0 -0
- mindspore/clang_rt.asan_dynamic-x86_64.dll +0 -0
- mindspore/common/__init__.py +11 -10
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +57 -0
- mindspore/common/api.py +582 -297
- mindspore/common/dtype.py +66 -18
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +38 -1
- mindspore/common/jit_config.py +25 -13
- mindspore/common/mutable.py +53 -24
- mindspore/common/parameter.py +60 -37
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +927 -0
- mindspore/common/tensor.py +1627 -3900
- mindspore/communication/__init__.py +10 -5
- mindspore/communication/_comm_helper.py +78 -214
- mindspore/communication/_hccl_management.py +2 -1
- mindspore/communication/management.py +136 -47
- mindspore/config/op_info.config +501 -1008
- mindspore/context.py +291 -56
- mindspore/d3dcompiler_47.dll +0 -0
- mindspore/dataset/__init__.py +12 -8
- mindspore/dataset/audio/__init__.py +9 -9
- mindspore/dataset/audio/transforms.py +1090 -228
- mindspore/dataset/audio/utils.py +87 -39
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +17 -15
- mindspore/dataset/core/config.py +246 -17
- mindspore/dataset/core/py_util_helpers.py +4 -3
- mindspore/dataset/core/validator_helpers.py +10 -10
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +9 -9
- mindspore/dataset/engine/datasets.py +648 -477
- mindspore/dataset/engine/datasets_audio.py +165 -167
- mindspore/dataset/engine/datasets_standard_format.py +93 -67
- mindspore/dataset/engine/datasets_text.py +492 -342
- mindspore/dataset/engine/datasets_user_defined.py +85 -50
- mindspore/dataset/engine/datasets_vision.py +1224 -699
- mindspore/dataset/engine/graphdata.py +134 -69
- mindspore/dataset/engine/iterators.py +50 -9
- mindspore/dataset/engine/offload.py +52 -31
- mindspore/dataset/engine/samplers.py +27 -24
- mindspore/dataset/engine/serializer_deserializer.py +14 -15
- mindspore/dataset/engine/validators.py +213 -52
- mindspore/dataset/text/__init__.py +10 -8
- mindspore/dataset/text/transforms.py +152 -57
- mindspore/dataset/text/utils.py +98 -49
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +4 -2
- mindspore/dataset/transforms/c_transforms.py +11 -13
- mindspore/dataset/transforms/py_transforms.py +2 -2
- mindspore/dataset/transforms/py_transforms_util.py +10 -0
- mindspore/dataset/transforms/transforms.py +13 -15
- mindspore/dataset/transforms/validators.py +7 -7
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/browse_dataset.py +13 -13
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +8 -7
- mindspore/dataset/vision/c_transforms.py +125 -126
- mindspore/dataset/vision/py_transforms.py +37 -37
- mindspore/dataset/vision/py_transforms_util.py +23 -20
- mindspore/dataset/vision/transforms.py +316 -315
- mindspore/dataset/vision/utils.py +313 -17
- mindspore/dataset/vision/validators.py +6 -6
- mindspore/default_config.py +0 -1
- mindspore/dpcmi.dll +0 -0
- mindspore/{compression → experimental}/__init__.py +6 -5
- mindspore/experimental/map_parameter.py +275 -0
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +70 -9
- mindspore/include/api/delegate.h +8 -1
- mindspore/include/api/dual_abi_helper.h +8 -24
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_group.h +68 -0
- mindspore/include/api/model_parallel_runner.h +17 -17
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +20 -4
- mindspore/include/api/status.h +7 -1
- mindspore/include/api/types.h +25 -21
- mindspore/include/api/visible.h +4 -0
- mindspore/include/c_api/model_c.h +5 -0
- mindspore/include/c_api/status_c.h +1 -1
- mindspore/include/dataset/config.h +1 -1
- mindspore/include/dataset/constants.h +14 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/include/dataset/vision.h +56 -117
- mindspore/include/dataset/vision_lite.h +102 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +28 -28
- mindspore/mindrecord/common/exceptions.py +2 -4
- mindspore/mindrecord/filereader.py +19 -1
- mindspore/mindrecord/filewriter.py +250 -88
- mindspore/mindrecord/mindpage.py +13 -13
- mindspore/mindrecord/shardheader.py +15 -15
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +29 -29
- mindspore/mindrecord/tools/cifar100_to_mr.py +9 -9
- mindspore/mindrecord/tools/cifar10_to_mr.py +9 -9
- mindspore/mindrecord/tools/csv_to_mr.py +4 -4
- mindspore/mindrecord/tools/imagenet_to_mr.py +70 -65
- mindspore/mindrecord/tools/mnist_to_mr.py +41 -41
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/{libmindspore_backend.dll → mindspore_backend.dll} +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +1 -5
- mindspore/nn/cell.py +297 -234
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +17 -42
- mindspore/nn/layer/__init__.py +7 -4
- mindspore/nn/layer/activation.py +131 -88
- mindspore/nn/layer/basic.py +313 -613
- mindspore/nn/layer/channel_shuffle.py +103 -0
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +52 -6
- mindspore/nn/layer/conv.py +112 -43
- mindspore/nn/layer/dense.py +10 -9
- mindspore/nn/layer/embedding.py +36 -34
- mindspore/nn/layer/image.py +123 -27
- mindspore/nn/layer/math.py +108 -107
- mindspore/nn/layer/normalization.py +212 -366
- mindspore/nn/layer/padding.py +370 -42
- mindspore/nn/layer/pooling.py +1443 -219
- mindspore/nn/layer/rnn_cells.py +11 -16
- mindspore/nn/layer/rnns.py +38 -39
- mindspore/nn/layer/thor_layer.py +24 -25
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +9 -6
- mindspore/nn/loss/loss.py +678 -142
- mindspore/nn/metrics.py +53 -0
- mindspore/nn/optim/_dist_optimizer_registry.py +2 -2
- mindspore/nn/optim/ada_grad.py +8 -8
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +18 -14
- mindspore/nn/optim/adam.py +429 -87
- mindspore/nn/optim/adamax.py +5 -6
- mindspore/nn/optim/adasum.py +10 -8
- mindspore/nn/optim/asgd.py +7 -7
- mindspore/nn/optim/ftrl.py +81 -11
- mindspore/nn/optim/lamb.py +7 -8
- mindspore/nn/optim/lars.py +4 -4
- mindspore/nn/optim/lazyadam.py +82 -7
- mindspore/nn/optim/momentum.py +8 -7
- mindspore/nn/optim/optimizer.py +19 -10
- mindspore/nn/optim/proximal_ada_grad.py +6 -5
- mindspore/nn/optim/rmsprop.py +3 -3
- mindspore/nn/optim/rprop.py +20 -16
- mindspore/nn/optim/sgd.py +21 -15
- mindspore/nn/optim/thor.py +23 -21
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +7 -6
- mindspore/nn/probability/bijector/invert.py +4 -2
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/__init__.py +6 -0
- mindspore/nn/probability/distribution/_utils/custom_ops.py +3 -2
- mindspore/nn/probability/distribution/_utils/utils.py +11 -17
- mindspore/nn/probability/distribution/bernoulli.py +6 -6
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +9 -9
- mindspore/nn/probability/distribution/cauchy.py +8 -8
- mindspore/nn/probability/distribution/distribution.py +12 -6
- mindspore/nn/probability/distribution/exponential.py +5 -5
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +6 -5
- mindspore/nn/probability/distribution/gumbel.py +5 -5
- mindspore/nn/probability/distribution/half_normal.py +133 -0
- mindspore/nn/probability/distribution/laplace.py +128 -0
- mindspore/nn/probability/distribution/log_normal.py +0 -1
- mindspore/nn/probability/distribution/logistic.py +4 -5
- mindspore/nn/probability/distribution/normal.py +11 -15
- mindspore/nn/probability/distribution/poisson.py +6 -2
- mindspore/nn/probability/distribution/student_t.py +150 -0
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +5 -5
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +8 -1
- mindspore/nn/wrap/cell_wrapper.py +55 -27
- mindspore/nn/wrap/grad_reducer.py +20 -11
- mindspore/nn/wrap/loss_scale.py +47 -30
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +46 -42
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +26 -19
- mindspore/numpy/utils.py +1 -8
- mindspore/numpy/utils_const.py +112 -62
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +6 -3
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +209 -152
- mindspore/ops/_grad/grad_base.py +55 -17
- mindspore/ops/_grad/grad_clip_ops.py +11 -3
- mindspore/ops/_grad/grad_comm_ops.py +58 -47
- mindspore/ops/_grad/grad_implementations.py +21 -61
- mindspore/ops/_grad/grad_inner_ops.py +48 -6
- mindspore/ops/_grad/grad_math_ops.py +306 -161
- mindspore/ops/_grad/grad_nn_ops.py +192 -181
- mindspore/ops/_grad/grad_other_ops.py +1 -1
- mindspore/ops/_grad/grad_quant_ops.py +5 -5
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +15 -9
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +441 -55
- mindspore/ops/_grad_experimental/grad_image_ops.py +25 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +3 -44
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +16 -21
- mindspore/ops/_grad_experimental/grad_math_ops.py +979 -49
- mindspore/ops/_grad_experimental/grad_nn_ops.py +78 -8
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +197 -13
- mindspore/ops/_op_impl/__init__.py +3 -3
- mindspore/ops/_op_impl/_custom_op/__init__.py +0 -1
- mindspore/ops/_op_impl/_custom_op/_basic.py +0 -1
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +4 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +5 -5
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +3 -3
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +4 -8
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +0 -1
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -1
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +238 -3
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
- mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
- mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
- mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
- mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/cauchy.py} +17 -10
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/cholesky.py +1 -1
- mindspore/ops/_op_impl/{cpu/bias_add.py → aicpu/choleskygrad.py} +9 -7
- mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
- mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
- mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +2 -2
- mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
- mindspore/ops/_op_impl/aicpu/diag.py +36 -0
- mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
- mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
- mindspore/ops/_op_impl/{cpu/bias_add_grad.py → aicpu/digamma.py} +9 -7
- mindspore/ops/_op_impl/aicpu/eig.py +35 -0
- mindspore/ops/_op_impl/aicpu/fft_with_size.py +41 -0
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/glu.py +33 -0
- mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/{tbe/scatter_add_ds.py → aicpu/inplace_index_add.py} +17 -21
- mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
- mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/lgamma.py +32 -0
- mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
- mindspore/ops/_op_impl/aicpu/logit.py +33 -0
- mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
- mindspore/ops/_op_impl/aicpu/masked_scatter.py +39 -0
- mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
- mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
- mindspore/ops/_op_impl/aicpu/matrix_power.py +32 -0
- mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
- mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +2 -0
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
- mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/qr.py +36 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +3 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/range.py +36 -0
- mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
- mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/search_sorted.py +12 -6
- mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sort.py +39 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
- mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
- mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
- mindspore/ops/_op_impl/{tbe/slice_ds.py → aicpu/sparse_segment_sum.py} +16 -24
- mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
- mindspore/ops/_op_impl/aicpu/squared_difference.py +2 -0
- mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +93 -0
- mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +66 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/{tbe/gather_v2.py → aicpu/tile.py} +24 -24
- mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/__init__.py +1 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/maximum_grad.py +2 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/cpu/pyexecute.py} +13 -8
- mindspore/ops/_op_impl/cpu/reduce_sum.py +8 -0
- mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -608
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +42 -0
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -1
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +41 -0
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +1 -0
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +2 -0
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +40 -0
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -2
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -2
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +1 -1
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/greater.py +2 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -1
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -6
- mindspore/ops/_op_impl/tbe/{greater_ds.py → reduce_all_ds.py} +13 -16
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +39 -0
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/scatter_add.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +2 -2
- mindspore/ops/_op_impl/tbe/slice.py +26 -15
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +1 -0
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +1 -1
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +2 -0
- mindspore/ops/_primitive_cache.py +3 -2
- mindspore/ops/_register_for_op.py +11 -0
- mindspore/ops/_utils/__init__.py +1 -1
- mindspore/ops/_utils/utils.py +20 -41
- mindspore/ops/_vmap/__init__.py +2 -2
- mindspore/ops/_vmap/vmap_array_ops.py +170 -78
- mindspore/ops/_vmap/vmap_base.py +24 -10
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -9
- mindspore/ops/_vmap/vmap_image_ops.py +52 -0
- mindspore/ops/_vmap/vmap_math_ops.py +77 -6
- mindspore/ops/_vmap/vmap_nn_ops.py +78 -29
- mindspore/ops/_vmap/vmap_other_ops.py +3 -1
- mindspore/ops/_vmap/vmap_random_ops.py +55 -3
- mindspore/ops/_vmap/vmap_sparse_ops.py +1 -0
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Argmax_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/Argmin_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/Assign_bprop.mindir +16 -16
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +28 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +306 -0
- mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +12 -8
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DType_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/Depend_bprop.mindir +12 -13
- mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -24
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -14
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Equal_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +54 -0
- mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +18 -15
- mindspore/ops/bprop_mindir/GatherD_bprop.mindir +26 -0
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +57 -0
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/Greater_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/IOU_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/IsInf_bprop.mindir +13 -10
- mindspore/ops/bprop_mindir/IsNan_bprop.mindir +14 -11
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Less_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +22 -19
- mindspore/ops/bprop_mindir/Load_bprop.mindir +12 -13
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NonZero_bprop.mindir +14 -0
- mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +25 -23
- mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +13 -13
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/Range_bprop.mindir +21 -19
- mindspore/ops/bprop_mindir/Rank_bprop.mindir +11 -11
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +18 -17
- mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +18 -17
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +19 -23
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +60 -0
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +89 -0
- mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +52 -0
- mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/Round_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +24 -0
- mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/Select_bprop.mindir +30 -34
- mindspore/ops/bprop_mindir/Shape_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Sign_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/Slice_bprop.mindir +26 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +28 -0
- mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Split_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +54 -0
- mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +95 -0
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +98 -0
- mindspore/ops/bprop_mindir/Switch_bprop.mindir +28 -32
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +14 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TransShape_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +18 -15
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +11 -13
- mindspore/ops/bprop_mindir/Unique_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Unstack_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/__init__.py +1 -4
- mindspore/ops/bprop_mindir/generate_mindir.py +32 -20
- mindspore/ops/composite/__init__.py +12 -13
- mindspore/ops/composite/base.py +261 -254
- mindspore/ops/composite/env_ops.py +41 -0
- mindspore/ops/composite/math_ops.py +197 -156
- mindspore/ops/composite/multitype_ops/_compile_utils.py +428 -176
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +188 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +23 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +3 -3
- mindspore/ops/composite/multitype_ops/equal_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +52 -5
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +15 -3
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +33 -2
- mindspore/ops/composite/multitype_ops/less_impl.py +33 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -7
- mindspore/ops/composite/multitype_ops/not_in_impl.py +15 -3
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +62 -70
- mindspore/ops/composite/multitype_ops/sub_impl.py +3 -3
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +41 -4
- mindspore/ops/function/__init__.py +323 -8
- mindspore/ops/function/array_func.py +3511 -780
- mindspore/ops/function/clip_func.py +329 -0
- mindspore/ops/function/debug_func.py +6 -6
- mindspore/ops/function/grad/__init__.py +5 -1
- mindspore/ops/function/grad/grad_func.py +736 -65
- mindspore/ops/function/image_func.py +270 -0
- mindspore/ops/function/linalg_func.py +268 -8
- mindspore/ops/function/math_func.py +8032 -3164
- mindspore/ops/function/nn_func.py +5619 -1855
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +11 -10
- mindspore/ops/function/random_func.py +939 -77
- mindspore/ops/function/sparse_func.py +249 -84
- mindspore/ops/function/sparse_unary_func.py +2303 -0
- mindspore/ops/function/spectral_func.py +146 -0
- mindspore/ops/function/vmap_func.py +114 -0
- mindspore/ops/functional.py +182 -254
- mindspore/ops/op_info_register.py +79 -34
- mindspore/ops/operations/__init__.py +210 -118
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +25 -15
- mindspore/ops/operations/_grad_ops.py +447 -322
- mindspore/ops/operations/_inner_ops.py +547 -176
- mindspore/ops/operations/_map_tensor_ops.py +112 -0
- mindspore/ops/operations/_ms_kernel.py +29 -27
- mindspore/ops/operations/_ocr_ops.py +11 -11
- mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
- mindspore/ops/operations/_quant_ops.py +186 -101
- mindspore/ops/operations/_rl_inner_ops.py +122 -61
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1047 -0
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +4 -4
- mindspore/ops/operations/array_ops.py +1428 -1226
- mindspore/ops/operations/comm_ops.py +180 -117
- mindspore/ops/operations/control_ops.py +4 -2
- mindspore/ops/operations/custom_ops.py +185 -98
- mindspore/ops/operations/debug_ops.py +92 -54
- mindspore/ops/operations/image_ops.py +406 -211
- mindspore/ops/operations/inner_ops.py +42 -53
- mindspore/ops/operations/linalg_ops.py +32 -29
- mindspore/ops/operations/math_ops.py +2076 -897
- mindspore/ops/operations/nn_ops.py +1282 -1252
- mindspore/ops/operations/other_ops.py +124 -278
- mindspore/ops/operations/random_ops.py +345 -178
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +502 -157
- mindspore/ops/operations/spectral_ops.py +107 -0
- mindspore/ops/primitive.py +192 -15
- mindspore/ops/vm_impl_registry.py +23 -2
- mindspore/parallel/__init__.py +6 -1
- mindspore/parallel/_auto_parallel_context.py +199 -92
- mindspore/parallel/_cell_wrapper.py +4 -2
- mindspore/parallel/_cost_model_context.py +3 -0
- mindspore/parallel/_dp_allreduce_fusion.py +2 -1
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +167 -28
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +9 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +59 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +160 -35
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +235 -196
- mindspore/parallel/_utils.py +47 -7
- mindspore/parallel/algo_parameter_config.py +5 -1
- mindspore/parallel/checkpoint_transform.py +329 -0
- mindspore/parallel/shard.py +229 -0
- mindspore/perf_msvcbuildinsights.dll +0 -0
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/common/util.py +4 -3
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +249 -0
- mindspore/profiler/parser/aicpu_data_parser.py +38 -39
- mindspore/profiler/parser/ascend_timeline_generator.py +497 -0
- mindspore/profiler/parser/base_timeline_generator.py +471 -0
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +684 -0
- mindspore/profiler/parser/framework_parser.py +42 -16
- mindspore/profiler/parser/hccl_parser.py +158 -158
- mindspore/profiler/parser/hwts_log_parser.py +7 -6
- mindspore/profiler/parser/integrator.py +18 -1579
- mindspore/profiler/parser/minddata_analyzer.py +8 -8
- mindspore/profiler/parser/msadvisor_analyzer.py +14 -27
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +108 -0
- mindspore/profiler/parser/step_trace_parser.py +1 -1
- mindspore/profiler/profiling.py +396 -194
- mindspore/rewrite/__init__.py +6 -2
- mindspore/rewrite/api/node.py +51 -110
- mindspore/rewrite/api/node_type.py +10 -6
- mindspore/rewrite/api/pattern_engine.py +51 -7
- mindspore/rewrite/api/scoped_value.py +64 -53
- mindspore/rewrite/api/symbol_tree.py +108 -61
- mindspore/rewrite/api/tree_node_helper.py +2 -3
- mindspore/{compression/quant/__init__.py → rewrite/ast_creator_register.py} +20 -11
- mindspore/rewrite/ast_helpers/__init__.py +6 -3
- mindspore/rewrite/ast_helpers/ast_creator.py +115 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +99 -1
- mindspore/rewrite/ast_helpers/ast_modifier.py +17 -4
- mindspore/rewrite/ast_helpers/ast_replacer.py +1 -1
- mindspore/rewrite/ast_transformers/__init__.py +0 -1
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +46 -5
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +6 -3
- mindspore/rewrite/common/__init__.py +2 -0
- mindspore/rewrite/common/event.py +1 -1
- mindspore/rewrite/common/observable.py +1 -1
- mindspore/rewrite/common/observer.py +1 -1
- mindspore/rewrite/common/rewrite_elog.py +35 -0
- mindspore/rewrite/namer.py +2 -2
- mindspore/rewrite/namespace.py +14 -4
- mindspore/rewrite/node.py +161 -13
- mindspore/rewrite/parser.py +0 -1
- mindspore/rewrite/parser_register.py +0 -1
- mindspore/rewrite/parsers/arguments_parser.py +3 -2
- mindspore/rewrite/parsers/assign_parser.py +267 -67
- mindspore/rewrite/parsers/attribute_parser.py +56 -0
- mindspore/rewrite/parsers/class_def_parser.py +191 -108
- mindspore/rewrite/parsers/constant_parser.py +101 -0
- mindspore/rewrite/parsers/container_parser.py +88 -0
- mindspore/rewrite/parsers/for_parser.py +28 -15
- mindspore/rewrite/parsers/function_def_parser.py +21 -5
- mindspore/rewrite/parsers/if_parser.py +11 -28
- mindspore/rewrite/parsers/module_parser.py +9 -6
- mindspore/rewrite/parsers/return_parser.py +3 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +322 -109
- mindspore/rewrite/symbol_tree_builder.py +45 -8
- mindspore/rewrite/symbol_tree_dumper.py +0 -1
- mindspore/rewrite/topological_manager.py +1 -2
- mindspore/run_check/_check_version.py +209 -112
- mindspore/run_check/run_check.py +2 -1
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +6 -4
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +321 -50
- mindspore/train/callback/__init__.py +3 -1
- mindspore/train/callback/_backup_and_restore.py +120 -0
- mindspore/train/callback/_callback.py +8 -8
- mindspore/train/callback/_checkpoint.py +12 -9
- mindspore/train/callback/_early_stop.py +13 -7
- mindspore/train/callback/_history.py +8 -8
- mindspore/train/callback/_lambda_callback.py +6 -6
- mindspore/train/callback/_landscape.py +36 -38
- mindspore/train/callback/_loss_monitor.py +12 -6
- mindspore/train/callback/_lr_scheduler_callback.py +2 -4
- mindspore/train/callback/_on_request_exit.py +212 -0
- mindspore/train/callback/_reduce_lr_on_plateau.py +13 -7
- mindspore/train/callback/_summary_collector.py +27 -19
- mindspore/train/callback/_time_monitor.py +13 -7
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +122 -33
- mindspore/train/dataset_helper.py +28 -87
- mindspore/train/loss_scale_manager.py +4 -7
- mindspore/{nn → train}/metrics/__init__.py +20 -20
- mindspore/{nn → train}/metrics/accuracy.py +12 -10
- mindspore/{nn → train}/metrics/auc.py +4 -4
- mindspore/{nn → train}/metrics/bleu_score.py +4 -4
- mindspore/{nn → train}/metrics/confusion_matrix.py +10 -8
- mindspore/{nn → train}/metrics/cosine_similarity.py +4 -4
- mindspore/{nn → train}/metrics/dice.py +6 -5
- mindspore/{nn → train}/metrics/error.py +7 -5
- mindspore/{nn → train}/metrics/fbeta.py +9 -7
- mindspore/{nn → train}/metrics/hausdorff_distance.py +8 -6
- mindspore/{nn → train}/metrics/loss.py +4 -3
- mindspore/{nn → train}/metrics/mean_surface_distance.py +6 -5
- mindspore/{nn → train}/metrics/metric.py +6 -5
- mindspore/{nn → train}/metrics/occlusion_sensitivity.py +4 -3
- mindspore/{nn → train}/metrics/perplexity.py +5 -4
- mindspore/{nn → train}/metrics/precision.py +5 -4
- mindspore/{nn → train}/metrics/recall.py +5 -4
- mindspore/{nn → train}/metrics/roc.py +7 -6
- mindspore/{nn → train}/metrics/root_mean_square_surface_distance.py +6 -5
- mindspore/{nn → train}/metrics/topk.py +7 -5
- mindspore/train/mind_ir_pb2.py +339 -32
- mindspore/train/model.py +113 -84
- mindspore/train/serialization.py +547 -167
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -12
- mindspore/train/train_thor/convert_utils.py +7 -1
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/train/train_thor/model_thor.py +0 -4
- mindspore/turbojpeg.dll +0 -0
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +4 -3
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +901 -660
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -514
- mindspore/compression/quant/qat.py +0 -636
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/libatomic-1.dll +0 -0
- mindspore/libgcc_s_seh-1.dll +0 -0
- mindspore/libgfortran-4.dll +0 -0
- mindspore/libgomp-1.dll +0 -0
- mindspore/libjpeg-62.dll +0 -0
- mindspore/libmindspore.dll +0 -0
- mindspore/libmindspore_common.dll +0 -0
- mindspore/libmindspore_core.dll +0 -0
- mindspore/libmindspore_glog.dll +0 -0
- mindspore/libnnacl.dll +0 -0
- mindspore/libopencv_core452.dll +0 -0
- mindspore/libopencv_imgcodecs452.dll +0 -0
- mindspore/libopencv_imgproc452.dll +0 -0
- mindspore/libquadmath-0.dll +0 -0
- mindspore/libsqlite3.dll +0 -0
- mindspore/libssp-0.dll +0 -0
- mindspore/libstdc++-6.dll +0 -0
- mindspore/libtinyxml2.dll +0 -0
- mindspore/libturbojpeg.dll +0 -0
- mindspore/libwinpthread-1.dll +0 -0
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -138
- mindspore/nn/probability/dpn/vae/vae.py +0 -122
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -363
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/tbe/bias_add_grad_ds.py +0 -52
- mindspore/ops/_op_impl/tbe/scatter_nd_add_ds.py +0 -43
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Identity_bprop.mindir +0 -9
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/stop_gradient_bprop.mindir +0 -12
- mindspore/ops/composite/array_ops.py +0 -210
- mindspore/ops/composite/clip_ops.py +0 -238
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/ops/operations/sponge_ops.py +0 -3531
- mindspore/ops/operations/sponge_update_ops.py +0 -2546
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- mindspore/run_check/_check_deps_version.py +0 -84
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
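Beyond the pooling diff shown below, the listing records several public-API moves: the `mindspore.nn.metrics` package relocates to `mindspore.train.metrics`, `mindspore.nn.transformer` moves under the private `mindspore.parallel._transformer`, and the `mindspore.compression` package is deleted outright. A minimal migration sketch for the metrics move, assuming the metric class names (e.g. `Accuracy`) and the `clear`/`update`/`eval` protocol carry over unchanged from 1.10:

# Hedged sketch of the nn.metrics -> train.metrics relocation visible in the
# file listing above; the Accuracy class and its clear/update/eval protocol
# are assumed to survive the move unchanged.
import numpy as np
import mindspore as ms

# 1.10.0:   from mindspore.nn.metrics import Accuracy
# 2.0.0rc1 (new location, per the renamed files above):
from mindspore.train.metrics import Accuracy

metric = Accuracy('classification')
metric.clear()
y_pred = ms.Tensor(np.array([[0.1, 0.9], [0.8, 0.2]]), ms.float32)  # predictions for 2 samples
y = ms.Tensor(np.array([1, 0]), ms.int32)                           # ground-truth labels
metric.update(y_pred, y)
print(metric.eval())  # 1.0 -- both samples predicted correctly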
mindspore/nn/layer/pooling.py
CHANGED
@@ -17,8 +17,10 @@ from __future__ import absolute_import
 
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
-
-from mindspore.
+import mindspore.ops as ops
+from mindspore._checkparam import _check_3d_int_or_tuple
+from mindspore import _checkparam as validator
+from mindspore.ops.primitive import constexpr, _primexpr
 from mindspore.common.tensor import Tensor
 import mindspore.context as context
 from mindspore.common import dtype as mstype
@@ -26,8 +28,10 @@ from mindspore.ops.operations.nn_ops import AdaptiveMaxPool2D
 from mindspore.ops.operations.nn_ops import AdaptiveMaxPool3D, AdaptiveAvgPool3D
 from mindspore.nn.cell import Cell
 
-__all__ = ['AvgPool2d', 'MaxPool2d', 'AvgPool1d', 'MaxPool1d', '
-           '
+__all__ = ['AvgPool3d', 'MaxPool3d', 'AvgPool2d', 'MaxPool2d', 'AvgPool1d', 'MaxPool1d', 'FractionalMaxPool2d',
+           'FractionalMaxPool3d', 'AdaptiveAvgPool1d', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d',
+           'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d', 'LPPool1d',
+           'LPPool2d']
 
 
 class _PoolNd(Cell):
@@ -37,7 +41,7 @@ class _PoolNd(Cell):
         """Initialize _PoolNd."""
         super(_PoolNd, self).__init__()
         validator.check_value_type('pad_mode', pad_mode, [str], self.cls_name)
-        self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.cls_name)
+        self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME', 'PAD'], 'pad_mode', self.cls_name)
         self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.cls_name)
         if context.get_context("device_target") != "GPU" and self.format == "NHWC":
             raise ValueError(f"For '{self.cls_name}, the 'NHWC' format only support in GPU target, but got device "
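The one-line change above is load-bearing: the shared `_PoolNd` validator now whitelists 'PAD' alongside 'VALID' and 'SAME', which is what lets the 2.0 pooling cells accept explicit padding. A hedged sketch, assuming `nn.MaxPool2d` in 2.0.0rc1 pairs `pad_mode='pad'` with a `padding` argument:

# Hedged sketch: 'PAD' in the _PoolNd whitelist enables explicit padding on
# pooling cells; the `padding` keyword below is assumed from the 2.0.0rc1 API.
import numpy as np
import mindspore as ms
import mindspore.nn as nn

x = ms.Tensor(np.ones((1, 3, 8, 8)), ms.float32)
pool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='pad', padding=1)
y = pool(x)
print(y.shape)  # (1, 3, 4, 4): floor((8 + 2*1 - 3) / 2) + 1 = 4 per spatial dim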
@@ -46,17 +50,17 @@ class _PoolNd(Cell):
         def _check_int_or_tuple(arg_name, arg_value):
             validator.check_value_type(arg_name, arg_value, [int, tuple], self.cls_name)
             error_msg = f"For '{self.cls_name}', the '{arg_name}' must be an positive int number or " \
-                        f"a tuple
+                        f"a tuple, but got {arg_value}"
             if isinstance(arg_value, int):
                 if arg_value <= 0:
                     raise ValueError(error_msg)
-
+            else:
                 for item in arg_value:
                     if isinstance(item, int) and item > 0:
                         continue
                     raise ValueError(error_msg)
-
-
+                if len(arg_value) == 1:
+                    return arg_value[0]
             return arg_value
 
         self.kernel_size = _check_int_or_tuple('kernel_size', kernel_size)
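Two behavioural tweaks hide in this hunk: tuple arguments are now validated in an explicit `else` branch, and a single-element tuple is unwrapped to its scalar, so `kernel_size=(3,)` and `kernel_size=3` normalize to the same value. A standalone restatement of the new logic, with the `Validator` plumbing stripped out and the error wording paraphrased:

# Standalone restatement of the reworked _check_int_or_tuple logic from the
# hunk above; runnable on its own, no MindSpore required.
def check_int_or_tuple(arg_name, arg_value):
    error_msg = f"'{arg_name}' must be a positive int or a tuple, but got {arg_value}"
    if isinstance(arg_value, int):
        if arg_value <= 0:
            raise ValueError(error_msg)
    else:
        for item in arg_value:
            if isinstance(item, int) and item > 0:
                continue
            raise ValueError(error_msg)
        if len(arg_value) == 1:      # new in 2.0.0rc1: 1-tuples collapse to int
            return arg_value[0]
    return arg_value

assert check_int_or_tuple('kernel_size', (3,)) == 3
assert check_int_or_tuple('kernel_size', (2, 2)) == (2, 2)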
@@ -69,11 +73,352 @@ class _PoolNd(Cell):
|
|
|
69
73
|
return 'kernel_size={kernel_size}, stride={stride}, pad_mode={pad_mode}'.format(**self.__dict__)
|
|
70
74
|
|
|
71
75
|
|
|
72
|
-
@
|
|
76
|
+
@_primexpr
|
|
73
77
|
def _shape_check(in_shape, prim_name=None):
|
|
74
78
|
msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
|
|
75
|
-
|
|
76
|
-
|
|
79
|
+
|
|
80
|
+
def _check():
|
|
81
|
+
if len(in_shape) != 3:
|
|
82
|
+
raise ValueError(f"{msg_prefix} input must has 3 dim, but got {len(in_shape)}")
|
|
83
|
+
|
|
84
|
+
_check()
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
class LPPool1d(Cell):
|
|
88
|
+
r"""
|
|
89
|
+
Applying 1D LPPooling operation on an input Tensor can be regarded as forming a 1D input plane.
|
|
90
|
+
|
|
91
|
+
Typically the input is of shape :math:`(N_{in}, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})``, the output is of
|
|
92
|
+
shape :math:`(N_{out}, C_{out}, L_{out})` or :math:`(C_{out}, L_{out})`, with the same shape as input,
|
|
93
|
+
the operation is as follows.
|
|
94
|
+
|
|
95
|
+
.. math::
|
|
96
|
+
f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
|
|
97
|
+
|
|
98
|
+
Args:
|
|
99
|
+
norm_type (Union[int, float]): Type of normalization, represents p in the formula, can not be 0.
|
|
100
|
+
|
|
101
|
+
- if p = 1, the result is the sum of the elements within the pooling kernel(proportional to average
|
|
102
|
+
pooling).
|
|
103
|
+
- if p = :math:`\infty`, the result is the result of maximum pooling.
|
|
104
|
+
|
|
105
|
+
kernel_size (int): The size of kernel window.
|
|
106
|
+
stride (int): The distance of kernel moving, an int number that represents
|
|
107
|
+
the width of movement is stride, if the value is None, the default value `kernel_size` is used;
|
|
108
|
+
ceil_mode (bool): Whether to use ceil or floor to calculate output shape. Default: False.
|
|
109
|
+
|
|
110
|
+
Inputs:
|
|
111
|
+
- **x** (Tensor) - Tensor of shape :math:`(N_{in}, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.
|
|
112
|
+
|
|
113
|
+
Outputs:
|
|
114
|
+
- **output** (Tensor) - LPPool1d result, with shape :math:`(N_{out}, C_{out}, L_{out})` or
|
|
115
|
+
:math:`(C_{out}, L_{out})`, it has the same data type as `x`, where
|
|
116
|
+
|
|
117
|
+
.. math::
|
|
118
|
+
L_{out} = \left\lfloor\frac{L_{in} - \text{kernel_size}}{\text{stride}} + 1\right\rfloor
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
Raises:
|
|
122
|
+
TypeError: If `x` is not an Tensor.
|
|
123
|
+
TypeError: If `kernel_size` or `stride` is not an int.
|
|
124
|
+
TypeError: If `ceil_mode` is not a bool.
|
|
125
|
+
TypeError: If `norm_type` is neither float nor int.
|
|
126
|
+
ValueError: If `norm_type` is equal to 0.
|
|
127
|
+
ValueError: If `kernel_size` or `stride` is less than 1.
|
|
128
|
+
ValueError: If length of shape of `x` is not equal to 2 or 3.
|
|
129
|
+
|
|
130
|
+
Supported Platforms:
|
|
131
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
132
|
+
|
|
133
|
+
Examples:
|
|
134
|
+
>>> import mindspore as ms
|
|
135
|
+
>>> import mindspore.nn as nn
|
|
136
|
+
>>> from mindspore import Tensor
|
|
137
|
+
>>> import numpy as np
|
|
138
|
+
>>> a = Tensor(np.arange(2 * 3 * 4).reshape((2, 3, 4)), dtype=ms.float32)
|
|
139
|
+
>>> net = nn.LPPool1d(norm_type=1, kernel_size=3, stride=1)
|
|
140
|
+
>>> out = net(a)
|
|
141
|
+
>>> print(out)
|
|
142
|
+
[[[ 3. 6.]
|
|
143
|
+
[15. 18.]
|
|
144
|
+
[27. 30.]]
|
|
145
|
+
[[39. 42.]
|
|
146
|
+
[51. 54.]
|
|
147
|
+
[63. 66.]]]
|
|
148
|
+
"""
|
|
149
|
+
|
|
150
|
+
def __init__(self, norm_type, kernel_size, stride=None, ceil_mode=False):
|
|
151
|
+
super(LPPool1d, self).__init__()
|
|
152
|
+
self.norm_type = norm_type
|
|
153
|
+
self.kernel_size = kernel_size
|
|
154
|
+
self.stride = stride
|
|
155
|
+
self.ceil_mode = ceil_mode
|
|
156
|
+
|
|
157
|
+
def construct(self, x):
|
|
158
|
+
return ops.lp_pool1d(x, self.norm_type, self.kernel_size,
|
|
159
|
+
self.stride, self.ceil_mode)
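A quick NumPy cross-check of the LPPool1d doctest: with norm_type=1 the LP pool reduces to a sliding-window sum (a sketch that reuses `a` and `out` from the example above):

>>> win = np.lib.stride_tricks.sliding_window_view(a.asnumpy(), 3, axis=-1)
>>> np.allclose(win.sum(-1), out.asnumpy())
True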
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
class LPPool2d(Cell):
|
|
163
|
+
r"""
|
|
164
|
+
Applying 2D LPPooling operation on an input Tensor can be regarded as forming a 2D input plane.
|
|
165
|
+
|
|
166
|
+
Typically the input is of shape :math:`(N, C, H_{in}, W_{in})`, the output is of shape
|
|
167
|
+
:math:`(N, C, H_{out}, W_{out})`, with the same data type as the input; the operation is as follows.
|
|
168
|
+
|
|
169
|
+
.. math::
|
|
170
|
+
f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
|
|
171
|
+
|
|
172
|
+
Args:
|
|
173
|
+
norm_type (Union[int, float]): Type of normalization, represents p in the formula, and cannot be 0.
|
|
174
|
+
|
|
175
|
+
- if p = 1, the result is the sum of the elements within the pooling kernel (proportional to average
|
|
176
|
+
pooling).
|
|
177
|
+
- if p = :math:`\infty`, the result is that of maximum pooling.
|
|
178
|
+
|
|
179
|
+
kernel_size (Union[int, tuple[int]]): The size of kernel window.
|
|
180
|
+
The data type of kernel_size must be int and the value represents the height and width,
|
|
181
|
+
or a tuple of two int numbers that represent height and width respectively.
|
|
182
|
+
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
|
|
183
|
+
the height and width of movement are both stride, or a tuple of two int numbers that
|
|
184
|
+
represent height and width of movement respectively, if the value is None,
|
|
185
|
+
the default value `kernel_size` is used.
|
|
186
|
+
ceil_mode (bool): Whether to use ceil or floor to calculate output shape. Default: False.
|
|
187
|
+
|
|
188
|
+
Inputs:
|
|
189
|
+
- **x** (Tensor) - Tensor of shape :math:`(N, C, H_{in}, W_{in})`.
|
|
190
|
+
|
|
191
|
+
Outputs:
|
|
192
|
+
- **output** (Tensor) - LPPool2d result, with shape :math:`(N, C, H_{out}, W_{out})`,
|
|
193
|
+
it has the same data type as `x`, where
|
|
194
|
+
|
|
195
|
+
.. math::
|
|
196
|
+
H_{out} = \left\lfloor\frac{H_{in} - \text{kernel_size}[0]}{\text{stride}[0]} + 1\right\rfloor
|
|
197
|
+
|
|
198
|
+
.. math::
|
|
199
|
+
W_{out} = \left\lfloor\frac{W_{in} - \text{kernel_size}[1]}{\text{stride}[1]} + 1\right\rfloor
|
|
200
|
+
|
|
201
|
+
Raises:
|
|
202
|
+
TypeError: If `x` is not a Tensor.
|
|
203
|
+
TypeError: If `kernel_size` or `stride` is neither int nor tuple.
|
|
204
|
+
TypeError: If `ceil_mode` is not a bool.
|
|
205
|
+
TypeError: If `norm_type` is neither float nor int.
|
|
206
|
+
ValueError: If `norm_type` is equal to 0.
|
|
207
|
+
ValueError: If `kernel_size` or `stride` is less than 1.
|
|
208
|
+
ValueError: If `kernel_size` or `stride` is a tuple whose length is not equal to `2`.
|
|
209
|
+
ValueError: If length of shape of `x` is not equal to 4.
|
|
210
|
+
|
|
211
|
+
Supported Platforms:
|
|
212
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
213
|
+
|
|
214
|
+
Examples:
|
|
215
|
+
>>> import mindspore as ms
|
|
216
|
+
>>> import mindspore.nn as nn
|
|
217
|
+
>>> from mindspore import Tensor
|
|
218
|
+
>>> import numpy as np
|
|
219
|
+
>>> a = Tensor(np.arange(2 * 3 * 4 * 5).reshape((2, 3, 4, 5)), dtype=ms.float32)
|
|
220
|
+
>>> net = nn.LPPool2d(norm_type=1, kernel_size=3, stride=1)
|
|
221
|
+
>>> out = net(a)
|
|
222
|
+
>>> print(out)
|
|
223
|
+
[[[[ 54. 63. 72.]
|
|
224
|
+
[ 99. 108. 117.]]
|
|
225
|
+
[[ 234. 243. 252.]
|
|
226
|
+
[ 279. 288. 297.]]
|
|
227
|
+
[[ 414. 423. 432.]
|
|
228
|
+
[ 459. 468. 477.]]]
|
|
229
|
+
[[[ 594. 603. 612.]
|
|
230
|
+
[ 639. 648. 657.]]
|
|
231
|
+
[[ 774. 783. 792.]
|
|
232
|
+
[ 819. 828. 837.]]
|
|
233
|
+
[[ 954. 963. 972.]
|
|
234
|
+
[ 999. 1008. 1017.]]]]
|
|
235
|
+
"""
|
|
236
|
+
|
|
237
|
+
def __init__(self, norm_type, kernel_size, stride=None, ceil_mode=False):
|
|
238
|
+
super(LPPool2d, self).__init__()
|
|
239
|
+
self.norm_type = norm_type
|
|
240
|
+
self.kernel_size = kernel_size
|
|
241
|
+
self.stride = stride
|
|
242
|
+
self.ceil_mode = ceil_mode
|
|
243
|
+
|
|
244
|
+
def construct(self, x):
|
|
245
|
+
return ops.lp_pool2d(x, self.norm_type, self.kernel_size,
|
|
246
|
+
self.stride, self.ceil_mode)
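The same cross-check works in 2-D: with norm_type=1 each output element is the sum over a 3x3 window (a sketch reusing `a` and `out` from the example above):

>>> win = np.lib.stride_tricks.sliding_window_view(a.asnumpy(), (3, 3), axis=(-2, -1))
>>> np.allclose(win.sum(axis=(-2, -1)), out.asnumpy())
True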
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
def _check_maxpool_padding(padding, nd, cls_name):
|
|
250
|
+
"""Calculate maxpool padding before call primitive"""
|
|
251
|
+
validator.check_value_type('padding', padding, (int, tuple, list), cls_name)
|
|
252
|
+
if isinstance(padding, int):
|
|
253
|
+
return (0,) * (3 - nd) + (padding,) * nd
|
|
254
|
+
if isinstance(padding, (tuple, list)):
|
|
255
|
+
validator.check_non_negative_int_sequence(padding, "padding", cls_name)
|
|
256
|
+
if len(padding) == 1:
|
|
257
|
+
return (0,) * (3 - nd) + tuple(padding * nd)
|
|
258
|
+
if len(padding) != nd:
|
|
259
|
+
raise ValueError(f"For {cls_name}, the length of padding must equal to {nd}, but got {len(padding)}.")
|
|
260
|
+
return (0,) * (3 - nd) + tuple(padding)
|
|
261
|
+
return padding
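The helper always returns a length-3 pads value for the underlying 3-D primitive, left-filling zeros for the dimensions a lower-rank pool does not use. Illustrative traces (assuming the helper is in scope):

>>> _check_maxpool_padding(2, 2, "MaxPool2d")       # int broadcasts over nd=2
(0, 2, 2)
>>> _check_maxpool_padding([1], 3, "MaxPool3d")     # a 1-element sequence broadcasts
(1, 1, 1)
>>> _check_maxpool_padding((1, 3), 2, "MaxPool2d")  # per-dimension values are kept
(0, 1, 3)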
|
|
262
|
+
|
|
263
|
+
|
|
264
|
+
def _cal_dilation(dilation, nd, cls_name):
|
|
265
|
+
"""check the dilation"""
|
|
266
|
+
if isinstance(dilation, int):
|
|
267
|
+
return dilation
|
|
268
|
+
if isinstance(dilation, tuple):
|
|
269
|
+
if len(dilation) == 1:
|
|
270
|
+
return dilation[0]
|
|
271
|
+
if len(dilation) == nd:
|
|
272
|
+
return (3 - nd) * (1,) + dilation
|
|
273
|
+
if nd == 1:
|
|
274
|
+
raise ValueError(f"For {cls_name}, the length of 'dilation' must be 1, but got {len(dilation)}.")
|
|
275
|
+
raise ValueError(f"For {cls_name}, the length of 'dilation' must be 1 or {nd}, but got {len(dilation)}.")
|
|
276
|
+
raise ValueError(f"For {cls_name}, the 'dilation' must be int or tuple, but got {type(dilation)}.")
|
|
277
|
+
|
|
278
|
+
|
|
279
|
+
class MaxPool3d(_PoolNd):
|
|
280
|
+
r"""
|
|
281
|
+
3D max pooling operation.
|
|
282
|
+
|
|
283
|
+
Applies a 3D max pooling over an input Tensor which can be regarded as a composition of 3D planes.
|
|
284
|
+
|
|
285
|
+
Typically the input is of shape :math:`(N_{in}, C_{in}, D_{in}, H_{in}, W_{in})`, MaxPool outputs
|
|
286
|
+
regional maximum in the :math:`(D_{in}, H_{in}, W_{in})`-dimension. Given kernel size is
|
|
287
|
+
:math:`ks = (d_{ker}, h_{ker}, w_{ker})` and stride is :math:`s = (s_0, s_1, s_2)`, the operation is as follows.
|
|
288
|
+
|
|
289
|
+
.. math::
|
|
290
|
+
\text{output}(N_i, C_j, d, h, w) =
|
|
291
|
+
\max_{l=0, \ldots, d_{ker}-1} \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
|
|
292
|
+
\text{input}(N_i, C_j, s_0 \times d + l, s_1 \times h + m, s_2 \times w + n)
|
|
293
|
+
|
|
294
|
+
Args:
|
|
295
|
+
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
|
|
296
|
+
is an int number or a single element tuple that represents depth, height and width of the kernel, or a tuple
|
|
297
|
+
of three int numbers that represent depth, height and width respectively.
|
|
298
|
+
The value must be a positive integer. Default: 1.
|
|
299
|
+
stride (Union[int, tuple[int]]): The moving stride of pooling operation, an int number or a single element tuple
|
|
300
|
+
that represents the moving stride of pooling kernel in the directions of depth, height and the width,
|
|
301
|
+
or a tuple of three int numbers that represent depth, height and width of movement respectively.
|
|
302
|
+
The value must be a positive integer. If the value is None, the default value `kernel_size` is used.
|
|
303
|
+
Default: 1.
|
|
304
|
+
pad_mode (str): The optional value for pad mode, is "same", "valid" or "pad", not case sensitive.
|
|
305
|
+
Default: "valid".
|
|
306
|
+
|
|
307
|
+
- same: The output shape is the same as the input shape evenly divided by `stride`.
|
|
308
|
+
|
|
309
|
+
- valid: The possible largest height and width of output
|
|
310
|
+
will be returned without padding. Extra pixels will be discarded.
|
|
311
|
+
|
|
312
|
+
- pad: Pads the input. Pads the front, back, top, bottom, left, and right sides of the input with `padding` number of
|
|
313
|
+
zeros. If this mode is set, `padding` must be greater than or equal to 0.
|
|
314
|
+
|
|
315
|
+
padding (Union(int, tuple[int], list[int])): Pooling padding value. Default: 0.
|
|
316
|
+
`padding` can only be an integer or a tuple/list containing one or three integers.
|
|
317
|
+
If `padding` is an integer or a tuple/list containing one integer, it will be padded in six directions of
|
|
318
|
+
front, back, top, bottom, left and right of the input. If `padding` is a tuple/list containing three
|
|
319
|
+
integers, it will be padded in front and back of the input `padding[0]` times, up and down `padding[1]`
|
|
320
|
+
times, and left and right of the input `padding[2]` times.
|
|
321
|
+
dilation (Union(int, tuple[int])): The spacing between the elements of the kernel in convolution,
|
|
322
|
+
used to increase the receptive field of the pooling operation. If it is a tuple, it must contain one or
|
|
323
|
+
three integers. Default: 1.
|
|
324
|
+
return_indices (bool): If True, output is a Tuple of 2 Tensors, representing the maxpool result and where
|
|
325
|
+
the max values are generated. Otherwise, only the maxpool result is returned. Default: False.
|
|
326
|
+
ceil_mode (bool): Whether to use ceil or floor to calculate output shape. Default: False.
|
|
327
|
+
|
|
328
|
+
Inputs:
|
|
329
|
+
- **x** (Tensor) - Tensor of shape :math:`(N_{in}, C_{in}, D_{in}, H_{in}, W_{in})` or
|
|
330
|
+
:math:`(C_{in}, D_{in}, H_{in}, W_{in})`.
|
|
331
|
+
|
|
332
|
+
Outputs:
|
|
333
|
+
If `return_indices` is False, output is a Tensor, with shape
|
|
334
|
+
:math:`(N_{out}, C_{out}, D_{out}, H_{out}, W_{out})` or :math:`(C_{out}, D_{out}, H_{out}, W_{out})`.
|
|
335
|
+
It has the same data type as `x`.
|
|
336
|
+
|
|
337
|
+
If `return_indices` is True, output is a Tuple of 2 Tensors, representing the maxpool result and where
|
|
338
|
+
the max values are generated.
|
|
339
|
+
|
|
340
|
+
- **output** (Tensor) - Maxpooling result, with shape :math:`(N_{out}, C_{out}, D_{out}, H_{out}, W_{out})` or
|
|
341
|
+
:math:`(C_{out}, D_{out}, H_{out}, W_{out})`. It has the same data type as `x`.
|
|
342
|
+
- **argmax** (Tensor) - Index corresponding to the maximum value. Data type is int64.
|
|
343
|
+
|
|
344
|
+
If `pad_mode` is set to 'pad', the output shape calculation formula is as follows:
|
|
345
|
+
|
|
346
|
+
.. math::
|
|
347
|
+
D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0] \times
|
|
348
|
+
(\text{kernel_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
|
|
349
|
+
|
|
350
|
+
.. math::
|
|
351
|
+
H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1] \times
|
|
352
|
+
(\text{kernel_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
|
|
353
|
+
|
|
354
|
+
.. math::
|
|
355
|
+
W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2] \times
|
|
356
|
+
(\text{kernel_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor
|
|
357
|
+
|
|
358
|
+
Raises:
|
|
359
|
+
ValueError: If length of shape of `x` is not equal to 4 or 5.
|
|
360
|
+
TypeError: If `kernel_size` , `stride` , `padding` or `dilation` is neither an int nor a tuple.
|
|
361
|
+
ValueError: If `kernel_size` or `stride` is less than 1.
|
|
362
|
+
ValueError: If the `padding` parameter is neither an integer nor a tuple of length 3.
|
|
363
|
+
ValueError: If `pad_mode` is not set to 'pad' while `return_indices` is set to True or `dilation` is set to a value
|
|
364
|
+
other than 1.
|
|
365
|
+
ValueError: If `padding` is non-zero when `pad_mode` is not 'pad'.
|
|
366
|
+
|
|
367
|
+
Supported Platforms:
|
|
368
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
369
|
+
|
|
370
|
+
Examples:
|
|
371
|
+
>>> import mindspore as ms
|
|
372
|
+
>>> import mindspore.nn as nn
|
|
373
|
+
>>> import numpy as np
|
|
374
|
+
>>> np_x = np.random.randint(0, 10, [5, 3, 4, 6, 7])
|
|
375
|
+
>>> x = Tensor(np_x, ms.float32)
|
|
376
|
+
>>> pool1 = nn.MaxPool3d(kernel_size=2, stride=1, pad_mode='pad', padding=1, dilation=3, return_indices=True)
|
|
377
|
+
>>> output = pool1(x)
|
|
378
|
+
>>> print(output[0].shape)
|
|
379
|
+
(5, 3, 3, 5, 6)
|
|
380
|
+
>>> print(output[1].shape)
|
|
381
|
+
(5, 3, 3, 5, 6)
|
|
382
|
+
>>> pool2 = nn.MaxPool3d(kernel_size=2, stride=1, pad_mode='pad', padding=1, dilation=3, return_indices=False)
|
|
383
|
+
>>> output2 = pool2(x)
|
|
384
|
+
>>> print(output2.shape)
|
|
385
|
+
(5, 3, 3, 5, 6)
|
|
386
|
+
"""
|
|
387
|
+
|
|
388
|
+
def __init__(self, kernel_size=1, stride=1, pad_mode="valid", padding=0, dilation=1, return_indices=False,
|
|
389
|
+
ceil_mode=False):
|
|
390
|
+
"""Initialize MaxPool3d."""
|
|
391
|
+
super(MaxPool3d, self).__init__(kernel_size, stride, pad_mode)
|
|
392
|
+
self.return_indices = return_indices
|
|
393
|
+
padding = _check_maxpool_padding(padding, 3, self.cls_name)
|
|
394
|
+
_check_3d_int_or_tuple("padding", padding, self.cls_name, greater_zero=False, ret_five=False)
|
|
395
|
+
if dilation != 1 or return_indices:
|
|
396
|
+
self.only_pad = True
|
|
397
|
+
if pad_mode.upper() != "PAD":
|
|
398
|
+
raise ValueError(f"For {self.cls_name}, the pad_mode must be 'pad' when dilation is not 1 "
|
|
399
|
+
f"or return_indices is True, but got pad_mode:{pad_mode}.")
|
|
400
|
+
self.max_pool = P.MaxPool3DWithArgmax(ksize=kernel_size, strides=stride, pads=padding,
|
|
401
|
+
dilation=dilation, ceil_mode=ceil_mode)
|
|
402
|
+
else:
|
|
403
|
+
self.only_pad = False
|
|
404
|
+
ceil_mode = True if ceil_mode else None
|
|
405
|
+
self.max_pool = P.MaxPool3D(kernel_size=kernel_size, strides=stride, pad_mode=pad_mode, pad_list=padding,
|
|
406
|
+
ceil_mode=ceil_mode)
|
|
407
|
+
|
|
408
|
+
def construct(self, x):
|
|
409
|
+
expand_batch = False
|
|
410
|
+
if x.ndim == 4:
|
|
411
|
+
x = x.unsqueeze(0)
|
|
412
|
+
expand_batch = True
|
|
413
|
+
out = self.max_pool(x)
|
|
414
|
+
if expand_batch:
|
|
415
|
+
if isinstance(out, tuple):
|
|
416
|
+
out = (out[0].squeeze(0), out[1].squeeze(0))
|
|
417
|
+
else:
|
|
418
|
+
out = out.squeeze(0)
|
|
419
|
+
if self.only_pad and not self.return_indices:
|
|
420
|
+
return out[0]
|
|
421
|
+
return out
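The doctest shapes follow from the 'pad' formulas in the docstring; for the depth axis of the example (D_in=4, padding=1, dilation=3, kernel_size=2, stride=1), a quick check:

>>> (4 + 2 * 1 - 3 * (2 - 1) - 1) // 1 + 1
3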
|
|
77
422
|
|
|
78
423
|
|
|
79
424
|
class MaxPool2d(_PoolNd):
|
|
@@ -82,48 +427,82 @@ class MaxPool2d(_PoolNd):
|
|
|
82
427
|
|
|
83
428
|
Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool2d outputs
|
|
84
429
|
regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
|
|
85
|
-
:math:`
|
|
430
|
+
:math:`(h_{ker}, w_{ker})` and stride :math:`(s_0, s_1)`, the operation is as follows.
|
|
86
431
|
|
|
87
432
|
.. math::
|
|
88
433
|
\text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
|
|
89
434
|
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
|
|
90
435
|
|
|
91
|
-
Note:
|
|
92
|
-
pad_mode for training only supports "same" and "valid".
|
|
93
|
-
|
|
94
436
|
Args:
|
|
95
437
|
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the max value,
|
|
96
|
-
is an int number that represents height and width are both kernel_size,
|
|
438
|
+
is an int number or a single element tuple that represents height and width are both kernel_size,
|
|
97
439
|
or a tuple of two int numbers that represent height and width respectively.
|
|
98
440
|
Default: 1.
|
|
99
|
-
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that
|
|
100
|
-
the height and width of movement are both
|
|
441
|
+
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number or a single element tuple that
|
|
442
|
+
represents the height and width of movement are both stride, or a tuple of two int numbers that
|
|
101
443
|
represent height and width of movement respectively. Default: 1.
|
|
102
|
-
pad_mode (str): The optional value for pad mode, is "same" or "
|
|
444
|
+
pad_mode (str): The optional value for pad mode, is "same", "valid" or "pad", case-insensitive.
|
|
103
445
|
Default: "valid".
|
|
104
446
|
|
|
105
|
-
- same:
|
|
106
|
-
the input. The total number of padding will be calculated in horizontal and vertical
|
|
107
|
-
directions and evenly distributed to top and bottom, left and right if possible.
|
|
108
|
-
Otherwise, the last extra padding will be done from the bottom and the right side.
|
|
447
|
+
- same: The output shape is the same as the input shape evenly divided by `stride`.
|
|
109
448
|
|
|
110
|
-
- valid:
|
|
449
|
+
- valid: The possible largest height and width of output
|
|
111
450
|
will be returned without padding. Extra pixels will be discarded.
|
|
451
|
+
|
|
452
|
+
- pad: Pads the input. Pads the top, bottom, left, and right sides of the input with `padding` number of
|
|
453
|
+
zeros. If this mode is set, `padding` must be greater than or equal to 0.
|
|
454
|
+
|
|
455
|
+
padding (Union(int, tuple[int], list[int])): Specifies the padding value of the pooling operation. Default: 0.
|
|
456
|
+
`padding` can only be an integer or a tuple/list containing one or two integers. If `padding` is an integer
|
|
457
|
+
or a tuple/list containing one integer, it will be padded `padding` times in the four directions of the
|
|
458
|
+
input. If `padding` is a tuple/list containing two integers, it will be padded `padding[0]` times in the
|
|
459
|
+
up-down direction of the input and `padding[1]` times in the left-right direction of the input.
|
|
460
|
+
dilation (Union(int, tuple[int])): The spacing between the elements of the kernel in convolution,
|
|
461
|
+
used to increase the receptive field of the pooling operation. If it is a tuple, it must contain one or two
|
|
462
|
+
integers. Default: 1.
|
|
463
|
+
return_indices (bool): If True, the function will return both the result of max pooling and the indices of the
|
|
464
|
+
max elements. Default: False.
|
|
465
|
+
ceil_mode (bool): If True, use ceil to compute the output shape instead of floor. Default: False.
|
|
112
466
|
data_format (str): The optional value for data format, is 'NHWC' or 'NCHW'.
|
|
113
467
|
Default: 'NCHW'.
|
|
114
468
|
|
|
115
469
|
Inputs:
|
|
116
|
-
- **x** (Tensor) - Tensor of shape :math:`(N, C_{in},
|
|
470
|
+
- **x** (Tensor) - Tensor of shape :math:`(N,C_{in},H_{in},W_{in})` or :math:`(C_{in},H_{in},W_{in})`.
|
|
117
471
|
|
|
118
472
|
Outputs:
|
|
119
|
-
Tensor
|
|
473
|
+
If `return_indices` is False, output is a Tensor, with shape :math:`(N, C, H_{out}, W_{out})` or
|
|
474
|
+
:math:`(C_{out}, H_{out}, W_{out})`. It has the same data type as `x`.
|
|
475
|
+
|
|
476
|
+
If `return_indices` is True, output is a Tuple of 2 Tensors, representing the maxpool result and where
|
|
477
|
+
the max values are generated.
|
|
478
|
+
|
|
479
|
+
- **output** (Tensor) - Maxpooling result, with shape :math:`(N_{out}, C_{out}, H_{out}, W_{out})` or
|
|
480
|
+
:math:`(C_{out}, H_{out}, W_{out})`. It has the same data type as `x`.
|
|
481
|
+
- **argmax** (Tensor) - Index corresponding to the maximum value. Data type is int64.
|
|
482
|
+
|
|
483
|
+
If `pad_mode` is set to 'pad', the output shape calculation formula is as follows:
|
|
484
|
+
|
|
485
|
+
.. math::
|
|
486
|
+
H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{padding[0]} - \text{dilation[0]}
|
|
487
|
+
\times (\text{kernel_size[0]} - 1) - 1}{\text{stride[0]}} + 1\right\rfloor
|
|
488
|
+
|
|
489
|
+
.. math::
|
|
490
|
+
W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{padding[1]} - \text{dilation[1]}
|
|
491
|
+
\times (\text{kernel_size[1]} - 1) - 1}{\text{stride[1]}} + 1\right\rfloor
|
|
120
492
|
|
|
121
493
|
Raises:
|
|
122
|
-
TypeError: If `kernel_size` or `
|
|
494
|
+
TypeError: If `kernel_size` or `stride` is neither int nor tuple.
|
|
123
495
|
ValueError: If `pad_mode` is not 'valid', 'same' or 'pad', case-insensitive.
|
|
124
496
|
ValueError: If `data_format` is neither 'NCHW' nor 'NHWC'.
|
|
125
|
-
ValueError: If `kernel_size` or `
|
|
126
|
-
ValueError: If length of shape of `x` is not equal to 4.
|
|
497
|
+
ValueError: If `kernel_size` or `stride` is less than 1.
|
|
498
|
+
ValueError: If length of shape of `x` is not equal to 3 or 4.
|
|
499
|
+
ValueError: If `pad_mode` is not 'pad' and any of the `padding`, `dilation`, `return_indices`, `ceil_mode` parameters is not
|
|
500
|
+
set to its default value.
|
|
501
|
+
ValueError: If the length of the tuple/list `padding` parameter is not 2.
|
|
502
|
+
ValueError: If the length of the tuple `dilation` parameter is not 2.
|
|
503
|
+
ValueError: If the `dilation` parameter is neither an integer nor a tuple.
|
|
504
|
+
ValueError: If `pad_mode` is 'pad' and `data_format` is 'NHWC'.
|
|
505
|
+
ValueError: If `padding` is non-zero when `pad_mode` is not 'pad'.
|
|
127
506
|
|
|
128
507
|
Supported Platforms:
|
|
129
508
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -134,18 +513,71 @@ class MaxPool2d(_PoolNd):
|
|
|
134
513
|
>>> output = pool(x)
|
|
135
514
|
>>> print(output.shape)
|
|
136
515
|
(1, 2, 2, 2)
|
|
516
|
+
>>> np_x = np.random.randint(0, 10, [5, 3, 4, 5])
|
|
517
|
+
>>> x = Tensor(np_x, mindspore.float32)
|
|
518
|
+
>>> pool2 = nn.MaxPool2d(kernel_size=2, stride=1, pad_mode='pad', padding=1, dilation=1, return_indices=True)
|
|
519
|
+
>>> output = pool2(x)
|
|
520
|
+
>>> print(output[0].shape)
|
|
521
|
+
(5, 3, 5, 6)
|
|
522
|
+
>>> print(output[1].shape)
|
|
523
|
+
(5, 3, 5, 6)
|
|
137
524
|
"""
|
|
138
525
|
|
|
139
|
-
def __init__(self, kernel_size=1, stride=1, pad_mode="valid",
|
|
526
|
+
def __init__(self, kernel_size=1, stride=1, pad_mode="valid", padding=0, dilation=1, return_indices=False,
|
|
527
|
+
ceil_mode=False, data_format="NCHW"):
|
|
140
528
|
"""Initialize MaxPool2d."""
|
|
141
529
|
super(MaxPool2d, self).__init__(kernel_size, stride, pad_mode, data_format)
|
|
142
|
-
self.
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
530
|
+
self.return_indices = return_indices
|
|
531
|
+
if pad_mode.upper() == 'PAD':
|
|
532
|
+
if self.format == "NHWC":
|
|
533
|
+
raise ValueError(f"For '{self.cls_name}, the 'NHWC' format are not support when 'pad_mode' is 'pad'.")
|
|
534
|
+
self.use_pad = True
|
|
535
|
+
if isinstance(self.kernel_size, tuple):
|
|
536
|
+
_check_tuple_length(self.kernel_size, 'kernel_size', 2, self.cls_name)
|
|
537
|
+
kernel_size = (1,) + self.kernel_size
|
|
538
|
+
elif isinstance(self.kernel_size, int):
|
|
539
|
+
kernel_size = (1, self.kernel_size, self.kernel_size)
|
|
540
|
+
if isinstance(self.stride, tuple):
|
|
541
|
+
_check_tuple_length(self.stride, 'stride', 2, self.cls_name)
|
|
542
|
+
stride = (1,) + self.stride
|
|
543
|
+
elif isinstance(self.stride, int):
|
|
544
|
+
stride = (1, self.stride, self.stride)
|
|
545
|
+
self.padding = _check_maxpool_padding(padding, 2, self.cls_name)
|
|
546
|
+
dilation = _cal_dilation(dilation, 2, self.cls_name)
|
|
547
|
+
self.max_pool = P.MaxPool3DWithArgmax(ksize=kernel_size, strides=stride, pads=self.padding,
|
|
548
|
+
dilation=dilation, ceil_mode=ceil_mode)
|
|
549
|
+
else:
|
|
550
|
+
self.use_pad = False
|
|
551
|
+
if padding != 0 or dilation != 1 or return_indices or ceil_mode:
|
|
552
|
+
raise ValueError(f"For MaxPool1d, the parameter 'padding', 'dilation', 'return_indices', 'ceil_mode' "
|
|
553
|
+
f"can not be set to non-default value when pad_mode is not 'pad', "
|
|
554
|
+
f"but got pad_mode:{pad_mode}.")
|
|
555
|
+
self.max_pool = P.MaxPool(kernel_size=self.kernel_size,
|
|
556
|
+
strides=self.stride,
|
|
557
|
+
pad_mode=self.pad_mode,
|
|
558
|
+
data_format=self.format)
|
|
146
559
|
|
|
147
560
|
def construct(self, x):
|
|
148
|
-
|
|
561
|
+
expand_batch = False
|
|
562
|
+
if x.ndim == 3:
|
|
563
|
+
x = x.unsqueeze(0)
|
|
564
|
+
expand_batch = True
|
|
565
|
+
if self.use_pad:
|
|
566
|
+
x = x.unsqueeze(2)
|
|
567
|
+
out = self.max_pool(x)
|
|
568
|
+
if isinstance(out, tuple):
|
|
569
|
+
out = out[0].squeeze(2), out[1].squeeze(2)
|
|
570
|
+
else:
|
|
571
|
+
out = out.squeeze(2)
|
|
572
|
+
else:
|
|
573
|
+
out = self.max_pool(x)
|
|
574
|
+
if expand_batch:
|
|
575
|
+
if isinstance(out, tuple):
|
|
576
|
+
out = (out[0].squeeze(0), out[1].squeeze(0))
|
|
577
|
+
else:
|
|
578
|
+
out = out.squeeze(0)
|
|
579
|
+
if self.use_pad and not self.return_indices:
|
|
580
|
+
return out[0]
|
|
149
581
|
return out
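In the 'pad' branch above, the 2-D pool is routed through the 3-D argmax primitive by inserting a singleton depth axis; a minimal shape trace (illustrative values only):

>>> import numpy as np
>>> import mindspore as ms
>>> x = ms.Tensor(np.zeros((5, 3, 4, 5)), ms.float32)
>>> x.unsqueeze(2).shape    # (N, C, 1, H, W) as fed to MaxPool3DWithArgmax
(5, 3, 1, 4, 5)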
|
|
150
582
|
|
|
151
583
|
|
|
@@ -154,21 +586,18 @@ class MaxPool1d(_PoolNd):
|
|
|
154
586
|
Applies a 1D max pooling over an input Tensor which can be regarded as a composition of 1D planes.
|
|
155
587
|
|
|
156
588
|
Typically the input is of shape :math:`(N_{in}, C_{in}, L_{in})`, MaxPool1d outputs
|
|
157
|
-
regional maximum in the :math:`(L_{in})`-dimension. Given kernel size
|
|
158
|
-
:math:`ks = (l_{ker})` and stride :math:`s = (s_0)`, the operation is as follows:
|
|
589
|
+
regional maximum in the :math:`(L_{in})`-dimension. Given `kernel_size`
|
|
590
|
+
:math:`ks = (l_{ker})` and `stride` :math:`s = (s_0)`, the operation is as follows:
|
|
159
591
|
|
|
160
592
|
.. math::
|
|
161
593
|
\text{output}(N_i, C_j, l) = \max_{n=0, \ldots, l_{ker}-1}
|
|
162
594
|
\text{input}(N_i, C_j, s_0 \times l + n)
|
|
163
595
|
|
|
164
|
-
Note:
|
|
165
|
-
pad_mode for training only supports "same" and "valid".
|
|
166
|
-
|
|
167
596
|
Args:
|
|
168
597
|
kernel_size (int): The size of kernel used to take the max value, Default: 1.
|
|
169
598
|
stride (int): The distance of kernel moving, an int number that represents
|
|
170
599
|
the width of movement is stride, Default: 1.
|
|
171
|
-
pad_mode (str): The optional value for pad mode, is "same" or "
|
|
600
|
+
pad_mode (str): The optional value for pad mode, is "same", "valid" or "pad", case-insensitive.
|
|
172
601
|
Default: "valid".
|
|
173
602
|
|
|
174
603
|
- same: Adopts the way of completion. The total number of padding will be calculated in horizontal
|
|
@@ -178,58 +607,288 @@ class MaxPool1d(_PoolNd):
|
|
|
178
607
|
- valid: Adopts the way of discarding. The possible largest height and width of output
|
|
179
608
|
will be returned without padding. Extra pixels will be discarded.
|
|
180
609
|
|
|
610
|
+
- pad: Performs padding on the input. Adds padding size of zeros to both ends of the input.
|
|
611
|
+
If this mode is set, padding must be greater than or equal to 0.
|
|
612
|
+
|
|
613
|
+
padding (Union(int, tuple[int], list[int])): Padding value for the pooling. Default value is 0.
|
|
614
|
+
padding can only be an integer or a tuple/list containing a single integer, in which case the input is padded
|
|
615
|
+
by `padding` or `padding[0]` zeros on both sides.
|
|
616
|
+
dilation (Union(int, tuple[int])): The spacing between the elements of the kernel in convolution,
|
|
617
|
+
used to increase the receptive field of the pooling operation. If it is a tuple, its length can only be 1.
|
|
618
|
+
Default: 1.
|
|
619
|
+
return_indices (bool): If True, the function will return both the result of max pooling and the indices of the
|
|
620
|
+
max elements. Default: False.
|
|
621
|
+
ceil_mode (bool): If True, use ceil to compute the output shape instead of floor. Default: False.
|
|
622
|
+
|
|
181
623
|
Inputs:
|
|
182
|
-
- **x** (Tensor) - Tensor of shape :math:`(N,
|
|
624
|
+
- **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.
|
|
183
625
|
|
|
184
626
|
Outputs:
|
|
185
|
-
Tensor
|
|
627
|
+
If `return_indices` is False, output is a Tensor, with shape :math:`(N, C_{out}, L_{out})` or
|
|
628
|
+
:math:`(C_{out}, L_{out})`. It has the same data type as `x`.
|
|
629
|
+
|
|
630
|
+
If `return_indices` is True, output is a Tuple of 2 Tensors, representing the maxpool result and where
|
|
631
|
+
the max values are generated.
|
|
632
|
+
|
|
633
|
+
- **output** (Tensor) - Maxpooling result, with shape :math:`(N, C_{out}, L_{out})` or
|
|
634
|
+
:math:`(C_{out}, L_{out})`. It has the same data type as `x`.
|
|
635
|
+
- **argmax** (Tensor) - Index corresponding to the maximum value. Data type is int64.
|
|
636
|
+
|
|
637
|
+
If `pad_mode` is set to 'pad', the output shape calculation formula is as follows:
|
|
638
|
+
|
|
639
|
+
.. math::
|
|
640
|
+
L_{out} = \left\lfloor \frac{L_{in} + 2 \times \text{padding} - \text{dilation}
|
|
641
|
+
\times (\text{kernel_size} - 1) - 1}{\text{stride}} + 1\right\rfloor
|
|
186
642
|
|
|
187
643
|
Raises:
|
|
188
644
|
TypeError: If `kernel_size` or `stride` is not an int.
|
|
189
|
-
ValueError: If `pad_mode` is
|
|
645
|
+
ValueError: If `pad_mode` is not 'valid', 'same' or 'pad', case-insensitive.
|
|
190
646
|
ValueError: If `data_format` is neither 'NCHW' nor 'NHWC'.
|
|
191
647
|
ValueError: If `kernel_size` or `stride` is less than 1.
|
|
192
|
-
ValueError: If length of shape of `x` is not equal to
|
|
648
|
+
ValueError: If length of shape of `x` is not equal to 2 or 3.
|
|
649
|
+
ValueError: If `pad_mode` is not 'pad' and any of the `padding`, `dilation`, `return_indices`, `ceil_mode` parameters is not
|
|
650
|
+
set to its default value.
|
|
651
|
+
ValueError: If the length of the tuple/list `padding` parameter is not 1.
|
|
652
|
+
ValueError: If the length of the tuple `dilation` parameter is not 1.
|
|
653
|
+
ValueError: If the `dilation` parameter is neither an integer nor a tuple.
|
|
654
|
+
ValueError: If `padding` is non-zero when `pad_mode` is not 'pad'.
|
|
193
655
|
|
|
194
656
|
Supported Platforms:
|
|
195
657
|
``Ascend`` ``GPU`` ``CPU``
|
|
196
658
|
|
|
197
659
|
Examples:
|
|
198
|
-
>>>
|
|
660
|
+
>>> mpool1 = nn.MaxPool1d(kernel_size=3, stride=1)
|
|
199
661
|
>>> x = Tensor(np.random.randint(0, 10, [1, 2, 4]), mindspore.float32)
|
|
200
|
-
>>> output =
|
|
662
|
+
>>> output = mpool1(x)
|
|
201
663
|
>>> result = output.shape
|
|
202
664
|
>>> print(result)
|
|
203
665
|
(1, 2, 2)
|
|
666
|
+
>>> np_x = np.random.randint(0, 10, [5, 3, 4])
|
|
667
|
+
>>> x = Tensor(np_x, mindspore.float32)
|
|
668
|
+
>>> mpool2 = nn.MaxPool1d(kernel_size=2, stride=1, pad_mode='pad', padding=1, dilation=1, return_indices=True)
|
|
669
|
+
>>> output = mpool2(x)
|
|
670
|
+
>>> print(output[0].shape)
|
|
671
|
+
(5, 3, 5)
|
|
672
|
+
>>> print(output[1].shape)
|
|
673
|
+
(5, 3, 5)
|
|
204
674
|
"""
|
|
205
675
|
|
|
206
|
-
def __init__(self, kernel_size=1, stride=1, pad_mode="valid"
|
|
676
|
+
def __init__(self, kernel_size=1, stride=1, pad_mode="valid", padding=0, dilation=1, return_indices=False,
|
|
677
|
+
ceil_mode=False):
|
|
207
678
|
"""Initialize MaxPool1d."""
|
|
208
679
|
super(MaxPool1d, self).__init__(kernel_size, stride, pad_mode)
|
|
209
|
-
validator.
|
|
210
|
-
validator.
|
|
211
|
-
validator.check_value_type('pad_mode', pad_mode, [str], self.cls_name)
|
|
212
|
-
self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.cls_name)
|
|
213
|
-
validator.check_int(kernel_size, 1, Rel.GE, "kernel_size", self.cls_name)
|
|
214
|
-
validator.check_int(stride, 1, Rel.GE, "stride", self.cls_name)
|
|
680
|
+
validator.check_int(kernel_size, 1, validator.GE, "kernel_size", self.cls_name)
|
|
681
|
+
validator.check_int(stride, 1, validator.GE, "stride", self.cls_name)
|
|
215
682
|
self.kernel_size = (1, kernel_size)
|
|
216
683
|
self.stride = (1, stride)
|
|
217
|
-
self.
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
684
|
+
self.return_indices = return_indices
|
|
685
|
+
if pad_mode.upper() == "PAD":
|
|
686
|
+
self.use_pad = True
|
|
687
|
+
self.kernel_size = (1, 1, kernel_size)
|
|
688
|
+
self.stride = (1, 1, stride)
|
|
689
|
+
self.padding = _check_maxpool_padding(padding, 1, self.cls_name)
|
|
690
|
+
dilation = _cal_dilation(dilation, 1, self.cls_name)
|
|
691
|
+
self.max_pool = P.MaxPool3DWithArgmax(ksize=self.kernel_size, strides=self.stride, pads=self.padding,
|
|
692
|
+
dilation=dilation, ceil_mode=ceil_mode)
|
|
693
|
+
|
|
694
|
+
else:
|
|
695
|
+
self.use_pad = False
|
|
696
|
+
if padding != 0 or dilation != 1 or return_indices or ceil_mode:
|
|
697
|
+
raise ValueError(f"For MaxPool1d, the parameter 'padding', 'dilation', 'return_indices', 'ceil_mode' "
|
|
698
|
+
f"can not be set to non-default value when pad_mode is not 'pad', "
|
|
699
|
+
f"but got pad_mode:{pad_mode}.")
|
|
700
|
+
self.max_pool = P.MaxPool(kernel_size=self.kernel_size,
|
|
701
|
+
strides=self.stride,
|
|
702
|
+
pad_mode=self.pad_mode)
|
|
703
|
+
self.shape = F.shape
|
|
704
|
+
self.reduce_mean = P.ReduceMean(keep_dims=True)
|
|
705
|
+
self.expand = P.ExpandDims()
|
|
706
|
+
self.squeeze = P.Squeeze(2)
|
|
224
707
|
|
|
225
708
|
def construct(self, x):
|
|
226
|
-
|
|
227
|
-
x
|
|
228
|
-
|
|
229
|
-
|
|
709
|
+
expand_batch = False
|
|
710
|
+
if x.ndim == 2:
|
|
711
|
+
x = x.unsqueeze(0)
|
|
712
|
+
expand_batch = True
|
|
713
|
+
if self.use_pad:
|
|
714
|
+
x = x.unsqueeze(2).unsqueeze(3)
|
|
715
|
+
output = self.max_pool(x)
|
|
716
|
+
if isinstance(output, tuple):
|
|
717
|
+
output = output[0].squeeze(3).squeeze(2), output[1].squeeze(3).squeeze(2)
|
|
718
|
+
else:
|
|
719
|
+
output = output.squeeze(3).squeeze(2)
|
|
720
|
+
else:
|
|
721
|
+
_shape_check(self.shape(x), self.cls_name)
|
|
722
|
+
x = self.expand(x, 2)
|
|
723
|
+
output = self.max_pool(x)
|
|
724
|
+
output = self.squeeze(output)
|
|
725
|
+
if expand_batch:
|
|
726
|
+
if isinstance(output, tuple):
|
|
727
|
+
output = (output[0].squeeze(0), output[1].squeeze(0))
|
|
728
|
+
else:
|
|
729
|
+
output = output.squeeze(0)
|
|
730
|
+
if self.use_pad and not self.return_indices:
|
|
731
|
+
return output[0]
|
|
230
732
|
return output
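The (5, 3, 5) doctest shape follows from the 'pad' formula above, with L_in=4, padding=1, dilation=1, kernel_size=2, stride=1:

>>> (4 + 2 * 1 - 1 * (2 - 1) - 1) // 1 + 1
5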
|
|
231
733
|
|
|
232
734
|
|
|
735
|
+
def _cal_padding(padding, cls_name, nd):
|
|
736
|
+
"""Calculate padding before call primitive"""
|
|
737
|
+
validator.check_value_type('padding', padding, (int, tuple, list), cls_name)
|
|
738
|
+
if isinstance(padding, int):
|
|
739
|
+
padding = (0, 0) * (3 - nd) + (padding,) * nd * 2
|
|
740
|
+
elif isinstance(padding, (tuple, list)):
|
|
741
|
+
validator.check_non_negative_int_sequence(padding, "padding", cls_name)
|
|
742
|
+
if len(padding) == nd:
|
|
743
|
+
padding_start = (0, 0) * (3 - nd)
|
|
744
|
+
padding_end = tuple(padding[i // 2] for i in range(nd * 2))
|
|
745
|
+
padding = padding_start + padding_end
|
|
746
|
+
elif len(padding) == 1:
|
|
747
|
+
padding = (0, 0) * (3 - nd) + tuple(padding * nd * 2)
|
|
748
|
+
else:
|
|
749
|
+
if nd == 1:
|
|
750
|
+
raise ValueError(f"For {cls_name}, the padding must be a int or tuple/list contains one int, "
|
|
751
|
+
f"but got tuple/list with length:{len(padding)}.")
|
|
752
|
+
raise ValueError(f"For {cls_name}, the padding must be a int or tuple/list contains 1 or {nd} int, "
|
|
753
|
+
f"but got tuple/list with length:{len(padding)}.")
|
|
754
|
+
return padding
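Unlike the maxpool variant, this helper expands the padding into per-side pairs for AvgPool3D's `pad` argument. Illustrative traces (assuming the helper is in scope):

>>> _cal_padding(2, "AvgPool2d", 2)          # int: both sides of H and W
(0, 0, 2, 2, 2, 2)
>>> _cal_padding((1, 3), "AvgPool2d", 2)     # per-axis values duplicated pairwise
(0, 0, 1, 1, 3, 3)
>>> _cal_padding([2], "AvgPool1d", 1)        # 1-element sequence, nd=1
(0, 0, 0, 0, 2, 2)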
|
|
755
|
+
|
|
756
|
+
|
|
757
|
+
def _check_tuple_length(arg_value, arg_name, length, cls_name):
|
|
758
|
+
"""check the tuple length"""
|
|
759
|
+
if len(arg_value) != length:
|
|
760
|
+
raise ValueError(f"For {cls_name}, the length of {prim_name} must be equal to {length}, "
|
|
761
|
+
f"but got {len(arg_name)}.")
|
|
762
|
+
return arg_value
|
|
763
|
+
|
|
764
|
+
|
|
765
|
+
class AvgPool3d(_PoolNd):
|
|
766
|
+
r"""
|
|
767
|
+
Applies a 3D average pooling over an input Tensor which can be regarded as a composition of 3D input planes.
|
|
768
|
+
Typically, the input is of shape :math:`(N_{in}, C_{in}, D_{in}, H_{in}, W_{in})`, and AvgPool3D outputs
|
|
769
|
+
regional average in the :math:`(D_{in}, H_{in}, W_{in})`-dimension. Given kernel size
|
|
770
|
+
is :math:`ks = (d_{ker}, h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1, s_2)`, the operation is as follows.
|
|
771
|
+
|
|
772
|
+
.. warning::
|
|
773
|
+
`kernel_size` is in the range [1, 255]. `stride` is in the range [1, 63].
|
|
774
|
+
|
|
775
|
+
.. math::
|
|
776
|
+
\text{output}(N_i, C_j, d, h, w) =
|
|
777
|
+
\frac{1}{d_{ker} * h_{ker} * w_{ker}} \sum_{l=0}^{d_{ker}-1} \sum_{m=0}^{h_{ker}-1} \sum_{n=0}^{w_{ker}-1}
|
|
778
|
+
\text{input}(N_i, C_j, s_0 \times d + l, s_1 \times h + m, s_2 \times w + n)
|
|
779
|
+
|
|
780
|
+
Args:
|
|
781
|
+
kernel_size (Union[int, tuple[int]], optional): The size of kernel used to take the average value,
|
|
782
|
+
can be an int number or a single element tuple that represents depth, height and width, or a tuple of three
|
|
783
|
+
positive integers that represent depth, height and width respectively. Default: 1.
|
|
784
|
+
stride (Union[int, tuple[int]], optional): The distance of kernel moving, can be a positive int or a single
|
|
785
|
+
element tuple that represents the depth, height and width of movement, or a tuple of three positive integers
|
|
786
|
+
that represents depth, height and width of movement respectively. If the value is None, the default value
|
|
787
|
+
`kernel_size` is used. Default: 1.
|
|
788
|
+
pad_mode (str, optional): Specifies the padding method of pooling, optional values are "same", "valid" or "pad",
|
|
789
|
+
case insensitive. Default: "valid".
|
|
790
|
+
|
|
791
|
+
- same: The depth, height and width of the output is the same as the value after the input is divided
|
|
792
|
+
by stride.
|
|
793
|
+
|
|
794
|
+
- valid: Returns the output obtained by effective calculation without padding.
|
|
795
|
+
The excess pixels that do not meet the calculation will be discarded.
|
|
796
|
+
|
|
797
|
+
- pad: Pads the input. Fills the front, back, top, bottom, left and right of the input with 0s of size `padding`.
|
|
798
|
+
If this mode is set, `padding` must be greater than or equal to 0.
|
|
799
|
+
|
|
800
|
+
padding (Union(int, tuple[int], list[int]), optional): Pooling padding value, only 'pad' mode can be set to
|
|
801
|
+
non-zero. Default: 0. Only the following paddings are supported:
|
|
802
|
+
|
|
803
|
+
- If `padding` is an integer or a tuple/list containing one integer, it will be padded in six directions of
|
|
804
|
+
front, back, top, bottom, left and right of the input.
|
|
805
|
+
|
|
806
|
+
- If `padding` is a tuple/list containing three integers, it will be padded in front and back of the input
|
|
807
|
+
`padding[0]` times, up and down `padding[1]` times, and left and right of the input `padding[2]` times.
|
|
808
|
+
|
|
809
|
+
ceil_mode (bool, optional): If True, use ceil to compute the output shape instead of floor. Default: False.
|
|
810
|
+
count_include_pad (bool, optional): If True, averaging calculation will include the zero-padding. Default: True.
|
|
811
|
+
divisor_override (int, optional): If it is specified as a non-zero parameter, this parameter will be used as the
|
|
812
|
+
divisor in the average calculation. Otherwise, `kernel_size` will be used as the divisor. Default: None.
|
|
813
|
+
|
|
814
|
+
Inputs:
|
|
815
|
+
- **x** (Tensor) - Tensor of shape :math:`(N, C, D_{in}, H_{in}, W_{in})` or
|
|
816
|
+
:math:`(C, D_{in}, H_{in}, W_{in})`.
|
|
817
|
+
Currently supports float16 and float32 data types.
|
|
818
|
+
|
|
819
|
+
Outputs:
|
|
820
|
+
Tensor, with shape :math:`(N, C, D_{out}, H_{out}, W_{out})` or
|
|
821
|
+
:math:`(C, D_{out}, H_{out}, W_{out})`, with the same data type as `x`.
|
|
822
|
+
|
|
823
|
+
If `pad_mode` is set to 'pad', the output shape calculation formula is as follows:
|
|
824
|
+
|
|
825
|
+
.. math::
|
|
826
|
+
D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] -
|
|
827
|
+
\text{kernel_size}[0]}{\text{stride}[0]} + 1\right\rfloor
|
|
828
|
+
|
|
829
|
+
.. math::
|
|
830
|
+
H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] -
|
|
831
|
+
\text{kernel_size}[1]}{\text{stride}[1]} + 1\right\rfloor
|
|
832
|
+
|
|
833
|
+
.. math::
|
|
834
|
+
W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] -
|
|
835
|
+
\text{kernel_size}[2]}{\text{stride}[2]} + 1\right\rfloor
|
|
836
|
+
|
|
837
|
+
Raises:
|
|
838
|
+
TypeError: If `kernel_size` is neither an int nor a tuple.
|
|
839
|
+
TypeError: If `stride` is neither an int nor a tuple.
|
|
840
|
+
TypeError: If `padding` is neither an int nor a tuple/list.
|
|
841
|
+
TypeError: If `ceil_mode` or `count_include_pad` is not a bool.
|
|
842
|
+
TypeError: If `divisor_override` is not an int.
|
|
843
|
+
ValueError: If numbers in `kernel_size` or `stride` are not positive.
|
|
844
|
+
ValueError: If `kernel_size` or `stride` is a tuple whose length is not equal to 3.
|
|
845
|
+
ValueError: If `padding` is a tuple/list whose length is neither 1 nor 3.
|
|
846
|
+
ValueError: If element of `padding` is less than 0.
|
|
847
|
+
ValueError: If length of shape of `x` is neither 4 nor 5.
|
|
848
|
+
ValueError: If `divisor_override` is less than or equal to 0.
|
|
849
|
+
ValueError: If `padding` is non-zero when `pad_mode` is not 'pad'.
|
|
850
|
+
|
|
851
|
+
Supported Platforms:
|
|
852
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
853
|
+
|
|
854
|
+
Examples:
|
|
855
|
+
>>> import mindspore as ms
|
|
856
|
+
>>> import mindspore.nn as nn
|
|
857
|
+
>>> import mindspore.ops as ops
|
|
858
|
+
>>> pool = nn.AvgPool3d(kernel_size=3, stride=1)
|
|
859
|
+
>>> x = ops.randn(1, 2, 4, 4, 5).astype(ms.float32)
|
|
860
|
+
>>> output = pool(x)
|
|
861
|
+
>>> print(output.shape)
|
|
862
|
+
(1, 2, 2, 2, 3)
|
|
863
|
+
>>> x1 = ops.randn(6, 5, 7, 7, 5).astype(ms.float32)
|
|
864
|
+
>>> pool2 = nn.AvgPool3d(4, stride=2, pad_mode='pad', padding=(2, 2, 1), divisor_override=10)
|
|
865
|
+
>>> output2 = pool2(x1)
|
|
866
|
+
>>> print(output2.shape)
|
|
867
|
+
(6, 5, 4, 4, 2)
|
|
868
|
+
"""
|
|
869
|
+
|
|
870
|
+
def __init__(self, kernel_size=1, stride=1, pad_mode="valid", padding=0, ceil_mode=False, count_include_pad=True,
|
|
871
|
+
divisor_override=None):
|
|
872
|
+
"""Initialize AvgPool3d."""
|
|
873
|
+
super(AvgPool3d, self).__init__(kernel_size, stride, pad_mode)
|
|
874
|
+
padding = _cal_padding(padding, self.cls_name, 3)
|
|
875
|
+
if divisor_override is not None and divisor_override <= 0:
|
|
876
|
+
raise ValueError(f"For '{self.cls_name}', the 'divisor_override' must be > 0, but got {divisor_override}.")
|
|
877
|
+
divisor_override = 0 if divisor_override is None else divisor_override
|
|
878
|
+
self.avg_pool = P.AvgPool3D(self.kernel_size, self.stride, pad_mode, padding, ceil_mode, count_include_pad,
|
|
879
|
+
divisor_override)
|
|
880
|
+
|
|
881
|
+
def construct(self, x):
|
|
882
|
+
expand_batch = False
|
|
883
|
+
if len(x.shape) == 4:
|
|
884
|
+
x = x.unsqueeze(0)
|
|
885
|
+
expand_batch = True
|
|
886
|
+
out = self.avg_pool(x)
|
|
887
|
+
if expand_batch:
|
|
888
|
+
out = out.squeeze(0)
|
|
889
|
+
return out
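The second doctest shape checks out against the 'pad' formulas; for the width axis of the example (W_in=5, padding[2]=1, kernel_size=4, stride=2):

>>> (5 + 2 * 1 - 4) // 2 + 1
2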
|
|
890
|
+
|
|
891
|
+
|
|
233
892
|
class AvgPool2d(_PoolNd):
|
|
234
893
|
r"""
|
|
235
894
|
Applies a 2D average pooling over an input Tensor which can be regarded as a composition of 2D input planes.
|
|
@@ -242,69 +901,147 @@ class AvgPool2d(_PoolNd):
|
|
|
242
901
|
\text{output}(N_i, C_j, h, w) = \frac{1}{h_{ker} * w_{ker}} \sum_{m=0}^{h_{ker}-1} \sum_{n=0}^{w_{ker}-1}
|
|
243
902
|
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
|
|
244
903
|
|
|
245
|
-
Note:
|
|
246
|
-
pad_mode for training only supports "same" and "valid".
|
|
247
|
-
|
|
248
904
|
Args:
|
|
249
905
|
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the average value.
|
|
250
|
-
The data type of kernel_size must be int and the value represents the height
|
|
251
|
-
or a tuple of two int numbers that represent height and width respectively.
|
|
906
|
+
The data type of kernel_size must be int or a single element tuple and the value represents the height
|
|
907
|
+
and width, or a tuple of two int numbers that represent height and width respectively.
|
|
252
908
|
Default: 1.
|
|
253
|
-
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that
|
|
254
|
-
the height and width of movement are both strides, or a tuple of two int numbers that
|
|
909
|
+
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number or a single element tuple that
|
|
910
|
+
represents the height and width of movement are both strides, or a tuple of two int numbers that
|
|
255
911
|
represent height and width of movement respectively. Default: 1.
|
|
256
|
-
pad_mode (str)
|
|
257
|
-
Default: "valid".
|
|
258
|
-
|
|
259
|
-
- same:
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
-
|
|
265
|
-
|
|
912
|
+
pad_mode (str): Specifies the padding method of pooling, optional values are "same", "valid" or "pad",
|
|
913
|
+
case insensitive. Default: "valid".
|
|
914
|
+
|
|
915
|
+
- same: The height and width of the output is the same as the value after the input is divided by stride.
|
|
916
|
+
|
|
917
|
+
- valid: Returns the output obtained by effective calculation without padding.
|
|
918
|
+
The excess pixels that do not meet the calculation will be discarded.
|
|
919
|
+
|
|
920
|
+
- pad: Pads the input. Pads the top, bottom, left, and right sides of the input with `padding` number of
|
|
921
|
+
zeros. If this mode is set, `padding` must be greater than or equal to 0.
|
|
922
|
+
|
|
923
|
+
padding (Union(int, tuple[int], list[int])): Pooling padding value, only 'pad' mode can be set to non-zero.
|
|
924
|
+
Default: 0. `padding` can only be an integer or a tuple/list containing one or two integers.
|
|
925
|
+
If `padding` is an integer or a tuple/list containing one integer, it will be padded `padding` times in the
|
|
926
|
+
four directions of the input. If `padding` is a tuple/list containing two integers, it will be padded
|
|
927
|
+
`padding[0]` times in the up-down direction of the input and `padding[1]` times in the left-right direction
|
|
928
|
+
of the input.
|
|
929
|
+
ceil_mode (bool): If True, use ceil to compute the output shape instead of floor. Default: False.
|
|
930
|
+
count_include_pad (bool): If True, averaging calculation will include the zero-padding. Default: True.
|
|
931
|
+
divisor_override (int): If it is specified as a non-zero parameter, this parameter will be used as the divisor
|
|
932
|
+
in the average calculation. Otherwise, `kernel_size` will be used as the divisor. Default: None.
|
|
266
933
|
data_format (str): The optional value for data format, is 'NHWC' or 'NCHW'.
|
|
267
934
|
Default: 'NCHW'.
|
|
268
935
|
|
|
269
|
-
|
|
270
936
|
Inputs:
|
|
271
|
-
- **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
|
|
937
|
+
- **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`.
|
|
272
938
|
|
|
273
939
|
Outputs:
|
|
274
|
-
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
|
|
940
|
+
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(C_{out}, H_{out}, W_{out})`.
|
|
941
|
+
|
|
942
|
+
If `pad_mode` is set to 'pad', the output shape calculation formula is as follows:
|
|
943
|
+
|
|
944
|
+
.. math::
|
|
945
|
+
H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] -
|
|
946
|
+
\text{kernel_size}[0]}{\text{stride}[0]} + 1\right\rfloor
|
|
947
|
+
|
|
948
|
+
.. math::
|
|
949
|
+
W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] -
|
|
950
|
+
\text{kernel_size}[1]}{\text{stride}[1]} + 1\right\rfloor
|
|
275
951
|
|
|
276
952
|
Raises:
|
|
277
953
|
TypeError: If `kernel_size` or `stride` is neither int nor tuple.
|
|
278
|
-
ValueError: If `pad_mode` is
|
|
954
|
+
ValueError: If `pad_mode` is not 'valid', 'same' or 'pad', case-insensitive.
|
|
279
955
|
ValueError: If `data_format` is neither 'NCHW' nor 'NHWC'.
|
|
956
|
+
ValueError: If `padding`, `ceil_mode`, `count_include_pad`, or `divisor_override` is used
|
|
957
|
+
or `pad_mode` is 'pad' when `data_format` is 'NHWC'.
|
|
280
958
|
ValueError: If `kernel_size` or `stride` is less than 1.
|
|
281
|
-
ValueError: If length of
|
|
959
|
+
ValueError: If the length of the `padding` tuple/list is neither 1 nor 2.
|
|
960
|
+
ValueError: If length of shape of `x` is not equal to 3 or 4.
|
|
961
|
+
ValueError: If `divisor_override` is less than or equal to 0.
|
|
962
|
+
ValueError: If `padding` is non-zero when `pad_mode` is not 'pad'.
|
|
282
963
|
|
|
283
964
|
Supported Platforms:
|
|
284
965
|
``Ascend`` ``GPU`` ``CPU``
|
|
285
966
|
|
|
286
967
|
Examples:
|
|
968
|
+
>>> import mindspore as ms
|
|
969
|
+
>>> import mindspore.nn as nn
|
|
970
|
+
>>> import mindspore.ops as ops
|
|
971
|
+
>>> import numpy as np
|
|
287
972
|
>>> pool = nn.AvgPool2d(kernel_size=3, stride=1)
|
|
288
|
-
>>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]),
|
|
973
|
+
>>> x = ms.Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), ms.float32)
|
|
289
974
|
>>> output = pool(x)
|
|
290
975
|
>>> print(output.shape)
|
|
291
976
|
(1, 2, 2, 2)
|
|
977
|
+
>>> x = ops.randn(6, 6, 8, 8)
|
|
978
|
+
>>> pool2 = nn.AvgPool2d(4, stride=1, pad_mode='pad', padding=2, divisor_override=5)
|
|
979
|
+
>>> output2 = pool2(x)
|
|
980
|
+
>>> print(output2.shape)
|
|
981
|
+
(6, 6, 9, 9)
|
|
292
982
|
"""
|
|
293
983
|
|
|
294
984
|
def __init__(self,
|
|
295
985
|
kernel_size=1,
|
|
296
986
|
stride=1,
|
|
297
987
|
pad_mode="valid",
|
|
988
|
+
padding=0,
|
|
989
|
+
ceil_mode=False,
|
|
990
|
+
count_include_pad=True,
|
|
991
|
+
divisor_override=None,
|
|
298
992
|
data_format="NCHW"):
|
|
299
993
|
"""Initialize AvgPool2d."""
|
|
300
994
|
super(AvgPool2d, self).__init__(kernel_size, stride, pad_mode, data_format)
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
995
|
+
if pad_mode.upper() == 'PAD' or padding != 0 or ceil_mode or not count_include_pad \
|
|
996
|
+
or divisor_override is not None:
|
|
997
|
+
if self.format == "NHWC":
|
|
998
|
+
raise ValueError(f"For '{self.cls_name}, the 'NHWC' format are not support when 'pad_mode' is 'pad' or "
|
|
999
|
+
f"'padding' is not 0 or 'ceil_mode' is not False or 'count_include_pad' is not True"
|
|
1000
|
+
f"or divisor_override is not None, but got pade_mode:{pad_mode}, padding:{padding}, "
|
|
1001
|
+
f"ceil_mode:{ceil_mode}, count_include_pad:{count_include_pad}, "
|
|
1002
|
+
f"divisor_override:{divisor_override}.")
|
|
1003
|
+
self.is_expand = True
|
|
1004
|
+
if divisor_override is not None and divisor_override <= 0:
|
|
1005
|
+
raise ValueError(
|
|
1006
|
+
f"For '{self.cls_name}', the 'divisor_override' must be > 0, but got {divisor_override}.")
|
|
1007
|
+
divisor_override = 0 if divisor_override is None else divisor_override
|
|
1008
|
+
padding = _cal_padding(padding, self.cls_name, 2)
|
|
1009
|
+
|
|
1010
|
+
if isinstance(self.kernel_size, tuple):
|
|
1011
|
+
_check_tuple_length(self.kernel_size, 'kernel_size', 2, self.cls_name)
|
|
1012
|
+
kernel_size = (1,) + self.kernel_size
|
|
1013
|
+
elif isinstance(self.kernel_size, int):
|
|
1014
|
+
kernel_size = (1, self.kernel_size, self.kernel_size)
|
|
1015
|
+
|
|
1016
|
+
if isinstance(self.stride, tuple):
|
|
1017
|
+
_check_tuple_length(self.stride, 'stride', 2, self.cls_name)
|
|
1018
|
+
stride = (1,) + self.stride
|
|
1019
|
+
elif isinstance(self.stride, int):
|
|
1020
|
+
stride = (1, self.stride, self.stride)
|
|
1021
|
+
self.avg_pool = P.AvgPool3D(kernel_size=kernel_size, strides=stride, pad_mode=pad_mode, pad=padding,
|
|
1022
|
+
ceil_mode=ceil_mode,
|
|
1023
|
+
count_include_pad=count_include_pad, divisor_override=divisor_override)
|
|
1024
|
+
else:
|
|
1025
|
+
self.is_expand = False
|
|
1026
|
+
self.avg_pool = P.AvgPool(kernel_size=self.kernel_size,
|
|
1027
|
+
strides=self.stride,
|
|
1028
|
+
pad_mode=self.pad_mode,
|
|
1029
|
+
data_format=self.format)
|
|
305
1030
|
|
|
306
1031
|
def construct(self, x):
|
|
307
|
-
|
|
1032
|
+
expand_batch = False
|
|
1033
|
+
if x.ndim == 3:
|
|
1034
|
+
x = x.unsqueeze(0)
|
|
1035
|
+
expand_batch = True
|
|
1036
|
+
if self.is_expand:
|
|
1037
|
+
x = x.unsqueeze(2)
|
|
1038
|
+
out = self.avg_pool(x)
|
|
1039
|
+
res = out.squeeze(2)
|
|
1040
|
+
else:
|
|
1041
|
+
res = self.avg_pool(x)
|
|
1042
|
+
if expand_batch:
|
|
1043
|
+
res = res.squeeze(0)
|
|
1044
|
+
return res
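Likewise for the AvgPool2d doctest: with H_in=8, padding=2, kernel_size=4, stride=1 the 'pad' formula gives:

>>> (8 + 2 * 2 - 4) // 1 + 1
9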
|
|
308
1045
|
|
|
309
1046
|
|
|
310
1047
|
class AvgPool1d(_PoolNd):
|
|
@@ -312,95 +1049,134 @@ class AvgPool1d(_PoolNd):
|
|
|
312
1049
|
Applies a 1D average pooling over an input Tensor which can be regarded as a composition of 1D input planes.
|
|
313
1050
|
|
|
314
1051
|
Typically the input is of shape :math:`(N_{in}, C_{in}, L_{in})`, AvgPool1d outputs
|
|
315
|
-
regional average in the :math:`(L_{in})`-dimension. Given
|
|
316
|
-
:math:`
|
|
1052
|
+
regional average in the :math:`(L_{in})`-dimension. Given `kernel_size`
|
|
1053
|
+
:math:`l_{ker}` and `stride` :math:`s_0`, the operation is as follows:
|
|
317
1054
|
|
|
318
1055
|
.. math::
|
|
319
1056
|
\text{output}(N_i, C_j, l) = \frac{1}{l_{ker}} \sum_{n=0}^{l_{ker}-1}
|
|
320
1057
|
\text{input}(N_i, C_j, s_0 \times l + n)
|
|
321
1058
|
|
|
322
|
-
Note:
|
|
323
|
-
pad_mode for training only supports "same" and "valid".
|
|
324
|
-
|
|
325
1059
|
Args:
|
|
326
1060
|
kernel_size (int): The size of kernel window used to take the average value, Default: 1.
|
|
327
1061
|
stride (int): The distance of kernel moving, an int number that represents
|
|
328
1062
|
the width of movement is strides, Default: 1.
|
|
329
|
-
pad_mode (str)
|
|
330
|
-
Default: "valid".
|
|
1063
|
+
pad_mode (str): Specifies the padding method of pooling, optional values are "same", "valid" or "pad",
|
|
1064
|
+
case insensitive. Default: "valid".
|
|
331
1065
|
|
|
332
|
-
- same:
|
|
333
|
-
the input. The total number of padding will be calculated in horizontal and vertical
|
|
334
|
-
directions and evenly distributed to top and bottom, left and right if possible.
|
|
335
|
-
Otherwise, the last extra padding will be done from the bottom and the right side.
|
|
1066
|
+
- same: The width of the output is the same as the value after the input is divided by stride.
|
|
336
1067
|
|
|
337
|
-
- valid:
|
|
338
|
-
|
|
1068
|
+
- valid: Returns the output obtained by effective calculation without padding.
|
|
1069
|
+
The excess pixels that do not meet the calculation will be discarded.
|
|
1070
|
+
|
|
1071
|
+
- pad: Performs padding on the input. Adds padding size of zeros to both ends of the input.
|
|
1072
|
+
If this mode is set, padding must be greater than or equal to 0.
|
|
339
1073
|
|
|
1074
|
+
padding (Union(int, tuple[int], list[int])): Pooling padding value, only 'pad' mode can be set to non-zero.
|
|
1075
|
+
Default: 0. padding can only be an integer or a tuple/list containing a single integer, in which case
|
|
1076
|
+
the input is padded by `padding` or `padding[0]` zeros on both sides.
|
|
1077
|
+
ceil_mode (bool): If True, use ceil to compute the output shape instead of floor. Default: False.
|
|
1078
|
+
count_include_pad (bool): If True, averaging calculation will include the zero-padding. Default: True.
|
|
340
1079
|
|
|
341
1080
|
Inputs:
|
|
342
|
-
- **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})`.
|
|
1081
|
+
- **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.
|
|
343
1082
|
|
|
344
1083
|
Outputs:
|
|
345
|
-
Tensor of shape :math:`(N, C_{out}, L_{out})`.
|
|
1084
|
+
Tensor of shape :math:`(N, C_{out}, L_{out})` or :math:`(C_{out}, L_{out})`.
|
|
1085
|
+
|
|
1086
|
+
If `pad_mode` is set to 'pad', the output shape calculation formula is as follows:
|
|
1087
|
+
|
|
1088
|
+
.. math::
|
|
1089
|
+
L_{out} = \left\lfloor \frac{L_{in} +
|
|
1090
|
+
2 \times \text{padding} - \text{kernel_size}}{\text{stride}} + 1\right\rfloor
|
|
346
1091
|
|
|
347
1092
|
Raises:
|
|
348
1093
|
TypeError: If `kernel_size` or `stride` is not an int.
|
|
349
|
-
ValueError: If `pad_mode` is
|
|
1094
|
+
ValueError: If `pad_mode` is not 'valid', 'same' or 'pad', case-insensitive.
|
|
350
1095
|
ValueError: If `kernel_size` or `stride` is less than 1.
|
|
351
|
-
ValueError: If length of
|
|
1096
|
+
ValueError: If length of `padding` tuple/list is not 1.
|
|
1097
|
+
ValueError: If length of shape of `x` is not equal to 2 or 3.
|
|
1098
|
+
ValueError: If `padding` is non-zero when `pad_mode` is not 'pad'.
|
|
352
1099
|
|
|
353
1100
|
Supported Platforms:
|
|
354
1101
|
``Ascend`` ``GPU`` ``CPU``
|
|
355
1102
|
|
|
356
1103
|
Examples:
|
|
1104
|
+
>>> import mindspore as ms
|
|
1105
|
+
>>> import mindspore.nn as nn
|
|
1106
|
+
>>> import mindspore.ops as ops
|
|
1107
|
+
>>> import numpy as np
|
|
357
1108
|
>>> pool = nn.AvgPool1d(kernel_size=6, stride=1)
|
|
358
|
-
>>> x = Tensor(np.random.randint(0, 10, [1, 3, 6]),
|
|
1109
|
+
>>> x = ms.Tensor(np.random.randint(0, 10, [1, 3, 6]), ms.float32)
|
|
359
1110
|
>>> output = pool(x)
|
|
360
1111
|
>>> result = output.shape
|
|
361
1112
|
>>> print(result)
|
|
362
1113
|
(1, 3, 1)
|
|
1114
|
+
>>> pool2 = nn.AvgPool1d(4, stride=1, ceil_mode=True, pad_mode='pad', padding=2)
|
|
1115
|
+
>>> x1 = ops.randn(6, 6, 8)
|
|
1116
|
+
>>> output = pool2(x1)
|
|
1117
|
+
>>> print(output.shape)
|
|
1118
|
+
(6, 6, 9)
|
|
363
1119
|
"""
|
|
364
1120
|
|
|
365
1121
|
def __init__(self,
|
|
366
1122
|
kernel_size=1,
|
|
367
1123
|
stride=1,
|
|
368
|
-
pad_mode="valid"
|
|
1124
|
+
pad_mode="valid",
|
|
1125
|
+
padding=0,
|
|
1126
|
+
ceil_mode=False,
|
|
1127
|
+
count_include_pad=True):
|
|
369
1128
|
"""Initialize AvgPool1d."""
|
|
370
|
-
validator.check_value_type('kernel_size', kernel_size, [int], self.cls_name)
|
|
371
|
-
validator.check_value_type('stride', stride, [int], self.cls_name)
|
|
372
|
-
validator.check_value_type('pad_mode', pad_mode, [str], self.cls_name)
|
|
373
|
-
self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.cls_name)
|
|
374
|
-
validator.check_int(kernel_size, 1, Rel.GE, "kernel_size", self.cls_name)
|
|
375
|
-
validator.check_int(stride, 1, Rel.GE, "stride", self.cls_name)
|
|
376
1129
|
super(AvgPool1d, self).__init__(kernel_size, stride, pad_mode)
|
|
377
|
-
self.kernel_size
|
|
378
|
-
self.stride
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
1130
|
+
validator.check_int(self.kernel_size, 1, validator.GE, "kernel_size", self.cls_name)
|
|
1131
|
+
validator.check_int(self.stride, 1, validator.GE, "stride", self.cls_name)
|
|
1132
|
+
if pad_mode.upper() == 'PAD' or padding != 0 or ceil_mode or not count_include_pad:
|
|
1133
|
+
padding = _cal_padding(padding, self.cls_name, 1)
|
|
1134
|
+
self.is_expand_3d = True
|
|
1135
|
+
kernel_size = (1, 1, self.kernel_size)
|
|
1136
|
+
stride = (1, 1, self.stride)
|
|
1137
|
+
self.avg_pool = P.AvgPool3D(kernel_size=kernel_size, strides=stride, pad_mode=pad_mode, pad=padding,
|
|
1138
|
+
ceil_mode=ceil_mode,
|
|
1139
|
+
count_include_pad=count_include_pad)
|
|
1140
|
+
else:
|
|
1141
|
+
self.is_expand_3d = False
|
|
1142
|
+
self.kernel_size = (1, self.kernel_size)
|
|
1143
|
+
self.stride = (1, self.stride)
|
|
1144
|
+
self.avg_pool = P.AvgPool(kernel_size=self.kernel_size,
|
|
1145
|
+
strides=self.stride,
|
|
1146
|
+
pad_mode=self.pad_mode)
|
|
1147
|
+
self.shape = F.shape
|
|
1148
|
+
self.reduce_mean = P.ReduceMean(keep_dims=True)
|
|
1149
|
+
self.slice = P.Slice()
|
|
1150
|
+
self.expand = P.ExpandDims()
|
|
1151
|
+
self.squeeze = P.Squeeze(2)
|
|
387
1152
|
|
|
388
1153
|
def construct(self, x):
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
x =
|
|
395
|
-
x = self.reduce_mean(x, 2)
|
|
396
|
-
else:
|
|
397
|
-
x = self.expand(x, 2)
|
|
1154
|
+
expand_batch = False
|
|
1155
|
+
if x.ndim == 2:
|
|
1156
|
+
x = x.unsqueeze(0)
|
|
1157
|
+
expand_batch = True
|
|
1158
|
+
if self.is_expand_3d:
|
|
1159
|
+
x = x.unsqueeze(2).unsqueeze(3)
|
|
398
1160
|
x = self.avg_pool(x)
|
|
399
|
-
x =
|
|
1161
|
+
x = x.squeeze(3).squeeze(2)
|
|
1162
|
+
else:
|
|
1163
|
+
_shape_check(self.shape(x), self.cls_name)
|
|
1164
|
+
batch, channel, width = self.shape(x)
|
|
1165
|
+
if width == self.kernel_size[1]:
|
|
1166
|
+
x = self.reduce_mean(x, 2)
|
|
1167
|
+
elif width - self.kernel_size[1] < self.stride[1]:
|
|
1168
|
+
x = self.slice(x, (0, 0, 0), (batch, channel, self.kernel_size[1]))
|
|
1169
|
+
x = self.reduce_mean(x, 2)
|
|
1170
|
+
else:
|
|
1171
|
+
x = self.expand(x, 2)
|
|
1172
|
+
x = self.avg_pool(x)
|
|
1173
|
+
x = self.squeeze(x)
|
|
1174
|
+
if expand_batch:
|
|
1175
|
+
x = x.squeeze(0)
|
|
400
1176
|
return x
|
|
401
1177
|
|
|
402
1178
|
|
|
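
A quick numeric check of the 'pad'-mode output-shape formula documented above, using the values from the
AvgPool1d docstring example (plain Python arithmetic; the variable names here are illustrative only):

    >>> # L_out = floor((L_in + 2*padding - kernel_size) / stride) + 1
    >>> L_in, padding, kernel_size, stride = 8, 2, 4, 1
    >>> (L_in + 2 * padding - kernel_size) // stride + 1
    9
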

-@
+@_primexpr
 def _adaptive_shape_check(in_shape, output_size, prim_name):
     """Check shape."""
     msg_prefix = "For {}, the".format(prim_name)
@@ -439,18 +1215,18 @@ class AdaptiveAvgPool1d(Cell):
         output_size (int): the target output size :math:`L_{out}`.

     Inputs:
-        - **
+        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})`, with float16 or float32 data type.

     Outputs:
-        Tensor of shape :math:`(N, C_{in}, L_{out})`, has the same type as `
+        Tensor of shape :math:`(N, C_{in}, L_{out})`, has the same type as `input`.

     Raises:
         TypeError: If `output_size` is not an int.
-        TypeError: If `
+        TypeError: If dtype of `input` is neither float16 nor float32.
         ValueError: If `output_size` is less than 1.
-        ValueError: If length of shape of `
-        ValueError: If the last dimension of `
-        ValueError: If the last dimension of `
+        ValueError: If length of shape of `input` is not equal to 3.
+        ValueError: If the last dimension of `input` is smaller than `output_size`.
+        ValueError: If the last dimension of `input` is not divisible by `output_size`.


     Supported Platforms:
@@ -461,8 +1237,8 @@ class AdaptiveAvgPool1d(Cell):
         >>> from mindspore import Tensor, nn
         >>> import numpy as np
         >>> pool = nn.AdaptiveAvgPool1d(output_size=2)
-        >>>
-        >>> output = pool(
+        >>> import mindspore
+        >>> input = Tensor(np.random.randint(0, 10, [1, 3, 6]), mindspore.float32)
+        >>> output = pool(input)
         >>> result = output.shape
         >>> print(result)
         (1, 3, 2)
@@ -472,30 +1248,30 @@ class AdaptiveAvgPool1d(Cell):
         """Initialize AdaptiveAvgPool1d."""
         super(AdaptiveAvgPool1d, self).__init__()
         validator.check_value_type('output_size', output_size, [int], self.cls_name)
-        validator.check_int(output_size, 1,
+        validator.check_int(output_size, 1, validator.GE, "output_size", self.cls_name)
         self.shape = F.shape
         self.expand = P.ExpandDims()
         self.squeeze = P.Squeeze(2)
         self.output_size = output_size
         self.dtype = P.DType()

-    def construct(self,
-        _adaptive_shape_check(self.shape(
-        _adaptive_dtype_check(self.dtype(
+    def construct(self, input):
+        _adaptive_shape_check(self.shape(input), self.output_size, self.cls_name)
+        _adaptive_dtype_check(self.dtype(input), self.cls_name)

-        _, _, width = self.shape(
+        _, _, width = self.shape(input)
         stride = width // self.output_size
         kernel_size = width - (self.output_size - 1) * stride

         stride = (1, width // self.output_size)
         kernel_size = (1, kernel_size)

-
+        input = self.expand(input, 2)
         avg_pool = P.AvgPool(kernel_size=kernel_size, strides=stride)
-
-
+        input = avg_pool(input)
+        input = self.squeeze(input)

-        return
+        return input


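
The construct above turns the adaptive problem into a fixed AvgPool: stride and kernel_size are derived from
the input width and `output_size`. A minimal sketch of that arithmetic for the docstring example (width 6,
output_size 2):

    >>> width, output_size = 6, 2
    >>> stride = width // output_size
    >>> kernel_size = width - (output_size - 1) * stride
    >>> stride, kernel_size
    (3, 3)
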
 class AdaptiveAvgPool2d(Cell):
@@ -523,7 +1299,7 @@ class AdaptiveAvgPool2d(Cell):
             If it is None, it means the output size is the same as the input size.

     Inputs:
-        - **
+        - **input** (Tensor) - The input of AdaptiveAvgPool2d, which is a 3D or 4D tensor,
           with float16, float32 or float64 data type.

     Outputs:
@@ -531,9 +1307,9 @@ class AdaptiveAvgPool2d(Cell):

     Raises:
         ValueError: If `output_size` is a tuple and the length of `output_size` is not 2.
-        TypeError: If `
-        TypeError: If dtype of `
-        ValueError: If the dimension of `
+        TypeError: If `input` is not a Tensor.
+        TypeError: If dtype of `input` is not float16, float32 or float64.
+        ValueError: If the dimension of `input` is less than or equal to the dimension of `output_size`.

     Supported Platforms:
         ``GPU``
@@ -554,8 +1330,8 @@ class AdaptiveAvgPool2d(Cell):
         super(AdaptiveAvgPool2d, self).__init__()
         self.adaptive_avgpool2d = P.AdaptiveAvgPool2D(output_size)

-    def construct(self,
-        return self.adaptive_avgpool2d(
+    def construct(self, input):
+        return self.adaptive_avgpool2d(input)


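
A short usage sketch for the renamed `input` argument of AdaptiveAvgPool2d; the shapes are illustrative and
assume the documented None-passthrough behaviour of `output_size`:

    >>> import numpy as np
    >>> from mindspore import Tensor, nn
    >>> pool = nn.AdaptiveAvgPool2d((None, 2))
    >>> input = Tensor(np.ones([1, 3, 4, 4]).astype(np.float32))
    >>> pool(input).shape    # H kept at 4, W pooled down to 2
    (1, 3, 4, 2)
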
 class AdaptiveAvgPool3d(Cell):
@@ -564,14 +1340,14 @@ class AdaptiveAvgPool3d(Cell):
     That is, for any input size, the size of the specified output is :math:`(D, H, W)`.
     The number of output features is equal to the number of input planes.

-    Suppose the last 3 dimension size of
+    Suppose the last 3 dimension size of input is :math:`(inD, inH, inW)`, then the last 3 dimension size of output is
     :math:`(outD, outH, outW)`.

     .. math::
         \begin{array}{ll} \\
             \forall \quad od \in [0,outD-1], oh \in [0,outH-1], ow \in [0,outW-1]\\
             output[od,oh,ow] = \\
-            \qquad mean(
+            \qquad mean(input[istartD:iendD+1,istartH:iendH+1,istartW:iendW+1])\\
             where,\\
             \qquad istartD= \left\lceil \frac{od * inD}{outD} \right\rceil \\
             \qquad iendD= \left\lfloor \frac{(od+1)* inD}{outD} \right\rfloor \\
@@ -582,24 +1358,25 @@ class AdaptiveAvgPool3d(Cell):
         \end{array}

     Args:
-        output_size (Union[int, tuple]):
-            or an int D for :math:`(D, D, D)`. :math:`
+        output_size (Union[int, tuple]): The target output size. `output_size` can be a tuple :math:`(D, H, W)`,
+            or an int D for :math:`(D, D, D)`. :math:`D`, :math:`H` and :math:`W` can be int or None,
             which means the output size is the same as that of the input.

     Inputs:
-        - **
+        - **input** (Tensor) - The input of AdaptiveAvgPool3d, which is a 5D or 4D Tensor,
+          with float16, float32 or float64 data type.

     Outputs:
-        Tensor, with the same type as the `
+        Tensor, with the same type as the `input`.

     Raises:
-        TypeError: If `
-        TypeError: If dtype of `
-        ValueError: If the dimension of `
+        TypeError: If `input` is not a Tensor.
+        TypeError: If dtype of `input` is not float16, float32 or float64.
+        ValueError: If the dimension of `input` is not 4 or 5.
         ValueError: If `output_size` value is not positive.

     Supported Platforms:
-        ``GPU``
+        ``Ascend`` ``GPU`` ``CPU``

     Examples:
         >>> # case 1: output_size=(3, 3, 4)
@@ -633,8 +1410,8 @@ class AdaptiveAvgPool3d(Cell):
         super(AdaptiveAvgPool3d, self).__init__()
         self.adaptive_avg_pool3d = AdaptiveAvgPool3D(output_size)

-    def construct(self,
-        return self.adaptive_avg_pool3d(
+    def construct(self, input):
+        return self.adaptive_avg_pool3d(input)


 class AdaptiveMaxPool1d(Cell):
@@ -685,7 +1462,7 @@ class AdaptiveMaxPool1d(Cell):
     def __init__(self, output_size):
         """Initialize AdaptiveMaxPool1d."""
         super(AdaptiveMaxPool1d, self).__init__()
-        validator.check_int(output_size, 1,
+        validator.check_int(output_size, 1, validator.GE, "output_size", self.cls_name)
         validator.check_value_type('output_size', output_size, [int], self.cls_name)
         self.expand = P.ExpandDims()
         self.squeeze = P.Squeeze(2)
@@ -734,43 +1511,41 @@ class AdaptiveMaxPool2d(Cell):
         \end{align}

     Note:
-        Ascend platform only supports float16 type for
+        Ascend platform only supports float16 type for input.

     Args:
-        output_size (Union[int, tuple]): The target output size
-
-
-
+        output_size (Union[int, tuple]): The target output size. `output_size` can be a tuple :math:`(H, W)`,
+            or an int H for :math:`(H, H)`. :math:`H` and :math:`W` can be int or None.
+            If it is None, it means the output size is the same as the input size.
         return_indices (bool): If `return_indices` is True, the indices of max value would be output.
             Default: False.

     Inputs:
-        - **
+        - **input** (Tensor) - The input of AdaptiveMaxPool2d, which is a 3D or 4D tensor,
           with float16, float32 or float64 data type.

     Outputs:
-        Tensor, with the same type as the `
-
-        Shape of the output is `input_x_shape[:len(input_x_shape) - len(out_shape)] + out_shape`.
+        Tensor, with the same type as the `input`.
+        Shape of the output is `input_shape[:len(input_shape) - len(out_shape)] + out_shape`.

     Raises:
         TypeError: If `output_size` is not int or tuple.
-        TypeError: If `
+        TypeError: If `input` is not a tensor.
         TypeError: If `return_indices` is not a bool.
-        TypeError: If dtype of `
+        TypeError: If dtype of `input` is not float16, float32 or float64.
         ValueError: If `output_size` is a tuple and the length of `output_size` is not 2.
-        ValueError: If the dimension of `
+        ValueError: If the shape of `input` is not CHW or NCHW.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
         >>> # case 1: output_size=(None, 2)
-        >>>
+        >>> input = Tensor(np.array([[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
         ...                          [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
         ...                          [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]]), mindspore.float32)
         >>> adaptive_max_pool_2d = nn.AdaptiveMaxPool2d((None, 2))
-        >>> output = adaptive_max_pool_2d(
+        >>> output = adaptive_max_pool_2d(input)
         >>> print(output)
         [[[[2. 3.]
            [5. 6.]
@@ -783,7 +1558,7 @@ class AdaptiveMaxPool2d(Cell):
            [8. 9.]]]]
         >>> # case 2: output_size=2
         >>> adaptive_max_pool_2d = nn.AdaptiveMaxPool2d(2)
-        >>> output = adaptive_max_pool_2d(
+        >>> output = adaptive_max_pool_2d(input)
         >>> print(output)
         [[[[5. 6.]
            [8. 9.]]
@@ -793,7 +1568,7 @@ class AdaptiveMaxPool2d(Cell):
            [8. 9.]]]]
         >>> # case 3: output_size=(1, 2)
         >>> adaptive_max_pool_2d = nn.AdaptiveMaxPool2d((1, 2))
-        >>> output = adaptive_max_pool_2d(
+        >>> output = adaptive_max_pool_2d(input)
         >>> print(output)
         [[[[8. 9.]]
          [[8. 9.]]
@@ -803,51 +1578,51 @@ class AdaptiveMaxPool2d(Cell):
     def __init__(self, output_size, return_indices=False):
         """Initialize AdaptiveMaxPool2d."""
         super(AdaptiveMaxPool2d, self).__init__()
-
+        validator.check_value_type('return_indices', return_indices, [bool], self.cls_name)
+        self.adaptive_max_pool2d = AdaptiveMaxPool2D(output_size)
+        self.return_indices = return_indices

-    def construct(self,
-
+    def construct(self, input):
+        output = self.adaptive_max_pool2d(input)
+        if self.return_indices:
+            return output
+        return output[0]


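
A sketch of the `return_indices` branch implemented above: with `return_indices=True` the cell forwards the
(values, indices) pair from the underlying operator, otherwise only the values. Shapes are illustrative:

    >>> import numpy as np
    >>> from mindspore import Tensor, nn
    >>> net = nn.AdaptiveMaxPool2d((1, 2), return_indices=True)
    >>> input = Tensor(np.arange(9, dtype=np.float32).reshape(1, 1, 3, 3))
    >>> values, indices = net(input)
    >>> values.shape, indices.shape
    ((1, 1, 1, 2), (1, 1, 1, 2))
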
 class AdaptiveMaxPool3d(Cell):
     r"""
-
-
-    The output is of size :math:`(D, H, W)`, for any input size.
-    The number of output features is equal to the number of input planes.
+    Calculates the 3D adaptive max pooling for an input Tensor.
+    That is, for any input size, the size of the specified output is :math:`(D, H, W)`.

     Args:
-        output_size (Union[int, tuple]): The
-
-
-
-
-            Default: False.
+        output_size (Union[int, tuple]): The specified output size, which is a positive integer that represents depth,
+            height and width, or a tuple of three positive integers that represent depth, height and width
+            respectively. If it is None, the output size and input size of the corresponding dimension are the same.
+        return_indices (bool, optional): If `return_indices` is True, the indices of max value would be output.
+            Otherwise, the indices will not be returned. Default: False.

     Inputs:
-        - **
-          uint64, float16, float32 or float64 data type.
+        - **input** (Tensor) - Tensor, has shape of :math:`(C, D, H, W)` or :math:`(N, C, D, H, W)`.

     Outputs:
-        - **y** (Tensor) -
-        - **argmax** (Tensor) -
-          `y` and
+        - **y** (Tensor) - Tensor, has the same number of dims and data type as the `input`.
+        - **argmax** (Tensor) - Tensor, the indices of the maximum values along with the outputs, has the same shape as
+          `y` and a dtype of int32. It is returned only when `return_indices` is True.

     Raises:
-        TypeError: If `
-        ValueError: If the dimensions number of `
-        TypeError: If dtype of `
-            float16, float32 or float64.
+        TypeError: If `input` is not a Tensor.
+        ValueError: If the number of dimensions of `input` is not 4 or 5.
+        TypeError: If dtype of `input` is not int, uint or float.
         ValueError: If `output_size` is neither an int nor a tuple with shape (3,).

     Supported Platforms:
-        ``GPU``
+        ``GPU`` ``CPU``

     Examples:
-        >>>
+        >>> input = Tensor(np.arange(0,36).reshape((1, 3, 3, 4)).astype(np.float32))
         >>> output_size = (1, 1, 2)
         >>> net = nn.AdaptiveMaxPool3d(output_size, True)
-        >>> output = net(
+        >>> output = net(input)
         >>> print(output[0].asnumpy())
         [[[[33. 35.]]]]
         >>> print(output[1].asnumpy())
@@ -861,8 +1636,457 @@ class AdaptiveMaxPool3d(Cell):
         self.return_indices = return_indices
         self.adaptive_max_pool3d = AdaptiveMaxPool3D()

-    def construct(self,
-        output = self.adaptive_max_pool3d(
+    def construct(self, input):
+        output = self.adaptive_max_pool3d(input, self.output_size)
         if self.return_indices:
             return output
         return output[0]
+
+
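
Reading the AdaptiveMaxPool3d example above, `argmax` appears to be a flat index over the last three axes;
that interpretation is an assumption, but it matches the example values (the input is arange(0, 36)):

    >>> import numpy as np
    >>> np.unravel_index(33, (3, 3, 4))    # flat argmax 33 in a (D, H, W) = (3, 3, 4) block
    (2, 2, 1)
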
+class FractionalMaxPool2d(Cell):
+    r"""
+    Applies the 2D FractionalMaxPool operation over `input`. The output Tensor shape can be determined by either
+    `output_size` or `output_ratio`, and the step size is determined by `_random_samples`.
+    `output_size` and `output_ratio` cannot be used at the same time, nor can both be None.
+
+    Refer to the paper `Fractional MaxPooling by Ben Graham <https://arxiv.org/abs/1412.6071>`_ for more details.
+
+    Args:
+        kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
+            is an int number that represents height and width of the kernel, or a tuple
+            of two int numbers that represent height and width respectively.
+            The value must be a positive integer.
+        output_size (Union[int, tuple[int]], optional): The shape of the target output,
+            is a positive int that represents height and width, or a tuple of two positive integers that represent
+            height and width respectively. If None, the shape of the target will
+            be determined by `output_ratio`. Default: None.
+        output_ratio (Union[float, tuple[float]], optional): The ratio of target output shape to input shape.
+            Specifies the size of the output tensor by using a ratio of the input size.
+            Data type: float16, float32, float64, and value is between (0, 1). If None, the shape of the target
+            will be determined by `output_size`. Default: None.
+        return_indices (bool, optional): Whether to return the indices of max value. Default: False.
+        _random_samples (Tensor, optional): The random step of FractionalMaxPool2d, a Tensor of shape :math:`(N, C, 2)`
+            whose elements are within the range of :math:`(0, 1)`. Supported data type: float16, float32, float64.
+            If None, no random step will be set. Default: None.
+
+    Inputs:
+        - **input** (Tensor) - Tensor of shape :math:`(N, C, H_{in}, W_{in})`,
+          with float16, float32, float64, int32, int64 data type.
+
+    Outputs:
+        - **y** (Tensor) - Has the same type as the `input`. Has the shape :math:`(N, C, H, W)`.
+        - **argmax** (Tensor) - The indices along with the outputs, which is a Tensor, with the same shape as
+          `y` and int64 data type. It will be returned only when `return_indices` is True.
+
+    Raises:
+        TypeError: If data type of `input` is not one of the following: float16, float32, float64, int32, int64.
+        TypeError: If data type of `_random_samples` is not one of the following: float16, float32, float64.
+        ValueError: If `kernel_size` is not a number and `kernel_size` is not a tuple of length 2.
+        ValueError: If `output_size` is not a number and `output_size` is not a tuple of length 2.
+        ValueError: If the sum of `kernel_size`, `output_size` and -1 is larger than the corresponding
+            dimension of `input`.
+        ValueError: If the dimension of `_random_samples` is not 3.
+        ValueError: If `output_size` and `output_ratio` are None at the same time.
+        ValueError: If the first dimension size of `input` and `_random_samples` is not equal.
+        ValueError: If the second dimension size of `input` and `_random_samples` is not equal.
+        ValueError: If the third dimension size of `_random_samples` is not 2.
+
+    Supported Platforms:
+        ``CPU``
+
+    Examples:
+        >>> # the kernel_size is an int number and the output_size is a tuple.
+        >>> import numpy as np
+        >>> from mindspore import nn
+        >>> from mindspore import Tensor
+        >>> import mindspore.common.dtype as mstype
+        >>> input = Tensor(np.array([0.3220, 0.9545, 0.7879, 0.0975, 0.3698,
+        ...                          0.5135, 0.5740, 0.3435, 0.1895, 0.8764,
+        ...                          0.9581, 0.4760, 0.9014, 0.8522, 0.3664,
+        ...                          0.4980, 0.9673, 0.9879, 0.6988, 0.9022,
+        ...                          0.9304, 0.1558, 0.0153, 0.1559, 0.9852]).reshape([1, 1, 5, 5]), mstype.float32)
+        >>> _random_samples = Tensor(np.array([[[0.8, 0.8]]]), mstype.float32)
+        >>> net = nn.FractionalMaxPool2d(kernel_size=2, output_size=(2, 2), _random_samples=_random_samples,
+        ...                              return_indices=True)
+        >>> y, argmax = net(input)
+        >>> print(y)
+        [[[[0.9545 0.8764]
+           [0.9673 0.9852]]]]
+        >>> print(argmax)
+        [[[[ 1  9]
+           [16 24]]]]
+        >>> net = nn.FractionalMaxPool2d(kernel_size=2, output_ratio=(0.5, 0.5), _random_samples=_random_samples,
+        ...                              return_indices=True)
+        >>> y, argmax = net(input)
+        >>> print(y)
+        [[[[0.9545 0.8764]
+           [0.9673 0.9852]]]]
+        >>> print(argmax)
+        [[[[ 1  9]
+           [16 24]]]]
+    """
+
+    def __init__(self, kernel_size, output_size=None, output_ratio=None, return_indices=False, _random_samples=None):
+        """Initialize FractionalMaxPool2d."""
+        super(FractionalMaxPool2d, self).__init__()
+        self.kernel_size = kernel_size
+        self.output_size = output_size
+        self.output_ratio = output_ratio
+        self.return_indices = return_indices
+        self._random_samples = _random_samples
+
+    def construct(self, input):
+        return ops.fractional_max_pool2d(input, self.kernel_size, self.output_size, self.output_ratio,
+                                         self.return_indices, self._random_samples)
+
+
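
When `output_ratio` is given instead of `output_size`, the target size is derived from the input size. For
the 5x5 example above, ratio (0.5, 0.5) yields a 2x2 output; a one-line sketch (assuming simple truncation,
which matches the example):

    >>> int(5 * 0.5)
    2
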
+class FractionalMaxPool3d(Cell):
+    r"""
+    Applies the 3D FractionalMaxPool operation over `input`. The output Tensor shape can be determined by either
+    `output_size` or `output_ratio`, and the step size is determined by `_random_samples`.
+    `output_size` and `output_ratio` cannot be used at the same time, nor can both be None.
+
+    Refer to the paper `Fractional MaxPooling by Ben Graham <https://arxiv.org/abs/1412.6071>`_ for more details.
+
+    The input and output data format can be "NCDHW". N is the batch size, C is the number of channels,
+    D is the feature depth, H is the feature height, and W is the feature width.
+
+    Args:
+        kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value, is a positive int
+            that represents depth, height and width of the kernel, or a tuple of three positive integers that
+            represent depth, height and width respectively.
+        output_size (Union[int, tuple[int]], optional): The shape of the target output,
+            is an int number that represents depth, height and width, or a tuple of three positive integers that
+            represent depth, height and width respectively. If None, the shape of the target will be determined by
+            `output_ratio`. Default: None.
+        output_ratio (Union[float, tuple[float]], optional): The ratio of target output shape to input shape.
+            Specifies the size of the output tensor by using a ratio of the input size.
+            Data type: float16, float32, float64, and value is between (0, 1). If None, the shape of the target
+            will be determined by `output_size`. Default: None.
+        return_indices (bool, optional): Whether to return the indices of max value. Default: False.
+        _random_samples (Tensor, optional): The random step of FractionalMaxPool3d, a Tensor of shape :math:`(N, C, 3)`
+            whose elements are within the range of :math:`(0, 1)`. Supported data type: float16, float32, float64.
+            If None, no random step will be set. Default: None.
+
+    Inputs:
+        - **input** (Tensor) - The input of FractionalMaxPool3d, which is a 4D or 5D tensor.
+          Tensor of data type: float16, float32, float64, int32, int64.
+          Supported shape :math:`(N, C, D_{in}, H_{in}, W_{in})`.
+
+    Outputs:
+        - **y** (Tensor) - A tensor, the output of FractionalMaxPool3d.
+          Has the same data type as `input`.
+          Tensor of shape :math:`(N, C, D, H, W)`.
+
+        - **argmax** (Tensor) - The indices along with the outputs, which is a Tensor, with the same shape as
+          `y` and int32 data type. It is returned only when `return_indices` is True.
+
+    Raises:
+        TypeError: If `input` is not a 4D or 5D tensor.
+        TypeError: If `_random_samples` is not a 3D tensor.
+        TypeError: If data type of `input` is not float16, float32, float64, int32, int64.
+        TypeError: If dtype of `_random_samples` is not float16, float32, float64.
+        TypeError: If dtype of `argmax` is not int32, int64.
+        ValueError: If `output_size` is a tuple and if `output_size` length is not 3.
+        ValueError: If `kernel_size` is a tuple and if `kernel_size` length is not 3.
+        ValueError: If numbers in `output_size` or `kernel_size` are not positive.
+        ValueError: If `output_size` and `output_ratio` are None at the same time.
+        ValueError: If the first dimension size of `input` and `_random_samples` is not equal.
+        ValueError: If the second dimension size of `input` and `_random_samples` is not equal.
+        ValueError: If the third dimension size of `_random_samples` is not 3.
+
+    Supported Platforms:
+        ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import nn
+        >>> from mindspore import Tensor
+        >>> import mindspore.common.dtype as mstype
+        >>> x = Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
+        ...            .reshape([1, 1, 2, 2, 4]), mstype.float32)
+        >>> _random_samples = Tensor(np.array([0.7, 0.7, 0.7]).reshape([1, 1, 3]), mstype.float32)
+        >>> net = nn.FractionalMaxPool3d(kernel_size=(1, 1, 1), output_size=(1, 1, 3),
+        ...                              _random_samples=_random_samples, return_indices=True)
+        >>> output, argmax = net(x)
+        >>> print(output)
+        [[[[[13. 14. 16.]]]]]
+        >>> print(argmax)
+        [[[[[12 13 15]]]]]
+        >>> net = nn.FractionalMaxPool3d(kernel_size=(1, 1, 1), output_ratio=(0.5, 0.5, 0.5),
+        ...                              _random_samples=_random_samples, return_indices=True)
+        >>> output, argmax = net(x)
+        >>> print(output)
+        [[[[[13. 16.]]]]]
+        >>> print(argmax)
+        [[[[[12 15]]]]]
+    """
+
+    def __init__(self, kernel_size, output_size=None, output_ratio=None, return_indices=False, _random_samples=None):
+        """Initialize FractionalMaxPool3d."""
+        super(FractionalMaxPool3d, self).__init__()
+        self.kernel_size = kernel_size
+        self.output_size = output_size
+        self.output_ratio = output_ratio
+        self.return_indices = return_indices
+        self._random_samples = _random_samples
+
+    def construct(self, input):
+        return ops.fractional_max_pool3d(input, self.kernel_size, self.output_size, self.output_ratio,
+                                         self.return_indices, self._random_samples)
+
+
+class MaxUnpool1d(Cell):
+    r"""
+    Computes the inverse of :class:`mindspore.nn.MaxPool1d`.
+
+    MaxUnpool1d keeps the maximal values and sets all non-maximal positions to zero. Typically the input
+    is of shape :math:`(N, C, H_{in})` or :math:`(C, H_{in})`, and the output is of shape
+    :math:`(N, C, H_{out})` or :math:`(C, H_{out})`. The operation is as follows.
+
+    .. math::
+        \begin{array}{ll} \\
+            H_{out} = (H_{in} - 1) \times stride[0] - 2 \times padding[0] + kernel\_size[0] \\
+        \end{array}
+
+    Args:
+        kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value.
+        stride (Union[int, tuple[int]]): The distance of kernel moving.
+            If stride is None, then stride equals kernel_size. Default: None.
+        padding (Union[int, tuple[int]]): The pad value to be filled. Default: 0.
+
+    Inputs:
+        - **x** (Tensor) - The input Tensor to invert.
+          Tensor of shape :math:`(N, C, H_{in})` or :math:`(C, H_{in})`.
+        - **indices** (Tensor) - Max values' index represented by the indices.
+          Its shape must be the same as that of input `x`.
+          Values of indices must belong to :math:`[0, H_{in} - 1]`.
+          Data type must be int32 or int64.
+        - **output_size** (tuple[int], optional) - The output size. Default: None.
+          If output_size == (), then the output shape is computed from kernel_size, stride and padding.
+          If output_size != (), then output_size must be :math:`(N, C, H)`, :math:`(C, H)` or
+          :math:`(H)` and output_size must belong to
+          :math:`[(N, C, H_{out} - stride[0]), (N, C, H_{out} + stride[0])]`.
+
+    Outputs:
+        Tensor, with shape :math:`(N, C, H_{out})` or :math:`(C, H_{out})`,
+        with the same data type as `x`.
+
+    Raises:
+        TypeError: If data type of `x` or `indices` is not supported.
+        TypeError: If `kernel_size`, `stride` or `padding` is neither an int nor a tuple.
+        ValueError: If numbers in `stride`, `padding` (also support 0 and (0)) or `kernel_size` are not positive.
+        ValueError: If the shapes of `x` and `indices` are not equal.
+        ValueError: If length of shape of `x` is not 2 or 3.
+        ValueError: If type of `output_size` is not tuple.
+        ValueError: If length of `output_size` is not 0, 2 or 3.
+        ValueError: If `output_size` is not close to output size computed by attr `kernel_size`, `stride`, `padding`.
+
+    Supported Platforms:
+        ``GPU`` ``CPU``
+
+    Examples:
+        >>> x = Tensor(np.array([[2, 4, 6, 8]]).astype(np.float32))
+        >>> indices = Tensor(np.array([[1, 3, 5, 7]]).astype(np.int64))
+        >>> maxunpool1d = nn.MaxUnpool1d(kernel_size=2, stride=2, padding=0)
+        >>> output = maxunpool1d(x, indices)
+        >>> print(output.asnumpy())
+        [[0. 2. 0. 4. 0. 6. 0. 8.]]
+    """
+
+    def __init__(self, kernel_size, stride=None, padding=0):
+        """Initialize MaxUnpool1d."""
+        super(MaxUnpool1d, self).__init__()
+        if stride is None:
+            stride = kernel_size
+        self.kernel_size = kernel_size
+        self.stride = stride
+        self.padding = padding
+
+    def construct(self, x, indices, output_size=None):
+        if output_size is None:
+            output_size = ()
+        else:
+            if not isinstance(output_size, tuple):
+                raise ValueError(f"For MaxUnpool1d, output_size must be tuple, but type {type(output_size)}.")
+        out = ops.max_unpool1d(x, indices, self.kernel_size, stride=self.stride, padding=self.padding,
+                               output_size=output_size)
+        return out
+
+
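
A quick check of the MaxUnpool1d shape formula against the example above
(H_in=4, stride=2, padding=0, kernel_size=2):

    >>> (4 - 1) * 2 - 2 * 0 + 2    # H_out
    8
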
+class MaxUnpool2d(Cell):
+    r"""
+    Computes the inverse of :class:`mindspore.nn.MaxPool2d`.
+
+    MaxUnpool2d keeps the maximal values and sets all non-maximal positions to zero. Typically the input
+    is of shape :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`, and the output is of
+    shape :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`. The operation is as follows.
+
+    .. math::
+        \begin{array}{ll} \\
+            H_{out} = (H_{in} - 1) \times stride[0] - 2 \times padding[0] + kernel\_size[0] \\
+            W_{out} = (W_{in} - 1) \times stride[1] - 2 \times padding[1] + kernel\_size[1] \\
+        \end{array}
+
+    Args:
+        kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
+            an int number that represents height and width of the kernel, or a tuple
+            of two int numbers that represent height and width respectively.
+        stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
+            the height and width of movement are both stride, or a tuple of two int numbers that
+            represent height and width of movement respectively.
+            If stride is None, then stride equals kernel_size. Default: None.
+        padding (Union[int, tuple[int]]): The pad value to be filled. Default: 0. If `padding` is an integer,
+            the paddings of height and width are the same, equal to padding. If `padding` is a tuple of two
+            integers, the padding of height and width equal to padding[0] and padding[1] correspondingly.
+
+    Inputs:
+        - **x** (Tensor) - The input Tensor to invert.
+          Tensor of shape :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
+        - **indices** (Tensor) - Max values' index represented by the indices.
+          Its shape must be the same as that of input `x`.
+          Values of indices must belong to :math:`[0, H_{in} \times W_{in} - 1]`.
+          Data type must be int32 or int64.
+        - **output_size** (tuple[int], optional) - The output size. Default: None.
+          If output_size == (), then the output shape is computed from kernel_size, stride and padding.
+          If output_size != (), then output_size must be :math:`(N, C, H, W)`, :math:`(C, H, W)` or
+          :math:`(H, W)` and output_size must belong to
+          :math:`[(N, C, H_{out} - stride[0], W_{out} - stride[1]), (N, C, H_{out} + stride[0], W_{out} + stride[1])]`.
+
+    Outputs:
+        Tensor, with shape :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`,
+        with the same data type as `x`.
+
+    Raises:
+        TypeError: If data type of `x` or `indices` is not supported.
+        TypeError: If `kernel_size`, `stride` or `padding` is neither an int nor a tuple.
+        ValueError: If numbers in `stride`, `padding` (also support 0 and (0, 0)) or `kernel_size` are not positive.
+        ValueError: If the shapes of `x` and `indices` are not equal.
+        ValueError: If `kernel_size`, `stride` or `padding` is a tuple whose length is not equal to 2.
+        ValueError: If length of shape of `x` is not 3 or 4.
+        ValueError: If type of `output_size` is not tuple.
+        ValueError: If length of `output_size` is not 0, 3 or 4.
+        ValueError: If `output_size` is not close to output size computed by attr `kernel_size`, `stride`, `padding`.
+
+    Supported Platforms:
+        ``GPU`` ``CPU``
+
+    Examples:
+        >>> x = Tensor(np.array([[[[0, 1], [8, 9]]]]).astype(np.float32))
+        >>> indices = Tensor(np.array([[[[0, 1], [2, 3]]]]).astype(np.int64))
+        >>> maxunpool2d = nn.MaxUnpool2d(kernel_size=1, stride=1, padding=0)
+        >>> output = maxunpool2d(x, indices)
+        >>> print(output.asnumpy())
+        [[[[0. 1.]
+           [8. 9.]]]]
+    """
+
+    def __init__(self, kernel_size, stride=None, padding=0):
+        """Initialize MaxUnpool2d."""
+        super(MaxUnpool2d, self).__init__()
+        if stride is None:
+            stride = kernel_size
+        self.kernel_size = kernel_size
+        self.stride = stride
+        self.padding = padding
+
+    def construct(self, x, indices, output_size=None):
+        if output_size is None:
+            output_size = ()
+        else:
+            if not isinstance(output_size, tuple):
+                raise ValueError(f"For MaxUnpool2d, output_size must be tuple, but type {type(output_size)}.")
+        out = ops.max_unpool2d(x, indices, self.kernel_size, stride=self.stride, padding=self.padding,
+                               output_size=output_size)
+        return out
+
+
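
The same shape arithmetic in 2D, checked against the MaxUnpool2d example
(H_in = W_in = 2, kernel_size=1, stride=1, padding=0 leaves the size unchanged):

    >>> (2 - 1) * 1 - 2 * 0 + 1    # H_out == W_out
    2
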
+class MaxUnpool3d(Cell):
+    r"""
+    Computes the inverse of :class:`mindspore.nn.MaxPool3d`.
+
+    MaxUnpool3d keeps the maximal values and sets all non-maximal positions to zero.
+    Typically the input is of shape :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`,
+    and the output is of shape :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`.
+    The operation is as follows.
+
+    .. math::
+        \begin{array}{ll} \\
+            D_{out} = (D_{in} - 1) \times stride[0] - 2 \times padding[0] + kernel\_size[0] \\
+            H_{out} = (H_{in} - 1) \times stride[1] - 2 \times padding[1] + kernel\_size[1] \\
+            W_{out} = (W_{in} - 1) \times stride[2] - 2 \times padding[2] + kernel\_size[2] \\
+        \end{array}
+
+    Args:
+        kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
+            an int number that represents depth, height and width of the kernel, or a tuple
+            of three int numbers that represent depth, height and width respectively.
+        stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
+            the depth, height and width of movement are both stride, or a tuple of three int numbers that
+            represent depth, height and width of movement respectively.
+            If stride is None, then stride equals kernel_size. Default: None.
+        padding (Union[int, tuple[int]]): The pad value to be filled. Default: 0. If `padding` is an integer,
+            the paddings of depth, height and width are the same, equal to padding. If `padding` is a tuple of three
+            integers, the padding of depth, height and width equal to padding[0], padding[1] and padding[2]
+            correspondingly.
+
+    Inputs:
+        - **x** (Tensor) - The input Tensor to invert.
+          Tensor of shape :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
+        - **indices** (Tensor) - Max values' index represented by the indices.
+          Its shape must be the same as that of input `x`.
+          Values of indices must belong to :math:`[0, D_{in} \times H_{in} \times W_{in} - 1]`.
+          Data type must be int32 or int64.
+        - **output_size** (tuple[int], optional) - The output size. Default: None.
+          If output_size == (), then the output shape is computed from kernel_size, stride and padding.
+          If output_size != (), then output_size must be :math:`(N, C, D, H, W)`, :math:`(C, D, H, W)` or
+          :math:`(D, H, W)` and output_size must belong to
+          :math:`[(N, C, D_{out} - stride[0], H_{out} - stride[1], W_{out} - stride[2]),
+          (N, C, D_{out} + stride[0], H_{out} + stride[1], W_{out} + stride[2])]`.
+
+    Outputs:
+        Tensor, with shape :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
+        with the same data type as `x`.
+
+    Raises:
+        TypeError: If data type of `x` or `indices` is not supported.
+        TypeError: If `kernel_size`, `stride` or `padding` is neither an int nor a tuple.
+        ValueError: If numbers in `stride` or `padding` (also support 0 and (0, 0, 0)) or `kernel_size` are not
+            positive.
+        ValueError: If the shapes of `x` and `indices` are not equal.
+        ValueError: If `kernel_size`, `stride` or `padding` is a tuple whose length is not equal to 3.
+        ValueError: If length of shape of `x` is not 4 or 5.
+        ValueError: If length of `output_size` is not 0, 4 or 5.
+        ValueError: If type of `output_size` is not tuple.
+        ValueError: If `output_size` is not close to output size computed by attr `kernel_size`, `stride`, `padding`.
+
+    Supported Platforms:
+        ``GPU`` ``CPU``
+
+    Examples:
+        >>> x = Tensor(np.array([[[[[0, 1], [8, 9]]]]]).astype(np.float32))
+        >>> indices = Tensor(np.array([[[[[0, 1], [2, 3]]]]]).astype(np.int64))
+        >>> maxunpool3d = nn.MaxUnpool3d(kernel_size=1, stride=1, padding=0)
+        >>> output = maxunpool3d(x, indices)
+        >>> print(output.asnumpy())
+        [[[[[0. 1.]
+            [8. 9.]]]]]
+    """
+
+    def __init__(self, kernel_size, stride=None, padding=0):
+        super(MaxUnpool3d, self).__init__()
+        if stride is None:
+            stride = kernel_size
+        self.kernel_size = kernel_size
+        self.stride = stride
+        self.padding = padding
+
+    def construct(self, x, indices, output_size=None):
+        if output_size is None:
+            output_size = ()
+        else:
+            if not isinstance(output_size, tuple):
+                raise ValueError(f"For MaxUnpool3d, output_size must be tuple, but type {type(output_size)}.")
+        out = ops.max_unpool3d(x, indices, self.kernel_size, stride=self.stride, padding=self.padding,
+                               output_size=output_size)
+        return out