mindspore 2.0.0rc1__cp38-none-any.whl → 2.2.0__cp38-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +2 -2
- mindspore/__init__.py +5 -2
- mindspore/_akg/akg/build_module.py +5 -6
- mindspore/_akg/akg/composite/build_module.py +49 -16
- mindspore/_akg/akg/composite/split_stitch.py +10 -11
- mindspore/_akg/akg/config/repository.json +195 -0
- mindspore/_akg/akg/global_configs.py +5 -1
- mindspore/_akg/akg/ms/info_version_adapt.py +67 -1
- mindspore/_akg/akg/tvm/api.py +4 -3
- mindspore/_akg/akg/tvm/autotvm/__init__.py +1 -2
- mindspore/_akg/akg/tvm/autotvm/graph_tuner/base_graph_tuner.py +1 -5
- mindspore/_akg/akg/tvm/autotvm/measure/__init__.py +1 -1
- mindspore/_akg/akg/tvm/autotvm/measure/measure.py +1 -10
- mindspore/_akg/akg/tvm/autotvm/measure/measure_methods.py +1 -372
- mindspore/_akg/akg/tvm/build_module.py +16 -1
- mindspore/_akg/akg/tvm/contrib/graph_runtime.py +0 -53
- mindspore/_akg/akg/tvm/hybrid/parser.py +7 -6
- mindspore/_akg/akg/tvm/ir_builder.py +1 -1
- mindspore/_akg/akg/tvm/module.py +1 -2
- mindspore/_akg/akg/tvm/stmt.py +2 -2
- mindspore/_akg/akg/utils/composite_op_helper.py +9 -10
- mindspore/_akg/akg/utils/kernel_exec.py +58 -260
- mindspore/_akg/akg/utils/op_dsl.py +17 -1
- mindspore/_akg/akg/utils/result_analysis.py +4 -24
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +198 -0
- mindspore/_c_dataengine.cpython-38-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-38-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-38-aarch64-linux-gnu.so +0 -0
- mindspore/_check_jit_forbidden_api.py +5 -1
- mindspore/_checkparam.py +79 -62
- mindspore/_extends/graph_kernel/__init__.py +0 -1
- mindspore/_extends/graph_kernel/model/graph_split.py +2 -0
- mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
- mindspore/_extends/graph_kernel/splitter.py +1 -9
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +128 -21
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +2 -2
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +18 -13
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +13 -9
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
- mindspore/_extends/parse/__init__.py +19 -17
- mindspore/_extends/parse/namespace.py +7 -36
- mindspore/_extends/parse/parser.py +375 -189
- mindspore/_extends/parse/resources.py +36 -41
- mindspore/_extends/parse/standard_method.py +350 -245
- mindspore/_extends/parse/trope.py +2 -12
- mindspore/_extends/remote/kernel_build_server.py +24 -7
- mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
- mindspore/_install_custom.py +43 -0
- mindspore/_mindspore_offline_debug.cpython-38-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +85 -19
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/base.py +2 -2
- mindspore/boost/boost.py +27 -32
- mindspore/boost/boost_cell_wrapper.py +37 -13
- mindspore/boost/grad_accumulation.py +1 -1
- mindspore/boost/grad_freeze.py +34 -6
- mindspore/boost/group_loss_scale_manager.py +15 -14
- mindspore/boost/less_batch_normalization.py +28 -3
- mindspore/common/__init__.py +15 -11
- mindspore/common/_auto_dynamic.py +68 -0
- mindspore/common/_jit_fallback_utils.py +111 -0
- mindspore/common/_register_for_adapter.py +17 -5
- mindspore/common/_register_for_tensor.py +2 -2
- mindspore/common/_stub_tensor.py +18 -15
- mindspore/common/_utils.py +31 -7
- mindspore/common/api.py +269 -101
- mindspore/common/auto_dynamic_shape.py +498 -0
- mindspore/common/dtype.py +61 -21
- mindspore/common/dump.py +9 -7
- mindspore/common/initializer.py +106 -76
- mindspore/common/jit_config.py +35 -14
- mindspore/common/lazy_inline.py +187 -0
- mindspore/common/mindir_util.py +101 -0
- mindspore/common/mutable.py +10 -13
- mindspore/common/parameter.py +246 -55
- mindspore/common/seed.py +13 -7
- mindspore/common/sparse_tensor.py +29 -33
- mindspore/common/tensor.py +907 -251
- mindspore/communication/__init__.py +7 -4
- mindspore/communication/_comm_helper.py +84 -4
- mindspore/communication/management.py +160 -88
- mindspore/config/op_info.config +99 -75
- mindspore/config/super_bar_config.json +36 -4
- mindspore/context.py +526 -219
- mindspore/dataset/__init__.py +9 -46
- mindspore/dataset/audio/__init__.py +4 -19
- mindspore/dataset/audio/transforms.py +545 -233
- mindspore/dataset/audio/utils.py +21 -18
- mindspore/dataset/callback/ds_callback.py +42 -13
- mindspore/dataset/core/config.py +158 -100
- mindspore/dataset/core/validator_helpers.py +1 -63
- mindspore/dataset/debug/debug_hook.py +45 -13
- mindspore/dataset/debug/pre_defined_hook.py +5 -5
- mindspore/dataset/engine/__init__.py +0 -5
- mindspore/dataset/engine/cache_client.py +38 -15
- mindspore/dataset/engine/datasets.py +615 -278
- mindspore/dataset/engine/datasets_audio.py +154 -283
- mindspore/dataset/engine/datasets_standard_format.py +104 -116
- mindspore/dataset/engine/datasets_text.py +443 -326
- mindspore/dataset/engine/datasets_user_defined.py +251 -164
- mindspore/dataset/engine/datasets_vision.py +839 -1443
- mindspore/dataset/engine/iterators.py +11 -4
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +7 -3
- mindspore/dataset/engine/obs/util.py +3 -0
- mindspore/dataset/engine/offload.py +6 -6
- mindspore/dataset/engine/queue.py +15 -14
- mindspore/dataset/engine/samplers.py +39 -23
- mindspore/dataset/engine/serializer_deserializer.py +22 -6
- mindspore/dataset/engine/validators.py +21 -331
- mindspore/dataset/text/__init__.py +5 -33
- mindspore/dataset/text/transforms.py +334 -165
- mindspore/dataset/text/utils.py +215 -145
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/c_transforms.py +3 -2
- mindspore/dataset/transforms/py_transforms_util.py +40 -12
- mindspore/dataset/transforms/transforms.py +174 -71
- mindspore/dataset/utils/browse_dataset.py +25 -17
- mindspore/dataset/utils/line_reader.py +24 -21
- mindspore/dataset/vision/__init__.py +5 -26
- mindspore/dataset/vision/c_transforms.py +177 -165
- mindspore/dataset/vision/py_transforms.py +114 -119
- mindspore/dataset/vision/py_transforms_util.py +54 -51
- mindspore/dataset/vision/transforms.py +1127 -381
- mindspore/dataset/vision/utils.py +54 -38
- mindspore/dataset/vision/validators.py +12 -2
- mindspore/experimental/map_parameter.py +38 -4
- mindspore/{dataset/datapreprocess → experimental/optim}/__init__.py +14 -4
- mindspore/experimental/optim/adam.py +192 -0
- mindspore/experimental/optim/adamw.py +181 -0
- mindspore/experimental/optim/lr_scheduler.py +1427 -0
- mindspore/experimental/optim/optimizer.py +252 -0
- mindspore/experimental/optim/sgd.py +147 -0
- mindspore/gen_ops.py +273 -0
- mindspore/include/OWNERS +1 -2
- mindspore/include/api/context.h +21 -1
- mindspore/include/api/data_type.h +2 -1
- mindspore/include/api/graph.h +0 -15
- mindspore/include/api/kernel.h +2 -0
- mindspore/include/api/kernel_api.h +37 -12
- mindspore/include/api/model.h +29 -42
- mindspore/include/api/model_group.h +14 -3
- mindspore/include/api/model_parallel_runner.h +18 -2
- mindspore/include/api/serialization.h +26 -0
- mindspore/include/api/status.h +1 -0
- mindspore/include/api/types.h +38 -4
- mindspore/include/c_api/ms/abstract.h +67 -0
- mindspore/include/c_api/ms/attribute.h +197 -0
- mindspore/include/c_api/ms/base/handle_types.h +43 -0
- mindspore/include/c_api/ms/base/macros.h +32 -0
- mindspore/include/c_api/ms/base/status.h +33 -0
- mindspore/include/c_api/ms/base/types.h +282 -0
- mindspore/include/c_api/ms/context.h +102 -0
- mindspore/include/c_api/ms/graph.h +160 -0
- mindspore/include/c_api/ms/node.h +606 -0
- mindspore/include/c_api/ms/tensor.h +161 -0
- mindspore/include/c_api/ms/value.h +84 -0
- mindspore/include/c_api/status_c.h +3 -0
- mindspore/include/dataset/constants.h +6 -12
- mindspore/include/dataset/execute.h +23 -13
- mindspore/include/dataset/text.h +26 -26
- mindspore/include/dataset/transforms.h +25 -31
- mindspore/include/dataset/vision.h +60 -60
- mindspore/include/dataset/vision_ascend.h +5 -6
- mindspore/include/dataset/vision_lite.h +17 -17
- mindspore/include/mindapi/base/format.h +0 -1
- mindspore/include/mindapi/base/type_id.h +2 -1
- mindspore/include/mindapi/base/types.h +5 -1
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libjemalloc.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +9000 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/log.py +9 -6
- mindspore/mindrecord/filereader.py +33 -4
- mindspore/mindrecord/filewriter.py +70 -35
- mindspore/mindrecord/mindpage.py +40 -34
- mindspore/mindrecord/shardreader.py +1 -1
- mindspore/mindrecord/shardsegment.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +25 -18
- mindspore/mindrecord/tools/cifar10_to_mr.py +25 -18
- mindspore/mindrecord/tools/csv_to_mr.py +29 -13
- mindspore/mindrecord/tools/imagenet_to_mr.py +24 -10
- mindspore/mindrecord/tools/mnist_to_mr.py +24 -11
- mindspore/mindrecord/tools/tfrecord_to_mr.py +31 -26
- mindspore/nn/cell.py +463 -169
- mindspore/nn/dynamic_lr.py +47 -43
- mindspore/nn/layer/activation.py +225 -82
- mindspore/nn/layer/basic.py +121 -79
- mindspore/nn/layer/channel_shuffle.py +21 -21
- mindspore/nn/layer/combined.py +33 -26
- mindspore/nn/layer/container.py +277 -22
- mindspore/nn/layer/conv.py +441 -304
- mindspore/nn/layer/dense.py +19 -13
- mindspore/nn/layer/embedding.py +62 -49
- mindspore/nn/layer/flash_attention.py +264 -0
- mindspore/nn/layer/image.py +50 -39
- mindspore/nn/layer/math.py +62 -51
- mindspore/nn/layer/normalization.py +219 -167
- mindspore/nn/layer/padding.py +58 -70
- mindspore/nn/layer/pooling.py +334 -287
- mindspore/nn/layer/rnn_cells.py +53 -38
- mindspore/nn/layer/rnns.py +59 -56
- mindspore/nn/layer/thor_layer.py +52 -44
- mindspore/nn/layer/timedistributed.py +6 -4
- mindspore/nn/layer/transformer.py +284 -164
- mindspore/nn/learning_rate_schedule.py +34 -25
- mindspore/nn/loss/__init__.py +3 -2
- mindspore/nn/loss/loss.py +554 -311
- mindspore/nn/optim/ada_grad.py +12 -9
- mindspore/nn/optim/adadelta.py +14 -11
- mindspore/nn/optim/adafactor.py +19 -16
- mindspore/nn/optim/adam.py +62 -47
- mindspore/nn/optim/adamax.py +13 -10
- mindspore/nn/optim/adasum.py +12 -8
- mindspore/nn/optim/asgd.py +10 -9
- mindspore/nn/optim/ftrl.py +20 -17
- mindspore/nn/optim/lamb.py +16 -12
- mindspore/nn/optim/lars.py +8 -6
- mindspore/nn/optim/lazyadam.py +25 -20
- mindspore/nn/optim/momentum.py +10 -7
- mindspore/nn/optim/optimizer.py +61 -9
- mindspore/nn/optim/proximal_ada_grad.py +14 -13
- mindspore/nn/optim/rmsprop.py +17 -13
- mindspore/nn/optim/rprop.py +30 -17
- mindspore/nn/optim/sgd.py +40 -23
- mindspore/nn/optim/thor.py +24 -26
- mindspore/nn/probability/bijector/bijector.py +11 -11
- mindspore/nn/probability/bijector/exp.py +1 -1
- mindspore/nn/probability/bijector/gumbel_cdf.py +3 -3
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/power_transform.py +29 -29
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +5 -5
- mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +4 -2
- mindspore/nn/probability/bnn_layers/conv_variational.py +13 -13
- mindspore/nn/probability/bnn_layers/dense_variational.py +12 -12
- mindspore/nn/probability/bnn_layers/layer_distribution.py +9 -8
- mindspore/nn/probability/distribution/_utils/custom_ops.py +19 -3
- mindspore/nn/probability/distribution/_utils/utils.py +1 -1
- mindspore/nn/probability/distribution/bernoulli.py +9 -9
- mindspore/nn/probability/distribution/beta.py +8 -8
- mindspore/nn/probability/distribution/categorical.py +23 -15
- mindspore/nn/probability/distribution/cauchy.py +5 -6
- mindspore/nn/probability/distribution/distribution.py +3 -3
- mindspore/nn/probability/distribution/exponential.py +4 -4
- mindspore/nn/probability/distribution/gamma.py +10 -10
- mindspore/nn/probability/distribution/geometric.py +8 -8
- mindspore/nn/probability/distribution/gumbel.py +8 -9
- mindspore/nn/probability/distribution/half_normal.py +5 -5
- mindspore/nn/probability/distribution/laplace.py +5 -5
- mindspore/nn/probability/distribution/log_normal.py +12 -11
- mindspore/nn/probability/distribution/logistic.py +8 -8
- mindspore/nn/probability/distribution/normal.py +6 -5
- mindspore/nn/probability/distribution/poisson.py +10 -11
- mindspore/nn/probability/distribution/student_t.py +8 -9
- mindspore/nn/probability/distribution/transformed_distribution.py +5 -5
- mindspore/nn/probability/distribution/uniform.py +11 -11
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +9 -9
- mindspore/nn/wrap/cell_wrapper.py +188 -63
- mindspore/nn/wrap/grad_reducer.py +21 -12
- mindspore/nn/wrap/loss_scale.py +136 -49
- mindspore/numpy/__init__.py +4 -4
- mindspore/numpy/array_creations.py +55 -56
- mindspore/numpy/array_ops.py +134 -35
- mindspore/numpy/logic_ops.py +66 -20
- mindspore/numpy/math_ops.py +142 -139
- mindspore/numpy/utils_const.py +2 -2
- mindspore/offline_debug/convert_async.py +2 -2
- mindspore/ops/_grad_experimental/__init__.py +7 -5
- mindspore/ops/_grad_experimental/grad_array_ops.py +231 -348
- mindspore/ops/{_grad → _grad_experimental}/grad_base.py +1 -33
- mindspore/ops/{_grad → _grad_experimental}/grad_comm_ops.py +25 -13
- mindspore/ops/{_grad/__init__.py → _grad_experimental/grad_debug_ops.py} +15 -7
- mindspore/ops/{_grad → _grad_experimental}/grad_implementations.py +17 -11
- mindspore/ops/_grad_experimental/grad_inner_ops.py +33 -52
- mindspore/ops/_grad_experimental/grad_math_ops.py +151 -1224
- mindspore/ops/_grad_experimental/grad_nn_ops.py +141 -414
- mindspore/ops/{_grad → _grad_experimental}/grad_quant_ops.py +10 -6
- mindspore/ops/_grad_experimental/grad_sparse.py +317 -2
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -13
- mindspore/ops/{_grad → _grad_experimental}/taylor_rule.py +1 -1
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/flash_attention/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +406 -0
- mindspore/{_extends/graph_kernel/expanders/complex/__init__.py → ops/_op_impl/_custom_op/flash_attention/constants.py} +27 -8
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +467 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +563 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +193 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +435 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +45 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +67 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +62 -0
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/aicpu/__init__.py +41 -1
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/cast.py +52 -0
- mindspore/ops/_op_impl/aicpu/coalesce.py +2 -0
- mindspore/ops/_op_impl/aicpu/col2im.py +3 -1
- mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
- mindspore/ops/_op_impl/aicpu/dropout_genmask.py +6 -0
- mindspore/ops/_op_impl/aicpu/eps.py +32 -0
- mindspore/ops/_op_impl/aicpu/eye.py +4 -4
- mindspore/ops/_op_impl/aicpu/fft_with_size.py +6 -0
- mindspore/ops/_op_impl/aicpu/fill_diagonal.py +5 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
- mindspore/ops/_op_impl/aicpu/im2col.py +3 -5
- mindspore/ops/_op_impl/aicpu/lgamma.py +1 -0
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
- mindspore/ops/_op_impl/aicpu/lu.py +39 -0
- mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/masked_scatter.py +1 -0
- mindspore/ops/_op_impl/aicpu/masked_select_grad.py +3 -0
- mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
- mindspore/ops/_op_impl/aicpu/matrix_power.py +6 -1
- mindspore/ops/_op_impl/aicpu/median.py +1 -0
- mindspore/ops/_op_impl/aicpu/multinomial.py +9 -9
- mindspore/ops/_op_impl/aicpu/not_equal.py +0 -5
- mindspore/ops/_op_impl/aicpu/pad_v3.py +3 -1
- mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +2 -0
- mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
- mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
- mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
- mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
- mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +0 -6
- mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +0 -7
- mindspore/ops/_op_impl/aicpu/scatter_nd.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
- mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
- mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
- mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
- mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -4
- mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -4
- mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
- mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
- mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
- mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
- mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +14 -6
- mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +22 -8
- mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +11 -6
- mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +21 -10
- mindspore/ops/_op_impl/tbe/__init__.py +6 -4
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool.py +2 -2
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +3 -3
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +4 -4
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +2 -2
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +3 -3
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +3 -3
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +2 -2
- mindspore/ops/_op_impl/tbe/bn_infer.py +2 -2
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +3 -2
- mindspore/ops/_op_impl/tbe/broadcast_to.py +1 -1
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +3 -3
- mindspore/ops/_op_impl/tbe/expand_dims.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_v2.py +56 -0
- mindspore/ops/_op_impl/tbe/im2col.py +4 -4
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
- mindspore/ops/_op_impl/tbe/mem_set.py +38 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +3 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +2 -2
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
- mindspore/ops/_primitive_cache.py +1 -1
- mindspore/ops/_tracefunc.py +241 -0
- mindspore/ops/_utils/utils.py +10 -2
- mindspore/ops/_vmap/vmap_array_ops.py +5 -3
- mindspore/ops/_vmap/vmap_base.py +5 -4
- mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +11 -6
- mindspore/ops/_vmap/vmap_math_ops.py +5 -2
- mindspore/ops/_vmap/vmap_nn_ops.py +135 -11
- mindspore/ops/arg_dtype_cast.py +54 -0
- mindspore/ops/composite/__init__.py +7 -5
- mindspore/ops/composite/base.py +78 -34
- mindspore/ops/composite/math_ops.py +5 -695
- mindspore/ops/composite/multitype_ops/_compile_utils.py +403 -97
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +28 -22
- mindspore/ops/composite/multitype_ops/add_impl.py +69 -7
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +48 -10
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/mod_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +10 -7
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/uadd_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
- mindspore/ops/deprecated.py +304 -0
- mindspore/ops/function/__init__.py +41 -4
- mindspore/ops/function/array_func.py +1108 -467
- mindspore/ops/function/clip_func.py +94 -27
- mindspore/ops/function/debug_func.py +3 -1
- mindspore/ops/function/grad/grad_func.py +82 -73
- mindspore/ops/function/image_func.py +28 -12
- mindspore/ops/function/linalg_func.py +135 -39
- mindspore/ops/function/math_func.py +3779 -894
- mindspore/ops/function/nn_func.py +1584 -657
- mindspore/ops/function/parameter_func.py +13 -3
- mindspore/ops/function/random_func.py +247 -153
- mindspore/ops/function/sparse_func.py +14 -11
- mindspore/ops/function/sparse_unary_func.py +173 -47
- mindspore/ops/function/spectral_func.py +8 -4
- mindspore/ops/function/vmap_func.py +8 -7
- mindspore/ops/functional.py +47 -16
- mindspore/ops/op_info_register.py +346 -86
- mindspore/ops/operations/__init__.py +38 -22
- mindspore/ops/operations/_grad_ops.py +145 -149
- mindspore/ops/operations/_inner_ops.py +298 -56
- mindspore/ops/operations/_ms_kernel.py +3 -3
- mindspore/ops/operations/_quant_ops.py +24 -28
- mindspore/ops/operations/_rl_inner_ops.py +9 -7
- mindspore/ops/operations/_scalar_ops.py +115 -0
- mindspore/ops/operations/_sequence_ops.py +148 -10
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/_thor_ops.py +2 -2
- mindspore/ops/operations/array_ops.py +1239 -561
- mindspore/ops/operations/comm_ops.py +166 -90
- mindspore/ops/operations/control_ops.py +3 -3
- mindspore/ops/operations/custom_ops.py +124 -102
- mindspore/ops/operations/debug_ops.py +24 -11
- mindspore/ops/operations/image_ops.py +86 -71
- mindspore/ops/operations/inner_ops.py +18 -13
- mindspore/ops/operations/linalg_ops.py +30 -11
- mindspore/ops/operations/math_ops.py +1730 -435
- mindspore/ops/operations/nn_ops.py +1953 -943
- mindspore/ops/operations/other_ops.py +65 -43
- mindspore/ops/operations/random_ops.py +258 -98
- mindspore/ops/operations/rl_ops.py +4 -36
- mindspore/ops/operations/sparse_ops.py +38 -33
- mindspore/ops/operations/spectral_ops.py +8 -4
- mindspore/ops/primitive.py +66 -44
- mindspore/ops/signature.py +5 -5
- mindspore/parallel/_auto_parallel_context.py +80 -19
- mindspore/parallel/_cost_model_context.py +42 -0
- mindspore/parallel/_offload_context.py +162 -72
- mindspore/parallel/_parallel_serialization.py +2 -2
- mindspore/parallel/_ps_context.py +16 -4
- mindspore/parallel/_recovery_context.py +2 -1
- mindspore/parallel/_tensor.py +15 -13
- mindspore/parallel/_transformer/layers.py +8 -6
- mindspore/parallel/_transformer/loss.py +1 -0
- mindspore/parallel/_transformer/moe.py +7 -7
- mindspore/parallel/_transformer/op_parallel_config.py +12 -1
- mindspore/parallel/_transformer/transformer.py +34 -14
- mindspore/parallel/_utils.py +36 -14
- mindspore/parallel/algo_parameter_config.py +114 -20
- mindspore/parallel/checkpoint_transform.py +16 -18
- mindspore/parallel/shard.py +16 -13
- mindspore/profiler/__init__.py +1 -1
- mindspore/profiler/common/struct_type.py +3 -3
- mindspore/profiler/common/util.py +3 -2
- mindspore/profiler/envprofiling.py +11 -4
- mindspore/profiler/parser/aicpu_data_parser.py +5 -3
- mindspore/profiler/parser/ascend_flops_generator.py +94 -0
- mindspore/profiler/parser/ascend_fpbp_generator.py +76 -0
- mindspore/profiler/parser/ascend_hccl_generator.py +288 -0
- mindspore/profiler/parser/ascend_msprof_exporter.py +213 -0
- mindspore/profiler/parser/ascend_msprof_generator.py +199 -0
- mindspore/profiler/parser/ascend_op_generator.py +276 -0
- mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
- mindspore/profiler/parser/ascend_timeline_generator.py +110 -54
- mindspore/profiler/parser/base_timeline_generator.py +11 -7
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +45 -46
- mindspore/profiler/parser/flops_parser.py +15 -11
- mindspore/profiler/parser/framework_parser.py +92 -73
- mindspore/profiler/parser/hccl_parser.py +16 -12
- mindspore/profiler/parser/integrator.py +22 -11
- mindspore/profiler/parser/memory_usage_parser.py +36 -11
- mindspore/profiler/parser/minddata_analyzer.py +12 -14
- mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_parser.py +8 -4
- mindspore/profiler/parser/op_intermediate_parser.py +5 -2
- mindspore/profiler/parser/optime_parser.py +1 -1
- mindspore/profiler/parser/profiler_info.py +4 -5
- mindspore/profiler/parser/step_trace_parser.py +11 -14
- mindspore/profiler/profiling.py +678 -377
- mindspore/rewrite/api/node.py +211 -54
- mindspore/rewrite/api/node_type.py +5 -0
- mindspore/rewrite/api/pattern_engine.py +22 -23
- mindspore/rewrite/api/scoped_value.py +20 -17
- mindspore/rewrite/api/symbol_tree.py +252 -106
- mindspore/rewrite/api/tree_node_helper.py +3 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -1
- mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +97 -46
- mindspore/rewrite/common/rewrite_elog.py +5 -1
- mindspore/rewrite/namer.py +51 -51
- mindspore/rewrite/namespace.py +14 -5
- mindspore/{ops/bprop_mindir → rewrite/node}/__init__.py +9 -4
- mindspore/rewrite/node/call_function.py +79 -0
- mindspore/rewrite/node/cell_container.py +135 -0
- mindspore/rewrite/node/control_flow.py +88 -0
- mindspore/rewrite/{node.py → node/node.py} +313 -247
- mindspore/rewrite/node/node_manager.py +254 -0
- mindspore/rewrite/node/node_topological_manager.py +243 -0
- mindspore/rewrite/parsers/arguments_parser.py +22 -21
- mindspore/rewrite/parsers/assign_parser.py +225 -239
- mindspore/rewrite/parsers/attribute_parser.py +9 -7
- mindspore/rewrite/parsers/class_def_parser.py +179 -218
- mindspore/rewrite/parsers/constant_parser.py +9 -6
- mindspore/rewrite/parsers/container_parser.py +9 -7
- mindspore/rewrite/parsers/for_parser.py +36 -15
- mindspore/rewrite/parsers/function_def_parser.py +23 -20
- mindspore/rewrite/parsers/if_parser.py +28 -24
- mindspore/rewrite/parsers/module_parser.py +202 -25
- mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
- mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
- mindspore/rewrite/parsers/return_parser.py +6 -6
- mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
- mindspore/rewrite/sparsify/sparsify.py +4 -1
- mindspore/rewrite/sparsify/utils.py +11 -5
- mindspore/rewrite/symbol_tree.py +577 -732
- mindspore/rewrite/symbol_tree_builder.py +9 -175
- mindspore/rewrite/symbol_tree_dumper.py +2 -2
- mindspore/run_check/_check_version.py +46 -39
- mindspore/run_check/run_check.py +3 -2
- mindspore/{scipy/sparse → safeguard}/__init__.py +4 -5
- mindspore/safeguard/rewrite_obfuscation.py +517 -0
- mindspore/scipy/__init__.py +1 -1
- mindspore/scipy/linalg.py +67 -61
- mindspore/scipy/ops.py +5 -41
- mindspore/scipy/ops_grad.py +3 -2
- mindspore/scipy/ops_wrapper.py +5 -5
- mindspore/scipy/optimize/line_search.py +8 -8
- mindspore/scipy/optimize/linear_sum_assignment.py +4 -4
- mindspore/scipy/optimize/minimize.py +16 -12
- mindspore/scipy/utils.py +1 -52
- mindspore/scipy/utils_const.py +4 -4
- mindspore/train/__init__.py +4 -4
- mindspore/train/_utils.py +13 -5
- mindspore/train/amp.py +410 -148
- mindspore/train/anf_ir_pb2.py +16 -4
- mindspore/train/callback/_backup_and_restore.py +8 -11
- mindspore/train/callback/_callback.py +80 -3
- mindspore/train/callback/_checkpoint.py +82 -51
- mindspore/train/callback/_early_stop.py +12 -15
- mindspore/train/callback/_history.py +1 -1
- mindspore/train/callback/_lambda_callback.py +13 -13
- mindspore/train/callback/_landscape.py +21 -17
- mindspore/train/callback/_loss_monitor.py +9 -10
- mindspore/train/callback/_on_request_exit.py +16 -33
- mindspore/train/callback/_reduce_lr_on_plateau.py +21 -24
- mindspore/train/callback/_summary_collector.py +44 -30
- mindspore/train/callback/_time_monitor.py +62 -12
- mindspore/train/data_sink.py +10 -16
- mindspore/train/dataset_helper.py +154 -86
- mindspore/train/loss_scale_manager.py +14 -9
- mindspore/train/metrics/__init__.py +10 -2
- mindspore/train/metrics/accuracy.py +1 -1
- mindspore/train/metrics/auc.py +1 -1
- mindspore/train/metrics/bleu_score.py +2 -2
- mindspore/train/metrics/confusion_matrix.py +14 -14
- mindspore/train/metrics/cosine_similarity.py +3 -3
- mindspore/train/metrics/dice.py +1 -1
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +8 -6
- mindspore/train/metrics/mean_surface_distance.py +5 -4
- mindspore/train/metrics/metric.py +49 -17
- mindspore/train/metrics/occlusion_sensitivity.py +4 -4
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +2 -2
- mindspore/train/metrics/recall.py +2 -3
- mindspore/train/metrics/roc.py +7 -7
- mindspore/train/metrics/root_mean_square_surface_distance.py +5 -4
- mindspore/train/metrics/topk.py +7 -4
- mindspore/train/mind_ir_pb2.py +193 -48
- mindspore/train/model.py +377 -133
- mindspore/train/serialization.py +697 -245
- mindspore/train/summary/_summary_adapter.py +5 -2
- mindspore/train/summary/_writer_pool.py +4 -3
- mindspore/train/summary/summary_record.py +25 -23
- mindspore/train/train_thor/convert_utils.py +39 -23
- mindspore/train/train_thor/dataset_helper.py +4 -3
- mindspore/train/train_thor/model_thor.py +8 -8
- mindspore/version.py +1 -1
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/METADATA +7 -8
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/RECORD +633 -804
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/entry_points.txt +0 -1
- mindspore/_akg/akg/tvm/contrib/debugger/__init__.py +0 -16
- mindspore/_akg/akg/tvm/contrib/debugger/debug_result.py +0 -274
- mindspore/_akg/akg/tvm/contrib/debugger/debug_runtime.py +0 -259
- mindspore/_akg/akg/tvm/contrib/peak.py +0 -341
- mindspore/_akg/akg/tvm/contrib/rpc.py +0 -25
- mindspore/_akg/akg/tvm/contrib/xcode.py +0 -257
- mindspore/_akg/akg/tvm/exec/__init__.py +0 -17
- mindspore/_akg/akg/tvm/exec/autotvm_log_editor.py +0 -60
- mindspore/_akg/akg/tvm/exec/measure_peak.py +0 -48
- mindspore/_akg/akg/tvm/exec/query_rpc_tracker.py +0 -48
- mindspore/_akg/akg/tvm/exec/rpc_proxy.py +0 -98
- mindspore/_akg/akg/tvm/exec/rpc_server.py +0 -88
- mindspore/_akg/akg/tvm/exec/rpc_tracker.py +0 -62
- mindspore/_akg/akg/tvm/rpc/__init__.py +0 -29
- mindspore/_akg/akg/tvm/rpc/base.py +0 -182
- mindspore/_akg/akg/tvm/rpc/client.py +0 -436
- mindspore/_akg/akg/tvm/rpc/proxy.py +0 -595
- mindspore/_akg/akg/tvm/rpc/server.py +0 -413
- mindspore/_akg/akg/tvm/rpc/tornado_util.py +0 -121
- mindspore/_akg/akg/tvm/rpc/tracker.py +0 -431
- mindspore/_extends/graph_kernel/expander.py +0 -80
- mindspore/_extends/graph_kernel/expanders/__init__.py +0 -57
- mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
- mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
- mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
- mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
- mindspore/_extends/graph_kernel/expanders/bias_add_grad.py +0 -49
- mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
- mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
- mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
- mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
- mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
- mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
- mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
- mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
- mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
- mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
- mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
- mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
- mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
- mindspore/_extends/graph_kernel/expanders/gather.py +0 -43
- mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
- mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
- mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
- mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
- mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
- mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
- mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
- mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
- mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
- mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
- mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
- mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
- mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
- mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
- mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
- mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
- mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
- mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
- mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
- mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
- mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
- mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
- mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
- mindspore/_extends/graph_kernel/expanders/tile.py +0 -54
- mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
- mindspore/_extends/parse/jit_fallback_modules.py +0 -51
- mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
- mindspore/dataset/engine/graphdata.py +0 -1586
- mindspore/include/api/net.h +0 -142
- mindspore/ops/_grad/grad_array_ops.py +0 -1347
- mindspore/ops/_grad/grad_clip_ops.py +0 -84
- mindspore/ops/_grad/grad_debug_ops.py +0 -68
- mindspore/ops/_grad/grad_inner_ops.py +0 -235
- mindspore/ops/_grad/grad_math_ops.py +0 -1684
- mindspore/ops/_grad/grad_nn_ops.py +0 -1529
- mindspore/ops/_grad/grad_other_ops.py +0 -89
- mindspore/ops/_grad/grad_sequence_ops.py +0 -296
- mindspore/ops/_grad/grad_sparse.py +0 -323
- mindspore/ops/_grad_experimental/grad_image_ops.py +0 -249
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -195
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Argmax_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Argmin_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Assign_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +0 -150
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +0 -66
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +0 -28
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +0 -33
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +0 -306
- mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +0 -240
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +0 -247
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +0 -247
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +0 -315
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +0 -278
- mindspore/ops/bprop_mindir/DType_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +0 -58
- mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
- mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +0 -25
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +0 -18
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +0 -27
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Equal_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +0 -58
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +0 -54
- mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/GatherD_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +0 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Greater_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/IOU_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/IsInf_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/IsNan_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +0 -126
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +0 -30
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +0 -43
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Less_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +0 -74
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +0 -74
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +0 -75
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +0 -65
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +0 -27
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +0 -35
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NonZero_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +0 -82
- mindspore/ops/bprop_mindir/Range_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Rank_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +0 -60
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +0 -89
- mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +0 -52
- mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Round_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +0 -24
- mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/Select_bprop.mindir +0 -31
- mindspore/ops/bprop_mindir/Shape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Sign_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Slice_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +0 -36
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +0 -33
- mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +0 -28
- mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Split_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +0 -54
- mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +0 -95
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +0 -98
- mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +0 -66
- mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +0 -32
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +0 -38
- mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
- mindspore/rewrite/node_visitor.py +0 -44
- mindspore/rewrite/topological_manager.py +0 -203
- mindspore/scipy/sparse/linalg.py +0 -192
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/top_level.txt +0 -0
mindspore/nn/layer/pooling.py
CHANGED
@@ -96,16 +96,16 @@ class LPPool1d(Cell):
         f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}

     Args:
-        norm_type (Union[int, float]): Type of normalization, represents p in the formula, can not be 0.
+        norm_type (Union[int, float]): Type of normalization, represents :math:`p` in the formula, can not be 0.

         - if p = 1, the result is the sum of the elements within the pooling kernel(proportional to average
           pooling).
         - if p = :math:`\infty`, the result is the result of maximum pooling.

         kernel_size (int): The size of kernel window.
-        stride (int): The distance of kernel moving, an int number that represents
-
-        ceil_mode (bool): Whether to use ceil or floor to calculate output shape. Default: False.
+        stride (int): The distance of kernel moving, an int number that represents the width of movement is stride,
+            if the value is None, the default value `kernel_size` is used. Default: ``None`` .
+        ceil_mode (bool): Whether to use ceil or floor to calculate output shape. Default: ``False`` .

     Inputs:
         - **x** (Tensor) - Tensor of shape :math:`(N_{in}, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.
@@ -132,11 +132,9 @@ class LPPool1d(Cell):

     Examples:
         >>> import mindspore as ms
-        >>> import mindspore.nn as nn
-        >>> from mindspore import Tensor
         >>> import numpy as np
-        >>> a = Tensor(np.arange(2 * 3 * 4).reshape((2, 3, 4)), dtype=ms.float32)
-        >>> net = nn.LPPool1d(norm_type=1, kernel_size=3, stride=1)
+        >>> a = ms.Tensor(np.arange(2 * 3 * 4).reshape((2, 3, 4)), dtype=ms.float32)
+        >>> net = ms.nn.LPPool1d(norm_type=1, kernel_size=3, stride=1)
         >>> out = net(a)
         >>> print(out)
         [[[ 3.  6.]
@@ -170,7 +168,7 @@ class LPPool2d(Cell):
         f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}

     Args:
-        norm_type(Union[int, float]) - Type of normalization, represents p in the formula, can not be 0.
+        norm_type(Union[int, float]) - Type of normalization, represents :math:`p` in the formula, can not be 0.

         - if p = 1, the result is the sum of the elements within the pooling kernel(proportional to average
           pooling).
@@ -182,8 +180,8 @@ class LPPool2d(Cell):
         stride(Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
             the height and width of movement are both stride, or a tuple of two int numbers that
             represent height and width of movement respectively, if the value is None,
-            the default value `kernel_size` is used
-        ceil_mode(bool): Whether to use ceil or floor to calculate output shape. Default: False.
+            the default value `kernel_size` is used. Default: ``None`` .
+        ceil_mode(bool): Whether to use ceil or floor to calculate output shape. Default: ``False`` .

     Inputs:
         - **x** (Tensor) - Tensor of shape :math:`(N, C, H_{in}, W_{in})`.
@@ -213,11 +211,9 @@ class LPPool2d(Cell):

     Examples:
         >>> import mindspore as ms
-        >>> import mindspore.nn as nn
-        >>> from mindspore import Tensor
         >>> import numpy as np
-        >>> a = Tensor(np.arange(2 * 3 * 4 * 5).reshape((2, 3, 4, 5)), dtype=ms.float32)
-        >>> net = nn.LPPool2d(norm_type=1, kernel_size=3, stride=1)
+        >>> a = ms.Tensor(np.arange(2 * 3 * 4 * 5).reshape((2, 3, 4, 5)), dtype=ms.float32)
+        >>> net = ms.nn.LPPool2d(norm_type=1, kernel_size=3, stride=1)
         >>> out = net(a)
         >>> print(out)
         [[[[ 54.  63.  72.]
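Worth noting on the LPPool hunks above: with `norm_type=1`, the documented formula :math:`f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}` reduces to a plain window sum, which is exactly what the docstring example prints. Below is a minimal NumPy sketch of that formula, not MindSpore code; it assumes only the shapes and values shown in the example.

```python
# Hypothetical NumPy re-implementation of the documented LPPool1d formula
# f(X) = (sum_{x in X} x**p) ** (1/p); with norm_type=1 each output element
# is simply the sum over one pooling window.
import numpy as np

a = np.arange(2 * 3 * 4).reshape((2, 3, 4)).astype(np.float32)
kernel_size, stride, norm_type = 3, 1, 1

# Slide the window along the last axis and apply the p-norm per window.
starts = range(0, a.shape[-1] - kernel_size + 1, stride)
out = np.stack(
    [(a[..., i:i + kernel_size] ** norm_type).sum(axis=-1) ** (1.0 / norm_type)
     for i in starts],
    axis=-1,
)
print(out[0, 0])  # [3. 6.] -- the first row of the docstring example's output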
@@ -295,24 +291,29 @@ class MaxPool3d(_PoolNd):
|
|
|
295
291
|
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
|
|
296
292
|
is an int number or a single element tuple that represents depth, height and width of the kernel, or a tuple
|
|
297
293
|
of three int numbers that represent depth, height and width respectively.
|
|
298
|
-
The value must be a positive integer. Default: 1.
|
|
294
|
+
The value must be a positive integer. Default: ``1`` .
|
|
299
295
|
stride (Union[int, tuple[int]]): The moving stride of pooling operation, an int number or a single element tuple
|
|
300
296
|
that represents the moving stride of pooling kernel in the directions of depth, height and the width,
|
|
301
297
|
or a tuple of three int numbers that represent depth, height and width of movement respectively.
|
|
302
298
|
The value must be a positive integer. If the value is None, the default value `kernel_size` is used.
|
|
303
|
-
Default: 1.
|
|
304
|
-
pad_mode (str):
|
|
305
|
-
Default: "valid".
|
|
306
|
-
|
|
307
|
-
- same
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
299
|
+
Default: ``1`` .
|
|
300
|
+
pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
|
|
301
|
+
``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"valid"`` .
|
|
302
|
+
|
|
303
|
+
- ``"same"``: Pad the input around its depth/height/width dimension so that the shape of input and output
|
|
304
|
+
are the same when `stride` is set to ``1``.
|
|
305
|
+
The amount of padding to is calculated by the operator internally. If the amount is even,
|
|
306
|
+
it isuniformly distributed around the input, if it is odd, the excess amount goes
|
|
307
|
+
to the front/right/bottom side.
|
|
308
|
+
If this mode is set, `padding` must be 0.
|
|
309
|
+
- ``"valid"``: No padding is applied to the input, and the output returns the maximum
|
|
310
|
+
+ possible depth, height and width. Extra pixels that could not complete a full stride will
+ be discarded. If this mode is set, `padding` must be 0.
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+   in the depth, height and width dimension is determined by the `padding` parameter.
+   If this mode is set, `padding` must be greater than or equal to 0.

- padding (Union(int, tuple[int], list[int])): Pooling padding value. Default: 0.
+ padding (Union(int, tuple[int], list[int])): Pooling padding value. Default: ``0`` .
  `padding` can only be an integer or a tuple/list containing one or three integers.
  If `padding` is an integer or a tuple/list containing one integer, it will be padded in six directions of
  front, back, top, bottom, left and right of the input. If `padding` is a tuple/list containing three
@@ -320,10 +321,10 @@ class MaxPool3d(_PoolNd):
  times, and left and right of the input `padding[2]` times.
  dilation (Union(int, tuple[int])): The spacing between the elements of the kernel in convolution,
  used to increase the receptive field of the pooling operation. If it is a tuple, it must contain one or
- three integers. Default: 1.
- return_indices (bool): If True, output is a Tuple of 2 Tensors, representing the maxpool result and where
- the max values are generated. Otherwise, only the maxpool result is returned. Default: False.
- ceil_mode (bool): Whether to use ceil or floor to calculate output shape. Default: False.
+ three integers. Default: ``1`` .
+ return_indices (bool): If ``True`` , output is a Tuple of 2 Tensors, representing the maxpool result and where
+ the max values are generated. Otherwise, only the maxpool result is returned. Default: ``False`` .
+ ceil_mode (bool): Whether to use ceil or floor to calculate output shape. Default: ``False`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N_{in}, C_{in}, D_{in}, H_{in}, W_{in})` or
@@ -341,7 +342,7 @@ class MaxPool3d(_PoolNd):
  :math:`(C_{out}, D_{out}, H_{out}, W_{out})`. It has the same data type as `x`.
  - **argmax** (Tensor) - Index corresponding to the maximum value. Data type is int64.

- If `pad_mode` is in
+ If `pad_mode` is set to ``"pad"``, the output shape calculation formula is as follows:

  .. math::
  D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0] \times
@@ -360,9 +361,9 @@ class MaxPool3d(_PoolNd):
  TypeError: If `kernel_size` , `stride` , `padding` or `dilation` is neither an int nor a tuple.
  ValueError: If `kernel_size` or `stride` is less than 1.
  ValueError: If the `padding` parameter is neither an integer nor a tuple of length 3.
- ValueError: If `pad_mode` is not set to
+ ValueError: If `pad_mode` is not ``"pad"`` while `return_indices` is set to ``True`` or `dilation` is set to a value
  other than 1.
- ValueError: If `padding` is non-zero when `pad_mode` is not
+ ValueError: If `padding` is non-zero when `pad_mode` is not ``"pad"``.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -373,13 +374,13 @@ class MaxPool3d(_PoolNd):
  >>> import numpy as np
  >>> np_x = np.random.randint(0, 10, [5, 3, 4, 6, 7])
  >>> x = Tensor(np_x, ms.float32)
- >>> pool1 = nn.MaxPool3d(kernel_size=2, stride=1, pad_mode=
+ >>> pool1 = nn.MaxPool3d(kernel_size=2, stride=1, pad_mode="pad", padding=1, dilation=3, return_indices=True)
  >>> output = pool1(x)
  >>> print(output[0].shape)
  (5, 3, 3, 5, 6)
  >>> print(output[1].shape)
  (5, 3, 3, 5, 6)
- >>> pool2 = nn.MaxPool3d(kernel_size=2, stride=1, pad_mode=
+ >>> pool2 = nn.MaxPool3d(kernel_size=2, stride=1, pad_mode="pad", padding=1, dilation=3, return_indices=False)
  >>> output2 = pool2(x)
  >>> print(output2.shape)
  (5, 3, 3, 5, 6)
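The ``"pad"``-mode output-shape formula restored above can be sanity-checked with plain Python. A quick sketch (pure arithmetic, independent of the diff; no MindSpore required) reproducing the doctest case kernel_size=2, stride=1, padding=1, dilation=3:

import math

def maxpool_out_dim(size, kernel, stride, padding, dilation):
    # floor((L + 2*padding - dilation*(kernel - 1) - 1) / stride + 1), per the docstring formula
    return math.floor((size + 2 * padding - dilation * (kernel - 1) - 1) / stride + 1)

# Input spatial dims (4, 6, 7) with kernel_size=2, stride=1, padding=1, dilation=3
print([maxpool_out_dim(s, 2, 1, 1, 3) for s in (4, 6, 7)])  # [3, 5, 6], i.e. output (5, 3, 3, 5, 6)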
@@ -437,34 +438,39 @@ class MaxPool2d(_PoolNd):
  kernel_size (Union[int, tuple[int]]): The size of kernel used to take the max value,
  is an int number or a single element tuple that represents height and width are both kernel_size,
  or a tuple of two int numbers that represent height and width respectively.
- Default: 1.
+ Default: ``1`` .
  stride (Union[int, tuple[int]]): The distance of kernel moving, an int number or a single element tuple that
  represents the height and width of movement are both stride, or a tuple of two int numbers that
- represent height and width of movement respectively. Default: 1.
- pad_mode (str):
- Default: "valid".
- - same
+ represent height and width of movement respectively. Default: ``1`` .
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"valid"`` .
+
+ - ``"same"``: Pad the input around its edges so that the shape of input and output
+   are the same when `stride` is set to ``1``.
+   The amount of padding is calculated by the operator internally. If the amount is even, it is
+   uniformly distributed around the input; if it is odd, the excess amount goes to the right/bottom side.
+   If this mode is set, `padding` must be 0.
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+   possible height and width. Extra pixels that could not complete a full stride will
+   be discarded. If this mode is set, `padding` must be 0.
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+   in the height and width directions is determined by the `padding` parameter.
+   If this mode is set, `padding` must be greater than or equal to 0.

- padding (Union(int, tuple[int], list[int])): Specifies the padding value of the pooling operation.
- `padding` can only be an integer or a tuple/list containing one or two integers. If
- or a tuple/list containing one integer, it will be padded `padding` times in the
- input. If `padding` is a tuple/list containing two integers, it will be padded
- up-down direction of the input and `padding[1]` times in the left-right direction
+ padding (Union(int, tuple[int], list[int])): Specifies the padding value of the pooling operation.
+ Default: ``0`` . `padding` can only be an integer or a tuple/list containing one or two integers. If
+ `padding` is an integer or a tuple/list containing one integer, it will be padded `padding` times in the
+ four directions of the input. If `padding` is a tuple/list containing two integers, it will be padded
+ `padding[0]` times in the up-down direction of the input and `padding[1]` times in the left-right direction
+ of the input.
  dilation (Union(int, tuple[int])): The spacing between the elements of the kernel in convolution,
  used to increase the receptive field of the pooling operation. If it is a tuple, it must contain one or two
- integers. Default: 1.
- return_indices (bool): If True, the function will return both the result of max pooling and the indices of
- max elements. Default: False.
- ceil_mode (bool): If True, use ceil to compute the output shape instead of floor. Default: False.
- data_format (str): The optional value for data format, is 'NHWC' or 'NCHW'.
- Default: 'NCHW'.
+ integers. Default: ``1`` .
+ return_indices (bool): If ``True`` , the function will return both the result of max pooling and the indices of
+ the max elements. Default: ``False`` .
+ ceil_mode (bool): If ``True`` , use ceil to compute the output shape instead of floor. Default: ``False`` .
+ data_format (str): The optional value for data format, is ``'NHWC'`` or ``'NCHW'`` .
+ Default: ``'NCHW'`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N,C_{in},H_{in},W_{in})` or :math:`(C_{in},H_{in},W_{in})`.
@@ -492,30 +498,32 @@ class MaxPool2d(_PoolNd):

  Raises:
  TypeError: If `kernel_size` or `stride` is neither int nor tuple.
- ValueError: If `pad_mode` is neither
- ValueError: If `data_format` is neither 'NCHW' nor 'NHWC'.
+ ValueError: If `pad_mode` is neither ``"valid"`` nor ``"same"``, case-insensitive.
+ ValueError: If `data_format` is neither ``'NCHW'`` nor ``'NHWC'`` .
  ValueError: If `kernel_size` or `stride` is less than 1.
  ValueError: If length of shape of `x` is not equal to 3 or 4.
- ValueError: If `pad_mode` is not
- set to their default values.
+ ValueError: If `pad_mode` is not ``"pad"`` and the `padding`, `dilation`, `return_indices`, `ceil_mode` parameters
+ are not set to their default values.
  ValueError: If the length of the tuple/list `padding` parameter is not 2.
  ValueError: If the length of the tuple dilation parameter is not 2.
  ValueError: If dilation parameter is neither an integer nor a tuple.
- ValueError: If `pad_mode` is
- ValueError: If `padding` is non-zero when `pad_mode` is not
+ ValueError: If `pad_mode` is ``"pad"`` and `data_format` is ``'NHWC'``.
+ ValueError: If `padding` is non-zero when `pad_mode` is not ``"pad"``.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>>
- >>>
+ >>> import mindspore as ms
+ >>> import numpy as np
+ >>> pool = ms.nn.MaxPool2d(kernel_size=3, stride=1)
+ >>> x = ms.Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), ms.float32)
  >>> output = pool(x)
  >>> print(output.shape)
  (1, 2, 2, 2)
  >>> np_x = np.random.randint(0, 10, [5, 3, 4, 5])
- >>> x = Tensor(np_x,
- >>> pool2 = nn.MaxPool2d(kernel_size=2, stride=1, pad_mode=
+ >>> x = ms.Tensor(np_x, ms.float32)
+ >>> pool2 = ms.nn.MaxPool2d(kernel_size=2, stride=1, pad_mode="pad", padding=1, dilation=1, return_indices=True)
  >>> output = pool2(x)
  >>> print(output[0].shape)
  (5, 3, 5, 6)
@@ -549,7 +557,7 @@ class MaxPool2d(_PoolNd):
  else:
      self.use_pad = False
      if padding != 0 or dilation != 1 or return_indices or ceil_mode:
-         raise ValueError(f"For
+         raise ValueError(f"For MaxPool2d, the parameter 'padding', 'dilation', 'return_indices', 'ceil_mode' "
                           f"can not be set to non-default value when pad_mode is not 'pad', "
                           f"but got pad_mode:{pad_mode}.")
      self.max_pool = P.MaxPool(kernel_size=self.kernel_size,
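The guard restored in the code hunk above rejects any non-default `padding`, `dilation`, `return_indices` or `ceil_mode` unless `pad_mode` is 'pad'. A minimal standalone sketch of the same check (an illustration, not the library code itself):

def check_maxpool2d_args(pad_mode, padding=0, dilation=1, return_indices=False, ceil_mode=False):
    # Mirror of the guard above: the four extra arguments are only meaningful
    # in 'pad' mode, so any non-default value is rejected otherwise.
    if pad_mode.lower() != "pad" and (padding != 0 or dilation != 1 or return_indices or ceil_mode):
        raise ValueError(f"padding/dilation/return_indices/ceil_mode require pad_mode='pad', got {pad_mode}")

check_maxpool2d_args("pad", padding=1)      # fine
check_maxpool2d_args("valid")               # fine, all defaults
# check_maxpool2d_args("valid", padding=1)  # would raise ValueError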
@@ -594,31 +602,33 @@ class MaxPool1d(_PoolNd):
  \text{input}(N_i, C_j, s_0 \times l + n)

  Args:
- kernel_size (int): The size of kernel used to take the max value, Default: 1.
+ kernel_size (int): The size of kernel used to take the max value, Default: ``1`` .
  stride (int): The distance of kernel moving, an int number that represents
- the width of movement is stride, Default: 1.
- pad_mode (str):
- Default: "valid".
- - same
+ the width of movement is stride, Default: ``1`` .
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"valid"`` .
+
+ - ``"same"``: Pad the input at the beginning and end so that the shape of input and output
+   are the same when `stride` is set to ``1``.
+   The amount of padding is calculated by the operator internally. If the amount is even, it is
+   uniformly distributed around the input; if it is odd, the excess padding goes to the right side.
+   If this mode is set, `padding` must be 0.
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+   possible length. Extra pixels that could not complete a full stride will
+   be discarded. If this mode is set, `padding` must be 0.
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+   at the beginning and end is determined by the `padding` parameter.
+   If this mode is set, `padding` must be greater than or equal to 0.

  padding (Union(int, tuple[int], list[int])): Padding value for the pooling. Default value is 0.
  padding can only be an integer or a tuple/list containing a single integer, in which case padding times or
  padding[0] times are padded on both sides of the input.
  dilation (Union(int, tuple[int])): The spacing between the elements of the kernel in convolution,
  used to increase the receptive field of the pooling operation. If it is a tuple, its length can only be 1.
- Default: 1.
- return_indices (bool): If True, the function will return both the result of max pooling and the indices of
- max elements. Default: False.
- ceil_mode (bool): If True, use ceil to compute the output shape instead of floor. Default: False.
+ Default: ``1`` .
+ return_indices (bool): If ``True`` , the function will return both the result of max pooling and the indices of
+ the max elements. Default: ``False`` .
+ ceil_mode (bool): If ``True`` , use ceil to compute the output shape instead of floor. Default: ``False`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.
@@ -642,30 +652,33 @@ class MaxPool1d(_PoolNd):

  Raises:
  TypeError: If `kernel_size` or `strides` is not an int.
- ValueError: If `pad_mode` is not
- ValueError: If `data_format` is neither 'NCHW' nor 'NHWC'
+ ValueError: If `pad_mode` is not ``"valid"``, ``"same"`` or ``"pad"``, case-insensitive.
+ ValueError: If `data_format` is neither ``'NCHW'`` nor ``'NHWC'``.
  ValueError: If `kernel_size` or `strides` is less than 1.
  ValueError: If length of shape of `x` is not equal to 2 or 3.
- ValueError: If `pad_mode` is not
- set to their default values.
+ ValueError: If `pad_mode` is not ``"pad"`` and the `padding`, `dilation`, `return_indices`, `ceil_mode` parameters
+ are not set to their default values.
  ValueError: If the length of the tuple/list `padding` parameter is not 1.
  ValueError: If the length of the tuple dilation parameter is not 1.
  ValueError: If dilation parameter is neither an integer nor a tuple.
- ValueError: If `padding` is non-zero when `pad_mode` is not
+ ValueError: If `padding` is non-zero when `pad_mode` is not ``"pad"``.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore as ms
+ >>> import mindspore.nn as nn
+ >>> import numpy as np
  >>> mpool1 = nn.MaxPool1d(kernel_size=3, stride=1)
- >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4]),
+ >>> x = ms.Tensor(np.random.randint(0, 10, [1, 2, 4]), ms.float32)
  >>> output = mpool1(x)
  >>> result = output.shape
  >>> print(result)
  (1, 2, 2)
  >>> np_x = np.random.randint(0, 10, [5, 3, 4])
- >>> x = Tensor(np_x,
- >>> mpool2 = nn.MaxPool1d(kernel_size=2, stride=1, pad_mode=
+ >>> x = ms.Tensor(np_x, ms.float32)
+ >>> mpool2 = nn.MaxPool1d(kernel_size=2, stride=1, pad_mode="pad", padding=1, dilation=1, return_indices=True)
  >>> output = mpool2(x)
  >>> print(output[0].shape)
  (5, 3, 5)
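For the ``"same"`` mode described above, the left/right split with the excess on the right can be illustrated in plain Python. This sketch assumes the usual 'same' rule, total padding = max((ceil(L / stride) - 1) * stride + kernel - L, 0); the exact rule is not spelled out in the diff:

import math

def same_pad_1d(length, kernel, stride):
    # Assumed 'same' rule: output length is ceil(L / stride); any odd excess goes right.
    out_len = math.ceil(length / stride)
    total = max((out_len - 1) * stride + kernel - length, 0)
    left = total // 2
    return left, total - left

print(same_pad_1d(4, 3, 1))  # (1, 1): even total, split evenly
print(same_pad_1d(4, 2, 1))  # (0, 1): odd total, excess on the right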
@@ -780,25 +793,29 @@ class AvgPool3d(_PoolNd):
  Args:
  kernel_size (Union[int, tuple[int]], optional): The size of kernel used to take the average value,
  can be an int number or a single element tuple that represents depth, height and width, or a tuple of three
- positive integers that represent depth, height and width respectively. Default: 1.
+ positive integers that represent depth, height and width respectively. Default: ``1`` .
  stride (Union[int, tuple[int]], optional): The distance of kernel moving, can be a positive int or a single
  element tuple that represents the depth, height and width of movement, or a tuple of three positive integers
  that represents depth, height and width of movement respectively. If the value is None, the default value
- `kernel_size` is used. Default: 1.
- pad_mode (str, optional): Specifies the padding
- - same
+ `kernel_size` is used. Default: ``1`` .
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"valid"`` .
+
+ - ``"same"``: Pad the input around its depth/height/width dimension so that the shape of input and output
+   are the same when `stride` is set to ``1``.
+   The amount of padding is calculated by the operator internally. If the amount is even,
+   it is uniformly distributed around the input; if it is odd, the excess amount goes
+   to the front/right/bottom side.
+   If this mode is set, `padding` must be 0.
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+   possible depth, height and width. Extra pixels that could not complete a full stride will
+   be discarded. If this mode is set, `padding` must be 0.
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+   in the depth, height and width dimension is determined by the `padding` parameter.
  If this mode is set, `padding` must be greater than or equal to 0.

- padding (Union(int, tuple[int], list[int]), optional): Pooling padding value, only
- non-zero. Default: 0. Only the following paddings are supported:
+ padding (Union(int, tuple[int], list[int]), optional): Pooling padding value, only ``"pad"`` mode can be set to
+ non-zero. Default: ``0`` . Only the following paddings are supported:

  - `padding` is an integer or a tuple/list containing one integer, it will be padded in six directions of
  front, back, top, bottom, left and right of the input.
@@ -806,15 +823,18 @@ class AvgPool3d(_PoolNd):
  - `padding` is a tuple/list containing three integers, it will be padded in front and back of the input
  `padding[0]` times, up and down `padding[1]` times, and left and right of the input `padding[2]` times.

- ceil_mode (bool, optional): If True, use ceil to compute the output shape instead of floor.
+ ceil_mode (bool, optional): If ``True`` , use ceil to compute the output shape instead of floor.
+ Default: ``False`` .
+ count_include_pad (bool, optional): If ``True`` , averaging calculation will include the zero-padding.
+ Default: ``True`` .
  divisor_override (int, optional): If it is specified as a non-zero parameter, this parameter will be used as the
- divisor in the average calculation. Otherwise, `kernel_size` will be used as the divisor.
+ divisor in the average calculation. Otherwise, `kernel_size` will be used as the divisor.
+ Default: ``None`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C, D_{in}, H_{in}, W_{in})` or
  :math:`(C, D_{in}, H_{in}, W_{in})`.
- Currently support float16 and
+ Currently supports float16, float32 and float64 data types.

  Outputs:
  Tensor, with shape :math:`(N, C, D_{out}, H_{out}, W_{out})` or
@@ -846,22 +866,20 @@ class AvgPool3d(_PoolNd):
  ValueError: If element of `padding` is less than 0.
  ValueError: If length of shape of `x` is neither 4 nor 5.
  ValueError: If `divisor_override` is less than or equal to 0.
- ValueError: If `padding` is non-zero when `pad_mode` is not
+ ValueError: If `padding` is non-zero when `pad_mode` is not ``"pad"``.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> import mindspore as ms
- >>>
- >>>
- >>> pool = nn.AvgPool3d(kernel_size=3, stride=1)
- >>> x = ops.randn(1, 2, 4, 4, 5).astype(ms.float32)
+ >>> pool = ms.nn.AvgPool3d(kernel_size=3, stride=1)
+ >>> x = ms.ops.randn(1, 2, 4, 4, 5).astype(ms.float32)
  >>> output = pool(x)
  >>> print(output.shape)
  (1, 2, 2, 2, 3)
- >>> x1 = ops.randn(6, 5, 7, 7, 5).astype(ms.float32)
- >>> pool2 = nn.AvgPool3d(4, stride=2, pad_mode=
+ >>> x1 = ms.ops.randn(6, 5, 7, 7, 5).astype(ms.float32)
+ >>> pool2 = ms.nn.AvgPool3d(4, stride=2, pad_mode="pad", padding=(2, 2, 1), divisor_override=10)
  >>> output2 = pool2(x1)
  >>> print(output2.shape)
  (6, 5, 4, 4, 2)
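The `divisor_override` semantics documented above replace the window-element count in the denominator of the average. A one-line arithmetic illustration (plain Python, made-up window values):

# With divisor_override=10, each output value is window_sum / 10 rather than
# window_sum / number_of_elements.
window = [2.0, 4.0, 6.0, 8.0]
print(sum(window) / len(window))  # 5.0  -> ordinary average
print(sum(window) / 10)           # 2.0  -> with divisor_override=10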
@@ -905,33 +923,37 @@ class AvgPool2d(_PoolNd):
  kernel_size (Union[int, tuple[int]]): The size of kernel used to take the average value.
  The data type of kernel_size must be int or a single element tuple and the value represents the height
  and width, or a tuple of two int numbers that represent height and width respectively.
- Default: 1.
+ Default: ``1`` .
  stride (Union[int, tuple[int]]): The distance of kernel moving, an int number or a single element tuple that
  represents the height and width of movement are both strides, or a tuple of two int numbers that
- represent height and width of movement respectively. Default: 1.
- pad_mode (str)
- - same
+ represent height and width of movement respectively. Default: ``1`` .
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"valid"`` .
+
+ - ``"same"``: Pad the input around its edges so that the shape of input and output
+   are the same when `stride` is set to ``1``.
+   The amount of padding is calculated by the operator internally. If the amount is even, it is
+   uniformly distributed around the input; if it is odd, the excess amount goes to the right/bottom side.
+   If this mode is set, `padding` must be 0.
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+   possible height and width. Extra pixels that could not complete a full stride will
+   be discarded. If this mode is set, `padding` must be 0.
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+   in the height and width directions is determined by the `padding` parameter.
+   If this mode is set, `padding` must be greater than or equal to 0.

- padding (Union(int, tuple[int], list[int])): Pooling padding value, only
- Default: 0. `padding` can only be an integer or a tuple/list containing one or two integers.
+ padding (Union(int, tuple[int], list[int])): Pooling padding value, only ``"pad"`` mode can be set to non-zero.
+ Default: ``0`` . `padding` can only be an integer or a tuple/list containing one or two integers.
  If `padding` is an integer or a tuple/list containing one integer, it will be padded `padding` times in the
  four directions of the input. If `padding` is a tuple/list containing two integers, it will be padded
  `padding[0]` times in the up-down direction of the input and `padding[1]` times in the left-right direction
  of the input.
- ceil_mode (bool): If True, use ceil to compute the output shape instead of floor. Default: False.
- count_include_pad (bool): If True, averaging calculation will include the zero-padding. Default: True.
+ ceil_mode (bool): If ``True`` , use ceil to compute the output shape instead of floor. Default: ``False`` .
+ count_include_pad (bool): If ``True`` , averaging calculation will include the zero-padding. Default: ``True`` .
  divisor_override (int): If it is specified as a non-zero parameter, this parameter will be used as the divisor
- in the average calculation. Otherwise, `kernel_size` will be used as the divisor. Default: None.
- data_format (str): The optional value for data format, is 'NHWC' or 'NCHW'.
- Default: 'NCHW'.
+ in the average calculation. Otherwise, `kernel_size` will be used as the divisor. Default: ``None`` .
+ data_format (str): The optional value for data format, is ``'NHWC'`` or ``'NCHW'`` .
+ Default: ``'NCHW'`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`.
@@ -951,31 +973,29 @@ class AvgPool2d(_PoolNd):

  Raises:
  TypeError: If `kernel_size` or `strides` is neither int nor tuple.
- ValueError: If `pad_mode` is not
- ValueError: If `data_format` is neither 'NCHW' nor 'NHWC'
+ ValueError: If `pad_mode` is not ``"valid"`` , ``"same"`` or ``"pad"``, case-insensitive.
+ ValueError: If `data_format` is neither ``'NCHW'`` nor ``'NHWC'``.
  ValueError: If `padding`, `ceil_mode`, `count_include_pad`, or `divisor_override` is used
- or `pad_mode` is
+ or `pad_mode` is ``"pad"`` when `data_format` is 'NHWC'.
  ValueError: If `kernel_size` or `strides` is less than 1.
  ValueError: If length of `padding` tuple/list is not 1 or 2.
  ValueError: If length of shape of `x` is not equal to 3 or 4.
  ValueError: If `divisor_override` is less than or equal to 0.
- ValueError: If `padding` is non-zero when `pad_mode` is not
+ ValueError: If `padding` is non-zero when `pad_mode` is not ``"pad"``.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> import mindspore as ms
- >>> import mindspore.nn as nn
- >>> import mindspore.ops as ops
  >>> import numpy as np
- >>> pool = nn.AvgPool2d(kernel_size=3, stride=1)
+ >>> pool = ms.nn.AvgPool2d(kernel_size=3, stride=1)
  >>> x = ms.Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), ms.float32)
  >>> output = pool(x)
  >>> print(output.shape)
  (1, 2, 2, 2)
- >>> x = ops.randn(6, 6, 8, 8)
- >>> pool2 = nn.AvgPool2d(4, stride=1, pad_mode=
+ >>> x = ms.ops.randn(6, 6, 8, 8)
+ >>> pool2 = ms.nn.AvgPool2d(4, stride=1, pad_mode="pad", padding=2, divisor_override=5)
  >>> output2 = pool2(x)
  >>> print(output2.shape)
  (6, 6, 9, 9)
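The effect of `count_include_pad` can be shown on a toy window that overlaps one padded zero (a plain-Python illustration with made-up numbers):

# Window hanging over one padded zero on the left: pad + [3, 5]
values, n_pad = [3.0, 5.0], 1
print((sum(values) + 0.0 * n_pad) / (len(values) + n_pad))  # 2.666...  count_include_pad=True
print(sum(values) / len(values))                            # 4.0       count_include_pad=False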
@@ -1057,25 +1077,29 @@ class AvgPool1d(_PoolNd):
  \text{input}(N_i, C_j, s_0 \times l + n)

  Args:
- kernel_size (int): The size of kernel window used to take the average value, Default: 1.
+ kernel_size (int): The size of kernel window used to take the average value, Default: ``1`` .
  stride (int): The distance of kernel moving, an int number that represents
- the width of movement is strides, Default: 1.
- pad_mode (str)
- - same
+ the width of movement is strides, Default: ``1`` .
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"valid"`` .
+
+ - ``"same"``: Pad the input at the beginning and end so that the shape of input and output
+   are the same when `stride` is set to ``1``.
+   The amount of padding is calculated by the operator internally. If the amount is even, it is
+   uniformly distributed around the input; if it is odd, the excess padding goes to the right side.
+   If this mode is set, `padding` must be 0.
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+   possible length. Extra pixels that could not complete a full stride will
+   be discarded. If this mode is set, `padding` must be 0.
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+   at the beginning and end is determined by the `padding` parameter.
+   If this mode is set, `padding` must be greater than or equal to 0.

- padding (Union(int, tuple[int], list[int])): Pooling padding value, only
- Default: 0. padding can only be an integer or a tuple/list containing a single integer, in which case
+ padding (Union(int, tuple[int], list[int])): Pooling padding value, only ``"pad"`` mode can be set to non-zero.
+ Default: ``0`` . padding can only be an integer or a tuple/list containing a single integer, in which case
  padding times or padding[0] times are padded on both sides of the input.
- ceil_mode (bool): If True, use ceil to compute the output shape instead of floor. Default: False.
- count_include_pad (bool): If True, averaging calculation will include the zero-padding. Default: True.
+ ceil_mode (bool): If ``True`` , use ceil to compute the output shape instead of floor. Default: ``False`` .
+ count_include_pad (bool): If ``True`` , averaging calculation will include the zero-padding. Default: ``True`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.
@@ -1091,28 +1115,26 @@ class AvgPool1d(_PoolNd):

  Raises:
  TypeError: If `kernel_size` or `stride` is not an int.
- ValueError: If `pad_mode` is not
+ ValueError: If `pad_mode` is not ``"valid"`` , ``"same"`` or ``"pad"``, case-insensitive.
  ValueError: If `kernel_size` or `strides` is less than 1.
  ValueError: If length of `padding` tuple/list is not 1.
  ValueError: If length of shape of `x` is not equal to 2 or 3.
- ValueError: If `padding` is non-zero when `pad_mode` is not
+ ValueError: If `padding` is non-zero when `pad_mode` is not ``"pad"``.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> import mindspore as ms
- >>> import mindspore.nn as nn
- >>> import mindspore.ops as ops
  >>> import numpy as np
- >>> pool = nn.AvgPool1d(kernel_size=6, stride=1)
+ >>> pool = ms.nn.AvgPool1d(kernel_size=6, stride=1)
  >>> x = ms.Tensor(np.random.randint(0, 10, [1, 3, 6]), ms.float32)
  >>> output = pool(x)
  >>> result = output.shape
  >>> print(result)
  (1, 3, 1)
- >>> pool2 = nn.AvgPool1d(4, stride=1, ceil_mode=True, pad_mode=
- >>> x1 = ops.randn(6, 6, 8)
+ >>> pool2 = ms.nn.AvgPool1d(4, stride=1, ceil_mode=True, pad_mode="pad", padding=2)
+ >>> x1 = ms.ops.randn(6, 6, 8)
  >>> output = pool2(x1)
  >>> print(output.shape)
  (6, 6, 9)
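The `ceil_mode` option above swaps floor for ceil in the output-length computation. Assuming the standard formula (L + 2*padding - kernel_size)/stride + 1 (an assumption; the diff does not restate it here), the doctest shape (6, 6, 9) checks out:

import math

def avgpool1d_out_len(length, kernel, stride, padding, ceil_mode):
    # Assumed standard formula: (L + 2*padding - kernel) / stride + 1, floored or ceiled.
    frac = (length + 2 * padding - kernel) / stride + 1
    return math.ceil(frac) if ceil_mode else math.floor(frac)

# Doctest case above: AvgPool1d(4, stride=1, ceil_mode=True, pad_mode="pad", padding=2) on L_in=8
print(avgpool1d_out_len(8, 4, 1, 2, True))  # 9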
@@ -1179,23 +1201,23 @@ class AvgPool1d(_PoolNd):
  @_primexpr
  def _adaptive_shape_check(in_shape, output_size, prim_name):
      """Check shape."""
-     msg_prefix = "For {}, the"
+     msg_prefix = f"For {prim_name}, the"
      if len(in_shape) != 3:
-         raise ValueError("{} input must has 3 dim, but got {
+         raise ValueError(f"{msg_prefix} input must has 3 dim, but got {len(in_shape)}.")
      if in_shape[2] < output_size:
-         raise ValueError("{} input's last dimension must be greater or equal to "
-                          "output size {}, but got {
+         raise ValueError(f"{msg_prefix} input's last dimension must be greater or equal to "
+                          f"output size {output_size}, but got {in_shape[2]}.")
      if in_shape[2] % output_size != 0:
-         raise ValueError("{} input's last dimension must be divisible by "
-                          "output size {}, but got {
+         raise ValueError(f"{msg_prefix} input's last dimension must be divisible by "
+                          f"output size {output_size}, but got {in_shape[2]}.")


  @constexpr
  def _adaptive_dtype_check(x_dtype, prim_name):
      """Check dtype."""
      if x_dtype not in [mstype.float16, mstype.float32]:
-         raise TypeError("For {}, the x_dtype must be float16 or float32, "
-                         "but got {}."
+         raise TypeError(f"For {prim_name}, the x_dtype must be float16 or float32, "
+                         f"but got {x_dtype}.")


  class AdaptiveAvgPool1d(Cell):
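Because `_adaptive_shape_check` above insists that the last dimension be divisible by `output_size`, adaptive average pooling over that dimension reduces to a reshape plus a mean. A hedged NumPy sketch of that equivalence (reference code, not the MindSpore kernel):

import numpy as np

def adaptive_avg_pool1d_ref(x, output_size):
    # Valid only when the checks above pass: x.ndim == 3 and L % output_size == 0.
    n, c, length = x.shape
    assert length % output_size == 0, "last dimension must be divisible by output_size"
    return x.reshape(n, c, output_size, length // output_size).mean(axis=-1)

x = np.arange(12, dtype=np.float32).reshape(1, 2, 6)
print(adaptive_avg_pool1d_ref(x, 2).shape)  # (1, 2, 2)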
@@ -1233,11 +1255,10 @@ class AdaptiveAvgPool1d(Cell):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>> import mindspore
- >>> from mindspore import Tensor, nn
+ >>> import mindspore as ms
  >>> import numpy as np
- >>> pool = nn.AdaptiveAvgPool1d(output_size=2)
- >>> input = Tensor(np.random.randint(0, 10, [1, 3, 6]),
+ >>> pool = ms.nn.AdaptiveAvgPool1d(output_size=2)
+ >>> input = ms.Tensor(np.random.randint(0, 10, [1, 3, 6]), ms.float32)
  >>> output = pool(input)
  >>> result = output.shape
  >>> print(result)
@@ -1295,7 +1316,7 @@ class AdaptiveAvgPool2d(Cell):

  Args:
  output_size (Union[int, tuple]): The target output size is H x W.
- `
+ `output_size` can be a tuple consisting of int type H and W, or a single H for H x H, or None.
  If it is None, it means the output size is the same as the input size.

  Inputs:
@@ -1312,13 +1333,15 @@ class AdaptiveAvgPool2d(Cell):
  ValueError: If the dimension of `input` is less than or equal to the dimension of `output_size`.

  Supported Platforms:
- ``GPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>>
- >>>
+ >>> import mindspore as ms
+ >>> import numpy as np
+ >>> pool = ms.nn.AdaptiveAvgPool2d(2)
+ >>> input_x = ms.Tensor(np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
  ... [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
- ... [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]),
+ ... [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]), ms.float32)
  >>> output = pool(input_x)
  >>> result = output.shape
  >>> print(result)
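The 3x3 to 2x2 reduction in the doctest above follows from the window rule commonly used for adaptive pooling, with window i spanning [floor(i*in/out), ceil((i+1)*in/out)); treat this rule as an assumption here, since the diff does not state it:

import math

def adaptive_windows(in_size, out_size):
    # Assumed adaptive-pooling window rule: [floor(i * in / out), ceil((i + 1) * in / out))
    return [(i * in_size // out_size, math.ceil((i + 1) * in_size / out_size))
            for i in range(out_size)]

rows = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
wins = adaptive_windows(3, 2)  # [(0, 2), (1, 3)]
out = [[sum(rows[r][c] for r in range(*wr) for c in range(*wc))
        / ((wr[1] - wr[0]) * (wc[1] - wc[0]))
        for wc in wins] for wr in wins]
print(out)  # [[3.0, 4.0], [6.0, 7.0]]: the average over each 2x2 window per channel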
@@ -1358,7 +1381,7 @@ class AdaptiveAvgPool3d(Cell):
  \end{array}

  Args:
- output_size (Union[int, tuple]): The target output size. `
+ output_size (Union[int, tuple]): The target output size. `output_size` can be a tuple :math:`(D, H, W)`,
  or an int D for :math:`(D, D, D)`. :math:`D`, :math:`H` and :math:`W` can be int or None
  which means the output size is the same as that of the input.

@@ -1379,27 +1402,29 @@ class AdaptiveAvgPool3d(Cell):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore as ms
+ >>> import numpy as np
  >>> # case 1: output_size=(3, 3, 4)
  >>> output_size=(3, 3, 4)
  >>> input_x_val = np.random.randn(4, 3, 5, 6, 7)
- >>> input_x = Tensor(input_x_val,
- >>> net = nn.AdaptiveAvgPool3d(output_size)
+ >>> input_x = ms.Tensor(input_x_val, ms.float32)
+ >>> net = ms.nn.AdaptiveAvgPool3d(output_size)
  >>> output = net(input_x)
  >>> print(output.shape)
  (4, 3, 3, 3, 4)
  >>> # case 2: output_size=5
  >>> output_size=5
  >>> input_x_val = np.random.randn(2, 3, 8, 6, 12)
- >>> input_x = Tensor(input_x_val,
- >>> net = nn.AdaptiveAvgPool3d(output_size)
+ >>> input_x = ms.Tensor(input_x_val, ms.float32)
+ >>> net = ms.nn.AdaptiveAvgPool3d(output_size)
  >>> output = net(input_x)
  >>> print(output.shape)
  (2, 3, 5, 5, 5)
  >>> # case 3: output_size=(None, 4, 5)
  >>> output_size=(None, 4, 5)
  >>> input_x_val = np.random.randn(4, 1, 9, 10, 8)
- >>> input_x = Tensor(input_x_val,
- >>> net = nn.AdaptiveAvgPool3d(output_size)
+ >>> input_x = ms.Tensor(input_x_val, ms.float32)
+ >>> net = ms.nn.AdaptiveAvgPool3d(output_size)
  >>> output = net(input_x)
  >>> print(output.shape)
  (4, 1, 9, 4, 5)
@@ -1448,11 +1473,10 @@ class AdaptiveMaxPool1d(Cell):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>> import mindspore
- >>> from mindspore import Tensor, nn
+ >>> import mindspore as ms
  >>> import numpy as np
- >>> pool = nn.AdaptiveMaxPool1d(output_size=3)
- >>> x = Tensor(np.random.randint(0, 10, [1, 3, 6]),
+ >>> pool = ms.nn.AdaptiveMaxPool1d(output_size=3)
+ >>> x = ms.Tensor(np.random.randint(0, 10, [1, 3, 6]), ms.float32)
  >>> output = pool(x)
  >>> result = output.shape
  >>> print(result)
@@ -1514,11 +1538,11 @@ class AdaptiveMaxPool2d(Cell):
  Ascend platform only supports float16 type for input.

  Args:
- output_size (Union[int, tuple]): The target output size. `
+ output_size (Union[int, tuple]): The target output size. `output_size` can be a tuple :math:`(H, W)`,
  or an int H for :math:`(H, H)`. :math:`H` and :math:`W` can be int or None.
  If it is None, it means the output size is the same as the input size.
- return_indices (bool): If `return_indices` is True, the indices of max value would be output.
- Default: False.
+ return_indices (bool): If `return_indices` is ``True`` , the indices of max value would be output.
+ Default: ``False`` .

  Inputs:
  - **input** (Tensor) - The input of AdaptiveMaxPool2d, which is a 3D or 4D tensor,
@@ -1526,7 +1550,7 @@ class AdaptiveMaxPool2d(Cell):

  Outputs:
  Tensor, with the same type as the `input`.
- Shape of the output is
+ Shape of the output is :math:`input\_shape[:len(input\_shape) - len(out\_shape)] + out\_shape`.

  Raises:
  TypeError: If `output_size` is not int or tuple.
@@ -1540,11 +1564,13 @@ class AdaptiveMaxPool2d(Cell):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore as ms
+ >>> import numpy as np
  >>> # case 1: output_size=(None, 2)
- >>> input = Tensor(np.array([[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
+ >>> input = ms.Tensor(np.array([[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
  ... [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
- ... [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]]),
- >>> adaptive_max_pool_2d = nn.AdaptiveMaxPool2d((None, 2))
+ ... [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]]), ms.float32)
+ >>> adaptive_max_pool_2d = ms.nn.AdaptiveMaxPool2d((None, 2))
  >>> output = adaptive_max_pool_2d(input)
  >>> print(output)
  [[[[2. 3.]
@@ -1557,7 +1583,7 @@ class AdaptiveMaxPool2d(Cell):
  [5. 6.]
  [8. 9.]]]]
  >>> # case 2: output_size=2
- >>> adaptive_max_pool_2d = nn.AdaptiveMaxPool2d(2)
+ >>> adaptive_max_pool_2d = ms.nn.AdaptiveMaxPool2d(2)
  >>> output = adaptive_max_pool_2d(input)
  >>> print(output)
  [[[[5. 6.]
@@ -1567,7 +1593,7 @@ class AdaptiveMaxPool2d(Cell):
  [[5. 6.]
  [8. 9.]]]]
  >>> # case 3: output_size=(1, 2)
- >>> adaptive_max_pool_2d = nn.AdaptiveMaxPool2d((1, 2))
+ >>> adaptive_max_pool_2d = ms.nn.AdaptiveMaxPool2d((1, 2))
  >>> output = adaptive_max_pool_2d(input)
  >>> print(output)
  [[[[8. 9.]]
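The output-shape rule restored above (leading input dimensions kept, trailing ones replaced by `output_size`, with None meaning "keep the input size") is easy to express directly:

def adaptive_out_shape(input_shape, out_shape):
    # input_shape[:len(input_shape) - len(out_shape)] + out_shape, per the docstring rule;
    # None in out_shape keeps the corresponding input size.
    lead = len(input_shape) - len(out_shape)
    tail = tuple(i if o is None else o for i, o in zip(input_shape[lead:], out_shape))
    return input_shape[:lead] + tail

print(adaptive_out_shape((1, 3, 3, 3), (None, 2)))  # (1, 3, 3, 2), matching case 1 above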
@@ -1598,8 +1624,8 @@ class AdaptiveMaxPool3d(Cell):
  output_size (Union[int, tuple]): The specified output size, which is a positive integer that represents depth,
  height and width, or a tuple of three positive integers that represent depth, height and width respectively.
  If it is None, the output size and input size of the corresponding dimension are the same.
- return_indices (bool, optional): If `return_indices` is True, the indices of max value would be output.
- Otherwise, the indices will not be returned. Default: False.
+ return_indices (bool, optional): If `return_indices` is ``True`` , the indices of max value would be output.
+ Otherwise, the indices will not be returned. Default: ``False`` .

  Inputs:
  - **input** (Tensor) - Tensor, has shape of :math:`(C, D, H, W)` or :math:`(N, C, D, H, W)`.
@@ -1607,21 +1633,23 @@ class AdaptiveMaxPool3d(Cell):
  Outputs:
  - **y** (Tensor) - Tensor, has the same number of dims and data type as the `input` .
  - **argmax** (Tensor) - Tensor, the indices of the maximum values along with the outputs, has the same shape as
- `y` and a dtype of int32. Return this only when `return_indices` is True.
+ `y` and a dtype of int32. Return this only when `return_indices` is ``True`` .

  Raises:
  TypeError: If `input` is not a Tensor.
  ValueError: If the dimensions number of `input` is not 4 or 5.
  TypeError: If dtype of `input` is not int, uint or float.
- ValueError: If `output_size` is neither an int nor a tuple with shape (3,)
+ ValueError: If `output_size` is neither an int nor a tuple with shape :math:`(3,)`.

  Supported Platforms:
  ``GPU`` ``CPU``

  Examples:
- >>>
+ >>> import mindspore as ms
+ >>> import numpy as np
+ >>> input = ms.Tensor(np.arange(0,36).reshape((1, 3, 3, 4)).astype(np.float32))
  >>> output_size = (1, 1, 2)
- >>> net = nn.AdaptiveMaxPool3d(output_size, True)
+ >>> net = ms.nn.AdaptiveMaxPool3d(output_size, True)
  >>> output = net(input)
  >>> print(output[0].asnumpy())
  [[[[33. 35.]]]]
@@ -1646,8 +1674,9 @@ class FractionalMaxPool2d(Cell):
  class FractionalMaxPool2d(Cell):
  r"""
  Applies the 2D FractionalMaxPool operation over input. The output Tensor shape can be determined by either
- `output_size` or `output_ratio`, and the step size is determined by `_random_samples`.
- `output_size`
+ `output_size` or `output_ratio`, and the step size is determined by `_random_samples`. `output_size` will take
+ effect when `output_size` and `output_ratio` are set at the same time.
+ `output_size` and `output_ratio` can not both be ``None``.

  Refer to the paper `Fractional MaxPooling by Ben Graham <https://arxiv.org/abs/1412.6071>`_ for more details.

@@ -1659,22 +1688,27 @@ class FractionalMaxPool2d(Cell):
  output_size (Union[int, tuple[int]], optional): The Shape of the target `output_size`,
  is a positive int that represents height and width, or a tuple of two positive integers that represent
  height and width respectively. The value must be a positive integer. If None, the shape of the target will
- be determined by `output_ratio`. Default: None.
+ be determined by `output_ratio`. Default: ``None`` .
  output_ratio (Union[float, tuple[float]], optional): The ratio of target output shape to input shape.
  Specifying the size of the output tensor by using a ratio of the input size.
  Data type : float16, float32, float64, and value is between (0, 1). If None, the shape of the target will be
- determined by `output_size`. Default: None.
- return_indices (bool, optional): Whether to return the indices of max value. Default: False.
- _random_samples (Tensor, optional): The random step of FractionalMaxPool2d,
+ determined by `output_size`. Default: ``None`` .
+ return_indices (bool, optional): Whether to return the indices of max value. Default: ``False`` .
+ _random_samples (Tensor, optional): The random step of FractionalMaxPool2d, which is a 3D tensor.
+ Tensor of data type: float16, float32, double, and value is between [0, 1).
+ Supported shape :math:`(N, C, 2)` or :math:`(1, C, 2)`.
+ Default: ``None``, the values of `_random_samples`
+ will be randomly distributed using uniform distribution over an interval [0, 1).

  Inputs:
- - **input** (Tensor) - Tensor of shape :math:`(N, C, H_{in}, W_{in})`,
+ - **input** (Tensor) - Tensor of shape :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`,
  with float16, float32, float64, int32, int64 data type.

  Outputs:
- - **y** (Tensor) - Has the same type as the `input`.
+ - **y** (Tensor) - Has the same type as the `input`.
+ Has the shape :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})` ,
+ where :math:`(H_{out}, W_{out})` = `output_size`
+ or :math:`(H_{out}, W_{out})` = `output_ratio` * :math:`(H_{in}, W_{in})`.
  - **argmax** (Tensor) - The indices along with the outputs, which is a Tensor, with the same shape as the
  `y` and int64 data type. It will be returned only when `return_indices` is True.

@@ -1697,16 +1731,14 @@ class FractionalMaxPool2d(Cell):
  Examples:
  >>> # the kernel_size is an int number and the output_size is a tuple.
  >>> import numpy as np
- >>>
- >>>
- >>> import mindspore.common.dtype as mstype
- >>> input = Tensor(np.array([0.3220, 0.9545, 0.7879, 0.0975, 0.3698,
+ >>> import mindspore as ms
+ >>> input = ms.Tensor(np.array([0.3220, 0.9545, 0.7879, 0.0975, 0.3698,
  ... 0.5135, 0.5740, 0.3435, 0.1895, 0.8764,
  ... 0.9581, 0.4760, 0.9014, 0.8522, 0.3664,
  ... 0.4980, 0.9673, 0.9879, 0.6988, 0.9022,
- ... 0.9304, 0.1558, 0.0153, 0.1559, 0.9852]).reshape([1, 1, 5, 5]),
- >>> _random_samples = Tensor(np.array([[[0.8, 0.8]]]),
- >>> net = nn.FractionalMaxPool2d(kernel_size=2, output_size=(2, 2), _random_samples=_random_samples,
+ ... 0.9304, 0.1558, 0.0153, 0.1559, 0.9852]).reshape([1, 1, 5, 5]), ms.float32)
+ >>> _random_samples = ms.Tensor(np.array([[[0.8, 0.8]]]), ms.float32)
+ >>> net = ms.nn.FractionalMaxPool2d(kernel_size=2, output_size=(2, 2), _random_samples=_random_samples,
  ... return_indices=True)
  >>> y, argmax = net(input)
  >>> y
@@ -1715,7 +1747,7 @@ class FractionalMaxPool2d(Cell):
  >>> argmax
  [[[[ 1 9]
  [16 24]]]]
- >>> net = nn.FractionalMaxPool2d(kernel_size=2, output_ratio=(0.5, 0.5), _random_samples=_random_samples,
+ >>> net = ms.nn.FractionalMaxPool2d(kernel_size=2, output_ratio=(0.5, 0.5), _random_samples=_random_samples,
  ... return_indices=True)
  >>> y, argmax = net(input)
  >>> print(y)
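With `output_ratio`, the doctest's target size is consistent with H_out = floor(output_ratio * H_in); treat that floor rule as an assumption, since the diff only shows the multiplication:

import math

def fractional_out_size(in_size, ratio):
    # Assumed rule: H_out = floor(output_ratio * H_in); matches the 5x5 example above.
    return math.floor(in_size * ratio)

print([fractional_out_size(5, 0.5)] * 2)  # [2, 2], same as output_size=(2, 2)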
@@ -1743,8 +1775,9 @@ class FractionalMaxPool3d(Cell):
  class FractionalMaxPool3d(Cell):
  r"""
  Applies the 3D FractionalMaxPool operation over `input`. The output Tensor shape can be determined by either
- `output_size` or `output_ratio`, and the step size is determined by `_random_samples`.
- `output_size`
+ `output_size` or `output_ratio`, and the step size is determined by `_random_samples`. `output_size` will take
+ effect when `output_size` and `output_ratio` are set at the same time.
+ `output_size` and `output_ratio` can not both be ``None``.

  Refer to the paper `Fractional MaxPooling by Ben Graham <https://arxiv.org/abs/1412.6071>`_ for more details.

@@ -1757,26 +1790,29 @@ class FractionalMaxPool3d(Cell):
  depth, height and width respectively.
  output_size (Union[int, tuple[int]], optional): The shape of the target `output_size`,
  is an int number that represents depth, height and width, or a tuple of three positive integers that
- represents depth, height and width respectively. If None, the shape of the target will be determined
- `output_ratio`. Default: None.
+ represents depth, height and width respectively. If ``None`` , the shape of the target will be determined
+ by `output_ratio`. Default: ``None`` .
  output_ratio (Union[float, tuple[float]], optional): The ratio of target output shape to input shape.
  Specifying the size of the output tensor by using a ratio of the input size.
- Data type : float16, float32, float64, and value is between (0, 1). If None, the shape of the target
- determined by `output_size`.Default: None.
- return_indices (bool, optional): Whether to return the indices of max value. Default: False.
- _random_samples (Tensor, optional): The random step of
+ Data type : float16, float32, float64, and value is between (0, 1). If ``None`` , the shape of the target
+ will be determined by `output_size`. Default: ``None`` .
+ return_indices (bool, optional): Whether to return the indices of max value. Default: ``False`` .
+ _random_samples (Tensor, optional): The random step of FractionalMaxPool3d, which is a 3D tensor.
+ Tensor of data type: float16, float32, double, and value is between [0, 1).
+ Supported shape :math:`(N, C, 3)` or :math:`(1, C, 3)` . Default: ``None``, the values of `_random_samples`
+ will be randomly distributed using uniform distribution over an interval [0, 1).

  Inputs:
  - **input** (Tensor) - The input of FractionalMaxPool3d, which is a 4D or 5D tensor.
- Tensor of data type : float16, float32, float64
- Supported shape :math:`(N, C, D_{in}, H_{in}, W_{in})`
+ Tensor of data type : float16, float32, float64.
+ Supported shape :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.

  Outputs:
  - **y** (Tensor) - A tensor, the output of FractionalMaxPool3d.
- Has the same data type with `
+ Has the same data type as `input`.
+ Has the shape :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})` ,
+ where :math:`(D_{out}, H_{out}, W_{out})` = `output_size`
+ or :math:`(D_{out}, H_{out}, W_{out})` = `output_ratio` * :math:`(D_{in}, H_{in}, W_{in})` .

  - **argmax** (Tensor) - The indices along with the outputs, which is a Tensor, with the same shape as the
  `y` and int32 data type. It will output only when `return_indices` is True.
@@ -1784,9 +1820,10 @@ class FractionalMaxPool3d(Cell):
  Raises:
  TypeError: If `input` is not a 4D or 5D tensor.
  TypeError: If `_random_samples` is not a 3D tensor.
- TypeError: If data type of `imput_x` is not float16, float32, float64
+ TypeError: If data type of `input` is not float16, float32, float64.
  TypeError: If dtype of `_random_samples` is not float16, float32, float64.
  TypeError: If dtype of `argmax` is not int32, int64.
+ TypeError: If `_random_samples` has a different dtype than `input`.
  ValueError: If `output_size` is a tuple and if `output_size` length is not 3.
  ValueError: If `kernel_size` is a tuple and if `kernel_size` length is not 3.
  ValueError: If numbers in `output_size` or `kernel_size` is not positive.
@@ -1800,20 +1837,18 @@ class FractionalMaxPool3d(Cell):

  Examples:
  >>> import numpy as np
- >>>
- >>>
- >>>
- >>> _random_samples = Tensor(np.array([0.7, 0.7, 0.7]).reshape([1, 1, 3]), mstype.float32)
- >>> net = nn.FractionalMaxPool3d(kernel_size=(1, 1, 1), output_size=(1, 1, 3),
+ >>> import mindspore as ms
+ >>> x = ms.Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
+ ... .reshape([1, 1, 2, 2, 4]), ms.float32)
+ >>> _random_samples = ms.Tensor(np.array([0.7, 0.7, 0.7]).reshape([1, 1, 3]), ms.float32)
+ >>> net = ms.nn.FractionalMaxPool3d(kernel_size=(1, 1, 1), output_size=(1, 1, 3),
  ... _random_samples=_random_samples, return_indices=True)
  >>> output, argmax = net(x)
  >>> print(output)
  [[[[[13. 14. 16.]]]]]
  >>> print(argmax)
  [[[[[12 13 15]]]]]
- >>> net = nn.FractionalMaxPool3d(kernel_size=(1, 1, 1), output_ratio=(0.5, 0.5, 0.5),
+ >>> net = ms.nn.FractionalMaxPool3d(kernel_size=(1, 1, 1), output_ratio=(0.5, 0.5, 0.5),
  ... _random_samples=_random_samples, return_indices=True)
  >>> output, argmax = net(x)
  >>> print(output)
@@ -1847,14 +1882,14 @@ class MaxUnpool1d(Cell):
|
|
|
1847
1882
|
|
|
1848
1883
|
.. math::
|
|
1849
1884
|
\begin{array}{ll} \\
|
|
1850
|
-
H_{out} = (
|
|
1885
|
+
H_{out} = (H_{in} - 1) \times stride[0] - 2 \times padding[0] + kernel\_size[0] \\
|
|
1851
1886
|
\end{array}
|
|
1852
1887
|
|
|
1853
1888
|
Args:
|
|
1854
1889
|
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value.
|
|
1855
1890
|
stride (Union[int, tuple[int]]): The distance of kernel moving,
|
|
1856
|
-
If stride is None, then stride equal to kernel_size. Default: None.
|
|
1857
|
-
padding (Union[int, tuple[int]]): The pad value to be filled. Default: 0.
|
|
1891
|
+
If stride is None, then stride equal to kernel_size. Default: ``None`` .
|
|
1892
|
+
padding (Union[int, tuple[int]]): The pad value to be filled. Default: ``0`` .
|
|
1858
1893
|
|
|
1859
1894
|
Inputs:
|
|
1860
1895
|
- **x** (Tensor) - The input Tensor to invert.
|
|
@@ -1863,9 +1898,9 @@ class MaxUnpool1d(Cell):
|
|
|
1863
1898
|
Tensor of shape must be same with input 'x'.
|
|
1864
1899
|
Values of indices must belong to :math:`[0, H_{in} - 1]`.
|
|
1865
1900
|
Data type must be in int32 or int64.
|
|
1866
|
-
- **output_size** (tuple[int], optional) - The output size. Default: None.
|
|
1867
|
-
If output_size
|
|
1868
|
-
If output_size
|
|
1901
|
+
- **output_size** (tuple[int], optional) - The output size. Default: ``None`` .
|
|
1902
|
+
If output_size is ``None``, then the shape of output computed by kernel_size, stride and padding.
|
|
1903
|
+
If output_size is not ``None``, then output_size must be :math:`(N, C, H)` , :math:`(C, H)` or
|
|
1869
1904
|
:math:`(H)` and output_size must belong to
|
|
1870
1905
|
:math:`[(N, C, H_{out} - stride[0]), (N, C, H_{out} + stride[0])]`.
|
|
1871
1906
|
|
|
@@ -1887,9 +1922,11 @@ class MaxUnpool1d(Cell):
         ``GPU`` ``CPU``
 
     Examples:
-        >>>
-        >>>
-        >>>
+        >>> import mindspore as ms
+        >>> import numpy as np
+        >>> x = ms.Tensor(np.array([[2, 4, 6, 8]]).astype(np.float32))
+        >>> indices = ms.Tensor(np.array([[1, 3, 5, 7]]).astype(np.int64))
+        >>> maxunpool1d = ms.nn.MaxUnpool1d(kernel_size=2, stride=2, padding=0)
         >>> output = maxunpool1d(x, indices)
         >>> print(output.asnumpy())
         [[0. 2. 0. 4. 0. 6. 0. 8.]]
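MaxUnpool1d is essentially a scatter: each kept maximum is written back to the position recorded in `indices`, and every other position stays zero. A NumPy sketch reproducing the doctest output (illustration only, assuming the formula above for H_out):

    import numpy as np

    x = np.array([[2., 4., 6., 8.]], dtype=np.float32)
    indices = np.array([[1, 3, 5, 7]], dtype=np.int64)
    out = np.zeros((1, 8), dtype=np.float32)      # H_out = (4-1)*2 - 0 + 2 = 8
    np.put_along_axis(out, indices, x, axis=1)    # scatter maxima to their recorded slots
    print(out)                                    # [[0. 2. 0. 4. 0. 6. 0. 8.]]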
@@ -1910,6 +1947,8 @@ class MaxUnpool1d(Cell):
         else:
             if not isinstance(output_size, tuple):
                 raise ValueError(f"For MaxUnpool1d, output_size must be tuple, but type {type(output_size)}.")
+            if not output_size:
+                raise ValueError(f"For MaxUnpool1d, the length of output_size must be positive, but got 0.")
         out = ops.max_unpool1d(x, indices, self.kernel_size, stride=self.stride, padding=self.padding,
                                output_size=output_size)
         return out
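The added guard closes a real gap: an empty tuple passes the isinstance check but carries no usable dimensions, and since `not ()` is True in Python a bare truthiness test is enough to reject it. A tiny standalone sketch of the pattern (our mirror of the added lines, not the library's function):

    def check_output_size(output_size):
        """Reject non-tuples and empty tuples, as the new validation does."""
        if not isinstance(output_size, tuple):
            raise ValueError(f"output_size must be tuple, but type {type(output_size)}.")
        if not output_size:
            raise ValueError("the length of output_size must be positive, but got 0.")

    check_output_size((8,))    # fine
    # check_output_size(())    # would raise: length must be positive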
@@ -1925,8 +1964,8 @@ class MaxUnpool2d(Cell):
 
     .. math::
         \begin{array}{ll} \\
-        H_{out} = (
-        W_{out} = (
+        H_{out} = (H_{in} - 1) \times stride[0] - 2 \times padding[0] + kernel\_size[0] \\
+        W_{out} = (W_{in} - 1) \times stride[1] - 2 \times padding[1] + kernel\_size[1] \\
         \end{array}
 
     Args:
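As a worked check of the restored 2-D formulas: with the doctest settings further down (kernel_size=1, stride=1, padding=0, H_in=W_in=2), H_out = W_out = (2 - 1) * 1 - 2 * 0 + 1 = 2, so the unpooled map keeps the input's spatial size in that example.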
@@ -1936,8 +1975,8 @@ class MaxUnpool2d(Cell):
         stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
             the height and width of movement are both stride, or a tuple of two int numbers that
             represent height and width of movement respectively.
-            If stride is None, then stride equal to kernel_size. Default: None.
-        padding (Union[int, tuple[int]]): The pad value to be filled. Default: 0. If `padding` is an integer,
+            If stride is None, then stride equal to kernel_size. Default: ``None`` .
+        padding (Union[int, tuple[int]]): The pad value to be filled. Default: ``0`` . If `padding` is an integer,
             the paddings of height and width are the same, equal to padding. If `padding` is a tuple of two
             integers, the padding of height and width equal to padding[0] and padding[1] correspondingly.
 
@@ -1948,9 +1987,9 @@ class MaxUnpool2d(Cell):
           Tensor of shape must be same with input 'x'.
           Values of indices must belong to :math:`[0, H_{in} \times W_{in} - 1]`.
           Data type must be in int32 or int64.
-        - **output_size** (tuple[int], optional) - The output size. Default: None.
-          If output_size
-          If output_size
+        - **output_size** (tuple[int], optional) - The output size. Default: ``None`` .
+          If output_size is ``None``, then the shape of output computed by kernel_size, stride and padding.
+          If output_size is not ``None``, then output_size must be :math:`(N, C, H, W)`, :math:`(C, H, W)` or
           :math:`(H, W)` and output_size must belong to
           :math:`[(N, C, H_{out} - stride[0], W_{out} - stride[1]), (N, C, H_{out} + stride[0], W_{out} + stride[1])]`.
 
@@ -1973,9 +2012,11 @@ class MaxUnpool2d(Cell):
         ``GPU`` ``CPU``
 
     Examples:
-        >>>
-        >>>
-        >>>
+        >>> import mindspore as ms
+        >>> import numpy as np
+        >>> x = ms.Tensor(np.array([[[[0, 1], [8, 9]]]]).astype(np.float32))
+        >>> indices = ms.Tensor(np.array([[[[0, 1], [2, 3]]]]).astype(np.int64))
+        >>> maxunpool2d = ms.nn.MaxUnpool2d(kernel_size=1, stride=1, padding=0)
         >>> output = maxunpool2d(x, indices)
         >>> print(output.asnumpy())
         [[[[0. 1.]
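In the 2-D case the `indices` are flat offsets within each H_in x W_in plane, so offset k maps to row k // W_in, column k % W_in. A quick check against the doctest above (illustration only):

    import numpy as np

    indices = np.array([0, 1, 2, 3])                 # flat offsets from the doctest
    rows, cols = np.unravel_index(indices, (2, 2))   # H_in = W_in = 2
    print([(int(r), int(c)) for r, c in zip(rows, cols)])
    # [(0, 0), (0, 1), (1, 0), (1, 1)] -- each value returns to its own position,
    # which is why the unpooled output equals the input in this example.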
@@ -1997,6 +2038,8 @@ class MaxUnpool2d(Cell):
         else:
             if not isinstance(output_size, tuple):
                 raise ValueError(f"For MaxUnpool2d, output_size must be tuple, but type {type(output_size)}.")
+            if not output_size:
+                raise ValueError(f"For MaxUnpool2d, the length of output_size must be positive, but got 0.")
         out = ops.max_unpool2d(x, indices, self.kernel_size, stride=self.stride, padding=self.padding,
                                output_size=output_size)
         return out
@@ -2013,9 +2056,9 @@ class MaxUnpool3d(Cell):
 
     .. math::
         \begin{array}{ll} \\
-        D_{out} = (
-        H_{out} = (
-        W_{out} = (
+        D_{out} = (D_{in} - 1) \times stride[0] - 2 \times padding[0] + kernel\_size[0] \\
+        H_{out} = (H_{in} - 1) \times stride[1] - 2 \times padding[1] + kernel\_size[1] \\
+        W_{out} = (W_{in} - 1) \times stride[2] - 2 \times padding[2] + kernel\_size[2] \\
         \end{array}
 
     Args:
@@ -2025,8 +2068,8 @@ class MaxUnpool3d(Cell):
         stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
             the depth, height and width of movement are both stride, or a tuple of three int numbers that
             represent depth, height and width of movement respectively.
-            If stride is None, then stride equal to kernel_size. Default: None.
-        padding (Union[int, tuple[int]]): The pad value to be filled. Default: 0. If `padding` is an integer,
+            If stride is None, then stride equal to kernel_size. Default: ``None`` .
+        padding (Union[int, tuple[int]]): The pad value to be filled. Default: ``0`` . If `padding` is an integer,
             the paddings of depth, height and width are the same, equal to padding. If `padding` is a tuple of three
             integers, the padding of depth, height and width equal to padding[0], padding[1] and padding[2]
             correspondingly.
@@ -2038,9 +2081,9 @@ class MaxUnpool3d(Cell):
           Tensor of shape must be same with input 'x'.
           Values of indices must belong to :math:`[0, D_{in} \times H_{in} \times W_{in} - 1]`.
           Data type must be in int32 or int64.
-        - **output_size** (tuple[int], optional) - The output size. Default: None.
-          If output_size
-          If output_size
+        - **output_size** (tuple[int], optional) - The output size. Default: ``None`` .
+          If output_size is ``None``, then the shape of output computed by kernel_size, stride and padding.
+          If output_size is not ``None``, then output_size must be :math:`(N, C, D, H, W)` , :math:`(C, D, H, W)` or
           :math:`(D, H, W)` and output_size must belong to
           :math:`[(N, C, D_{out} - stride[0], H_{out} - stride[1], W_{out} - stride[2]),
           (N, C, D_{out} + stride[0], H_{out} + stride[1], W_{out} + stride[2])]`.
@@ -2064,9 +2107,11 @@ class MaxUnpool3d(Cell):
         ``GPU`` ``CPU``
 
     Examples:
-        >>>
-        >>>
-        >>>
+        >>> import mindspore as ms
+        >>> import numpy as np
+        >>> x = ms.Tensor(np.array([[[[[0, 1], [8, 9]]]]]).astype(np.float32))
+        >>> indices = ms.Tensor(np.array([[[[[0, 1], [2, 3]]]]]).astype(np.int64))
+        >>> maxunpool3d = ms.nn.MaxUnpool3d(kernel_size=1, stride=1, padding=0)
         >>> output = maxunpool3d(x, indices)
         >>> print(output.asnumpy())
         [[[[[0. 1.]
@@ -2087,6 +2132,8 @@ class MaxUnpool3d(Cell):
         else:
             if not isinstance(output_size, tuple):
                 raise ValueError(f"For MaxUnpool3d, output_size must be tuple, but type {type(output_size)}.")
+            if not output_size:
+                raise ValueError(f"For MaxUnpool3d, the length of output_size must be positive, but got 0.")
         out = ops.max_unpool3d(x, indices, self.kernel_size, stride=self.stride, padding=self.padding,
                                output_size=output_size)
         return out