mindspore-2.0.0rc1-cp38-none-any.whl → mindspore-2.2.0-cp38-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +2 -2
- mindspore/__init__.py +5 -2
- mindspore/_akg/akg/build_module.py +5 -6
- mindspore/_akg/akg/composite/build_module.py +49 -16
- mindspore/_akg/akg/composite/split_stitch.py +10 -11
- mindspore/_akg/akg/config/repository.json +195 -0
- mindspore/_akg/akg/global_configs.py +5 -1
- mindspore/_akg/akg/ms/info_version_adapt.py +67 -1
- mindspore/_akg/akg/tvm/api.py +4 -3
- mindspore/_akg/akg/tvm/autotvm/__init__.py +1 -2
- mindspore/_akg/akg/tvm/autotvm/graph_tuner/base_graph_tuner.py +1 -5
- mindspore/_akg/akg/tvm/autotvm/measure/__init__.py +1 -1
- mindspore/_akg/akg/tvm/autotvm/measure/measure.py +1 -10
- mindspore/_akg/akg/tvm/autotvm/measure/measure_methods.py +1 -372
- mindspore/_akg/akg/tvm/build_module.py +16 -1
- mindspore/_akg/akg/tvm/contrib/graph_runtime.py +0 -53
- mindspore/_akg/akg/tvm/hybrid/parser.py +7 -6
- mindspore/_akg/akg/tvm/ir_builder.py +1 -1
- mindspore/_akg/akg/tvm/module.py +1 -2
- mindspore/_akg/akg/tvm/stmt.py +2 -2
- mindspore/_akg/akg/utils/composite_op_helper.py +9 -10
- mindspore/_akg/akg/utils/kernel_exec.py +58 -260
- mindspore/_akg/akg/utils/op_dsl.py +17 -1
- mindspore/_akg/akg/utils/result_analysis.py +4 -24
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +198 -0
- mindspore/_c_dataengine.cpython-38-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-38-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-38-aarch64-linux-gnu.so +0 -0
- mindspore/_check_jit_forbidden_api.py +5 -1
- mindspore/_checkparam.py +79 -62
- mindspore/_extends/graph_kernel/__init__.py +0 -1
- mindspore/_extends/graph_kernel/model/graph_split.py +2 -0
- mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
- mindspore/_extends/graph_kernel/splitter.py +1 -9
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +128 -21
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +2 -2
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +18 -13
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +13 -9
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
- mindspore/_extends/parse/__init__.py +19 -17
- mindspore/_extends/parse/namespace.py +7 -36
- mindspore/_extends/parse/parser.py +375 -189
- mindspore/_extends/parse/resources.py +36 -41
- mindspore/_extends/parse/standard_method.py +350 -245
- mindspore/_extends/parse/trope.py +2 -12
- mindspore/_extends/remote/kernel_build_server.py +24 -7
- mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
- mindspore/_install_custom.py +43 -0
- mindspore/_mindspore_offline_debug.cpython-38-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +85 -19
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/base.py +2 -2
- mindspore/boost/boost.py +27 -32
- mindspore/boost/boost_cell_wrapper.py +37 -13
- mindspore/boost/grad_accumulation.py +1 -1
- mindspore/boost/grad_freeze.py +34 -6
- mindspore/boost/group_loss_scale_manager.py +15 -14
- mindspore/boost/less_batch_normalization.py +28 -3
- mindspore/common/__init__.py +15 -11
- mindspore/common/_auto_dynamic.py +68 -0
- mindspore/common/_jit_fallback_utils.py +111 -0
- mindspore/common/_register_for_adapter.py +17 -5
- mindspore/common/_register_for_tensor.py +2 -2
- mindspore/common/_stub_tensor.py +18 -15
- mindspore/common/_utils.py +31 -7
- mindspore/common/api.py +269 -101
- mindspore/common/auto_dynamic_shape.py +498 -0
- mindspore/common/dtype.py +61 -21
- mindspore/common/dump.py +9 -7
- mindspore/common/initializer.py +106 -76
- mindspore/common/jit_config.py +35 -14
- mindspore/common/lazy_inline.py +187 -0
- mindspore/common/mindir_util.py +101 -0
- mindspore/common/mutable.py +10 -13
- mindspore/common/parameter.py +246 -55
- mindspore/common/seed.py +13 -7
- mindspore/common/sparse_tensor.py +29 -33
- mindspore/common/tensor.py +907 -251
- mindspore/communication/__init__.py +7 -4
- mindspore/communication/_comm_helper.py +84 -4
- mindspore/communication/management.py +160 -88
- mindspore/config/op_info.config +99 -75
- mindspore/config/super_bar_config.json +36 -4
- mindspore/context.py +526 -219
- mindspore/dataset/__init__.py +9 -46
- mindspore/dataset/audio/__init__.py +4 -19
- mindspore/dataset/audio/transforms.py +545 -233
- mindspore/dataset/audio/utils.py +21 -18
- mindspore/dataset/callback/ds_callback.py +42 -13
- mindspore/dataset/core/config.py +158 -100
- mindspore/dataset/core/validator_helpers.py +1 -63
- mindspore/dataset/debug/debug_hook.py +45 -13
- mindspore/dataset/debug/pre_defined_hook.py +5 -5
- mindspore/dataset/engine/__init__.py +0 -5
- mindspore/dataset/engine/cache_client.py +38 -15
- mindspore/dataset/engine/datasets.py +615 -278
- mindspore/dataset/engine/datasets_audio.py +154 -283
- mindspore/dataset/engine/datasets_standard_format.py +104 -116
- mindspore/dataset/engine/datasets_text.py +443 -326
- mindspore/dataset/engine/datasets_user_defined.py +251 -164
- mindspore/dataset/engine/datasets_vision.py +839 -1443
- mindspore/dataset/engine/iterators.py +11 -4
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +7 -3
- mindspore/dataset/engine/obs/util.py +3 -0
- mindspore/dataset/engine/offload.py +6 -6
- mindspore/dataset/engine/queue.py +15 -14
- mindspore/dataset/engine/samplers.py +39 -23
- mindspore/dataset/engine/serializer_deserializer.py +22 -6
- mindspore/dataset/engine/validators.py +21 -331
- mindspore/dataset/text/__init__.py +5 -33
- mindspore/dataset/text/transforms.py +334 -165
- mindspore/dataset/text/utils.py +215 -145
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/c_transforms.py +3 -2
- mindspore/dataset/transforms/py_transforms_util.py +40 -12
- mindspore/dataset/transforms/transforms.py +174 -71
- mindspore/dataset/utils/browse_dataset.py +25 -17
- mindspore/dataset/utils/line_reader.py +24 -21
- mindspore/dataset/vision/__init__.py +5 -26
- mindspore/dataset/vision/c_transforms.py +177 -165
- mindspore/dataset/vision/py_transforms.py +114 -119
- mindspore/dataset/vision/py_transforms_util.py +54 -51
- mindspore/dataset/vision/transforms.py +1127 -381
- mindspore/dataset/vision/utils.py +54 -38
- mindspore/dataset/vision/validators.py +12 -2
- mindspore/experimental/map_parameter.py +38 -4
- mindspore/{dataset/datapreprocess → experimental/optim}/__init__.py +14 -4
- mindspore/experimental/optim/adam.py +192 -0
- mindspore/experimental/optim/adamw.py +181 -0
- mindspore/experimental/optim/lr_scheduler.py +1427 -0
- mindspore/experimental/optim/optimizer.py +252 -0
- mindspore/experimental/optim/sgd.py +147 -0
- mindspore/gen_ops.py +273 -0
- mindspore/include/OWNERS +1 -2
- mindspore/include/api/context.h +21 -1
- mindspore/include/api/data_type.h +2 -1
- mindspore/include/api/graph.h +0 -15
- mindspore/include/api/kernel.h +2 -0
- mindspore/include/api/kernel_api.h +37 -12
- mindspore/include/api/model.h +29 -42
- mindspore/include/api/model_group.h +14 -3
- mindspore/include/api/model_parallel_runner.h +18 -2
- mindspore/include/api/serialization.h +26 -0
- mindspore/include/api/status.h +1 -0
- mindspore/include/api/types.h +38 -4
- mindspore/include/c_api/ms/abstract.h +67 -0
- mindspore/include/c_api/ms/attribute.h +197 -0
- mindspore/include/c_api/ms/base/handle_types.h +43 -0
- mindspore/include/c_api/ms/base/macros.h +32 -0
- mindspore/include/c_api/ms/base/status.h +33 -0
- mindspore/include/c_api/ms/base/types.h +282 -0
- mindspore/include/c_api/ms/context.h +102 -0
- mindspore/include/c_api/ms/graph.h +160 -0
- mindspore/include/c_api/ms/node.h +606 -0
- mindspore/include/c_api/ms/tensor.h +161 -0
- mindspore/include/c_api/ms/value.h +84 -0
- mindspore/include/c_api/status_c.h +3 -0
- mindspore/include/dataset/constants.h +6 -12
- mindspore/include/dataset/execute.h +23 -13
- mindspore/include/dataset/text.h +26 -26
- mindspore/include/dataset/transforms.h +25 -31
- mindspore/include/dataset/vision.h +60 -60
- mindspore/include/dataset/vision_ascend.h +5 -6
- mindspore/include/dataset/vision_lite.h +17 -17
- mindspore/include/mindapi/base/format.h +0 -1
- mindspore/include/mindapi/base/type_id.h +2 -1
- mindspore/include/mindapi/base/types.h +5 -1
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libjemalloc.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +9000 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/log.py +9 -6
- mindspore/mindrecord/filereader.py +33 -4
- mindspore/mindrecord/filewriter.py +70 -35
- mindspore/mindrecord/mindpage.py +40 -34
- mindspore/mindrecord/shardreader.py +1 -1
- mindspore/mindrecord/shardsegment.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +25 -18
- mindspore/mindrecord/tools/cifar10_to_mr.py +25 -18
- mindspore/mindrecord/tools/csv_to_mr.py +29 -13
- mindspore/mindrecord/tools/imagenet_to_mr.py +24 -10
- mindspore/mindrecord/tools/mnist_to_mr.py +24 -11
- mindspore/mindrecord/tools/tfrecord_to_mr.py +31 -26
- mindspore/nn/cell.py +463 -169
- mindspore/nn/dynamic_lr.py +47 -43
- mindspore/nn/layer/activation.py +225 -82
- mindspore/nn/layer/basic.py +121 -79
- mindspore/nn/layer/channel_shuffle.py +21 -21
- mindspore/nn/layer/combined.py +33 -26
- mindspore/nn/layer/container.py +277 -22
- mindspore/nn/layer/conv.py +441 -304
- mindspore/nn/layer/dense.py +19 -13
- mindspore/nn/layer/embedding.py +62 -49
- mindspore/nn/layer/flash_attention.py +264 -0
- mindspore/nn/layer/image.py +50 -39
- mindspore/nn/layer/math.py +62 -51
- mindspore/nn/layer/normalization.py +219 -167
- mindspore/nn/layer/padding.py +58 -70
- mindspore/nn/layer/pooling.py +334 -287
- mindspore/nn/layer/rnn_cells.py +53 -38
- mindspore/nn/layer/rnns.py +59 -56
- mindspore/nn/layer/thor_layer.py +52 -44
- mindspore/nn/layer/timedistributed.py +6 -4
- mindspore/nn/layer/transformer.py +284 -164
- mindspore/nn/learning_rate_schedule.py +34 -25
- mindspore/nn/loss/__init__.py +3 -2
- mindspore/nn/loss/loss.py +554 -311
- mindspore/nn/optim/ada_grad.py +12 -9
- mindspore/nn/optim/adadelta.py +14 -11
- mindspore/nn/optim/adafactor.py +19 -16
- mindspore/nn/optim/adam.py +62 -47
- mindspore/nn/optim/adamax.py +13 -10
- mindspore/nn/optim/adasum.py +12 -8
- mindspore/nn/optim/asgd.py +10 -9
- mindspore/nn/optim/ftrl.py +20 -17
- mindspore/nn/optim/lamb.py +16 -12
- mindspore/nn/optim/lars.py +8 -6
- mindspore/nn/optim/lazyadam.py +25 -20
- mindspore/nn/optim/momentum.py +10 -7
- mindspore/nn/optim/optimizer.py +61 -9
- mindspore/nn/optim/proximal_ada_grad.py +14 -13
- mindspore/nn/optim/rmsprop.py +17 -13
- mindspore/nn/optim/rprop.py +30 -17
- mindspore/nn/optim/sgd.py +40 -23
- mindspore/nn/optim/thor.py +24 -26
- mindspore/nn/probability/bijector/bijector.py +11 -11
- mindspore/nn/probability/bijector/exp.py +1 -1
- mindspore/nn/probability/bijector/gumbel_cdf.py +3 -3
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/power_transform.py +29 -29
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +5 -5
- mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +4 -2
- mindspore/nn/probability/bnn_layers/conv_variational.py +13 -13
- mindspore/nn/probability/bnn_layers/dense_variational.py +12 -12
- mindspore/nn/probability/bnn_layers/layer_distribution.py +9 -8
- mindspore/nn/probability/distribution/_utils/custom_ops.py +19 -3
- mindspore/nn/probability/distribution/_utils/utils.py +1 -1
- mindspore/nn/probability/distribution/bernoulli.py +9 -9
- mindspore/nn/probability/distribution/beta.py +8 -8
- mindspore/nn/probability/distribution/categorical.py +23 -15
- mindspore/nn/probability/distribution/cauchy.py +5 -6
- mindspore/nn/probability/distribution/distribution.py +3 -3
- mindspore/nn/probability/distribution/exponential.py +4 -4
- mindspore/nn/probability/distribution/gamma.py +10 -10
- mindspore/nn/probability/distribution/geometric.py +8 -8
- mindspore/nn/probability/distribution/gumbel.py +8 -9
- mindspore/nn/probability/distribution/half_normal.py +5 -5
- mindspore/nn/probability/distribution/laplace.py +5 -5
- mindspore/nn/probability/distribution/log_normal.py +12 -11
- mindspore/nn/probability/distribution/logistic.py +8 -8
- mindspore/nn/probability/distribution/normal.py +6 -5
- mindspore/nn/probability/distribution/poisson.py +10 -11
- mindspore/nn/probability/distribution/student_t.py +8 -9
- mindspore/nn/probability/distribution/transformed_distribution.py +5 -5
- mindspore/nn/probability/distribution/uniform.py +11 -11
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +9 -9
- mindspore/nn/wrap/cell_wrapper.py +188 -63
- mindspore/nn/wrap/grad_reducer.py +21 -12
- mindspore/nn/wrap/loss_scale.py +136 -49
- mindspore/numpy/__init__.py +4 -4
- mindspore/numpy/array_creations.py +55 -56
- mindspore/numpy/array_ops.py +134 -35
- mindspore/numpy/logic_ops.py +66 -20
- mindspore/numpy/math_ops.py +142 -139
- mindspore/numpy/utils_const.py +2 -2
- mindspore/offline_debug/convert_async.py +2 -2
- mindspore/ops/_grad_experimental/__init__.py +7 -5
- mindspore/ops/_grad_experimental/grad_array_ops.py +231 -348
- mindspore/ops/{_grad → _grad_experimental}/grad_base.py +1 -33
- mindspore/ops/{_grad → _grad_experimental}/grad_comm_ops.py +25 -13
- mindspore/ops/{_grad/__init__.py → _grad_experimental/grad_debug_ops.py} +15 -7
- mindspore/ops/{_grad → _grad_experimental}/grad_implementations.py +17 -11
- mindspore/ops/_grad_experimental/grad_inner_ops.py +33 -52
- mindspore/ops/_grad_experimental/grad_math_ops.py +151 -1224
- mindspore/ops/_grad_experimental/grad_nn_ops.py +141 -414
- mindspore/ops/{_grad → _grad_experimental}/grad_quant_ops.py +10 -6
- mindspore/ops/_grad_experimental/grad_sparse.py +317 -2
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -13
- mindspore/ops/{_grad → _grad_experimental}/taylor_rule.py +1 -1
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/flash_attention/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +406 -0
- mindspore/{_extends/graph_kernel/expanders/complex/__init__.py → ops/_op_impl/_custom_op/flash_attention/constants.py} +27 -8
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +467 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +563 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +193 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +435 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +45 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +67 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +62 -0
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/aicpu/__init__.py +41 -1
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/cast.py +52 -0
- mindspore/ops/_op_impl/aicpu/coalesce.py +2 -0
- mindspore/ops/_op_impl/aicpu/col2im.py +3 -1
- mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
- mindspore/ops/_op_impl/aicpu/dropout_genmask.py +6 -0
- mindspore/ops/_op_impl/aicpu/eps.py +32 -0
- mindspore/ops/_op_impl/aicpu/eye.py +4 -4
- mindspore/ops/_op_impl/aicpu/fft_with_size.py +6 -0
- mindspore/ops/_op_impl/aicpu/fill_diagonal.py +5 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
- mindspore/ops/_op_impl/aicpu/im2col.py +3 -5
- mindspore/ops/_op_impl/aicpu/lgamma.py +1 -0
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
- mindspore/ops/_op_impl/aicpu/lu.py +39 -0
- mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/masked_scatter.py +1 -0
- mindspore/ops/_op_impl/aicpu/masked_select_grad.py +3 -0
- mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
- mindspore/ops/_op_impl/aicpu/matrix_power.py +6 -1
- mindspore/ops/_op_impl/aicpu/median.py +1 -0
- mindspore/ops/_op_impl/aicpu/multinomial.py +9 -9
- mindspore/ops/_op_impl/aicpu/not_equal.py +0 -5
- mindspore/ops/_op_impl/aicpu/pad_v3.py +3 -1
- mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +2 -0
- mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
- mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
- mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
- mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
- mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +0 -6
- mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +0 -7
- mindspore/ops/_op_impl/aicpu/scatter_nd.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
- mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
- mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
- mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
- mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -4
- mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -4
- mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
- mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
- mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
- mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
- mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +14 -6
- mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +22 -8
- mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +11 -6
- mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +21 -10
- mindspore/ops/_op_impl/tbe/__init__.py +6 -4
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool.py +2 -2
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +3 -3
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +4 -4
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +2 -2
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +3 -3
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +3 -3
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +2 -2
- mindspore/ops/_op_impl/tbe/bn_infer.py +2 -2
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +3 -2
- mindspore/ops/_op_impl/tbe/broadcast_to.py +1 -1
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +3 -3
- mindspore/ops/_op_impl/tbe/expand_dims.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_v2.py +56 -0
- mindspore/ops/_op_impl/tbe/im2col.py +4 -4
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
- mindspore/ops/_op_impl/tbe/mem_set.py +38 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +3 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +2 -2
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
- mindspore/ops/_primitive_cache.py +1 -1
- mindspore/ops/_tracefunc.py +241 -0
- mindspore/ops/_utils/utils.py +10 -2
- mindspore/ops/_vmap/vmap_array_ops.py +5 -3
- mindspore/ops/_vmap/vmap_base.py +5 -4
- mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +11 -6
- mindspore/ops/_vmap/vmap_math_ops.py +5 -2
- mindspore/ops/_vmap/vmap_nn_ops.py +135 -11
- mindspore/ops/arg_dtype_cast.py +54 -0
- mindspore/ops/composite/__init__.py +7 -5
- mindspore/ops/composite/base.py +78 -34
- mindspore/ops/composite/math_ops.py +5 -695
- mindspore/ops/composite/multitype_ops/_compile_utils.py +403 -97
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +28 -22
- mindspore/ops/composite/multitype_ops/add_impl.py +69 -7
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +48 -10
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/mod_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +10 -7
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/uadd_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
- mindspore/ops/deprecated.py +304 -0
- mindspore/ops/function/__init__.py +41 -4
- mindspore/ops/function/array_func.py +1108 -467
- mindspore/ops/function/clip_func.py +94 -27
- mindspore/ops/function/debug_func.py +3 -1
- mindspore/ops/function/grad/grad_func.py +82 -73
- mindspore/ops/function/image_func.py +28 -12
- mindspore/ops/function/linalg_func.py +135 -39
- mindspore/ops/function/math_func.py +3779 -894
- mindspore/ops/function/nn_func.py +1584 -657
- mindspore/ops/function/parameter_func.py +13 -3
- mindspore/ops/function/random_func.py +247 -153
- mindspore/ops/function/sparse_func.py +14 -11
- mindspore/ops/function/sparse_unary_func.py +173 -47
- mindspore/ops/function/spectral_func.py +8 -4
- mindspore/ops/function/vmap_func.py +8 -7
- mindspore/ops/functional.py +47 -16
- mindspore/ops/op_info_register.py +346 -86
- mindspore/ops/operations/__init__.py +38 -22
- mindspore/ops/operations/_grad_ops.py +145 -149
- mindspore/ops/operations/_inner_ops.py +298 -56
- mindspore/ops/operations/_ms_kernel.py +3 -3
- mindspore/ops/operations/_quant_ops.py +24 -28
- mindspore/ops/operations/_rl_inner_ops.py +9 -7
- mindspore/ops/operations/_scalar_ops.py +115 -0
- mindspore/ops/operations/_sequence_ops.py +148 -10
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/_thor_ops.py +2 -2
- mindspore/ops/operations/array_ops.py +1239 -561
- mindspore/ops/operations/comm_ops.py +166 -90
- mindspore/ops/operations/control_ops.py +3 -3
- mindspore/ops/operations/custom_ops.py +124 -102
- mindspore/ops/operations/debug_ops.py +24 -11
- mindspore/ops/operations/image_ops.py +86 -71
- mindspore/ops/operations/inner_ops.py +18 -13
- mindspore/ops/operations/linalg_ops.py +30 -11
- mindspore/ops/operations/math_ops.py +1730 -435
- mindspore/ops/operations/nn_ops.py +1953 -943
- mindspore/ops/operations/other_ops.py +65 -43
- mindspore/ops/operations/random_ops.py +258 -98
- mindspore/ops/operations/rl_ops.py +4 -36
- mindspore/ops/operations/sparse_ops.py +38 -33
- mindspore/ops/operations/spectral_ops.py +8 -4
- mindspore/ops/primitive.py +66 -44
- mindspore/ops/signature.py +5 -5
- mindspore/parallel/_auto_parallel_context.py +80 -19
- mindspore/parallel/_cost_model_context.py +42 -0
- mindspore/parallel/_offload_context.py +162 -72
- mindspore/parallel/_parallel_serialization.py +2 -2
- mindspore/parallel/_ps_context.py +16 -4
- mindspore/parallel/_recovery_context.py +2 -1
- mindspore/parallel/_tensor.py +15 -13
- mindspore/parallel/_transformer/layers.py +8 -6
- mindspore/parallel/_transformer/loss.py +1 -0
- mindspore/parallel/_transformer/moe.py +7 -7
- mindspore/parallel/_transformer/op_parallel_config.py +12 -1
- mindspore/parallel/_transformer/transformer.py +34 -14
- mindspore/parallel/_utils.py +36 -14
- mindspore/parallel/algo_parameter_config.py +114 -20
- mindspore/parallel/checkpoint_transform.py +16 -18
- mindspore/parallel/shard.py +16 -13
- mindspore/profiler/__init__.py +1 -1
- mindspore/profiler/common/struct_type.py +3 -3
- mindspore/profiler/common/util.py +3 -2
- mindspore/profiler/envprofiling.py +11 -4
- mindspore/profiler/parser/aicpu_data_parser.py +5 -3
- mindspore/profiler/parser/ascend_flops_generator.py +94 -0
- mindspore/profiler/parser/ascend_fpbp_generator.py +76 -0
- mindspore/profiler/parser/ascend_hccl_generator.py +288 -0
- mindspore/profiler/parser/ascend_msprof_exporter.py +213 -0
- mindspore/profiler/parser/ascend_msprof_generator.py +199 -0
- mindspore/profiler/parser/ascend_op_generator.py +276 -0
- mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
- mindspore/profiler/parser/ascend_timeline_generator.py +110 -54
- mindspore/profiler/parser/base_timeline_generator.py +11 -7
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +45 -46
- mindspore/profiler/parser/flops_parser.py +15 -11
- mindspore/profiler/parser/framework_parser.py +92 -73
- mindspore/profiler/parser/hccl_parser.py +16 -12
- mindspore/profiler/parser/integrator.py +22 -11
- mindspore/profiler/parser/memory_usage_parser.py +36 -11
- mindspore/profiler/parser/minddata_analyzer.py +12 -14
- mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_parser.py +8 -4
- mindspore/profiler/parser/op_intermediate_parser.py +5 -2
- mindspore/profiler/parser/optime_parser.py +1 -1
- mindspore/profiler/parser/profiler_info.py +4 -5
- mindspore/profiler/parser/step_trace_parser.py +11 -14
- mindspore/profiler/profiling.py +678 -377
- mindspore/rewrite/api/node.py +211 -54
- mindspore/rewrite/api/node_type.py +5 -0
- mindspore/rewrite/api/pattern_engine.py +22 -23
- mindspore/rewrite/api/scoped_value.py +20 -17
- mindspore/rewrite/api/symbol_tree.py +252 -106
- mindspore/rewrite/api/tree_node_helper.py +3 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -1
- mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +97 -46
- mindspore/rewrite/common/rewrite_elog.py +5 -1
- mindspore/rewrite/namer.py +51 -51
- mindspore/rewrite/namespace.py +14 -5
- mindspore/{ops/bprop_mindir → rewrite/node}/__init__.py +9 -4
- mindspore/rewrite/node/call_function.py +79 -0
- mindspore/rewrite/node/cell_container.py +135 -0
- mindspore/rewrite/node/control_flow.py +88 -0
- mindspore/rewrite/{node.py → node/node.py} +313 -247
- mindspore/rewrite/node/node_manager.py +254 -0
- mindspore/rewrite/node/node_topological_manager.py +243 -0
- mindspore/rewrite/parsers/arguments_parser.py +22 -21
- mindspore/rewrite/parsers/assign_parser.py +225 -239
- mindspore/rewrite/parsers/attribute_parser.py +9 -7
- mindspore/rewrite/parsers/class_def_parser.py +179 -218
- mindspore/rewrite/parsers/constant_parser.py +9 -6
- mindspore/rewrite/parsers/container_parser.py +9 -7
- mindspore/rewrite/parsers/for_parser.py +36 -15
- mindspore/rewrite/parsers/function_def_parser.py +23 -20
- mindspore/rewrite/parsers/if_parser.py +28 -24
- mindspore/rewrite/parsers/module_parser.py +202 -25
- mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
- mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
- mindspore/rewrite/parsers/return_parser.py +6 -6
- mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
- mindspore/rewrite/sparsify/sparsify.py +4 -1
- mindspore/rewrite/sparsify/utils.py +11 -5
- mindspore/rewrite/symbol_tree.py +577 -732
- mindspore/rewrite/symbol_tree_builder.py +9 -175
- mindspore/rewrite/symbol_tree_dumper.py +2 -2
- mindspore/run_check/_check_version.py +46 -39
- mindspore/run_check/run_check.py +3 -2
- mindspore/{scipy/sparse → safeguard}/__init__.py +4 -5
- mindspore/safeguard/rewrite_obfuscation.py +517 -0
- mindspore/scipy/__init__.py +1 -1
- mindspore/scipy/linalg.py +67 -61
- mindspore/scipy/ops.py +5 -41
- mindspore/scipy/ops_grad.py +3 -2
- mindspore/scipy/ops_wrapper.py +5 -5
- mindspore/scipy/optimize/line_search.py +8 -8
- mindspore/scipy/optimize/linear_sum_assignment.py +4 -4
- mindspore/scipy/optimize/minimize.py +16 -12
- mindspore/scipy/utils.py +1 -52
- mindspore/scipy/utils_const.py +4 -4
- mindspore/train/__init__.py +4 -4
- mindspore/train/_utils.py +13 -5
- mindspore/train/amp.py +410 -148
- mindspore/train/anf_ir_pb2.py +16 -4
- mindspore/train/callback/_backup_and_restore.py +8 -11
- mindspore/train/callback/_callback.py +80 -3
- mindspore/train/callback/_checkpoint.py +82 -51
- mindspore/train/callback/_early_stop.py +12 -15
- mindspore/train/callback/_history.py +1 -1
- mindspore/train/callback/_lambda_callback.py +13 -13
- mindspore/train/callback/_landscape.py +21 -17
- mindspore/train/callback/_loss_monitor.py +9 -10
- mindspore/train/callback/_on_request_exit.py +16 -33
- mindspore/train/callback/_reduce_lr_on_plateau.py +21 -24
- mindspore/train/callback/_summary_collector.py +44 -30
- mindspore/train/callback/_time_monitor.py +62 -12
- mindspore/train/data_sink.py +10 -16
- mindspore/train/dataset_helper.py +154 -86
- mindspore/train/loss_scale_manager.py +14 -9
- mindspore/train/metrics/__init__.py +10 -2
- mindspore/train/metrics/accuracy.py +1 -1
- mindspore/train/metrics/auc.py +1 -1
- mindspore/train/metrics/bleu_score.py +2 -2
- mindspore/train/metrics/confusion_matrix.py +14 -14
- mindspore/train/metrics/cosine_similarity.py +3 -3
- mindspore/train/metrics/dice.py +1 -1
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +8 -6
- mindspore/train/metrics/mean_surface_distance.py +5 -4
- mindspore/train/metrics/metric.py +49 -17
- mindspore/train/metrics/occlusion_sensitivity.py +4 -4
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +2 -2
- mindspore/train/metrics/recall.py +2 -3
- mindspore/train/metrics/roc.py +7 -7
- mindspore/train/metrics/root_mean_square_surface_distance.py +5 -4
- mindspore/train/metrics/topk.py +7 -4
- mindspore/train/mind_ir_pb2.py +193 -48
- mindspore/train/model.py +377 -133
- mindspore/train/serialization.py +697 -245
- mindspore/train/summary/_summary_adapter.py +5 -2
- mindspore/train/summary/_writer_pool.py +4 -3
- mindspore/train/summary/summary_record.py +25 -23
- mindspore/train/train_thor/convert_utils.py +39 -23
- mindspore/train/train_thor/dataset_helper.py +4 -3
- mindspore/train/train_thor/model_thor.py +8 -8
- mindspore/version.py +1 -1
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/METADATA +7 -8
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/RECORD +633 -804
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/entry_points.txt +0 -1
- mindspore/_akg/akg/tvm/contrib/debugger/__init__.py +0 -16
- mindspore/_akg/akg/tvm/contrib/debugger/debug_result.py +0 -274
- mindspore/_akg/akg/tvm/contrib/debugger/debug_runtime.py +0 -259
- mindspore/_akg/akg/tvm/contrib/peak.py +0 -341
- mindspore/_akg/akg/tvm/contrib/rpc.py +0 -25
- mindspore/_akg/akg/tvm/contrib/xcode.py +0 -257
- mindspore/_akg/akg/tvm/exec/__init__.py +0 -17
- mindspore/_akg/akg/tvm/exec/autotvm_log_editor.py +0 -60
- mindspore/_akg/akg/tvm/exec/measure_peak.py +0 -48
- mindspore/_akg/akg/tvm/exec/query_rpc_tracker.py +0 -48
- mindspore/_akg/akg/tvm/exec/rpc_proxy.py +0 -98
- mindspore/_akg/akg/tvm/exec/rpc_server.py +0 -88
- mindspore/_akg/akg/tvm/exec/rpc_tracker.py +0 -62
- mindspore/_akg/akg/tvm/rpc/__init__.py +0 -29
- mindspore/_akg/akg/tvm/rpc/base.py +0 -182
- mindspore/_akg/akg/tvm/rpc/client.py +0 -436
- mindspore/_akg/akg/tvm/rpc/proxy.py +0 -595
- mindspore/_akg/akg/tvm/rpc/server.py +0 -413
- mindspore/_akg/akg/tvm/rpc/tornado_util.py +0 -121
- mindspore/_akg/akg/tvm/rpc/tracker.py +0 -431
- mindspore/_extends/graph_kernel/expander.py +0 -80
- mindspore/_extends/graph_kernel/expanders/__init__.py +0 -57
- mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
- mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
- mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
- mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
- mindspore/_extends/graph_kernel/expanders/bias_add_grad.py +0 -49
- mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
- mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
- mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
- mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
- mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
- mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
- mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
- mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
- mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
- mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
- mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
- mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
- mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
- mindspore/_extends/graph_kernel/expanders/gather.py +0 -43
- mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
- mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
- mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
- mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
- mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
- mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
- mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
- mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
- mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
- mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
- mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
- mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
- mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
- mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
- mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
- mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
- mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
- mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
- mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
- mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
- mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
- mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
- mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
- mindspore/_extends/graph_kernel/expanders/tile.py +0 -54
- mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
- mindspore/_extends/parse/jit_fallback_modules.py +0 -51
- mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
- mindspore/dataset/engine/graphdata.py +0 -1586
- mindspore/include/api/net.h +0 -142
- mindspore/ops/_grad/grad_array_ops.py +0 -1347
- mindspore/ops/_grad/grad_clip_ops.py +0 -84
- mindspore/ops/_grad/grad_debug_ops.py +0 -68
- mindspore/ops/_grad/grad_inner_ops.py +0 -235
- mindspore/ops/_grad/grad_math_ops.py +0 -1684
- mindspore/ops/_grad/grad_nn_ops.py +0 -1529
- mindspore/ops/_grad/grad_other_ops.py +0 -89
- mindspore/ops/_grad/grad_sequence_ops.py +0 -296
- mindspore/ops/_grad/grad_sparse.py +0 -323
- mindspore/ops/_grad_experimental/grad_image_ops.py +0 -249
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -195
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Argmax_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Argmin_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Assign_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +0 -150
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +0 -66
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +0 -28
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +0 -33
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +0 -306
- mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +0 -240
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +0 -247
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +0 -247
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +0 -315
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +0 -278
- mindspore/ops/bprop_mindir/DType_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +0 -58
- mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
- mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +0 -25
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +0 -18
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +0 -27
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Equal_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +0 -58
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +0 -54
- mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/GatherD_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +0 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Greater_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/IOU_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/IsInf_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/IsNan_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +0 -126
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +0 -30
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +0 -43
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Less_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +0 -74
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +0 -74
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +0 -75
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +0 -65
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +0 -27
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +0 -35
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NonZero_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +0 -82
- mindspore/ops/bprop_mindir/Range_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Rank_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +0 -60
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +0 -89
- mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +0 -52
- mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Round_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +0 -24
- mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/Select_bprop.mindir +0 -31
- mindspore/ops/bprop_mindir/Shape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Sign_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Slice_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +0 -36
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +0 -33
- mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +0 -28
- mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Split_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +0 -54
- mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +0 -95
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +0 -98
- mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +0 -66
- mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +0 -32
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +0 -38
- mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
- mindspore/rewrite/node_visitor.py +0 -44
- mindspore/rewrite/topological_manager.py +0 -203
- mindspore/scipy/sparse/linalg.py +0 -192
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/top_level.txt +0 -0
mindspore/nn/layer/conv.py
CHANGED

@@ -15,14 +15,15 @@
 """conv"""
 from __future__ import absolute_import

+import math
 import numpy as np

-from mindspore import log as logger
 from mindspore import context
 from mindspore.ops import operations as P
+import mindspore.common.dtype as mstype
 from mindspore.ops.primitive import _primexpr
 from mindspore.common.parameter import Parameter
-from mindspore.common.initializer import initializer
+from mindspore.common.initializer import initializer, HeUniform, Uniform, _calculate_fan_in_and_fan_out
 from mindspore.common.tensor import Tensor
 from mindspore import _checkparam as Validator
 from mindspore._checkparam import twice, _check_3d_int_or_tuple
@@ -50,7 +51,8 @@ class _Conv(Cell):
                  weight_init,
                  bias_init,
                  data_format='NCHW',
-                 transposed=False):
+                 transposed=False,
+                 dtype=mstype.float32):
         """Initialize _Conv."""
         super(_Conv, self).__init__()
         self.in_channels = Validator.check_positive_int(in_channels, 'in_channels', self.cls_name)
@@ -58,8 +60,6 @@ class _Conv(Cell):
         self.kernel_size = kernel_size
         self.stride = stride
         self.pad_mode = pad_mode
-        self.weight_init = weight_init
-        self.bias_init = bias_init
         self.data_format = Validator.check_string(data_format, ['NCHW', 'NHWC', 'NCDHW'], 'format', self.cls_name)
         if context.get_context("device_target") != "GPU" and self.data_format == "NHWC":
             raise ValueError(f"For '{self.cls_name}', the \"NHWC\" format only support in GPU target, "
@@ -96,13 +96,23 @@ class _Conv(Cell):
         else:
             shape = [out_channels, *kernel_size, in_channels // group] if self.data_format == "NHWC" else \
                 [out_channels, in_channels // group, *kernel_size]
-        self.weight = Parameter(initializer(self.weight_init, shape), name='weight')
+        if weight_init is None:
+            weight_init = HeUniform(math.sqrt(5))
+        self.weight_init = weight_init
+        self.weight = Parameter(initializer(self.weight_init, shape, dtype=dtype), name='weight')

+        self.bias_init = bias_init
         if Validator.check_bool(has_bias, "has_bias", self.cls_name):
-            self.bias = Parameter(initializer(self.bias_init, [out_channels]), name='bias')
+            if bias_init is None:
+                fan_in, _ = _calculate_fan_in_and_fan_out(shape)
+                if fan_in != 0:
+                    bound = 1 / math.sqrt(fan_in)
+                    bias_init = Uniform(bound)
+                else:
+                    bias_init = 'zeros'
+            self.bias_init = bias_init
+            self.bias = Parameter(initializer(self.bias_init, [out_channels], dtype=dtype), name='bias')
         else:
-            if self.bias_init != 'zeros':
-                logger.warning("Value of 'has_bias' is False, value of 'bias_init' will be ignored.")
             self.bias = None

     def construct(self, *inputs):
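The `_Conv` change above is the behavioral core of this diff: when `weight_init` or `bias_init` is left as `None` (the new 2.2 default), the kernel falls back to `HeUniform(math.sqrt(5))` and the bias to `Uniform(1 / math.sqrt(fan_in))` (or `'zeros'` when `fan_in` is 0). A minimal sketch of the equivalent explicit construction, assuming a working MindSpore 2.2 install; `_calculate_fan_in_and_fan_out` is a private helper, but it is the same one the diff itself imports:

```python
import math

from mindspore import nn
from mindspore.common.initializer import (HeUniform, Uniform,
                                          _calculate_fan_in_and_fan_out)

# A Conv2d kernel has shape [out_channels, in_channels // group, kh, kw].
fan_in, _ = _calculate_fan_in_and_fan_out([16, 3, 3, 3])  # 3 * 3 * 3 = 27

# Leaving weight_init/bias_init at None (the new default) ...
default_conv = nn.Conv2d(3, 16, kernel_size=3, has_bias=True)

# ... behaves like spelling the fallback initializers out explicitly.
explicit_conv = nn.Conv2d(3, 16, kernel_size=3, has_bias=True,
                          weight_init=HeUniform(math.sqrt(5)),
                          bias_init=Uniform(1 / math.sqrt(fan_in)))
```

This mirrors the Kaiming-style defaults used elsewhere (e.g. PyTorch's `nn.Conv2d`), which is presumably why the hard-coded `'zeros'` bias warning was dropped.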
@@ -131,31 +141,44 @@ class Conv2d(_Conv):

 class Conv2d(_Conv):
     r"""
-
-
-
-    :math:`
-
+    2D convolution layer.
+
+    Applies a 2D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, H_{in}, W_{in})`,
+    where :math:`N` is batch size, :math:`C` is channel number, :math:`H` is feature height, :math:`W` is feature width.
+
+    The output is calculated based on formula:

     .. math::

         \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
         \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})

-    where :math:`
-
-
-
-
-
-
-    :math:`
-
-
-    :math:`
-
-
-
+    where :math:`bias` is the output channel bias, :math:`ccor` is
+    the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+    :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+    Here are the indices' meanings:
+    - :math:`i` corresponds to the batch number, ranging from 0 to N-1, where N is the batch size of the input.
+
+    - :math:`j` corresponds to the output channel, ranging from 0 to C_{out}-1, where C_{out} is the number of
+      output channels, which is also equal to the number of kernels.
+
+    - :math:`k` corresponds to the input channel, ranging from 0 to C_{in}-1, where C_{in} is the number of
+      input channels, which is also equal to the number of channels in the convolutional kernels.
+
+    Therefore, in the above formula, :math:`{bias}(C_{out_j})` represents the bias of the :math:`j`-th
+    output channel, :math:`{weight}(C_{out_j}, k)` represents the slice of the :math:`j`-th convolutional
+    kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+    channel in the :math:`i`-th batch of the input feature map.
+
+    The shape of the convolutional kernel is given by :math:`(kernel\_size[0], kernel\_size[1])`,
+    where :math:`kernel\_size[0]` and :math:`kernel\_size[1]` are the height and width of the kernel, respectively.
+    If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
+    will be :math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size[0]}, \text{kernel_size[1]})`,
+    where `group` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+    For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+    <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.

     Note:
         On Ascend platform, only group convolution in depthwise convolution scenarios is supported.
@@ -168,47 +191,62 @@ class Conv2d(_Conv):
             The data type is an integer or a tuple of two integers. An integer represents the height
             and width of the convolution kernel. A tuple of two integers represents the height
             and width of the convolution kernel respectively.
-        stride (Union[int, tuple[int]]): The movement stride of the 2D convolution kernel.
-            The data type is an integer or a tuple of two integers. An integer represents the movement step size
+        stride (Union[int, tuple[int]], optional): The movement stride of the 2D convolution kernel.
+            The data type is an integer or a tuple of two or four integers. An integer represents the movement step size
             in both height and width directions. A tuple of two integers represents the movement step size in the height
-            and width directions respectively. Default: 1.
-        pad_mode (str): Specifies padding mode.
-            "same", "valid"
-
-            - same
-
-
-
-
-
-            If this mode is set,
-
-
+            and width directions respectively. Default: ``1`` .
+        pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+            ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
+
+            - ``"same"``: Pad the input around its edges so that the shape of input and output
+              are the same when `stride` is set to ``1``.
+              The amount of padding is calculated by the operator internally. If the amount is even, it is
+              uniformly distributed around the input; if it is odd, the excess amount goes to the right/bottom side.
+              If this mode is set, `padding` must be 0.
+            - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+              possible height and width. Extra pixels that could not complete a full stride will
+              be discarded. If this mode is set, `padding` must be 0.
+            - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+              in the height and width directions is determined by the `padding` parameter.
+              If this mode is set, `padding` must be greater than or equal to 0.
+
+        padding (Union[int, tuple[int]], optional): The number of padding
+            on the height and width directions of the input.
             The data type is an integer or a tuple of four integers. If `padding` is an integer,
             then the top, bottom, left, and right padding are all equal to `padding`.
             If `padding` is a tuple of 4 integers, then the top, bottom, left, and right padding
             is equal to `padding[0]`, `padding[1]`, `padding[2]`, and `padding[3]` respectively.
-            The value should be greater than or equal to 0. Default: 0.
-        dilation (Union
-
-
-            and
-
+            The value should be greater than or equal to 0. Default: ``0`` .
+        dilation (Union(int, tuple[int]), optional): Specifies the dilation rate to use for dilated convolution.
+            It can be a single int or a tuple of 2 or 4 integers. A single int means the dilation size is the same
+            in both the height and width directions. A tuple of two ints represents the dilation size in
+            the height and width directions, respectively. For a tuple of four ints, the two ints corresponding
+            to the (N, C) dimensions are treated as 1, and the two corresponding to the (H, W) dimensions are the
+            dilation size in the height and width directions respectively.
+            Assuming :math:`dilation=(d0, d1)`, the convolutional kernel samples the input with a
+            spacing of :math:`d0-1` elements in the height direction and :math:`d1-1` elements in the width direction.
+            The values in the height and width dimensions are in the ranges [1, H] and [1, W], respectively.
+            Default: ``1`` .
+        group (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
             divisible by `group`. If the group is equal to `in_channels` and `out_channels`,
-            this 2D convolution layer also can be called 2D depthwise convolution layer. Default: 1.
-        has_bias (bool): Whether the Conv2d layer has a bias parameter. Default: False.
-        weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of
+            this 2D convolution layer also can be called 2D depthwise convolution layer. Default: ``1`` .
+        has_bias (bool, optional): Whether the Conv2d layer has a bias parameter. Default: ``False`` .
+        weight_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of
+            weight parameter.
             It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
-            values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform'
-            as constant 'One' and 'Zero' distributions are possible. Alias
-
-
-
+            values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
+            distributions as well as constant ``'One'`` and ``'Zero'`` distributions are possible. Alias
+            ``'xavier_uniform'`` , ``'he_uniform'`` , ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and
+            lowercase are both acceptable. Refer to the values of
+            `Initializer <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.common.initializer.html>`_,
+            for more details. Default: ``None`` , weight will be initialized using ``'HeUniform'``.
+        bias_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of bias parameter.
             Available initialization methods are the same as 'weight_init'. Refer to the values of
-            Initializer
-
-
+            `Initializer <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.common.initializer.html>`_,
+            for more details. Default: ``None`` , bias will be initialized using ``'Uniform'`` .
+        data_format (str, optional): The optional value for data format, is ``'NHWC'`` or ``'NCHW'`` .
+            Default: ``'NCHW'`` .
+        dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .

     Inputs:
         - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` \
- **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` \
|
|
@@ -217,7 +255,7 @@ class Conv2d(_Conv):
  Outputs:
  Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(N, H_{out}, W_{out}, C_{out})`.

- pad_mode is 'same'
+ pad_mode is ``'same'``:

  .. math::
  \begin{array}{ll} \\
@@ -225,7 +263,7 @@ class Conv2d(_Conv):
  W_{out} = \left \lceil{\frac{W_{in}}{\text{stride[1]}}} \right \rceil \\
  \end{array}

- pad_mode is 'valid'
+ pad_mode is ``'valid'``:

  .. math::
  \begin{array}{ll} \\
@@ -235,7 +273,7 @@ class Conv2d(_Conv):
  {\text{stride[1]}}} \right \rceil \\
  \end{array}

- pad_mode is 'pad'
+ pad_mode is ``'pad'``:

  .. math::
  \begin{array}{ll} \\
@@ -259,6 +297,9 @@ class Conv2d(_Conv):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, nn
+ >>> import numpy as np
  >>> net = nn.Conv2d(120, 240, 4, has_bias=False, weight_init='normal')
  >>> x = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
  >>> output = net(x).shape
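For illustration, a minimal sketch (assuming only the shape formulas quoted in this docstring; kernel 4, stride 1, dilation 1) contrasts the 'same' and 'valid' padding modes:

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    x = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)

    # 'same': H_out = ceil(1024 / 1) = 1024, W_out = ceil(640 / 1) = 640
    same = nn.Conv2d(120, 240, 4, pad_mode='same')
    print(same(x).shape)   # (1, 240, 1024, 640)

    # 'valid': H_out = ceil((1024 - 1*(4-1)) / 1) = 1021, W_out = 637
    valid = nn.Conv2d(120, 240, 4, pad_mode='valid')
    print(valid(x).shape)  # (1, 240, 1021, 637)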
@@ -277,14 +318,20 @@ class Conv2d(_Conv):
  dilation=1,
  group=1,
  has_bias=False,
- weight_init=
- bias_init=
- data_format='NCHW'
+ weight_init=None,
+ bias_init=None,
+ data_format='NCHW',
+ dtype=mstype.float32):
  """Initialize Conv2d."""
  kernel_size = twice(kernel_size)
  stride = twice(stride)
  self._dilation = dilation
  dilation = twice(dilation)
+ Validator.check_positive_int(group, 'group', self.cls_name)
+ if not (in_channels % group == 0 and out_channels % group == 0):
+     raise ValueError(f"The argument 'group' should be divisible by 'in_channels' " \
+                      f"and 'out_channels', but got group:{group}, in_channels:{in_channels}, " \
+                      f"out_channels:{out_channels}.")
  super(Conv2d, self).__init__(
  in_channels,
  out_channels,
@@ -297,7 +344,8 @@ class Conv2d(_Conv):
  has_bias,
  weight_init,
  bias_init,
- data_format
+ data_format,
+ dtype=dtype)
  self.conv2d = P.Conv2D(out_channel=self.out_channels,
  kernel_size=self.kernel_size,
  mode=1,
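The new validation above rejects channel counts that `group` does not divide. A minimal sketch of the behavior (the 8-channel depthwise case and the failing 6/4 case are illustrative values, not from the source):

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    # group == in_channels == out_channels: a 2D depthwise convolution layer
    depthwise = nn.Conv2d(8, 8, 3, group=8)
    x = Tensor(np.ones([1, 8, 16, 16]), mindspore.float32)
    print(depthwise(x).shape)  # (1, 8, 16, 16) under the default pad_mode='same'

    try:
        nn.Conv2d(8, 6, 3, group=4)  # 6 % 4 != 0, so __init__ raises
    except ValueError as err:
        print(err)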
@@ -324,26 +372,45 @@ def _check_input_3d(input_shape, op_name):

  class Conv1d(_Conv):
      r"""
-
-
-
+ 1D convolution layer.
+
+ Applies a 1D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, L_{in})`,
+ where :math:`N` is batch size, :math:`C` is channel number, :math:`L` is input sequence width.
+
+ The output is calculated based on formula:

  .. math::

  \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
  \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})

- where :math:`
-
-
- is a convolution kernel slice with shape :math:`\text{kernel_size}`, where :math:`\text{kernel_size}`
- is the width of the convolution kernel. :math:`\text{bias}` is the bias parameter,
- and :math:`\text{X}` is the input tensor. The shape of full convolution kernel is
- :math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size})`,
- where `group` is the number of groups to split the input `x` in the channel dimension.
+ where :math:`bias` is the output channel bias, :math:`ccor` is
+ the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+ :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.

-
-
+ Here are the indices' meanings:
+ - :math:`i` corresponds to the batch number, ranging from 0 to N-1, where N is the batch size of the input.
+
+ - :math:`j` corresponds to the output channel, ranging from 0 to C_{out}-1, where C_{out} is the number of
+   output channels, which is also equal to the number of kernels.
+
+ - :math:`k` corresponds to the input channel, ranging from 0 to C_{in}-1, where C_{in} is the number of
+   input channels, which is also equal to the number of channels in the convolutional kernels.
+
+ Therefore, in the above formula, :math:`{bias}(C_{out_j})` represents the bias of the :math:`j`-th
+ output channel, :math:`{weight}(C_{out_j}, k)` represents the slice of the :math:`j`-th convolutional
+ kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+ channel in the :math:`i`-th batch of the input feature map.
+
+ The shape of the convolutional kernel is given by :math:`(kernel\_size)`,
+ where :math:`kernel\_size` is the width of the kernel.
+ If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
+ will be :math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size})`,
+ where `group` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+ For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+ <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_
+ and `ConvNets <http://cs231n.github.io/convolutional-networks/>`_ .

  Note:
  On Ascend platform, only group convolution in depthwise convolution scenarios is supported.
@@ -353,54 +420,68 @@ class Conv1d(_Conv):
  in_channels (int): The channel number of the input tensor of the Conv1d layer.
  out_channels (int): The channel number of the output tensor of the Conv1d layer.
  kernel_size (int): Specifies the width of the 1D convolution kernel.
- stride (int): The movement stride of the 1D convolution kernel. Default: 1.
- pad_mode (str): Specifies padding mode.
- "same", "valid"
-
- - same
-
-
-
-
-
-
- If this mode is set,
-
-
-
-
-
-
-
-
-
+ stride (int, optional): The movement stride of the 1D convolution kernel. Default: ``1`` .
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
+
+ - ``"same"``: Pad the input at the begin and end so that the shape of input and output
+   are the same when `stride` is set to ``1``.
+   The amount of padding is calculated by the operator internally. If the amount is even, it is
+   uniformly distributed around the input; if it is odd, the excess padding goes to the right side.
+   If this mode is set, `padding` must be 0.
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+   possible length. Extra pixels that could not complete a full stride will
+   be discarded. If this mode is set, `padding` must be 0.
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+   at the begin and end is determined by the `padding` parameter.
+   If this mode is set, `padding` must be greater than or equal to 0.
+
+ padding (Union(int, tuple[int], list[int]), optional): Specifies the amount of padding to apply on
+ both sides of `input` when `pad_mode` is set to ``"pad"``. The
+ paddings of left and right are the same, equal to padding or padding[0] when padding is a tuple of
+ 1 integer. Default: ``0`` .
+ dilation (Union(int, tuple[int]), optional): Specifies the dilation rate to use for dilated convolution.
+ It can be a single int or a tuple of 1 integer.
+ Assuming :math:`dilation=(d0,)`, the convolutional kernel samples the input with a
+ spacing of :math:`d0-1` elements in the width direction.
+ The value should be in the range [1, L].
+ Default: ``1`` .
+ group (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
+ divisible by `group`. Default: ``1`` .
+ has_bias (bool, optional): Whether the Conv1d layer has a bias parameter. Default: ``False`` .
+ weight_init (Union[Tensor, str, Initializer, numbers.Number], optional):
+ Initialization method of weight parameter.
  It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
- values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform'
- as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform'
- and 'zeros' are acceptable. Uppercase and lowercase are both acceptable.
-
-
+ values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
+ distributions as well as constant 'One' and 'Zero' distributions are possible. Alias ``'xavier_uniform'`` ,
+ ``'he_uniform'`` , ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and lowercase are both acceptable.
+ Refer to the values of
+ `Initializer <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.common.initializer.html>`_,
+ for more details. Default: ``None`` , weight will be initialized using ``'HeUniform'``.
+ bias_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of bias parameter.
  Available initialization methods are the same as 'weight_init'. Refer to the values of
- Initializer
+ `Initializer <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.common.initializer.html>`_,
+ for more details. Default: ``None`` , bias will be initialized using ``'Uniform'``.
+ dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .

  Inputs:
- - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})
+ - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})` .

  Outputs:
  Tensor of shape :math:`(N, C_{out}, L_{out})`.

- pad_mode is 'same'
+ pad_mode is ``'same'``:

  .. math::
  L_{out} = \left \lceil{\frac{L_{in}}{\text{stride}}} \right \rceil

- pad_mode is 'valid'
+ pad_mode is ``'valid'``:

  .. math::
  L_{out} = \left \lceil{\frac{L_{in} - \text{dilation} \times (\text{kernel_size} - 1) }
  {\text{stride}}} \right \rceil

- pad_mode is 'pad'
+ pad_mode is ``'pad'``:

  .. math::
  L_{out} = \left \lfloor{\frac{L_{in} + 2 \times padding - (\text{kernel_size} - 1) \times
@@ -416,6 +497,9 @@ class Conv1d(_Conv):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, nn
+ >>> import numpy as np
  >>> net = nn.Conv1d(120, 240, 4, has_bias=False, weight_init='normal')
  >>> x = Tensor(np.ones([1, 120, 640]), mindspore.float32)
  >>> output = net(x).shape
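Since the constructor now accepts `dtype`, parameter precision can be set at build time. A short sketch (float16 here is an illustrative choice, not from the source):

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    net = nn.Conv1d(120, 240, 4, dtype=mindspore.float16)  # weights created as float16
    x = Tensor(np.ones([1, 120, 640]), mindspore.float16)
    print(net(x).shape)  # (1, 240, 640) with the default pad_mode='same'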
@@ -434,8 +518,9 @@ class Conv1d(_Conv):
  dilation=1,
  group=1,
  has_bias=False,
- weight_init=
- bias_init=
+ weight_init=None,
+ bias_init=None,
+ dtype=mstype.float32):
  """Initialize Conv1d."""
  Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
  Validator.check_value_type("stride", stride, [int], self.cls_name)
@@ -445,6 +530,11 @@ class Conv1d(_Conv):
  Validator.check_int(stride, 1, Validator.GE, 'stride', self.cls_name)
  Validator.check_non_negative_int(padding, 'padding', self.cls_name)
  Validator.check_int(dilation, 1, Validator.GE, 'dilation', self.cls_name)
+ Validator.check_positive_int(group, 'group', self.cls_name)
+ if not (in_channels % group == 0 and out_channels % group == 0):
+     raise ValueError(f"The argument 'group' should be divisible by 'in_channels' " \
+                      f"and 'out_channels', but got group:{group}, in_channels:{in_channels}, " \
+                      f"out_channels:{out_channels}.")
  kernel_size = (1, kernel_size)
  stride = (1, stride)
  dilation = (1, dilation)
@@ -469,7 +559,8 @@ class Conv1d(_Conv):
  group,
  has_bias,
  weight_init,
- bias_init
+ bias_init,
+ dtype=dtype)
  self.padding = (0, 0, padding, padding)
  Validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.cls_name)
  self.conv2d = P.Conv2D(out_channel=self.out_channels,
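As the init path above shows, Conv1d reshapes its configuration into a P.Conv2D with kernel (1, kernel_size), i.e. the 1D case is lifted to 2D internally. A rough shape-level sketch of that equivalence (weights would have to be shared for the values to match; the channel and length sizes are illustrative):

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    conv1d = nn.Conv1d(8, 16, 5, pad_mode='valid')
    x1d = Tensor(np.ones([1, 8, 100]), mindspore.float32)
    print(conv1d(x1d).shape)  # (1, 16, 96)

    conv2d = nn.Conv2d(8, 16, (1, 5), pad_mode='valid')
    x2d = Tensor(np.ones([1, 8, 1, 100]), mindspore.float32)
    print(conv2d(x2d).shape)  # (1, 16, 1, 96): the same length with a singleton height axis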
@@ -505,31 +596,48 @@ def _check_input_5dims(input_shape, op_name):

  class Conv3d(_Conv):
      r"""
-
-
-
- :math:`D_{in}, H_{in}, W_{in}`
-
+ 3D convolution layer.
+
+ Applies a 3D convolution over an input tensor which is typically of shape
+ :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`, where :math:`N` is batch size, :math:`C` is channel number,
+ :math:`D` is feature depth, :math:`H` is feature height, :math:`W` is feature width.
+
+ The output is calculated based on formula:

  .. math::

  \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
  \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})

- where :math:`
-
-
-
-
-
-
-
-
-
-
-
-
-
+ where :math:`bias` is the output channel bias, :math:`ccor` is
+ the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+ :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+ Here are the indices' meanings:
+ - :math:`i` corresponds to the batch number, ranging from 0 to N-1, where N is the batch size of the input.
+
+ - :math:`j` corresponds to the output channel, ranging from 0 to C_{out}-1, where C_{out} is the number of
+   output channels, which is also equal to the number of kernels.
+
+ - :math:`k` corresponds to the input channel, ranging from 0 to C_{in}-1, where C_{in} is the number of
+   input channels, which is also equal to the number of channels in the convolutional kernels.
+
+ Therefore, in the above formula, :math:`{bias}(C_{out_j})` represents the bias of the :math:`j`-th
+ output channel, :math:`{weight}(C_{out_j}, k)` represents the slice of the :math:`j`-th convolutional
+ kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+ channel in the :math:`i`-th batch of the input feature map.
+
+ The shape of the convolutional kernel is given by
+ :math:`(\text{kernel_size[0]}, \text{kernel_size[1]}, \text{kernel_size[2]})`
+ where :math:`kernel\_size[0]` , :math:`kernel\_size[1]` and :math:`kernel\_size[2]` are the depth,
+ height and width of the kernel, respectively.
+ If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
+ will be :math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size[0]},
+ \text{kernel_size[1]}, \text{kernel_size[2]})`,
+ where `group` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+ For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+ <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.

  Note:
  On Ascend platform, only group convolution in depthwise convolution scenarios is supported.
@@ -539,48 +647,65 @@ class Conv3d(_Conv):
  in_channels (int): The channel number of the input tensor of the Conv3d layer.
  out_channels (int): The channel number of the output tensor of the Conv3d layer.
  kernel_size (Union[int, tuple[int]]): Specifies the depth, height and width of the 3D convolution kernel.
-
- and
- and
- stride (Union[int, tuple[int]]): The movement stride of the 3D convolution kernel.
+ It can be a single int or a tuple of 3 integers. A single int means the value is for depth, height
+ and the width. A tuple of 3 ints means the first value is
+ for depth and the rest is for the height and width.
+ stride (Union[int, tuple[int]], optional): The movement stride of the 3D convolution kernel.
  The data type is an integer or a tuple of three integers. An integer represents the movement step size
  in depth, height and width directions. A tuple of three integers represents the movement step size
- in the depth, height and width directions respectively. Default: 1.
- pad_mode (str): Specifies padding mode.
- "same", "valid"
-
- - same
-
-
-
-
-
-
-
-
-
+ in the depth, height and width directions respectively. Default: ``1`` .
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
+
+ - ``"same"``: Pad the input around its depth/height/width dimension so that the shape of input and output
+   are the same when `stride` is set to ``1``.
+   The amount of padding is calculated by the operator internally. If the amount is even,
+   it is uniformly distributed around the input; if it is odd, the excess amount goes
+   to the front/right/bottom side.
+   If this mode is set, `padding` must be 0.
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+   possible depth, height and width. Extra pixels that could not complete a full stride will
+   be discarded. If this mode is set, `padding` must be 0.
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+   in the depth, height and width dimension is determined by the `padding` parameter.
+   If this mode is set, `padding` must be greater than or equal to 0.
+
+ padding (Union(int, tuple[int]), optional): The number of padding on the depth,
+ height and width directions of the input.
  The data type is an integer or a tuple of six integers. If `padding` is an integer,
  then the head, tail, top, bottom, left, and right padding are all equal to `padding`.
  If `padding` is a tuple of six integers, then the head, tail, top, bottom, left, and right padding
  is equal to `padding[0]`, `padding[1]`, `padding[2]`, `padding[3]`, `padding[4]` and `padding[5]`
- respectively. The value should be greater than or equal to 0. Default: 0.
- dilation (Union[int, tuple[int]]):
-
-
-
-
-
-
+ respectively. The value should be greater than or equal to 0. Default: ``0`` .
+ dilation (Union[int, tuple[int]], optional): Specifies the dilation rate to use for dilated convolution.
+ It can be a single int or a tuple of 3 integers. A single int means the dilation size is the same
+ in the depth, height and width directions. A tuple of 3 ints represents the dilation size in
+ the depth, height and width directions, respectively.
+ Assuming :math:`dilation=(d0, d1, d2)`, the convolutional kernel samples the input with a
+ spacing of :math:`d0-1` elements in the depth direction, :math:`d1-1` elements in the height direction,
+ :math:`d2-1` elements in the width direction respectively.
+ The values in the depth, height and width dimensions are in
+ the ranges [1, D], [1, H] and [1, W], respectively.
+ Default: ``1`` .
+ group (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
+ divisible by `group`. Default: ``1`` .
+ has_bias (bool, optional): Whether the Conv3d layer has a bias parameter. Default: ``False`` .
+ weight_init (Union[Tensor, str, Initializer, numbers.Number], optional):
+ Initialization method of weight parameter.
  It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
- values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform'
- as constant 'One' and 'Zero' distributions are possible. Alias
-
-
-
+ values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
+ distributions as well as constant ``'One'`` and ``'Zero'`` distributions are possible. Alias
+ ``'xavier_uniform'`` , ``'he_uniform'`` , ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and
+ lowercase are both acceptable. Refer to the values of
+ `Initializer <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.common.initializer.html>`_,
+ for more details. Default: ``None`` , weight will be initialized using ``'HeUniform'``.
+ bias_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of bias parameter.
  Available initialization methods are the same as 'weight_init'. Refer to the values of
- Initializer
-
+ `Initializer <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.common.initializer.html>`_,
+ for more details. Default: ``None`` , bias will be initialized using ``'Uniform'`` .
+ data_format (str, optional): The optional value for data format. Currently only supports ``'NCDHW'`` .
+ dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
+

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
@@ -589,7 +714,7 @@ class Conv3d(_Conv):
  Outputs:
  Tensor of shape is :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`.

- pad_mode is 'same':
+ pad_mode is ``'same'`` :

  .. math::
  \begin{array}{ll} \\
@@ -599,7 +724,7 @@ class Conv3d(_Conv):
  \end{array}


- pad_mode is 'valid':
+ pad_mode is ``'valid'`` :

  .. math::
  \begin{array}{ll} \\
@@ -611,7 +736,7 @@ class Conv3d(_Conv):
  {\text{stride[2]}} + 1} \right \rfloor \\
  \end{array}

- pad_mode is 'pad':
+ pad_mode is ``'pad'`` :

  .. math::
  \begin{array}{ll} \\
@@ -637,6 +762,9 @@ class Conv3d(_Conv):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, nn
+ >>> import numpy as np
  >>> x = Tensor(np.ones([16, 3, 10, 32, 32]), mindspore.float32)
  >>> conv3d = nn.Conv3d(in_channels=3, out_channels=32, kernel_size=(4, 3, 3))
  >>> output = conv3d(x)
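A small shape check for the 3D case, following the 'valid' formula quoted earlier (kernel (4, 3, 3) comes from the docstring example; the pad_mode override is illustrative):

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    x = Tensor(np.ones([16, 3, 10, 32, 32]), mindspore.float32)
    conv3d = nn.Conv3d(in_channels=3, out_channels=32, kernel_size=(4, 3, 3), pad_mode='valid')
    # D_out = ceil((10 - 3) / 1) = 7, H_out = W_out = ceil((32 - 2) / 1) = 30
    print(conv3d(x).shape)  # (16, 32, 7, 30, 30)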
@@ -655,11 +783,12 @@ class Conv3d(_Conv):
  dilation=1,
  group=1,
  has_bias=False,
- weight_init=
- bias_init=
- data_format='NCDHW'
+ weight_init=None,
+ bias_init=None,
+ data_format='NCDHW',
+ dtype=mstype.float32):
  """Initialize Conv3d."""
- if not in_channels % group == 0 and out_channels % group == 0:
+ if not (in_channels % group == 0 and out_channels % group == 0):
  raise ValueError("The argument 'group' should be divisible by 'in_channels' " \
  "and 'out_channels'")

@@ -681,8 +810,9 @@ class Conv3d(_Conv):
  has_bias,
  weight_init,
  bias_init,
- data_format
-
+ data_format,
+ dtype=dtype)
+ out_channels = self.out_channels
  self.conv3d = P.Conv3D(out_channel=out_channels,
  kernel_size=self.kernel_size,
  mode=1,
@@ -690,33 +820,17 @@ class Conv3d(_Conv):
  pad=self.padding,
  stride=self.stride,
  dilation=self.dilation,
- group=
+ group=group,
  data_format=self.data_format)
  self.bias_add = P.BiasAdd(data_format=self.data_format)
  self.shape = P.Shape()
- self.concat = P.Concat(1)
- self.split_0 = P.Split(0, self.group)
- self.split_1 = P.Split(1, self.group)

  def construct(self, x):
  x_shape = self.shape(x)
  _check_input_5dims(x_shape, self.cls_name)
-
-
-
- out = self.bias_add(out, self.bias)
- else:
- features = self.split_1(x)
- weights = self.split_0(self.weight)
- outputs = ()
- for i in range(self.group):
- output = self.conv3d(features[i], weights[i])
- outputs = outputs + (output,)
- out = self.concat(outputs)
- if self.bias is not None:
- new_shape = [1 for _ in range(out.ndim)]
- new_shape[1] = self.out_channels
- out = out + self.bias.reshape(new_shape)
+ out = self.conv3d(x, self.weight)
+ if self.has_bias:
+     out = self.bias_add(out, self.bias)
  return out


@@ -725,7 +839,7 @@ class Conv3dTranspose(_Conv):
  Calculates a 3D transposed convolution, which can be regarded as Conv3d for the gradient of the input.
  It also called deconvolution (although it is not an actual deconvolution).

-
+ The input is typically of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`,
  where :math:`N` is batch size, :math:`C_{in}` is a number of
  channels, :math:`D_{in}, H_{in}, W_{in}` are the depth, height and width of the feature layer respectively.

@@ -745,58 +859,65 @@ class Conv3dTranspose(_Conv):
  stride (Union[int, tuple[int]]): The movement stride of the 3D convolution kernel.
  The data type is an integer or a tuple of three integers. An integer represents the movement step size
  in depth, height and width directions. A tuple of three integers represents the movement step size
- in the depth, height and width directions respectively. Default: 1.
- pad_mode (str): Specifies padding mode.
- "same", "valid"
-
- - same
-
-
-
-
-
-
-
+ in the depth, height and width directions respectively. Default: ``1`` .
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
+
+ - ``"same"``: Pad the input around its depth/height/width dimension so that the shape of input and output
+   are the same when `stride` is set to ``1``.
+   The amount of padding is calculated by the operator internally. If the amount is even,
+   it is uniformly distributed around the input; if it is odd, the excess amount goes
+   to the front/right/bottom side.
+   If this mode is set, `padding` must be 0.
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+   possible depth, height and width. Extra pixels that could not complete a full stride will
+   be discarded. If this mode is set, `padding` must be 0.
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+   in the depth, height and width dimension is determined by the `padding` parameter.
+   If this mode is set, `padding` must be greater than or equal to 0.

  padding (Union(int, tuple[int])): The number of padding on the depth, height and width directions of the input.
  The data type is an integer or a tuple of six integers. If `padding` is an integer,
  then the head, tail, top, bottom, left, and right padding are all equal to `padding`.
  If `padding` is a tuple of six integers, then the head, tail, top, bottom, left, and right padding
  is equal to `padding[0]`, `padding[1]`, `padding[2]`, `padding[3]`, `padding[4]` and `padding[5]`
- respectively. The value should be greater than or equal to 0. Default: 0.
+ respectively. The value should be greater than or equal to 0. Default: ``0`` .
  dilation (Union[int, tuple[int]]): Dilation size of 3D convolution kernel.
  The data type is an integer or a tuple of three integers. If :math:`k > 1`, the kernel is sampled
  every `k` elements. The value of `k` on the depth, height and width directions is in range of
- [1, D], [1, H] and [1, W] respectively. Default: 1.
+ [1, D], [1, H] and [1, W] respectively. Default: ``1`` .
  group (int): Splits filter into groups, `in_channels` and `out_channels` must be
- divisible by `group`. Default: 1
+ divisible by `group`. Default: ``1`` .
  output_padding (Union(int, tuple[int])): The number of padding on the depth, height and width directions of
  the output. The data type is an integer or a tuple of six integers. If `output_padding` is an integer,
  then the head, tail, top, bottom, left, and right padding are all equal to `output_padding`.
  If `output_padding` is a tuple of six integers, then the head, tail, top, bottom, left, and right padding
  is equal to `output_padding[0]`, `output_padding[1]`, `output_padding[2]`, `output_padding[3]`,
  `output_padding[4]` and `output_padding[5]` respectively. The value should be greater than or equal to 0.
- Default: 0.
- has_bias (bool): Whether the Conv3dTranspose layer has a bias parameter. Default: False.
+ Default: ``0`` .
+ has_bias (bool): Whether the Conv3dTranspose layer has a bias parameter. Default: ``False`` .
  weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of weight parameter.
  It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
- values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform'
- as constant 'One' and 'Zero' distributions are possible. Alias
-
- Initializer for more details. Default:
+ values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
+ distributions as well as constant ``'One'`` and ``'Zero'`` distributions are possible. Alias
+ ``'xavier_uniform'`` , ``'he_uniform'`` , ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and
+ lowercase are both acceptable. Refer to the values of Initializer for more details. Default: ``None`` ,
+ weight will be initialized using HeUniform.
  bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of bias parameter.
  Available initialization methods are the same as 'weight_init'. Refer to the values of
- Initializer for more details. Default:
- data_format (str): The optional value for data format. Currently only support 'NCDHW'.
+ Initializer for more details. Default: ``None`` , bias will be initialized using Uniform.
+ data_format (str): The optional value for data format. Currently only supports ``'NCDHW'`` .
+ Default: ``'NCDHW'`` .
+ dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
- Currently input data
+ Currently input data dtype only supports float16 and float32.

  Outputs:
  Tensor, the shape is :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`.

- pad_mode is 'same':
+ pad_mode is ``'same'`` :

  .. math::
  \begin{array}{ll} \\
@@ -806,7 +927,7 @@ class Conv3dTranspose(_Conv):
  \end{array}


- pad_mode is 'valid':
+ pad_mode is ``'valid'`` :

  .. math::
  \begin{array}{ll} \\
@@ -818,7 +939,7 @@ class Conv3dTranspose(_Conv):
  {\text{stride[2]}} + 1} \right \rfloor \\
  \end{array}

- pad_mode is 'pad':
+ pad_mode is ``'pad'`` :

  .. math::
  \begin{array}{ll} \\
@@ -846,6 +967,9 @@ class Conv3dTranspose(_Conv):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, nn
+ >>> import numpy as np
  >>> x = Tensor(np.ones([32, 16, 10, 32, 32]), mindspore.float32)
  >>> conv3d_transpose = nn.Conv3dTranspose(in_channels=16, out_channels=3, kernel_size=(4, 6, 2),
  ...                                       pad_mode='pad')
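Applying the transposed-convolution 'pad' formula per axis (L_out = (L_in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1, with the stride 1, padding 0 and dilation 1 defaults assumed here) to this example's inputs gives the expected output shape:

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    x = Tensor(np.ones([32, 16, 10, 32, 32]), mindspore.float32)
    net = nn.Conv3dTranspose(in_channels=16, out_channels=3, kernel_size=(4, 6, 2), pad_mode='pad')
    # D_out = (10-1)*1 + 3 + 1 = 13, H_out = (32-1)*1 + 5 + 1 = 37, W_out = (32-1)*1 + 1 + 1 = 33
    print(net(x).shape)  # (32, 3, 13, 37, 33)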
@@ -859,16 +983,21 @@ class Conv3dTranspose(_Conv):
  out_channels,
  kernel_size,
  stride=1,
- pad_mode=
+ pad_mode="same",
  padding=0,
  dilation=1,
  group=1,
  output_padding=0,
  has_bias=False,
- weight_init=
- bias_init=
- data_format='NCDHW'
+ weight_init=None,
+ bias_init=None,
+ data_format='NCDHW',
+ dtype=mstype.float32):
  """Initialize Conv3dTranspose."""
+ if not (in_channels % group == 0 and out_channels % group == 0):
+     raise ValueError("The argument 'group' should be divisible by 'in_channels' " \
+                      "and 'out_channels'")
+
  kernel_size = _check_3d_int_or_tuple("kernel_size", kernel_size, self.cls_name)
  stride = _check_3d_int_or_tuple("stride", stride, self.cls_name)
  dilation = _check_3d_int_or_tuple("dilation", dilation, self.cls_name)
@@ -890,7 +1019,8 @@ class Conv3dTranspose(_Conv):
  weight_init,
  bias_init,
  data_format,
- transposed=True
+ transposed=True,
+ dtype=dtype)
  self.conv3d_transpose = P.Conv3DTranspose(in_channel=self.in_channels,
  out_channel=self.out_channels,
  kernel_size=self.kernel_size,
@@ -956,47 +1086,52 @@ class Conv2dTranspose(_Conv):
  stride (Union[int, tuple[int]]): The movement stride of the 2D convolution kernel.
  The data type is an integer or a tuple of two integers. An integer represents the movement step size
  in both height and width directions. A tuple of two integers represents the movement step size in the height
- and width directions respectively. Default: 1.
- pad_mode (str): Specifies padding mode.
- "same", "valid"
-
- - same
-
-
-
-
-
-
- If this mode is set,
+ and width directions respectively. Default: ``1`` .
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
+
+ - ``"same"``: Pad the input around its edges so that the shape of input and output
+   are the same when `stride` is set to ``1``.
+   The amount of padding is calculated by the operator internally. If the amount is even, it is
+   uniformly distributed around the input; if it is odd, the excess amount goes to the right/bottom side.
+   If this mode is set, `padding` must be 0.
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+   possible height and width. Extra pixels that could not complete a full stride will
+   be discarded. If this mode is set, `padding` must be 0.
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+   in the height and width directions is determined by the `padding` parameter.
+   If this mode is set, `padding` must be greater than or equal to 0.

  padding (Union[int, tuple[int]]): The number of padding on the height and width directions of the input.
  The data type is an integer or a tuple of four integers. If `padding` is an integer,
  then the top, bottom, left, and right padding are all equal to `padding`.
  If `padding` is a tuple of 4 integers, then the top, bottom, left, and right padding
  is equal to `padding[0]`, `padding[1]`, `padding[2]`, and `padding[3]` respectively.
- The value should be greater than or equal to 0. Default: 0.
+ The value should be greater than or equal to 0. Default: ``0`` .
  output_padding (Union[int, tuple[int]]): The number of padding on the height and width directions of the output.
  The data type is an integer or a tuple of two integers. If `output_padding` is an integer,
  then the bottom and right padding are all equal to `output_padding`. If `output_padding` is a tuple of
  2 integers, then the bottom and right padding is equal to `output_padding[0]`, `output_padding[1]`
  respectively. If `output_padding` is not equal to 0, `pad_mode` must be `pad`.
- The value should be in range of `[0, max(stride, dilation))` . Default: 0.
+ The value should be in range of `[0, max(stride, dilation))` . Default: ``0`` .
  dilation (Union[int, tuple[int]]): Dilation size of 2D convolution kernel.
  The data type is an integer or a tuple of two integers. If :math:`k > 1`, the kernel is sampled
  every `k` elements. The value of `k` on the height and width directions is in range of [1, H]
- and [1, W] respectively. Default: 1.
+ and [1, W] respectively. Default: ``1`` .
  group (int): Splits filter into groups, `in_channels` and `out_channels` must be divisible by `group`.
- Default: 1.
- has_bias (bool): Whether the Conv2dTranspose layer has a bias parameter. Default: False.
+ Default: ``1`` .
+ has_bias (bool): Whether the Conv2dTranspose layer has a bias parameter. Default: ``False`` .
  weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of weight parameter.
  It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
- values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform'
- as constant 'One' and 'Zero' distributions are possible. Alias
-
- Initializer for more details. Default:
+ values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
+ distributions as well as constant ``'One'`` and ``'Zero'`` distributions are possible. Alias
+ ``'xavier_uniform'`` , ``'he_uniform'`` , ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and
+ lowercase are both acceptable. Refer to the values of Initializer for more details. Default: ``None`` ,
+ weight will be initialized using HeUniform.
  bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of bias parameter.
  Available initialization methods are the same as 'weight_init'. Refer to the values of
- Initializer for more details. Default:
+ Initializer for more details. Default: ``None`` , bias will be initialized using Uniform.
+ dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
@@ -1004,7 +1139,7 @@ class Conv2dTranspose(_Conv):
  Outputs:
  Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

- pad_mode is 'same'
+ pad_mode is ``'same'``:

  .. math::
  \begin{array}{ll} \\
@@ -1012,7 +1147,7 @@ class Conv2dTranspose(_Conv):
  W_{out} = \text W_{in}\times \text {stride[1]} \\
  \end{array}

- pad_mode is 'valid'
+ pad_mode is ``'valid'``:

  .. math::
  \begin{array}{ll} \\
@@ -1022,7 +1157,7 @@ class Conv2dTranspose(_Conv):
  (\text{kernel_size[1]} - 1) - \text {stride[1]}, 0 \} \\
  \end{array}

- pad_mode is 'pad'
+ pad_mode is ``'pad'``:

  .. math::
  \begin{array}{ll} \\
@@ -1047,6 +1182,9 @@ class Conv2dTranspose(_Conv):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, nn
+ >>> import numpy as np
  >>> net = nn.Conv2dTranspose(3, 64, 4, has_bias=False, weight_init='normal', pad_mode='pad')
  >>> x = Tensor(np.ones([1, 3, 16, 50]), mindspore.float32)
  >>> output = net(x).shape
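The `output_padding` argument documented above grows only the bottom/right of the output and requires pad_mode='pad'; a sketch with illustrative stride and output_padding values:

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    x = Tensor(np.ones([1, 3, 16, 50]), mindspore.float32)
    base = nn.Conv2dTranspose(3, 64, 4, stride=2, pad_mode='pad')
    padded = nn.Conv2dTranspose(3, 64, 4, stride=2, pad_mode='pad', output_padding=1)
    # output_padding must lie in [0, max(stride, dilation)); here max(2, 1) = 2
    print(base(x).shape, padded(x).shape)  # the second is one pixel taller and wider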
@@ -1065,8 +1203,9 @@ class Conv2dTranspose(_Conv):
  dilation=1,
  group=1,
  has_bias=False,
- weight_init=
- bias_init=
+ weight_init=None,
+ bias_init=None,
+ dtype=mstype.float32):
  """Initialize Conv2dTranspose."""
  kernel_size = twice(kernel_size)
  stride = twice(stride)
@@ -1092,7 +1231,8 @@ class Conv2dTranspose(_Conv):
  has_bias,
  weight_init,
  bias_init,
- transposed=True
+ transposed=True,
+ dtype=dtype)

  self.in_channels = in_channels
  self.out_channels = out_channels
@@ -1102,8 +1242,6 @@ class Conv2dTranspose(_Conv):
  self.is_same = self.pad_mode == 'same'
  self.is_pad = self.pad_mode == 'pad'
  self.output_padding = output_padding
- if Validator.check_bool(has_bias, "has_bias", self.cls_name):
-     self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')

  # cause Conv2DTranspose's out_channel refers to Conv2D's out_channel.
  self.conv2d_transpose = P.Conv2DTranspose(out_channel=in_channels,
@@ -1130,10 +1268,9 @@ class Conv2dTranspose(_Conv):
  self.stride[0], self.dilation[0], self.padding_top + self.padding_bottom)
  w_out = _deconv_output_length(self.is_valid, self.is_same, self.is_pad, w, self.kernel_size[1],
  self.stride[1], self.dilation[1], self.padding_left + self.padding_right)
- if self.has_bias:
-     return self.bias_add(self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out)),
-                          self.bias)
  conv2d_trans_ret = self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out))
+ if self.has_bias:
+     conv2d_trans_ret = self.bias_add(conv2d_trans_ret, self.bias)
  if isinstance(self.output_padding, tuple):
  if self.output_padding[0] < 0 or self.output_padding[0] >= max(self.dilation[0], self.stride[0]):
  raise ValueError("output_padding[0] must be in range of [0, max(stride_h, dilation_h)).")
@@ -1164,7 +1301,7 @@ class Conv1dTranspose(_Conv):
  also called deconvolution (although it is not an actual deconvolution).

  The input is typically of shape :math:`(N, C_{in}, L_{in})`, where :math:`N` is batch size,
- :math:`
+ :math:`C_{in}` is a number of channels
  and :math:`L_{in}` is a length of sequence.

  When Conv1d and ConvTranspose1d are initialized with the same parameters, and `pad_mode` is set to 'pad',
@@ -1177,35 +1314,40 @@ class Conv1dTranspose(_Conv):
  in_channels (int): The channel number of the input tensor of the Conv1dTranspose layer.
  out_channels (int): The channel number of the output tensor of the Conv1dTranspose layer.
  kernel_size (int): Specifies the width of the 1D convolution kernel.
- stride (int): The movement stride of the 1D convolution kernel. Default: 1.
- pad_mode (str): Specifies padding mode.
- "same", "valid"
-
- - same
-
-
-
-
-
-
- If this mode is set,
+ stride (int): The movement stride of the 1D convolution kernel. Default: ``1`` .
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
+
+ - ``"same"``: Pad the input at the begin and end so that the shape of input and output
+   are the same when `stride` is set to ``1``.
+   The amount of padding is calculated by the operator internally. If the amount is even, it is
+   uniformly distributed around the input; if it is odd, the excess padding goes to the right side.
+   If this mode is set, `padding` must be 0.
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+   possible length. Extra pixels that could not complete a full stride will
+   be discarded. If this mode is set, `padding` must be 0.
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+   at the begin and end is determined by the `padding` parameter.
+   If this mode is set, `padding` must be greater than or equal to 0.

  padding (int): The number of padding on both sides of input.
- The value should be greater than or equal to 0. Default: 0.
+ The value should be greater than or equal to 0. Default: ``0`` .
  dilation (int): Dilation size of 1D convolution kernel. If :math:`k > 1`, the kernel is sampled
- every `k` elements. The value of `k` is in range of [1, L]. Default: 1.
+ every `k` elements. The value of `k` is in range of [1, L]. Default: ``1`` .
  group (int): Splits filter into groups, `in_channels` and `out_channels` must be
- divisible by `group`. When `group` > 1, the Ascend platform is not supported yet. Default: 1.
- has_bias (bool): Whether the Conv1dTranspose layer has a bias parameter. Default: False
+ divisible by `group`. When `group` > 1, the Ascend platform is not supported yet. Default: ``1`` .
+ has_bias (bool): Whether the Conv1dTranspose layer has a bias parameter. Default: ``False``.
  weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of weight parameter.
  It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
- values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform'
- as constant 'One' and 'Zero' distributions are possible. Alias
- and 'zeros' are acceptable. Uppercase and lowercase
- Initializer for more details. Default:
+ values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
+ distributions as well as constant ``'One'`` and ``'Zero'`` distributions are possible. Alias
+ ``'xavier_uniform'`` , ``'he_uniform'``, ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and lowercase
+ are both acceptable. Refer to the values of Initializer for more details. Default: ``None`` ,
+ weight will be initialized using HeUniform.
  bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of bias parameter.
  Available initialization methods are the same as 'weight_init'. Refer to the values of
- Initializer for more details. Default:
+ Initializer for more details. Default: ``None`` , bias will be initialized using Uniform.
+ dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})`.
@@ -1213,22 +1355,14 @@ class Conv1dTranspose(_Conv):
  Outputs:
  Tensor of shape :math:`(N, C_{out}, L_{out})`.

- pad_mode is 'same':
-
- .. math::
-     L_{out} = \left \lfloor{\frac{L_{in}}{\text{stride}} + 1} \right \rfloor
-
- pad_mode is 'valid':
-
- .. math::
-     L_{out} = \left \lfloor{\frac{L_{in} - \text{dilation} \times (\text{kernel_size} - 1) }
-     {\text{stride}} + 1} \right \rfloor
+ pad_mode is ``'same'``: :math:`L_{out} = \frac{ L_{in} + \text{stride} - 1 }{ \text{stride} }`

- pad_mode is '
+ pad_mode is ``'valid'``:
+ :math:`L_{out} = (L_{in} - 1) \times \text{stride} + \text{dilation} \times (\text{kernel_size} - 1) + 1`

-
-
-
+ pad_mode is ``'pad'``:
+ :math:`L_{out} = (L_{in} - 1) \times \text{stride} - 2 \times \text{padding}
+ + \text{dilation} \times (\text{kernel_size} - 1) + 1`

  Raises:
  TypeError: If `in_channels`, `out_channels`, `kernel_size`, `stride`, `padding` or `dilation` is not an int.
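A worked check of the 'pad' formula above against the example that follows (stride 2 is an illustrative override; padding 0 and dilation 1 are the defaults):

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    net = nn.Conv1dTranspose(3, 64, 4, stride=2, pad_mode='pad')
    x = Tensor(np.ones([1, 3, 50]), mindspore.float32)
    # L_out = (50 - 1) * 2 - 2 * 0 + 1 * (4 - 1) + 1 = 102
    print(net(x).shape)  # (1, 64, 102)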
@@ -1240,6 +1374,9 @@ class Conv1dTranspose(_Conv):
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, nn
+ >>> import numpy as np
  >>> net = nn.Conv1dTranspose(3, 64, 4, has_bias=False, weight_init='normal', pad_mode='pad')
  >>> x = Tensor(np.ones([1, 3, 50]), mindspore.float32)
  >>> output = net(x).shape
@@ -1257,8 +1394,9 @@ class Conv1dTranspose(_Conv):
  dilation=1,
  group=1,
  has_bias=False,
- weight_init=
- bias_init=
+ weight_init=None,
+ bias_init=None,
+ dtype=mstype.float32):
  """Initialize Conv1dTranspose."""
  Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
  Validator.check_value_type("stride", stride, [int], self.cls_name)
@@ -1295,7 +1433,8 @@ class Conv1dTranspose(_Conv):
  has_bias,
  weight_init,
  bias_init,
- transposed=True
+ transposed=True,
+ dtype=dtype)
  self.padding = (0, 0, padding, padding)
  self.in_channels = in_channels
  self.out_channels = out_channels
@@ -1304,8 +1443,6 @@ class Conv1dTranspose(_Conv):
  self.is_valid = self.pad_mode == 'valid'
  self.is_same = self.pad_mode == 'same'
  self.is_pad = self.pad_mode == 'pad'
- if Validator.check_bool(has_bias, "has_bias", self.cls_name):
-     self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')

  # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
  self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
|